<!-- HTML header for doxygen 1.8.15 -->
<!-- Remember to use doxygen version 1.8.15+ -->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.15"/>
<meta name="robots" content="NOINDEX, NOFOLLOW" /> <!-- Prevent indexing by search engines -->
<title>Compute Library: NELSTMLayer Class Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(initResizable);
/* @license-end */</script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script><script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="stylesheet.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<img alt="Compute Library" src="https://raw.githubusercontent.com/ARM-software/ComputeLibrary/gh-pages/ACL_logo.png" style="max-width: 100%;margin-top: 15px;margin-left: 10px"/>
<td style="padding-left: 0.5em;">
<div id="projectname">
&#160;<span id="projectnumber">20.02.1</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.15 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml','');});
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="#pub-static-methods">Static Public Member Functions</a> </div>
<div class="headertitle">
<div class="title">NELSTMLayer Class Reference</div> </div>
</div><!--header-->
<div class="contents">
<p>Basic function to run an LSTM layer on NEON.
 <a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#details">More...</a></p>
<p><code>#include &lt;<a class="el" href="_n_e_l_s_t_m_layer_8h_source.xhtml">NELSTMLayer.h</a>&gt;</code></p>
<div class="dynheader">
Collaboration diagram for NELSTMLayer:</div>
<div class="dyncontent">
<div class="center"><iframe scrolling="no" frameborder="0" src="classarm__compute_1_1_n_e_l_s_t_m_layer__coll__graph.svg" width="120" height="112"><p><b>This browser is not able to show SVG: try Firefox, Chrome, Safari, or Opera instead.</b></p></iframe>
</div>
<center><span class="legend">[<a target="top" href="graph_legend.xhtml">legend</a>]</span></center></div>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a8d8d5b5c66b732b3fc9494b0e743ed3f"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#a8d8d5b5c66b732b3fc9494b0e743ed3f">NELSTMLayer</a> (std::shared_ptr&lt; <a class="el" href="classarm__compute_1_1_i_memory_manager.xhtml">IMemoryManager</a> &gt; memory_manager=nullptr)</td></tr>
<tr class="memdesc:a8d8d5b5c66b732b3fc9494b0e743ed3f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Default constructor. <a href="#a8d8d5b5c66b732b3fc9494b0e743ed3f">More...</a><br /></td></tr>
<tr class="separator:a8d8d5b5c66b732b3fc9494b0e743ed3f"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa899feaf94d69eb04afb0cd412869548"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa899feaf94d69eb04afb0cd412869548">configure</a> (const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *input, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *input_to_forget_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *input_to_cell_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *input_to_output_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *recurrent_to_forget_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *recurrent_to_cell_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *recurrent_to_output_weights, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *forget_gate_bias, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_bias, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_gate_bias, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_state_in, const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_state_in, <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *scratch_buffer, <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_state_out, <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *cell_state_out, <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output, const <a class="el" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams</a>&lt; <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> &gt; &amp;lstm_params, const <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;activation_info, float cell_threshold=0.f, float projection_threshold=0.f)</td></tr>
<tr class="memdesc:aa899feaf94d69eb04afb0cd412869548"><td class="mdescLeft">&#160;</td><td class="mdescRight">Initialize function's tensors. <a href="#aa899feaf94d69eb04afb0cd412869548">More...</a><br /></td></tr>
<tr class="separator:aa899feaf94d69eb04afb0cd412869548"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:ad1717410afd0be936c6213a63c8005fb"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a> () override</td></tr>
<tr class="memdesc:ad1717410afd0be936c6213a63c8005fb"><td class="mdescLeft">&#160;</td><td class="mdescRight">Run the kernels contained in the function. <a href="#ad1717410afd0be936c6213a63c8005fb">More...</a><br /></td></tr>
<tr class="separator:ad1717410afd0be936c6213a63c8005fb"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:aa9b93ef660fc3c5b4b19d3fc7b891b77"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77">prepare</a> () override</td></tr>
<tr class="memdesc:aa9b93ef660fc3c5b4b19d3fc7b891b77"><td class="mdescLeft">&#160;</td><td class="mdescRight">Prepare the function for executing. <a href="#aa9b93ef660fc3c5b4b19d3fc7b891b77">More...</a><br /></td></tr>
<tr class="separator:aa9b93ef660fc3c5b4b19d3fc7b891b77"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="inherit_header pub_methods_classarm__compute_1_1_i_function"><td colspan="2" onclick="javascript:toggleInherit('pub_methods_classarm__compute_1_1_i_function')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="classarm__compute_1_1_i_function.xhtml">IFunction</a></td></tr>
<tr class="memitem:ab921ecc3f3f6ae2b4bd61f3e1998d8c4 inherit pub_methods_classarm__compute_1_1_i_function"><td class="memItemLeft" align="right" valign="top">virtual&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_i_function.xhtml#ab921ecc3f3f6ae2b4bd61f3e1998d8c4">~IFunction</a> ()=default</td></tr>
<tr class="memdesc:ab921ecc3f3f6ae2b4bd61f3e1998d8c4 inherit pub_methods_classarm__compute_1_1_i_function"><td class="mdescLeft">&#160;</td><td class="mdescRight">Destructor. <a href="classarm__compute_1_1_i_function.xhtml#ab921ecc3f3f6ae2b4bd61f3e1998d8c4">More...</a><br /></td></tr>
<tr class="separator:ab921ecc3f3f6ae2b4bd61f3e1998d8c4 inherit pub_methods_classarm__compute_1_1_i_function"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-static-methods"></a>
Static Public Member Functions</h2></td></tr>
<tr class="memitem:aa05bceba37ded272a464a90becd9cd99"><td class="memItemLeft" align="right" valign="top">static <a class="el" href="classarm__compute_1_1_status.xhtml">Status</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa05bceba37ded272a464a90becd9cd99">validate</a> (const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *input, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *input_to_forget_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *input_to_cell_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *input_to_output_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *recurrent_to_forget_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *recurrent_to_cell_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *recurrent_to_output_weights, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *forget_gate_bias, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_bias, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output_gate_bias, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output_state_in, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_state_in, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *scratch_buffer, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output_state_out, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_state_out, const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *output, const <a class="el" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams</a>&lt; <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> &gt; &amp;lstm_params, const <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;activation_info, float cell_threshold=0.f, float projection_threshold=0.f)</td></tr>
<tr class="memdesc:aa05bceba37ded272a464a90becd9cd99"><td class="mdescLeft">&#160;</td><td class="mdescRight">Static function to check if given info will lead to a valid configuration of <a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a>. <a href="#aa05bceba37ded272a464a90becd9cd99">More...</a><br /></td></tr>
<tr class="separator:aa05bceba37ded272a464a90becd9cd99"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><p>Basic function to run <a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a>. </p>
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8h_source.xhtml#l00047">47</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8h_source.xhtml">NELSTMLayer.h</a>.</p>
</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
<a id="a8d8d5b5c66b732b3fc9494b0e743ed3f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a8d8d5b5c66b732b3fc9494b0e743ed3f">&#9670;&nbsp;</a></span>NELSTMLayer()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a> </td>
<td>(</td>
<td class="paramtype">std::shared_ptr&lt; <a class="el" href="classarm__compute_1_1_i_memory_manager.xhtml">IMemoryManager</a> &gt;&#160;</td>
<td class="paramname"><em>memory_manager</em> = <code>nullptr</code></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p>Default constructor. </p>
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00040">40</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160; : _memory_group(std::move(memory_manager)), _fully_connected_input_gate(), _accum_input_gate1(), _subtract_input_gate(), _pixelwise_mul_input_gate(), _activation_input_gate(),</div><div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160; _fully_connected_forget_gate(), _accum_forget_gate1(), _pixelwise_mul_forget_gate(), _activation_forget_gate(), _fully_connected_cell_state(), _gemm_cell_state1(), _transpose_cell_state(),</div><div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160; _accum_cell_state1(), _accum_cell_state2(), _pixelwise_mul_cell_state1(), _activation_cell_state(), _cell_clip(), _pixelwise_mul_cell_state2(), _fully_connected_output(),</div><div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160; _pixelwise_mul_output_state1(), _accum_output1(), _activation_output(), _activation_output_state(), _pixelwise_mul_output_state2(), _fully_connected_output_state(), _projection_clip(),</div><div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160; _copy_cell_state(), _copy_output(), _concat_scratch_buffer(), _concat_inputs_forget_gate(), _concat_weights_forget_gate(), _concat_weights_input_gate(), _concat_weights_output(),</div><div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160; _mean_std_norm_input_gate(), _pixelwise_mul_input_gate_coeff(), _accum_input_gate_bias(), _mean_std_norm_forget_gate(), _pixelwise_mul_forget_gate_coeff(), _accum_forget_gate_bias(),</div><div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160; _mean_std_norm_cell_gate(), _pixelwise_mul_cell_gate_coeff(), _accum_cell_gate_bias(), _mean_std_norm_output_gate(), _pixelwise_mul_output_gate_coeff(), _accum_output_gate_bias(), _input_gate_out1(),</div><div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; _input_gate_out2(), _input_gate_out3(), _input_gate_out4(), _forget_gate_out1(), _forget_gate_out2(), _forget_gate_out3(), _forget_gate_out4(), _forget_gate_out5(), _forget_gate_out6(),</div><div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160; _cell_state_out1(), _cell_state_out2(), _cell_state_out3(), _cell_state_out4(), _cell_state_out5(), _output1(), _output2(), _output3(), _output4(), _cell_state_activation(), _output_state1(), _ones(),</div><div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160; _input_layer_norm_out1(), _input_layer_norm_out2(), _forget_layer_norm_out1(), _forget_layer_norm_out2(), _cell_layer_norm_out1(), _cell_layer_norm_out2(), _output_layer_norm_out1(),</div><div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; _output_layer_norm_out2(), _run_peephole_opt(<span class="keyword">false</span>), _run_cifg_opt(<span class="keyword">false</span>), _perform_cell_clipping(<span class="keyword">false</span>), _has_projection_weights(<span class="keyword">false</span>), _perform_projection_clipping(<span class="keyword">false</span>), _is_prepared(<span class="keyword">false</span>),</div><div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160; _is_layer_norm_lstm(<span class="keyword">false</span>)</div><div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;{</div><div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160;}</div></div><!-- fragment -->
</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="aa899feaf94d69eb04afb0cd412869548"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa899feaf94d69eb04afb0cd412869548">&#9670;&nbsp;</a></span>configure()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">void configure </td>
<td>(</td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>input</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>input_to_forget_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>input_to_cell_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>input_to_output_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_forget_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_cell_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_output_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>forget_gate_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>cell_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>output_gate_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>output_state_in</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>cell_state_in</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>scratch_buffer</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>output_state_out</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>cell_state_out</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *&#160;</td>
<td class="paramname"><em>output</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams</a>&lt; <a class="el" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> &gt; &amp;&#160;</td>
<td class="paramname"><em>lstm_params</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;&#160;</td>
<td class="paramname"><em>activation_info</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">float&#160;</td>
<td class="paramname"><em>cell_threshold</em> = <code>0.f</code>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">float&#160;</td>
<td class="paramname"><em>projection_threshold</em> = <code>0.f</code>&#160;</td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td></td>
</tr>
</table>
</div><div class="memdoc">
<p>Initialize the function's tensors. </p>
<dl class="params"><dt>Parameters</dt><dd>
<table class="params">
<tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: F16/F32. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_forget_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_cell_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_output_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_forget_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_cell_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_output_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">forget_gate_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output_gate_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output_state_in</td><td>2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_state_in</td><td>2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[out]</td><td class="paramname">scratch_buffer</td><td>2D tensor with dimensions [num_units * 4, batch_size] with CIFG or [num_units * 3, batch_size] without CIGF. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[out]</td><td class="paramname">output_state_out</td><td>2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[out]</td><td class="paramname">cell_state_out</td><td>2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[out]</td><td class="paramname">output</td><td>Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data types supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">lstm_params</td><td>(Optional) Weights tensors used in peephole optimization: input_to_input_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. recurrent_to_input_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. cell_to_input_weights 1D weights tensor with dimensions [num_units]. Can be nullptr. Data type supported: Same as <code>input</code>. cell_to_forget_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. cell_to_output_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. input_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code> projection_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. projection_bias 1D weights tensor with dimensions [output_size]. Data type supported: Same as <code>input</code>. input_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. forget_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. cell_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. output_layer_norm_coefficients 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">activation_info</td><td>Contains activation information described in <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_threshold</td><td>The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">projection_threshold</td><td>The clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled. </td></tr>
</table>
</dd>
</dl>
<p>lstm_res = PixelwiseMul(output, Activation(cell_state)) </p>
<pre class="fragment">output_state = Clip(lstm_res * projection_weights + projection_bias, projection_threshold) , if there is a projection
output_state = lstm_res                                                                    , otherwise
</pre>
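<p>For illustration only (a hedged sketch, not library documentation): the optional projection tensors are attached to the <code>LSTMParams</code> argument, while <code>cell_threshold</code> and <code>projection_threshold</code> are passed as the last two arguments of <code>configure()</code>. All names below are placeholders.</p>
<pre class="fragment">using namespace arm_compute;

void setup_projection_and_clipping()
{
    // Placeholder tensors for the optional projection stage
    // (initialised and allocated like any other input tensor, omitted here).
    Tensor projection_weights; // 2D, [output_size, num_units]
    Tensor projection_bias;    // 1D, [output_size]

    LSTMParams&lt;ITensor&gt; lstm_params;
    lstm_params.set_projection_params(&amp;projection_weights, &amp;projection_bias);

    // Non-zero thresholds enable clipping; 0.f (the default) disables it.
    const float cell_threshold       = 1.f;  // cell state clipped to [-1, 1]
    const float projection_threshold = 0.5f; // projected output clipped to [-0.5, 0.5]

    // These are then forwarded to configure():
    //   lstm.configure(/* mandatory tensors as documented above */,
    //                  lstm_params, activation_info, cell_threshold, projection_threshold);
}
</pre>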
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00056">56</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160;{</div><div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; <a class="code" href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>,</div><div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>,</div><div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>,</div><div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, cell_bias, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>,</div><div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; output_state_in, cell_state_in,</div><div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; scratch_buffer, output_state_out, cell_state_out, output);</div><div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;</div><div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; _is_layer_norm_lstm = lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>();</div><div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160;</div><div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160; <span class="comment">// Set lstm parameters</span></div><div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; <a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams&lt;ITensorInfo&gt;</a> lstm_params_info;</div><div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; {</div><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; lstm_params_info.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a28c80440058d5c9b0bc1e1a4622c734a">set_peephole_params</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">cell_to_forget_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), 
lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">cell_to_output_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>());</div><div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; }</div><div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a127009377712009a84cd0c48aa7e1edd">has_projection</a>())</div><div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; {</div><div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; lstm_params_info.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aea777d30779bab2d14630ea7e8516615">set_projection_params</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#ab1b3d5364f11bca8cacef026c8038dba">projection_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(),</div><div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#ad676992a90d193409fa6a28a001af6c8">projection_bias</a>() != <span class="keyword">nullptr</span> ? lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#ad676992a90d193409fa6a28a001af6c8">projection_bias</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>() : <span class="keyword">nullptr</span>);</div><div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; }</div><div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; <span class="keywordflow">if</span>(!lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; {</div><div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *cell_to_input_weights_info = (lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>()) ? 
lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">cell_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>() : <span class="keyword">nullptr</span>;</div><div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; lstm_params_info.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#adac8095c0cd29d443206dfcaf67f3607">set_cifg_params</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">recurrent_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(),</div><div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; cell_to_input_weights_info, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>());</div><div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; }</div><div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160;</div><div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; <span class="comment">// Validate</span></div><div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; <a class="code" href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a>(<a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa05bceba37ded272a464a90becd9cd99">NELSTMLayer::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info(), <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>-&gt;info(),</div><div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>-&gt;info(), <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>-&gt;info(),</div><div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>-&gt;info(), <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>-&gt;info(), <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>-&gt;info(),</div><div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>-&gt;info(), cell_bias-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), <a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>-&gt;info(),</div><div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; output_state_in-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), cell_state_in-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(),</div><div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; scratch_buffer-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), output_state_out-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), cell_state_out-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(), output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>(),</div><div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; lstm_params_info, activation_info, cell_threshold, projection_threshold));</div><div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160;</div><div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> cell_state_shape = cell_state_in-&gt;<a class="code" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">tensor_shape</a>();</div><div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160;</div><div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; <span class="comment">// Configure block that calculates the forget gate</span></div><div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; <span class="comment">// forget_gate = Activation(input * input_to_forget_weights + output_state_in * recurrent_to_forget_weights + PixelWiseMul(cell_state, cell_to_forget_weights) + forget_gate_bias)</span></div><div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; <span class="comment">// We optimize this as follows:</span></div><div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; <span class="comment">// forget_gate = Activation( (input,output_state_in) * (input_to_forget_weights,recurrent_to_forget_weights) + PixelWiseMul(cell_state, cell_to_forget_weights) + forget_gate_bias)</span></div><div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; _forget_gate_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; _forget_gate_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" 
href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; _forget_gate_out5.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160;</div><div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; std::vector&lt;const ITensor *&gt; inputs_vector;</div><div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160; inputs_vector.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>);</div><div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; inputs_vector.emplace_back(output_state_in);</div><div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160;</div><div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_gate_out2);</div><div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; _concat_inputs_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">configure</a>(inputs_vector, &amp;_forget_gate_out2, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>);</div><div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;</div><div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; std::vector&lt;const ITensor *&gt; weights_vector;</div><div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160;</div><div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; weights_vector.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>);</div><div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; weights_vector.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>);</div><div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160;</div><div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; _concat_weights_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">configure</a>(weights_vector, &amp;_forget_gate_out6, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>);</div><div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160;</div><div class="line"><a 
name="l00124"></a><span class="lineno"> 124</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_gate_out5);</div><div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; _fully_connected_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a91fb7694ae938cfec69ff6474451de49">configure</a>(&amp;_forget_gate_out2, &amp;_forget_gate_out6, (_is_layer_norm_lstm) ? <span class="keyword">nullptr</span> : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, &amp;_forget_gate_out5);</div><div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_gate_out1);</div><div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_gate_out3);</div><div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; _forget_gate_out6.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160;</div><div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> *forget_gate_out = &amp;_forget_gate_out5;</div><div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; {</div><div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; _forget_gate_out4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160;</div><div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; _run_peephole_opt = <span class="keyword">true</span>;</div><div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_gate_out4);</div><div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; _pixelwise_mul_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(cell_state_in, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">cell_to_forget_weights</a>(), 
&amp;_forget_gate_out4, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; _accum_forget_gate1.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a17b1a968874510a32f0bdcc78e0cb360">configure</a>(&amp;_forget_gate_out5, &amp;_forget_gate_out4, &amp;_forget_gate_out3, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; _forget_gate_out4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; _forget_gate_out5.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; forget_gate_out = &amp;_forget_gate_out3;</div><div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; }</div><div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; {</div><div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; _forget_gate_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; }</div><div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; {</div><div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; _forget_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; _forget_layer_norm_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_layer_norm_out1);</div><div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_forget_layer_norm_out2);</div><div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; _mean_std_norm_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#ac5f95670c8b3e74458ce27030ea3d41e">configure</a>(forget_gate_out);</div><div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; _pixelwise_mul_forget_gate_coeff.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(forget_gate_out, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>(), &amp;_forget_layer_norm_out1, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; <span class="comment">// forget_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before</span></div><div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; forget_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; _accum_forget_gate_bias.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_forget_layer_norm_out1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, &amp;_forget_layer_norm_out2, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; _forget_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; forget_gate_out = &amp;_forget_layer_norm_out2;</div><div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; }</div><div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; _activation_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(forget_gate_out, <span 
class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>));</div><div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160;</div><div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; <span class="comment">// Configure block that calculates the input gate</span></div><div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; <span class="comment">// input_gate = Activation(input * input_to_input_weights + output_state * recurrent_to_input_weights + PixelWiseMul(cell_state, cell_to_input_weights) + input_gate_bias), without CIFG</span></div><div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; <span class="comment">// input_gate = 1 - forget_gate, with CIFG</span></div><div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <span class="comment">// We optimize this as follows:</span></div><div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; <span class="comment">// input_gate = Activation((input,output_state) * (input_to_input_weights,recurrent_to_input_weights) + PixelWiseMul(cell_state, cell_to_input_weights) + input_gate_bias), without CIFG</span></div><div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; _input_gate_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> *input_gate_out = &amp;_input_gate_out1;</div><div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; {</div><div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_gate_out1);</div><div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; _subtract_input_gate.<a class="code" 
href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_ones, forget_gate_out, &amp;_input_gate_out1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; _run_cifg_opt = <span class="keyword">true</span>;</div><div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; }</div><div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; {</div><div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; _input_gate_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; _input_gate_out4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160;</div><div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; std::vector&lt;const ITensor *&gt; lstm_weights;</div><div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; lstm_weights.emplace_back(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>());</div><div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; lstm_weights.emplace_back(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">recurrent_to_input_weights</a>());</div><div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160;</div><div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; _concat_weights_input_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">configure</a>(lstm_weights, &amp;_input_gate_out2, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>);</div><div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160;</div><div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; 
_memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_gate_out1);</div><div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_gate_out4);</div><div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160;</div><div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; _fully_connected_input_gate.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a91fb7694ae938cfec69ff6474451de49">configure</a>(&amp;_forget_gate_out2, &amp;_input_gate_out2, (_is_layer_norm_lstm) ? <span class="keyword">nullptr</span> : lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>(), &amp;_input_gate_out3);</div><div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; _input_gate_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; input_gate_out = &amp;_input_gate_out3;</div><div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160;</div><div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <span class="keywordflow">if</span>(_run_peephole_opt)</div><div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; {</div><div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_gate_out4);</div><div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; _pixelwise_mul_input_gate.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(cell_state_in, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">cell_to_input_weights</a>(), &amp;_input_gate_out4, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; _accum_input_gate1.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a17b1a968874510a32f0bdcc78e0cb360">configure</a>(&amp;_input_gate_out3, &amp;_input_gate_out4, &amp;_input_gate_out1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; _input_gate_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; _input_gate_out4.<a 
class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; input_gate_out = &amp;_input_gate_out1;</div><div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; }</div><div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; {</div><div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; _input_gate_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; }</div><div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160;</div><div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; {</div><div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; _input_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; _input_layer_norm_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_layer_norm_out1);</div><div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_input_layer_norm_out2);</div><div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; _mean_std_norm_input_gate.<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#ac5f95670c8b3e74458ce27030ea3d41e">configure</a>(input_gate_out);</div><div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; _pixelwise_mul_input_gate_coeff.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(input_gate_out, lstm_params.<a class="code" 
href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>(), &amp;_input_layer_norm_out1, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; <span class="comment">// input_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before</span></div><div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; input_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; _accum_input_gate_bias.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_input_layer_norm_out1, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>(), &amp;_input_layer_norm_out2, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; _input_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; input_gate_out = &amp;_input_layer_norm_out2;</div><div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; }</div><div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160; _activation_input_gate.<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(input_gate_out, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>));</div><div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; }</div><div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160;</div><div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; <span class="comment">// Configure block that calculates the cell state</span></div><div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; <span class="comment">// cell_state = Clip((PixelwiseMul(input_gate, Activation(input * input_to_cell_weights + output_state_in * recurrent_to_cell_weights + cell_bias)) + PixelwiseMul(forget_gate, cell_state)), cell_threshold)</span></div><div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> cell_state1_shape = <a class="code" 
href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a69cb11b5b37f94a6bea9eaad9d13cccf">compute_transposed_shape</a>(*<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>-&gt;info());</div><div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160; _cell_state_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; _cell_state_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state1_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160; _cell_state_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160; _cell_state_out4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160; _cell_state_out5.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160;</div><div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_state_out1);</div><div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; _fully_connected_cell_state.<a 
class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a91fb7694ae938cfec69ff6474451de49">configure</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, (_is_layer_norm_lstm) ? <span class="keyword">nullptr</span> : cell_bias, &amp;_cell_state_out1);</div><div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_state_out2);</div><div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160; _transpose_cell_state.<a class="code" href="classarm__compute_1_1_n_e_transpose_kernel.xhtml#a83a344e60eb7db895953a942abf16628">configure</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, &amp;_cell_state_out2);</div><div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_state_out3);</div><div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160; _gemm_cell_state1.<a class="code" href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#a385241dcc5062af6ecac8bdafe01bb2a">configure</a>(output_state_in, &amp;_cell_state_out2, <span class="keyword">nullptr</span>, &amp;_cell_state_out3, 1.f, 0.f);</div><div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160; _cell_state_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_state_out4);</div><div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160; _accum_cell_state1.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_cell_state_out1, &amp;_cell_state_out3, &amp;_cell_state_out4, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> *cell_state_out_ptr = &amp;_cell_state_out4;</div><div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160; {</div><div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160; _cell_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160; _cell_layer_norm_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_layer_norm_out1);</div><div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_layer_norm_out2);</div><div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; _mean_std_norm_cell_gate.<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#ac5f95670c8b3e74458ce27030ea3d41e">configure</a>(cell_state_out_ptr);</div><div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160; _pixelwise_mul_cell_gate_coeff.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(cell_state_out_ptr, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>(), &amp;_cell_layer_norm_out1, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160; <span class="comment">// cell_state_out_ptr is going to be reassigned, so allocate the tensor that it was assigned to before</span></div><div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160; cell_state_out_ptr-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160; _accum_cell_gate_bias.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_cell_layer_norm_out1, cell_bias, &amp;_cell_layer_norm_out2, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; _cell_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a 
name="l00258"></a><span class="lineno"> 258</span>&#160; cell_state_out_ptr = &amp;_cell_layer_norm_out2;</div><div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; }</div><div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; _activation_cell_state.<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(cell_state_out_ptr, <span class="keyword">nullptr</span>, activation_info);</div><div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_cell_state_out5);</div><div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160; _pixelwise_mul_cell_state1.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(cell_state_out_ptr, input_gate_out, &amp;_cell_state_out5, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160; cell_state_out_ptr-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160; _pixelwise_mul_cell_state2.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(forget_gate_out, cell_state_in, &amp;_cell_state_out3, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160; _accum_cell_state2.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_cell_state_out5, &amp;_cell_state_out3, &amp;_cell_state_out1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160; _cell_state_out3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160; _cell_state_out5.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160; <span class="comment">// Perform clipping</span></div><div class="line"><a 
name="l00269"></a><span class="lineno"> 269</span>&#160; <span class="keywordflow">if</span>(cell_threshold != 0.f)</div><div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160; {</div><div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160; _perform_cell_clipping = <span class="keyword">true</span>;</div><div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160; _cell_clip.<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(&amp;_cell_state_out1, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU</a>, -cell_threshold, cell_threshold));</div><div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160; }</div><div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160;</div><div class="line"><a name="l00275"></a><span class="lineno"> 275</span>&#160; <span class="comment">// Configure block that calculates the output</span></div><div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160; <span class="comment">// output_state_out = Activation(input * input_to_output_weights + output_state_in * recurrent_to_output_weights + PixelWiseMul(cell_state, cell_to_output_weights) + output_gate_bias)</span></div><div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160; <span class="comment">// We optimize this as follows:</span></div><div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160; <span class="comment">// output_state_out = Activation( (input,output_state_in) * (input_to_output_weights, recurrent_to_output_weights) + PixelWiseMul(cell_state, cell_to_output_weights) + output_gate_bias)</span></div><div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160; _output1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160; _output4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160;</div><div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160; std::vector&lt;const ITensor *&gt; in_out_weights;</div><div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160; in_out_weights.emplace_back(<a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>);</div><div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160; in_out_weights.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>);</div><div class="line"><a name="l00285"></a><span class="lineno"> 285</span>&#160;</div><div class="line"><a name="l00286"></a><span class="lineno"> 286</span>&#160; _concat_weights_output.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">configure</a>(in_out_weights, &amp;_output2, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>);</div><div class="line"><a name="l00287"></a><span class="lineno"> 287</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_output1);</div><div class="line"><a name="l00288"></a><span class="lineno"> 288</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_output4);</div><div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160;</div><div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160; _fully_connected_output.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a91fb7694ae938cfec69ff6474451de49">configure</a>(&amp;_forget_gate_out2, &amp;_output2, (_is_layer_norm_lstm) ? <span class="keyword">nullptr</span> : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>, &amp;_output4);</div><div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160;</div><div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160; _output2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160; _forget_gate_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160;</div><div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160; <a class="code" href="classarm__compute_1_1_tensor.xhtml">Tensor</a> *output_gate_out = &amp;_output4;</div><div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160; {</div><div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; _output3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" 
href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(_cell_state_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">tensor_shape</a>(), 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160;</div><div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_output3);</div><div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160; _pixelwise_mul_output_state1.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(&amp;_cell_state_out1, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">cell_to_output_weights</a>(), &amp;_output3, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160; _accum_output1.<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a17b1a968874510a32f0bdcc78e0cb360">configure</a>(&amp;_output4, &amp;_output3, &amp;_output1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; _output4.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; output_gate_out = &amp;_output1;</div><div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160;</div><div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; <span class="comment">// Allocate intermediate buffers</span></div><div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160; _output3.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160; }</div><div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160; {</div><div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160; _output1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00312"></a><span 
class="lineno"> 312</span>&#160; }</div><div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; {</div><div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; _output_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; _output_layer_norm_out2.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">init</a>(<a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(cell_state_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;info()-&gt;data_type()));</div><div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_output_layer_norm_out1);</div><div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; _memory_group.<a class="code" href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">manage</a>(&amp;_output_layer_norm_out2);</div><div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; _mean_std_norm_output_gate.<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#ac5f95670c8b3e74458ce27030ea3d41e">configure</a>(output_gate_out);</div><div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160; _pixelwise_mul_output_gate_coeff.<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">configure</a>(output_gate_out, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>(), &amp;_output_layer_norm_out1, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>);</div><div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160; <span class="comment">// output_gate_out is going to be reassigned, so allocate the tensor that it was assigned to before</span></div><div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160; output_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160; _accum_output_gate_bias.<a 
class="code" href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">configure</a>(&amp;_output_layer_norm_out1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>, &amp;_output_layer_norm_out2, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>);</div><div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160; _output_layer_norm_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160; output_gate_out = &amp;_output_layer_norm_out2;</div><div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160; }</div><div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160; _activation_output.<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">configure</a>(output_gate_out, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>));</div><div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160;</div><div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; <span class="comment">// Configure block that calculates the output state</span><span class="comment"></span></div><div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160;<span class="comment"> /** lstm_res = PixelwiseMul(output, Activation(cell_state))</span></div><div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160;<span class="comment"> *</span></div><div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160;<span class="comment"> * -- Clip(lstm_res * projection_weights + projection_bias, projection_threshold) , if there is a projection</span></div><div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160;<span class="comment"> * /</span></div><div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160;<span class="comment"> * output_state = --</span></div><div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160;<span class="comment"> * \</span></div><div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160;<span class="comment"> * -- lstm_res , otherwise</span></div><div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160;<span class="comment"> */</span></div><div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <a class="code" href="classarm__compute_1_1_i_tensor.xhtml">ITensor</a> *output_state_out_tmp = lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a127009377712009a84cd0c48aa7e1edd">has_projection</a>() ? 
    // Configure block that calculates the output state
    /** lstm_res = PixelwiseMul(output, Activation(cell_state))
     *
     *                 -- Clip(lstm_res * projection_weights + projection_bias, projection_threshold) , if there is a projection
     *                /
     * output_state = --
     *                \
     *                 -- lstm_res , otherwise
     */
    ITensor *output_state_out_tmp = lstm_params.has_projection() ? &_output_state1 : output_state_out;
    _cell_state_activation.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));
    _output_state1.allocator()->init(TensorInfo(cell_state_shape, 1, input->info()->data_type()));

    _memory_group.manage(&_cell_state_activation);
    _activation_output_state.configure(&_cell_state_out1, &_cell_state_activation, activation_info);
    _pixelwise_mul_output_state2.configure(&_cell_state_activation, output_gate_out, output_state_out_tmp, 1, ConvertPolicy::SATURATE, RoundingPolicy::TO_ZERO);
    _cell_state_activation.allocator()->allocate();
    output_gate_out->allocator()->allocate();

    if(lstm_params.has_projection())
    {
        _has_projection_weights = true;
        _fully_connected_output_state.configure(output_state_out_tmp, lstm_params.projection_weights(), lstm_params.projection_bias(), output_state_out);
        _output_state1.allocator()->allocate();
        // Perform clipping
        if(projection_threshold != 0.f)
        {
            _perform_projection_clipping = true;
            _projection_clip.configure(output_state_out, nullptr, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU, -projection_threshold, projection_threshold));
        }
    }

    // Copy cell state and output
    _copy_cell_state.configure(&_cell_state_out1, cell_state_out);
    _copy_output.configure(output_state_out, output);
class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160; {</div><div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; scratch_inputs.emplace_back(input_gate_out);</div><div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160; }</div><div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; scratch_inputs.emplace_back(&amp;_cell_state_out1);</div><div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160; scratch_inputs.emplace_back(forget_gate_out);</div><div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160; scratch_inputs.emplace_back(output_gate_out);</div><div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; _concat_scratch_buffer.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">configure</a>(scratch_inputs, scratch_buffer, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>);</div><div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; input_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; _cell_state_out1.<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; forget_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; output_gate_out-&gt;<a class="code" href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">allocator</a>()-&gt;<a class="code" href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">allocate</a>();</div><div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160;}</div><div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_addition_xhtml_a17b1a968874510a32f0bdcc78e0cb360"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a17b1a968874510a32f0bdcc78e0cb360">arm_compute::NEArithmeticAddition::configure</a></div><div class="ttdeci">void configure(ITensor *input1, ITensor *input2, ITensor *output, ConvertPolicy policy)</div><div class="ttdoc">Initialise the kernel's inputs, output and conversion policy.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_addition_8cpp_source.xhtml#l00034">NEArithmeticAddition.cpp:34</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_ab1b3d5364f11bca8cacef026c8038dba"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#ab1b3d5364f11bca8cacef026c8038dba">arm_compute::LSTMParams::projection_weights</a></div><div class="ttdeci">const T * projection_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00150">LSTMParams.h:150</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_afa54d4a35e697cb14a38359616709681"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">arm_compute::LSTMParams::input_to_input_weights</a></div><div class="ttdeci">const T * input_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00120">LSTMParams.h:120</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a507bd7e4d98cb3e45d3e820d8bac422a"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">arm_compute::test::validation::output_gate_bias</a></div><div class="ttdeci">auto output_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00484">LSTMLayerQuantized.cpp:484</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml">arm_compute::TensorShape</a></div><div class="ttdoc">Shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00039">TensorShape.h:39</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a2d615f651270885a1b996046e9902a3c"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">arm_compute::LSTMParams::use_layer_norm</a></div><div class="ttdeci">bool use_layer_norm() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00195">LSTMParams.h:195</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_allocator_xhtml_a3fc6adad84b23f10d54d5a7b6928f872"><div class="ttname"><a href="classarm__compute_1_1_tensor_allocator.xhtml#a3fc6adad84b23f10d54d5a7b6928f872">arm_compute::TensorAllocator::init</a></div><div class="ttdeci">void init(const TensorAllocator &amp;allocator, const Coordinates &amp;coords, TensorInfo &amp;sub_info)</div><div class="ttdoc">Shares the same backing memory with another tensor allocator, while the tensor info might be differen...</div><div class="ttdef"><b>Definition:</b> <a href="src_2runtime_2_tensor_allocator_8cpp_source.xhtml#l00108">TensorAllocator.cpp:108</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_aa05bceba37ded272a464a90becd9cd99"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa05bceba37ded272a464a90becd9cd99">arm_compute::NELSTMLayer::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *input_to_forget_weights, const ITensorInfo *input_to_cell_weights, const ITensorInfo *input_to_output_weights, const ITensorInfo *recurrent_to_forget_weights, const ITensorInfo *recurrent_to_cell_weights, const ITensorInfo *recurrent_to_output_weights, const ITensorInfo *forget_gate_bias, const ITensorInfo *cell_bias, const ITensorInfo *output_gate_bias, const ITensorInfo *output_state_in, const ITensorInfo *cell_state_in, const ITensorInfo *scratch_buffer, const ITensorInfo *output_state_out, const ITensorInfo *cell_state_out, const ITensorInfo *output, const LSTMParams&lt; ITensorInfo &gt; &amp;lstm_params, const ActivationLayerInfo &amp;activation_info, float cell_threshold=0.f, float projection_threshold=0.f)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NELSTMLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00381">NELSTMLayer.cpp:381</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a23be92a19e0d7c174ed444e709518afd"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">arm_compute::LSTMParams::has_peephole_opt</a></div><div class="ttdeci">bool has_peephole_opt() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00180">LSTMParams.h:180</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a55daaf57fb833fc416d779c28f7a3c85"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">arm_compute::test::validation::forget_gate_bias</a></div><div class="ttdeci">auto forget_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00482">LSTMLayerQuantized.cpp:482</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_aafb05bcc27f0879701152cd664c632ce"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">arm_compute::LSTMParams::cell_to_input_weights</a></div><div class="ttdeci">const T * cell_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00130">LSTMParams.h:130</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a28c80440058d5c9b0bc1e1a4622c734a"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a28c80440058d5c9b0bc1e1a4622c734a">arm_compute::LSTMParams::set_peephole_params</a></div><div class="ttdeci">LSTMParams &amp; set_peephole_params(const T *cell_to_forget_weights, const T *cell_to_output_weights)</div><div class="ttdoc">Set peephole tensor parameters.</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00093">LSTMParams.h:93</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_activation_layer_kernel_xhtml_adfb5ef37594fc9371c4a2b95e3d5e31b"><div class="ttname"><a href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#adfb5ef37594fc9371c4a2b95e3d5e31b">arm_compute::NEActivationLayerKernel::configure</a></div><div class="ttdeci">void configure(ITensor *input, ITensor *output, ActivationLayerInfo activation_info)</div><div class="ttdoc">Set the input and output tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_activation_layer_kernel_8cpp_source.xhtml#l00121">NEActivationLayerKernel.cpp:121</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_aae040c52316d86a4df2c7cdf179049bf"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">arm_compute::LSTMParams::has_cifg_opt</a></div><div class="ttdeci">bool has_cifg_opt() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00190">LSTMParams.h:190</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_addition_kernel_xhtml_ae549ed675eab6d763ac6ffd18d226c27"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_addition_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">arm_compute::NEArithmeticAdditionKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)</div><div class="ttdoc">Initialise the kernel's input, output and border mode.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_addition_kernel_8cpp_source.xhtml#l00915">NEArithmeticAdditionKernel.cpp:915</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a934af5defc72f38841ce8955e2151473"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">arm_compute::LSTMParams::cell_to_output_weights</a></div><div class="ttdeci">const T * cell_to_output_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00145">LSTMParams.h:145</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac62dfdcc14798598d953342789c9927e"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">arm_compute::test::validation::recurrent_to_forget_weights</a></div><div class="ttdeci">auto recurrent_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00478">LSTMLayerQuantized.cpp:478</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml">arm_compute::ITensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_info_8h_source.xhtml#l00040">ITensorInfo.h:40</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel_xhtml_ab214c0eb5acd40bde3f8a3fb6c0a6613"><div class="ttname"><a href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#ab214c0eb5acd40bde3f8a3fb6c0a6613">arm_compute::NEPixelWiseMultiplicationKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *input1, const ITensor *input2, ITensor *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)</div><div class="ttdoc">Initialise the kernel's input, output and border mode.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_pixel_wise_multiplication_kernel_8cpp_source.xhtml#l00581">NEPixelWiseMultiplicationKernel.cpp:581</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a938dcd406ce611ef5345ad2531cdb948"><div class="ttname"><a href="_error_8h.xhtml#a938dcd406ce611ef5345ad2531cdb948">ARM_COMPUTE_ERROR_THROW_ON</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_THROW_ON(status)</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00455">Error.h:455</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml">arm_compute::ActivationLayerInfo</a></div><div class="ttdoc">Activation Layer Information class.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l01615">Types.h:1615</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml">arm_compute::ITensor</a></div><div class="ttdoc">Interface for NEON tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_i_tensor_8h_source.xhtml#l00036">ITensor.h:36</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac547a66fe26967afb94760061ee0d0d1"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">arm_compute::test::validation::input_to_cell_weights</a></div><div class="ttdeci">auto input_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00475">LSTMLayerQuantized.cpp:475</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_adbd0cf83a8e1b335a9bf405a8e5019fa"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#adbd0cf83a8e1b335a9bf405a8e5019fa">arm_compute::Tensor::allocator</a></div><div class="ttdeci">TensorAllocator * allocator()</div><div class="ttdoc">Return a pointer to the tensor's allocator.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8cpp_source.xhtml#l00048">Tensor.cpp:48</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_a47d74e4e51f9b1a636c4831bd747a97c"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">arm_compute::Tensor::info</a></div><div class="ttdeci">ITensorInfo * info() const override</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8cpp_source.xhtml#l00033">Tensor.cpp:33</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1misc_1_1shape__calculator_xhtml_a69cb11b5b37f94a6bea9eaad9d13cccf"><div class="ttname"><a href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a69cb11b5b37f94a6bea9eaad9d13cccf">arm_compute::misc::shape_calculator::compute_transposed_shape</a></div><div class="ttdeci">TensorShape compute_transposed_shape(const ITensorInfo &amp;input)</div><div class="ttdoc">Calculate the transposed shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_shape_calculator_8h_source.xhtml#l00426">ShapeCalculator.h:426</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a8fcf2ddd9a1d58b1b280f5c0aed71845"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">arm_compute::test::validation::input</a></div><div class="ttdeci">auto input</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">LSTMLayerQuantized.cpp:487</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_aab02df8a9ee45153f2fd76e934407fbd"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">arm_compute::test::validation::recurrent_to_output_weights</a></div><div class="ttdeci">auto recurrent_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00480">LSTMLayerQuantized.cpp:480</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer_xhtml_ac5f95670c8b3e74458ce27030ea3d41e"><div class="ttname"><a href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#ac5f95670c8b3e74458ce27030ea3d41e">arm_compute::NEMeanStdDevNormalizationLayer::configure</a></div><div class="ttdeci">void configure(ITensor *input, ITensor *output=nullptr, float epsilon=1e-8f)</div><div class="ttdoc">Initialise the function's input and outputs.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_mean_std_dev_normalization_layer_8cpp_source.xhtml#l00031">NEMeanStdDevNormalizationLayer.cpp:31</a></div></div>
<div class="ttc" id="classarm__compute_1_1_memory_group_xhtml_a6fc0a49304c152c20a0f6df0634fb3cd"><div class="ttname"><a href="classarm__compute_1_1_memory_group.xhtml#a6fc0a49304c152c20a0f6df0634fb3cd">arm_compute::MemoryGroup::manage</a></div><div class="ttdeci">void manage(IMemoryManageable *obj) override</div><div class="ttdoc">Sets a object to be managed by the given memory group.</div><div class="ttdef"><b>Definition:</b> <a href="_memory_group_8h_source.xhtml#l00079">MemoryGroup.h:79</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a35e4b6311397e1f9532fb37560aa9996"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">arm_compute::LSTMParams::recurrent_to_input_weights</a></div><div class="ttdeci">const T * recurrent_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00125">LSTMParams.h:125</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ace4dd633420fa8d8aa71f60ff730f01f"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">arm_compute::test::validation::input_to_output_weights</a></div><div class="ttdeci">auto input_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00476">LSTMLayerQuantized.cpp:476</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_ad676992a90d193409fa6a28a001af6c8"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#ad676992a90d193409fa6a28a001af6c8">arm_compute::LSTMParams::projection_bias</a></div><div class="ttdeci">const T * projection_bias() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00155">LSTMParams.h:155</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_concatenate_layer_xhtml_a4260af03ff25dfc42684ba62aab9a532"><div class="ttname"><a href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a4260af03ff25dfc42684ba62aab9a532">arm_compute::NEConcatenateLayer::configure</a></div><div class="ttdeci">void configure(std::vector&lt; ITensor * &gt; inputs_vector, ITensor *output, size_t axis)</div><div class="ttdoc">Initialise the kernel's inputs vector and output.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_concatenate_layer_8cpp_source.xhtml#l00049">NEConcatenateLayer.cpp:49</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_aa96e81276ee4f87ab386cd05a5539a7d"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">arm_compute::Window::DimX</a></div><div class="ttdeci">static constexpr size_t DimX</div><div class="ttdoc">Alias for dimension 0 also known as X dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00043">Window.h:43</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a7c66505457d00ece3aa4b34cab80757d"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">arm_compute::ITensorInfo::tensor_shape</a></div><div class="ttdeci">virtual const TensorShape &amp; tensor_shape() const =0</div><div class="ttdoc">Size for each dimension of the tensor.</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a477486a9c5189cff8af1cdd9d7e8d573"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">arm_compute::LSTMParams::cell_to_forget_weights</a></div><div class="ttdeci">const T * cell_to_forget_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00140">LSTMParams.h:140</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_fully_connected_layer_xhtml_a91fb7694ae938cfec69ff6474451de49"><div class="ttname"><a href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a91fb7694ae938cfec69ff6474451de49">arm_compute::NEFullyConnectedLayer::configure</a></div><div class="ttdeci">void configure(const ITensor *input, const ITensor *weights, const ITensor *biases, ITensor *output, FullyConnectedLayerInfo fc_info=FullyConnectedLayerInfo())</div><div class="ttdoc">Set the input and output tensors.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00141">NEFullyConnectedLayer.cpp:141</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_allocator_xhtml_a6e509c2a177b0b29e9e2369535094dee"><div class="ttname"><a href="classarm__compute_1_1_tensor_allocator.xhtml#a6e509c2a177b0b29e9e2369535094dee">arm_compute::TensorAllocator::allocate</a></div><div class="ttdeci">void allocate() override</div><div class="ttdoc">Allocate size specified by TensorInfo of CPU memory.</div><div class="ttdef"><b>Definition:</b> <a href="src_2runtime_2_tensor_allocator_8cpp_source.xhtml#l00133">TensorAllocator.cpp:133</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_abd478bedc7c65b72ead0d05cbd16d437"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">arm_compute::LSTMParams::cell_layer_norm_weights</a></div><div class="ttdeci">const T * cell_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00170">LSTMParams.h:170</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_xhtml_a0e95dc1e53c361348314873b168ae237"><div class="ttname"><a href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">arm_compute::ITensor::info</a></div><div class="ttdeci">virtual ITensorInfo * info() const =0</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml">arm_compute::Tensor</a></div><div class="ttdoc">Basic implementation of the tensor interface.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8h_source.xhtml#l00037">Tensor.h:37</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac2236dfe2a3fc5fa4e125348829cbeb2"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">arm_compute::test::validation::recurrent_to_cell_weights</a></div><div class="ttdeci">auto recurrent_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00479">LSTMLayerQuantized.cpp:479</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a23361ca1393c0dc196fbf4e627e07119"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">arm_compute::LSTMParams::input_layer_norm_weights</a></div><div class="ttdeci">const T * input_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00160">LSTMParams.h:160</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml_a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC</a></div><div class="ttdoc">Logistic ( )</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_aea777d30779bab2d14630ea7e8516615"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#aea777d30779bab2d14630ea7e8516615">arm_compute::LSTMParams::set_projection_params</a></div><div class="ttdeci">LSTMParams &amp; set_projection_params(const T *projection_weights, const T *projection_bias)</div><div class="ttdoc">Set projection tensor parameters.</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00079">LSTMParams.h:79</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_copy_kernel_xhtml_ab914edcfa864626e92184b2b0ec23c30"><div class="ttname"><a href="classarm__compute_1_1_n_e_copy_kernel.xhtml#ab914edcfa864626e92184b2b0ec23c30">arm_compute::NECopyKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *input, ITensor *output, const PaddingList &amp;padding=PaddingList())</div><div class="ttdoc">Initialize the kernel's input, output.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_copy_kernel_8cpp_source.xhtml#l00078">NECopyKernel.cpp:78</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel_xhtml_ae549ed675eab6d763ac6ffd18d226c27"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml#ae549ed675eab6d763ac6ffd18d226c27">arm_compute::NEArithmeticSubtractionKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *input1, const ITensor *input2, ITensor *output, ConvertPolicy policy)</div><div class="ttdoc">Initialise the kernel's input, output and border mode.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_subtraction_kernel_8cpp_source.xhtml#l00493">NEArithmeticSubtractionKernel.cpp:493</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_adac8095c0cd29d443206dfcaf67f3607"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#adac8095c0cd29d443206dfcaf67f3607">arm_compute::LSTMParams::set_cifg_params</a></div><div class="ttdeci">LSTMParams &amp; set_cifg_params(const T *input_to_input_weights, const T *recurrent_to_input_weights, const T *cell_to_input_weights, const T *input_gate_bias)</div><div class="ttdoc">Set CIFG tensor parameters.</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00063">LSTMParams.h:63</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml_a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU</a></div><div class="ttdoc">Lower and Upper Bounded Rectifier ( )</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a127009377712009a84cd0c48aa7e1edd"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a127009377712009a84cd0c48aa7e1edd">arm_compute::LSTMParams::has_projection</a></div><div class="ttdeci">bool has_projection() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00185">LSTMParams.h:185</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a921b705e9e3e0fe928928447869e62a5"><div class="ttname"><a href="_validate_8h.xhtml#a921b705e9e3e0fe928928447869e62a5">ARM_COMPUTE_ERROR_ON_NULLPTR</a></div><div class="ttdeci">#define ARM_COMPUTE_ERROR_ON_NULLPTR(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00161">Validate.h:161</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_xhtml_a385241dcc5062af6ecac8bdafe01bb2a"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#a385241dcc5062af6ecac8bdafe01bb2a">arm_compute::NEGEMM::configure</a></div><div class="ttdeci">void configure(const ITensor *a, const ITensor *b, const ITensor *c, ITensor *d, float alpha, float beta, const GEMMInfo &amp;gemm_info=GEMMInfo())</div><div class="ttdoc">Initialise the kernel's inputs, output.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_8cpp_source.xhtml#l00051">NEGEMM.cpp:51</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a208874b46a667263fa309537c5355318"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">arm_compute::LSTMParams::output_layer_norm_weights</a></div><div class="ttdeci">const T * output_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00175">LSTMParams.h:175</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a29a7a1636c6a8fd9e423d55c36e991a0"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">arm_compute::LSTMParams::input_gate_bias</a></div><div class="ttdeci">const T * input_gate_bias() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00135">LSTMParams.h:135</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml">arm_compute::TensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_info_8h_source.xhtml#l00045">TensorInfo.h:45</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86"><div class="ttname"><a href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">arm_compute::ConvertPolicy::SATURATE</a></div><div class="ttdoc">Saturate.</div></div>
<div class="ttc" id="namespacearm__compute_xhtml_add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06"><div class="ttname"><a href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">arm_compute::RoundingPolicy::TO_ZERO</a></div><div class="ttdoc">Truncates the least significant values that are lost in operations.</div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a3b793c410cba57a1395184692a018356"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">arm_compute::test::validation::input_to_forget_weights</a></div><div class="ttdeci">auto input_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00474">LSTMLayerQuantized.cpp:474</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_transpose_kernel_xhtml_a83a344e60eb7db895953a942abf16628"><div class="ttname"><a href="classarm__compute_1_1_n_e_transpose_kernel.xhtml#a83a344e60eb7db895953a942abf16628">arm_compute::NETransposeKernel::configure</a></div><div class="ttdeci">void configure(const ITensor *input, ITensor *output)</div><div class="ttdoc">Initialise the kernel's input and output.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_transpose_kernel_8cpp_source.xhtml#l00499">NETransposeKernel.cpp:499</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml">arm_compute::LSTMParams</a></div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00038">LSTMParams.h:38</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a213908108c07594027bc2b829fe7ee4a"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">arm_compute::LSTMParams::forget_layer_norm_weights</a></div><div class="ttdeci">const T * forget_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00165">LSTMParams.h:165</a></div></div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="src_2runtime_2_tensor_allocator_8cpp_source.xhtml#l00133">TensorAllocator::allocate()</a>, <a class="el" href="runtime_2_tensor_8cpp_source.xhtml#l00048">Tensor::allocator()</a>, <a class="el" href="_validate_8h_source.xhtml#l00161">ARM_COMPUTE_ERROR_ON_NULLPTR</a>, <a class="el" href="_error_8h_source.xhtml#l00455">ARM_COMPUTE_ERROR_THROW_ON</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00170">LSTMParams&lt; T &gt;::cell_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00140">LSTMParams&lt; T &gt;::cell_to_forget_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00130">LSTMParams&lt; T &gt;::cell_to_input_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00145">LSTMParams&lt; T &gt;::cell_to_output_weights()</a>, <a class="el" href="_shape_calculator_8h_source.xhtml#l00426">arm_compute::misc::shape_calculator::compute_transposed_shape()</a>, <a class="el" href="_n_e_arithmetic_addition_8cpp_source.xhtml#l00034">NEArithmeticAddition::configure()</a>, <a class="el" href="_n_e_mean_std_dev_normalization_layer_8cpp_source.xhtml#l00031">NEMeanStdDevNormalizationLayer::configure()</a>, <a class="el" href="_n_e_copy_kernel_8cpp_source.xhtml#l00078">NECopyKernel::configure()</a>, <a class="el" href="_n_e_transpose_kernel_8cpp_source.xhtml#l00499">NETransposeKernel::configure()</a>, <a class="el" href="_n_e_concatenate_layer_8cpp_source.xhtml#l00049">NEConcatenateLayer::configure()</a>, <a class="el" href="_n_e_activation_layer_kernel_8cpp_source.xhtml#l00121">NEActivationLayerKernel::configure()</a>, <a class="el" href="_n_e_pixel_wise_multiplication_kernel_8cpp_source.xhtml#l00581">NEPixelWiseMultiplicationKernel::configure()</a>, <a class="el" href="_n_e_arithmetic_subtraction_kernel_8cpp_source.xhtml#l00493">NEArithmeticSubtractionKernel::configure()</a>, <a class="el" href="_n_e_arithmetic_addition_kernel_8cpp_source.xhtml#l00915">NEArithmeticAdditionKernel::configure()</a>, <a class="el" href="_n_e_g_e_m_m_8cpp_source.xhtml#l00051">NEGEMM::configure()</a>, <a class="el" href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00141">NEFullyConnectedLayer::configure()</a>, <a class="el" href="_window_8h_source.xhtml#l00043">Window::DimX</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00482">arm_compute::test::validation::forget_gate_bias</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00165">LSTMParams&lt; T &gt;::forget_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00190">LSTMParams&lt; T &gt;::has_cifg_opt()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00180">LSTMParams&lt; T &gt;::has_peephole_opt()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00185">LSTMParams&lt; T &gt;::has_projection()</a>, <a class="el" href="classarm__compute_1_1_i_tensor.xhtml#a0e95dc1e53c361348314873b168ae237">ITensor::info()</a>, <a class="el" href="runtime_2_tensor_8cpp_source.xhtml#l00033">Tensor::info()</a>, <a class="el" href="src_2runtime_2_tensor_allocator_8cpp_source.xhtml#l00108">TensorAllocator::init()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">arm_compute::test::validation::input</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00135">LSTMParams&lt; T &gt;::input_gate_bias()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00160">LSTMParams&lt; T &gt;::input_layer_norm_weights()</a>, <a class="el" 
href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00475">arm_compute::test::validation::input_to_cell_weights</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00474">arm_compute::test::validation::input_to_forget_weights</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00120">LSTMParams&lt; T &gt;::input_to_input_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00476">arm_compute::test::validation::input_to_output_weights</a>, <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::LOGISTIC</a>, <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">ActivationLayerInfo::LU_BOUNDED_RELU</a>, <a class="el" href="_memory_group_8h_source.xhtml#l00079">MemoryGroup::manage()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00484">arm_compute::test::validation::output_gate_bias</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00175">LSTMParams&lt; T &gt;::output_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00155">LSTMParams&lt; T &gt;::projection_bias()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00150">LSTMParams&lt; T &gt;::projection_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00479">arm_compute::test::validation::recurrent_to_cell_weights</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00478">arm_compute::test::validation::recurrent_to_forget_weights</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00125">LSTMParams&lt; T &gt;::recurrent_to_input_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00480">arm_compute::test::validation::recurrent_to_output_weights</a>, <a class="el" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">arm_compute::SATURATE</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00063">LSTMParams&lt; T &gt;::set_cifg_params()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00093">LSTMParams&lt; T &gt;::set_peephole_params()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00079">LSTMParams&lt; T &gt;::set_projection_params()</a>, <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml#a7c66505457d00ece3aa4b34cab80757d">ITensorInfo::tensor_shape()</a>, <a class="el" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">arm_compute::TO_ZERO</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00195">LSTMParams&lt; T &gt;::use_layer_norm()</a>, and <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00381">NELSTMLayer::validate()</a>.</p>
</div>
</div>
<a id="aa9b93ef660fc3c5b4b19d3fc7b891b77"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa9b93ef660fc3c5b4b19d3fc7b891b77">&#9670;&nbsp;</a></span>prepare()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">void prepare </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Prepare the function for execution. </p>
<p>Any one-off pre-processing step required by the function is handled here.</p>
<dl class="section note"><dt>Note</dt><dd>The prepare stage might not need all of the function's buffers' backing memory to be available in order to execute. </dd></dl>
<p>Reimplemented from <a class="el" href="classarm__compute_1_1_i_function.xhtml#a820f7291c24155a2980512fae45aac26">IFunction</a>.</p>
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00716">716</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00717"></a><span class="lineno"> 717</span>&#160;{</div><div class="line"><a name="l00718"></a><span class="lineno"> 718</span>&#160; <span class="keywordflow">if</span>(!_is_prepared)</div><div class="line"><a name="l00719"></a><span class="lineno"> 719</span>&#160; {</div><div class="line"><a name="l00720"></a><span class="lineno"> 720</span>&#160; _concat_weights_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00721"></a><span class="lineno"> 721</span>&#160; <span class="keywordflow">if</span>(!_run_cifg_opt)</div><div class="line"><a name="l00722"></a><span class="lineno"> 722</span>&#160; {</div><div class="line"><a name="l00723"></a><span class="lineno"> 723</span>&#160; _concat_weights_input_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00724"></a><span class="lineno"> 724</span>&#160; }</div><div class="line"><a name="l00725"></a><span class="lineno"> 725</span>&#160; _concat_weights_output.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00726"></a><span class="lineno"> 726</span>&#160; _is_prepared = <span class="keyword">true</span>;</div><div class="line"><a name="l00727"></a><span class="lineno"> 727</span>&#160; }</div><div class="line"><a name="l00728"></a><span class="lineno"> 728</span>&#160;}</div><div class="ttc" id="classarm__compute_1_1_n_e_concatenate_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEConcatenateLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_concatenate_layer_8cpp_source.xhtml#l00177">NEConcatenateLayer.cpp:177</a></div></div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="_n_e_concatenate_layer_8cpp_source.xhtml#l00177">NEConcatenateLayer::run()</a>.</p>
<p class="reference">Referenced by <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00611">NELSTMLayer::run()</a>.</p>
</div>
</div>
<a id="ad1717410afd0be936c6213a63c8005fb"></a>
<h2 class="memtitle"><span class="permalink"><a href="#ad1717410afd0be936c6213a63c8005fb">&#9670;&nbsp;</a></span>run()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">void run </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Run the kernels contained in the function. </p>
<p>For NEON kernels:</p><ul>
<li>Multi-threading is used for the kernels which are parallelisable.</li>
<li>By default std::thread::hardware_concurrency() threads are used.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd><a class="el" href="classarm__compute_1_1_c_p_p_scheduler.xhtml#ae64eebaa07f4d2da6cc2ba538c3cb095">CPPScheduler::set_num_threads()</a> can be used to manually set the number of threads</dd></dl>
<p>For OpenCL kernels:</p><ul>
<li>All the kernels are enqueued on the queue associated with <a class="el" href="classarm__compute_1_1_c_l_scheduler.xhtml" title="Provides global access to a CL context and command queue.">CLScheduler</a>.</li>
<li>The queue is then flushed.</li>
</ul>
<dl class="section note"><dt>Note</dt><dd>The function will not block until the kernels are executed. It is the user's responsibility to wait. </dd>
<dd>
Will call <a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77" title="Prepare the function for executing.">prepare()</a> on first run if hasn't been done </dd></dl>
<p>Implements <a class="el" href="classarm__compute_1_1_i_function.xhtml#a18954417d3124a8095783ea13dc6d00b">IFunction</a>.</p>
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00611">611</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00612"></a><span class="lineno"> 612</span>&#160;{</div><div class="line"><a name="l00613"></a><span class="lineno"> 613</span>&#160; <a class="code" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77">prepare</a>();</div><div class="line"><a name="l00614"></a><span class="lineno"> 614</span>&#160;</div><div class="line"><a name="l00615"></a><span class="lineno"> 615</span>&#160; <a class="code" href="classarm__compute_1_1_memory_group_resource_scope.xhtml">MemoryGroupResourceScope</a> scope_mg(_memory_group);</div><div class="line"><a name="l00616"></a><span class="lineno"> 616</span>&#160;</div><div class="line"><a name="l00617"></a><span class="lineno"> 617</span>&#160; _concat_inputs_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00618"></a><span class="lineno"> 618</span>&#160; _fully_connected_forget_gate.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00619"></a><span class="lineno"> 619</span>&#160;</div><div class="line"><a name="l00620"></a><span class="lineno"> 620</span>&#160; <span class="keywordflow">if</span>(_run_peephole_opt)</div><div class="line"><a name="l00621"></a><span class="lineno"> 621</span>&#160; {</div><div class="line"><a name="l00622"></a><span class="lineno"> 622</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_forget_gate, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00623"></a><span class="lineno"> 623</span>&#160; _accum_forget_gate1.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00624"></a><span class="lineno"> 624</span>&#160; }</div><div class="line"><a name="l00625"></a><span class="lineno"> 625</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00626"></a><span class="lineno"> 626</span>&#160; {</div><div class="line"><a name="l00627"></a><span class="lineno"> 627</span>&#160; _mean_std_norm_forget_gate.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function_no_border.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00628"></a><span class="lineno"> 628</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_forget_gate_coeff, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00629"></a><span class="lineno"> 629</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_forget_gate_bias, <a class="code" 
href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00630"></a><span class="lineno"> 630</span>&#160; }</div><div class="line"><a name="l00631"></a><span class="lineno"> 631</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_activation_forget_gate, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00632"></a><span class="lineno"> 632</span>&#160;</div><div class="line"><a name="l00633"></a><span class="lineno"> 633</span>&#160; <span class="keywordflow">if</span>(_run_cifg_opt)</div><div class="line"><a name="l00634"></a><span class="lineno"> 634</span>&#160; {</div><div class="line"><a name="l00635"></a><span class="lineno"> 635</span>&#160; <span class="keywordflow">if</span>(_ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">data_type</a>() == <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">DataType::F16</a>)</div><div class="line"><a name="l00636"></a><span class="lineno"> 636</span>&#160; {</div><div class="line"><a name="l00637"></a><span class="lineno"> 637</span>&#160; std::fill_n(reinterpret_cast&lt;half *&gt;(_ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a24954cca5108a24706441fd99a7fb04c">buffer</a>()), _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a18064e0011c3869d884653e9e7c47b66">total_size</a>() / _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#aa459796b5489eca8a9160cb5dcf1a103">element_size</a>(), 1);</div><div class="line"><a name="l00638"></a><span class="lineno"> 638</span>&#160; }</div><div class="line"><a name="l00639"></a><span class="lineno"> 639</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00640"></a><span class="lineno"> 640</span>&#160; {</div><div class="line"><a name="l00641"></a><span class="lineno"> 641</span>&#160; std::fill_n(reinterpret_cast&lt;float *&gt;(_ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a24954cca5108a24706441fd99a7fb04c">buffer</a>()), _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a18064e0011c3869d884653e9e7c47b66">total_size</a>() / _ones.<a class="code" href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">info</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#aa459796b5489eca8a9160cb5dcf1a103">element_size</a>(), 1);</div><div class="line"><a name="l00642"></a><span class="lineno"> 642</span>&#160; }</div><div class="line"><a name="l00643"></a><span class="lineno"> 643</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" 
href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_subtract_input_gate, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00644"></a><span class="lineno"> 644</span>&#160; }</div><div class="line"><a name="l00645"></a><span class="lineno"> 645</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00646"></a><span class="lineno"> 646</span>&#160; {</div><div class="line"><a name="l00647"></a><span class="lineno"> 647</span>&#160; _fully_connected_input_gate.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00648"></a><span class="lineno"> 648</span>&#160;</div><div class="line"><a name="l00649"></a><span class="lineno"> 649</span>&#160; <span class="keywordflow">if</span>(_run_peephole_opt)</div><div class="line"><a name="l00650"></a><span class="lineno"> 650</span>&#160; {</div><div class="line"><a name="l00651"></a><span class="lineno"> 651</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_input_gate, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00652"></a><span class="lineno"> 652</span>&#160; _accum_input_gate1.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00653"></a><span class="lineno"> 653</span>&#160; }</div><div class="line"><a name="l00654"></a><span class="lineno"> 654</span>&#160;</div><div class="line"><a name="l00655"></a><span class="lineno"> 655</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00656"></a><span class="lineno"> 656</span>&#160; {</div><div class="line"><a name="l00657"></a><span class="lineno"> 657</span>&#160; _mean_std_norm_input_gate.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function_no_border.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00658"></a><span class="lineno"> 658</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_input_gate_coeff, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00659"></a><span class="lineno"> 659</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_input_gate_bias, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00660"></a><span class="lineno"> 660</span>&#160; }</div><div class="line"><a name="l00661"></a><span class="lineno"> 661</span>&#160; <a class="code" 
href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_activation_input_gate, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00662"></a><span class="lineno"> 662</span>&#160; }</div><div class="line"><a name="l00663"></a><span class="lineno"> 663</span>&#160;</div><div class="line"><a name="l00664"></a><span class="lineno"> 664</span>&#160; _fully_connected_cell_state.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00665"></a><span class="lineno"> 665</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_transpose_cell_state, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00666"></a><span class="lineno"> 666</span>&#160; _gemm_cell_state1.<a class="code" href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00667"></a><span class="lineno"> 667</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_cell_state1, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00668"></a><span class="lineno"> 668</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00669"></a><span class="lineno"> 669</span>&#160; {</div><div class="line"><a name="l00670"></a><span class="lineno"> 670</span>&#160; _mean_std_norm_cell_gate.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function_no_border.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00671"></a><span class="lineno"> 671</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_cell_gate_coeff, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00672"></a><span class="lineno"> 672</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_cell_gate_bias, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00673"></a><span class="lineno"> 673</span>&#160; }</div><div class="line"><a name="l00674"></a><span class="lineno"> 674</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" 
href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_activation_cell_state, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00675"></a><span class="lineno"> 675</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_cell_state1, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00676"></a><span class="lineno"> 676</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_cell_state2, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00677"></a><span class="lineno"> 677</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_cell_state2, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00678"></a><span class="lineno"> 678</span>&#160;</div><div class="line"><a name="l00679"></a><span class="lineno"> 679</span>&#160; <span class="keywordflow">if</span>(_perform_cell_clipping)</div><div class="line"><a name="l00680"></a><span class="lineno"> 680</span>&#160; {</div><div class="line"><a name="l00681"></a><span class="lineno"> 681</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_cell_clip, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00682"></a><span class="lineno"> 682</span>&#160; }</div><div class="line"><a name="l00683"></a><span class="lineno"> 683</span>&#160;</div><div class="line"><a name="l00684"></a><span class="lineno"> 684</span>&#160; _fully_connected_output.<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00685"></a><span class="lineno"> 685</span>&#160; <span class="keywordflow">if</span>(_run_peephole_opt)</div><div class="line"><a name="l00686"></a><span class="lineno"> 686</span>&#160; {</div><div class="line"><a name="l00687"></a><span class="lineno"> 687</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_output_state1, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00688"></a><span class="lineno"> 688</span>&#160; _accum_output1.<a class="code" 
href="classarm__compute_1_1_i_n_e_simple_function.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00689"></a><span class="lineno"> 689</span>&#160; }</div><div class="line"><a name="l00690"></a><span class="lineno"> 690</span>&#160; <span class="keywordflow">if</span>(_is_layer_norm_lstm)</div><div class="line"><a name="l00691"></a><span class="lineno"> 691</span>&#160; {</div><div class="line"><a name="l00692"></a><span class="lineno"> 692</span>&#160; _mean_std_norm_output_gate.<a class="code" href="classarm__compute_1_1_i_n_e_simple_function_no_border.xhtml#a92fe532c342ae2b07956a65520c05362">run</a>();</div><div class="line"><a name="l00693"></a><span class="lineno"> 693</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_output_gate_coeff, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00694"></a><span class="lineno"> 694</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_accum_output_gate_bias, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00695"></a><span class="lineno"> 695</span>&#160; }</div><div class="line"><a name="l00696"></a><span class="lineno"> 696</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_activation_output, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00697"></a><span class="lineno"> 697</span>&#160;</div><div class="line"><a name="l00698"></a><span class="lineno"> 698</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_activation_output_state, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00699"></a><span class="lineno"> 699</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_pixelwise_mul_output_state2, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00700"></a><span class="lineno"> 700</span>&#160;</div><div class="line"><a name="l00701"></a><span class="lineno"> 701</span>&#160; <span class="keywordflow">if</span>(_has_projection_weights)</div><div class="line"><a name="l00702"></a><span class="lineno"> 702</span>&#160; {</div><div class="line"><a name="l00703"></a><span class="lineno"> 703</span>&#160; _fully_connected_output_state.<a class="code" 
href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00704"></a><span class="lineno"> 704</span>&#160; <span class="keywordflow">if</span>(_perform_projection_clipping)</div><div class="line"><a name="l00705"></a><span class="lineno"> 705</span>&#160; {</div><div class="line"><a name="l00706"></a><span class="lineno"> 706</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_projection_clip, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00707"></a><span class="lineno"> 707</span>&#160; }</div><div class="line"><a name="l00708"></a><span class="lineno"> 708</span>&#160; }</div><div class="line"><a name="l00709"></a><span class="lineno"> 709</span>&#160;</div><div class="line"><a name="l00710"></a><span class="lineno"> 710</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_copy_cell_state, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00711"></a><span class="lineno"> 711</span>&#160; <a class="code" href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">NEScheduler::get</a>().<a class="code" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">schedule</a>(&amp;_copy_output, <a class="code" href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">Window::DimY</a>);</div><div class="line"><a name="l00712"></a><span class="lineno"> 712</span>&#160;</div><div class="line"><a name="l00713"></a><span class="lineno"> 713</span>&#160; _concat_scratch_buffer.<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">run</a>();</div><div class="line"><a name="l00714"></a><span class="lineno"> 714</span>&#160;}</div><div class="ttc" id="classarm__compute_1_1_n_e_concatenate_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEConcatenateLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_concatenate_layer_8cpp_source.xhtml#l00177">NEConcatenateLayer.cpp:177</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_n_e_simple_function_no_border_xhtml_a92fe532c342ae2b07956a65520c05362"><div class="ttname"><a href="classarm__compute_1_1_i_n_e_simple_function_no_border.xhtml#a92fe532c342ae2b07956a65520c05362">arm_compute::INESimpleFunctionNoBorder::run</a></div><div class="ttdeci">void run() override final</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_i_n_e_simple_function_no_border_8cpp_source.xhtml#l00037">INESimpleFunctionNoBorder.cpp:37</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_n_e_simple_function_xhtml_a92fe532c342ae2b07956a65520c05362"><div class="ttname"><a href="classarm__compute_1_1_i_n_e_simple_function.xhtml#a92fe532c342ae2b07956a65520c05362">arm_compute::INESimpleFunction::run</a></div><div class="ttdeci">void run() override final</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_i_n_e_simple_function_8cpp_source.xhtml#l00036">INESimpleFunction.cpp:36</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a7cfb31af63202568efef5214acfbf3ba"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">arm_compute::ITensorInfo::data_type</a></div><div class="ttdeci">virtual DataType data_type() const =0</div><div class="ttdoc">Data type used for each element of the tensor.</div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">arm_compute::Format::F16</a></div><div class="ttdoc">1 channel, 1 F16 per channel</div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_a47d74e4e51f9b1a636c4831bd747a97c"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#a47d74e4e51f9b1a636c4831bd747a97c">arm_compute::Tensor::info</a></div><div class="ttdeci">ITensorInfo * info() const override</div><div class="ttdoc">Interface to be implemented by the child class to return the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8cpp_source.xhtml#l00033">Tensor.cpp:33</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEGEMM::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_8cpp_source.xhtml#l00285">NEGEMM.cpp:285</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_aa459796b5489eca8a9160cb5dcf1a103"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#aa459796b5489eca8a9160cb5dcf1a103">arm_compute::ITensorInfo::element_size</a></div><div class="ttdeci">virtual size_t element_size() const =0</div><div class="ttdoc">Element size in bytes calculated as data_size() * num_channels()</div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_l_s_t_m_layer_xhtml_aa9b93ef660fc3c5b4b19d3fc7b891b77"><div class="ttname"><a href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml#aa9b93ef660fc3c5b4b19d3fc7b891b77">arm_compute::NELSTMLayer::prepare</a></div><div class="ttdeci">void prepare() override</div><div class="ttdoc">Prepare the function for executing.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00716">NELSTMLayer.cpp:716</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_ad2d402364fa822b0b7775081291eeca9"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#ad2d402364fa822b0b7775081291eeca9">arm_compute::Window::DimY</a></div><div class="ttdeci">static constexpr size_t DimY</div><div class="ttdoc">Alias for dimension 1 also known as Y dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00045">Window.h:45</a></div></div>
<div class="ttc" id="classarm__compute_1_1_memory_group_resource_scope_xhtml"><div class="ttname"><a href="classarm__compute_1_1_memory_group_resource_scope.xhtml">arm_compute::MemoryGroupResourceScope</a></div><div class="ttdoc">Memory group resources scope handling class.</div><div class="ttdef"><b>Definition:</b> <a href="_i_memory_group_8h_source.xhtml#l00082">IMemoryGroup.h:82</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a18064e0011c3869d884653e9e7c47b66"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a18064e0011c3869d884653e9e7c47b66">arm_compute::ITensorInfo::total_size</a></div><div class="ttdeci">virtual size_t total_size() const =0</div><div class="ttdoc">Returns the total size of the tensor in bytes.</div></div>
<div class="ttc" id="classarm__compute_1_1_i_scheduler_xhtml_a4e58f95544bd5ac6559a421671bd9842"><div class="ttname"><a href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">arm_compute::IScheduler::schedule</a></div><div class="ttdeci">virtual void schedule(ICPPKernel *kernel, const Hints &amp;hints)=0</div><div class="ttdoc">Runs the kernel in the same thread as the caller synchronously.</div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_fully_connected_layer_xhtml_ad1717410afd0be936c6213a63c8005fb"><div class="ttname"><a href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#ad1717410afd0be936c6213a63c8005fb">arm_compute::NEFullyConnectedLayer::run</a></div><div class="ttdeci">void run() override</div><div class="ttdoc">Run the kernels contained in the function.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00381">NEFullyConnectedLayer.cpp:381</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_xhtml_a24954cca5108a24706441fd99a7fb04c"><div class="ttname"><a href="classarm__compute_1_1_tensor.xhtml#a24954cca5108a24706441fd99a7fb04c">arm_compute::Tensor::buffer</a></div><div class="ttdeci">uint8_t * buffer() const override</div><div class="ttdoc">Interface to be implemented by the child class to return a pointer to CPU memory.</div><div class="ttdef"><b>Definition:</b> <a href="runtime_2_tensor_8cpp_source.xhtml#l00043">Tensor.cpp:43</a></div></div>
<div class="ttc" id="classarm__compute_1_1_scheduler_xhtml_a0d63ca713bab377aabcfb63c192b8429"><div class="ttname"><a href="classarm__compute_1_1_scheduler.xhtml#a0d63ca713bab377aabcfb63c192b8429">arm_compute::Scheduler::get</a></div><div class="ttdeci">static IScheduler &amp; get()</div><div class="ttdoc">Access the scheduler singleton.</div><div class="ttdef"><b>Definition:</b> <a href="_scheduler_8cpp_source.xhtml#l00095">Scheduler.cpp:95</a></div></div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="runtime_2_tensor_8cpp_source.xhtml#l00043">Tensor::buffer()</a>, <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml#a7cfb31af63202568efef5214acfbf3ba">ITensorInfo::data_type()</a>, <a class="el" href="_window_8h_source.xhtml#l00045">Window::DimY</a>, <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml#aa459796b5489eca8a9160cb5dcf1a103">ITensorInfo::element_size()</a>, <a class="el" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">arm_compute::F16</a>, <a class="el" href="_scheduler_8cpp_source.xhtml#l00095">Scheduler::get()</a>, <a class="el" href="runtime_2_tensor_8cpp_source.xhtml#l00033">Tensor::info()</a>, <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00716">NELSTMLayer::prepare()</a>, <a class="el" href="_i_n_e_simple_function_8cpp_source.xhtml#l00036">INESimpleFunction::run()</a>, <a class="el" href="_i_n_e_simple_function_no_border_8cpp_source.xhtml#l00037">INESimpleFunctionNoBorder::run()</a>, <a class="el" href="_n_e_concatenate_layer_8cpp_source.xhtml#l00177">NEConcatenateLayer::run()</a>, <a class="el" href="_n_e_g_e_m_m_8cpp_source.xhtml#l00285">NEGEMM::run()</a>, <a class="el" href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00381">NEFullyConnectedLayer::run()</a>, <a class="el" href="classarm__compute_1_1_i_scheduler.xhtml#a4e58f95544bd5ac6559a421671bd9842">IScheduler::schedule()</a>, and <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml#a18064e0011c3869d884653e9e7c47b66">ITensorInfo::total_size()</a>.</p>
</div>
</div>
<a id="aa05bceba37ded272a464a90becd9cd99"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa05bceba37ded272a464a90becd9cd99">&#9670;&nbsp;</a></span>validate()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classarm__compute_1_1_status.xhtml">Status</a> validate </td>
<td>(</td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>input</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>input_to_forget_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>input_to_cell_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>input_to_output_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_forget_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_cell_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>recurrent_to_output_weights</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>forget_gate_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>cell_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>output_gate_bias</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>output_state_in</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>cell_state_in</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>scratch_buffer</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>output_state_out</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>cell_state_out</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> *&#160;</td>
<td class="paramname"><em>output</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_l_s_t_m_params.xhtml">LSTMParams</a>&lt; <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml">ITensorInfo</a> &gt; &amp;&#160;</td>
<td class="paramname"><em>lstm_params</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a> &amp;&#160;</td>
<td class="paramname"><em>activation_info</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">float&#160;</td>
<td class="paramname"><em>cell_threshold</em> = <code>0.f</code>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">float&#160;</td>
<td class="paramname"><em>projection_threshold</em> = <code>0.f</code>&#160;</td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">static</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Static function to check if the given info will lead to a valid configuration of <a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a>. </p>
<dl class="params"><dt>Parameters</dt><dd>
<table class="params">
<tr><td class="paramdir">[in]</td><td class="paramname">input</td><td>Source tensor. Input is a 2D tensor with dimensions [input_size, batch_size]. Data types supported: F16/F32. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_forget_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_cell_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">input_to_output_weights</td><td>2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_forget_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_cell_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">recurrent_to_output_weights</td><td>2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">forget_gate_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output_gate_bias</td><td>1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output_state_in</td><td>2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_state_in</td><td>2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">scratch_buffer</td><td>2D tensor with dimensions [num_units * 4, batch_size] with CIFG or [num_units * 3, batch_size] without CIGF. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output_state_out</td><td>2D weights tensor with dimensions [output_size, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_state_out</td><td>2D tensor with dimensions [num_units, batch_size]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">output</td><td>Destination tensor. Output is a 2D tensor with dimensions [output_size, batch_size]. Data types supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">lstm_params</td><td>(Optional) Weights tensors used in peephole optimization: input_to_input_weights 2D weights tensor with dimensions [input_size, num_units]. Data type supported: Same as <code>input</code>. recurrent_to_input_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. cell_to_input_weights 1D weights tensor with dimensions [num_units]. Can be nullptr. Data type supported: Same as <code>input</code>. cell_to_forget_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. cell_to_output_weights 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code>. input_gate_bias 1D weights tensor with dimensions [num_units]. Data type supported: Same as <code>input</code> projection_weights 2D weights tensor with dimensions [output_size, num_units]. Data type supported: Same as <code>input</code>. projection_bias 1D weights tensor with dimensions [output_size]. Data type supported: Same as <code>input</code>. input_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as <code>input</code>. forget_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as <code>input</code>. cell_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as <code>input</code>. output_layer_norm_coefficients 1D weights tensor info with dimensions [num_units]. Data type supported: Same as <code>input</code>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">activation_info</td><td>Contains activation information described in <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">cell_threshold</td><td>The clipping threshold for the cell state, such that values are bound within [-cell_clip, cell_clip]. If set to 0.0 then clipping is disabled. </td></tr>
<tr><td class="paramdir">[in]</td><td class="paramname">projection_threshold</td><td>The clipping threshold for the output from the projection layer, such that values are bound within [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.</td></tr>
</table>
</dd>
</dl>
<dl class="section return"><dt>Returns</dt><dd>a status </dd></dl>
<p class="definition">Definition at line <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00381">381</a> of file <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160;{</div><div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>,</div><div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>,</div><div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>,</div><div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, cell_bias, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>,</div><div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160; output_state_in, cell_state_in,</div><div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; scratch_buffer, output_state_out, cell_state_out, output);</div><div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160;</div><div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; <span class="comment">// Check data types</span></div><div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; <a class="code" href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, 1, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">DataType::F16</a>, <a class="code" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">DataType::F32</a>);</div><div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160; <a class="code" href="_validate_8h.xhtml#a8f3ff7da485ff7e75dab07baadf5b4bd">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>,</div><div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, <a class="code" 
href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>,</div><div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>,</div><div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, cell_bias, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>,</div><div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160; output_state_in, cell_state_in,</div><div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160; scratch_buffer, output_state_out, cell_state_out, output);</div><div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160;</div><div class="line"><a name="l00405"></a><span class="lineno"> 405</span>&#160; <span class="comment">// Check dimensions</span></div><div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00409"></a><span class="lineno"> 409</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">recurrent_to_forget_weights</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00411"></a><span class="lineno"> 411</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">recurrent_to_cell_weights</a>-&gt;num_dimensions() &gt; 
2);</div><div class="line"><a name="l00412"></a><span class="lineno"> 412</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>-&gt;num_dimensions() &gt; 2);</div><div class="line"><a name="l00413"></a><span class="lineno"> 413</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>-&gt;num_dimensions() &gt; 1);</div><div class="line"><a name="l00414"></a><span class="lineno"> 414</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(cell_bias-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00415"></a><span class="lineno"> 415</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>-&gt;num_dimensions() &gt; 1);</div><div class="line"><a name="l00416"></a><span class="lineno"> 416</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_state_in-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00417"></a><span class="lineno"> 417</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(cell_state_in-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00418"></a><span class="lineno"> 418</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(scratch_buffer-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00419"></a><span class="lineno"> 419</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output_state_out-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00420"></a><span class="lineno"> 420</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(cell_state_out-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00421"></a><span class="lineno"> 421</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(output-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00422"></a><span class="lineno"> 422</span>&#160; <a class="code" 
href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(cell_bias-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) * 4 != scratch_buffer-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0)</div><div class="line"><a name="l00423"></a><span class="lineno"> 423</span>&#160; &amp;&amp; cell_bias-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) * 3 != scratch_buffer-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0));</div><div class="line"><a name="l00424"></a><span class="lineno"> 424</span>&#160;</div><div class="line"><a name="l00425"></a><span class="lineno"> 425</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> num_batches = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;dimension(1);</div><div class="line"><a name="l00426"></a><span class="lineno"> 426</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> num_cells = <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>-&gt;dimension(1);</div><div class="line"><a name="l00427"></a><span class="lineno"> 427</span>&#160;</div><div class="line"><a name="l00428"></a><span class="lineno"> 428</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>())</div><div class="line"><a name="l00429"></a><span class="lineno"> 429</span>&#160; {</div><div class="line"><a name="l00430"></a><span class="lineno"> 430</span>&#160; <span class="comment">// If CIFG is used, input layer normalization weights tensor is omitted</span></div><div class="line"><a name="l00431"></a><span class="lineno"> 431</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00432"></a><span class="lineno"> 432</span>&#160; {</div><div class="line"><a name="l00433"></a><span class="lineno"> 433</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>() != <span class="keyword">nullptr</span>);</div><div class="line"><a name="l00434"></a><span class="lineno"> 434</span>&#160; }</div><div class="line"><a name="l00435"></a><span class="lineno"> 435</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00436"></a><span class="lineno"> 436</span>&#160; {</div><div class="line"><a name="l00437"></a><span class="lineno"> 437</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>());</div><div class="line"><a name="l00438"></a><span class="lineno"> 438</span>&#160; <a 
class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00439"></a><span class="lineno"> 439</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) != num_batches);</div><div class="line"><a name="l00440"></a><span class="lineno"> 440</span>&#160; <a class="code" href="_validate_8h.xhtml#a8f3ff7da485ff7e75dab07baadf5b4bd">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>());</div><div class="line"><a name="l00441"></a><span class="lineno"> 441</span>&#160; }</div><div class="line"><a name="l00442"></a><span class="lineno"> 442</span>&#160;</div><div class="line"><a name="l00443"></a><span class="lineno"> 443</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>());</div><div class="line"><a name="l00444"></a><span class="lineno"> 444</span>&#160; <a class="code" href="_validate_8h.xhtml#a8f3ff7da485ff7e75dab07baadf5b4bd">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>());</div><div class="line"><a name="l00445"></a><span class="lineno"> 445</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00446"></a><span class="lineno"> 446</span>&#160; <a class="code" 
href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00447"></a><span class="lineno"> 447</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00448"></a><span class="lineno"> 448</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) != num_batches);</div><div class="line"><a name="l00449"></a><span class="lineno"> 449</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) != num_batches);</div><div class="line"><a name="l00450"></a><span class="lineno"> 450</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">dimension</a>(0) != num_batches);</div><div class="line"><a name="l00451"></a><span class="lineno"> 451</span>&#160; }</div><div class="line"><a name="l00452"></a><span class="lineno"> 452</span>&#160;</div><div class="line"><a name="l00453"></a><span class="lineno"> 453</span>&#160; <span class="comment">// Check peephole optimization</span></div><div class="line"><a name="l00454"></a><span class="lineno"> 454</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00455"></a><span class="lineno"> 455</span>&#160; {</div><div class="line"><a name="l00456"></a><span class="lineno"> 456</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">cell_to_output_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">cell_to_forget_weights</a>());</div><div class="line"><a name="l00457"></a><span class="lineno"> 457</span>&#160; <a class="code" 
href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">cell_to_forget_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00458"></a><span class="lineno"> 458</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">cell_to_output_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00459"></a><span class="lineno"> 459</span>&#160; }</div><div class="line"><a name="l00460"></a><span class="lineno"> 460</span>&#160;</div><div class="line"><a name="l00461"></a><span class="lineno"> 461</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> units_out_transposed_shape = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a69cb11b5b37f94a6bea9eaad9d13cccf">compute_transposed_shape</a>(*<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>);</div><div class="line"><a name="l00462"></a><span class="lineno"> 462</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> num_units_transposed_shape = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a69cb11b5b37f94a6bea9eaad9d13cccf">compute_transposed_shape</a>(*<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>);</div><div class="line"><a name="l00463"></a><span class="lineno"> 463</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> units_out_transposed_info = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(units_out_transposed_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00464"></a><span class="lineno"> 464</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> num_units_transposed_info = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(num_units_transposed_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00465"></a><span class="lineno"> 465</span>&#160;</div><div class="line"><a name="l00466"></a><span class="lineno"> 466</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> input_gate = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a>(num_cells, num_batches), 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00467"></a><span class="lineno"> 467</span>&#160; <a class="code" 
href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> forget_gate = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a>(num_cells, num_batches), 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00468"></a><span class="lineno"> 468</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> output_gate_tmp = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a>(num_cells, num_batches), 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00469"></a><span class="lineno"> 469</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> cell_state_tmp = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(<a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a>(num_cells, num_batches), 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00470"></a><span class="lineno"> 470</span>&#160;</div><div class="line"><a name="l00471"></a><span class="lineno"> 471</span>&#160; std::vector&lt;const ITensorInfo *&gt; inputs_vector;</div><div class="line"><a name="l00472"></a><span class="lineno"> 472</span>&#160; inputs_vector.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>);</div><div class="line"><a name="l00473"></a><span class="lineno"> 473</span>&#160; inputs_vector.emplace_back(output_state_in);</div><div class="line"><a name="l00474"></a><span class="lineno"> 474</span>&#160; <span class="keyword">const</span> <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> concat_shape = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a6100aeb494088632647c3e0d639c99ab">arm_compute::misc::shape_calculator::calculate_concatenate_shape</a>(inputs_vector, 0);</div><div class="line"><a name="l00475"></a><span class="lineno"> 475</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> forget_gate_concat = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(concat_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00476"></a><span class="lineno"> 476</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a6e77b7a36830679af4f991604feab114">NEConcatenateLayer::validate</a>(inputs_vector, &amp;forget_gate_concat, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>));</div><div class="line"><a name="l00477"></a><span class="lineno"> 477</span>&#160;</div><div class="line"><a name="l00478"></a><span class="lineno"> 478</span>&#160; <span class="comment">// Validate forget gate</span></div><div class="line"><a name="l00479"></a><span 
class="lineno"> 479</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">NEFullyConnectedLayer::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">input_to_forget_weights</a>, (lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>()) ? <span class="keyword">nullptr</span> : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, &amp;forget_gate));</div><div class="line"><a name="l00480"></a><span class="lineno"> 480</span>&#160;</div><div class="line"><a name="l00481"></a><span class="lineno"> 481</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00482"></a><span class="lineno"> 482</span>&#160; {</div><div class="line"><a name="l00483"></a><span class="lineno"> 483</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(cell_state_in, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">cell_to_forget_weights</a>(), &amp;forget_gate, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00484"></a><span class="lineno"> 484</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;forget_gate, &amp;forget_gate, &amp;forget_gate, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00485"></a><span class="lineno"> 485</span>&#160; }</div><div class="line"><a name="l00486"></a><span class="lineno"> 486</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>())</div><div class="line"><a name="l00487"></a><span class="lineno"> 487</span>&#160; {</div><div class="line"><a name="l00488"></a><span class="lineno"> 488</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#a0a84b209b1d887a523005907e7028e2e">NEMeanStdDevNormalizationLayer::validate</a>(&amp;forget_gate));</div><div class="line"><a name="l00489"></a><span class="lineno"> 489</span>&#160; <a class="code" 
href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;forget_gate, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">forget_layer_norm_weights</a>(), &amp;forget_gate, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>,</div><div class="line"><a name="l00490"></a><span class="lineno"> 490</span>&#160; <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00491"></a><span class="lineno"> 491</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;forget_gate, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">forget_gate_bias</a>, &amp;forget_gate, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00492"></a><span class="lineno"> 492</span>&#160; }</div><div class="line"><a name="l00493"></a><span class="lineno"> 493</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;forget_gate, &amp;forget_gate, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>)));</div><div class="line"><a name="l00494"></a><span class="lineno"> 494</span>&#160;</div><div class="line"><a name="l00495"></a><span class="lineno"> 495</span>&#160; <span class="comment">// Validate input gate</span></div><div class="line"><a name="l00496"></a><span class="lineno"> 496</span>&#160; <span class="keywordflow">if</span>(!lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00497"></a><span class="lineno"> 497</span>&#160; {</div><div class="line"><a name="l00498"></a><span class="lineno"> 498</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>(),</div><div class="line"><a name="l00499"></a><span class="lineno"> 499</span>&#160; lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">recurrent_to_input_weights</a>(),</div><div class="line"><a name="l00500"></a><span class="lineno"> 500</span>&#160; lstm_params.<a class="code" 
href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>());</div><div class="line"><a name="l00501"></a><span class="lineno"> 501</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00502"></a><span class="lineno"> 502</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">recurrent_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 2);</div><div class="line"><a name="l00503"></a><span class="lineno"> 503</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00504"></a><span class="lineno"> 504</span>&#160;</div><div class="line"><a name="l00505"></a><span class="lineno"> 505</span>&#160; std::vector&lt;const ITensorInfo *&gt; lstm_weights;</div><div class="line"><a name="l00506"></a><span class="lineno"> 506</span>&#160; lstm_weights.emplace_back(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>());</div><div class="line"><a name="l00507"></a><span class="lineno"> 507</span>&#160; lstm_weights.emplace_back(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">recurrent_to_input_weights</a>());</div><div class="line"><a name="l00508"></a><span class="lineno"> 508</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> lstm_weights_concat_shape = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a6100aeb494088632647c3e0d639c99ab">arm_compute::misc::shape_calculator::calculate_concatenate_shape</a>(lstm_weights, 0);</div><div class="line"><a name="l00509"></a><span class="lineno"> 509</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> lstm_gate_concat = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(lstm_weights_concat_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00510"></a><span class="lineno"> 510</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a6e77b7a36830679af4f991604feab114">NEConcatenateLayer::validate</a>(lstm_weights, &amp;lstm_gate_concat, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>));</div><div class="line"><a 
name="l00511"></a><span class="lineno"> 511</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">NEFullyConnectedLayer::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">input_to_input_weights</a>(), (lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>()) ? <span class="keyword">nullptr</span> : lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>(), &amp;input_gate));</div><div class="line"><a name="l00512"></a><span class="lineno"> 512</span>&#160;</div><div class="line"><a name="l00513"></a><span class="lineno"> 513</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00514"></a><span class="lineno"> 514</span>&#160; {</div><div class="line"><a name="l00515"></a><span class="lineno"> 515</span>&#160; <a class="code" href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">cell_to_input_weights</a>());</div><div class="line"><a name="l00516"></a><span class="lineno"> 516</span>&#160; <a class="code" href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">cell_to_input_weights</a>()-&gt;<a class="code" href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">num_dimensions</a>() &gt; 1);</div><div class="line"><a name="l00517"></a><span class="lineno"> 517</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(cell_state_in, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">cell_to_input_weights</a>(), &amp;input_gate, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00518"></a><span class="lineno"> 518</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;input_gate, &amp;input_gate, &amp;input_gate, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00519"></a><span class="lineno"> 
519</span>&#160; }</div><div class="line"><a name="l00520"></a><span class="lineno"> 520</span>&#160;</div><div class="line"><a name="l00521"></a><span class="lineno"> 521</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>())</div><div class="line"><a name="l00522"></a><span class="lineno"> 522</span>&#160; {</div><div class="line"><a name="l00523"></a><span class="lineno"> 523</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#a0a84b209b1d887a523005907e7028e2e">NEMeanStdDevNormalizationLayer::validate</a>(&amp;input_gate));</div><div class="line"><a name="l00524"></a><span class="lineno"> 524</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;input_gate, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">input_layer_norm_weights</a>(), &amp;input_gate, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00525"></a><span class="lineno"> 525</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;input_gate, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">input_gate_bias</a>(), &amp;input_gate, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00526"></a><span class="lineno"> 526</span>&#160; }</div><div class="line"><a name="l00527"></a><span class="lineno"> 527</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;input_gate, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>)));</div><div class="line"><a name="l00528"></a><span class="lineno"> 528</span>&#160; }</div><div class="line"><a name="l00529"></a><span class="lineno"> 529</span>&#160; <span class="keywordflow">else</span></div><div class="line"><a name="l00530"></a><span class="lineno"> 530</span>&#160; {</div><div class="line"><a name="l00531"></a><span class="lineno"> 531</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a 
class="code" href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticSubtractionKernel::validate</a>(&amp;forget_gate, &amp;forget_gate, &amp;forget_gate, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00532"></a><span class="lineno"> 532</span>&#160; }</div><div class="line"><a name="l00533"></a><span class="lineno"> 533</span>&#160;</div><div class="line"><a name="l00534"></a><span class="lineno"> 534</span>&#160; <span class="comment">// Validate cell state</span></div><div class="line"><a name="l00535"></a><span class="lineno"> 535</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">NEFullyConnectedLayer::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">input_to_cell_weights</a>, (lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>()) ? <span class="keyword">nullptr</span> : cell_bias, &amp;cell_state_tmp));</div><div class="line"><a name="l00536"></a><span class="lineno"> 536</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#a3493ba7d1f2057740ff5931fa00a44ac">NEGEMM::validate</a>(output_state_in, &amp;units_out_transposed_info, <span class="keyword">nullptr</span>, &amp;cell_state_tmp, 1.f, 0.f, <a class="code" href="classarm__compute_1_1_g_e_m_m_info.xhtml">GEMMInfo</a>()));</div><div class="line"><a name="l00537"></a><span class="lineno"> 537</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;cell_state_tmp, &amp;cell_state_tmp, &amp;cell_state_tmp, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00538"></a><span class="lineno"> 538</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>())</div><div class="line"><a name="l00539"></a><span class="lineno"> 539</span>&#160; {</div><div class="line"><a name="l00540"></a><span class="lineno"> 540</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#a0a84b209b1d887a523005907e7028e2e">NEMeanStdDevNormalizationLayer::validate</a>(&amp;cell_state_tmp));</div><div class="line"><a name="l00541"></a><span class="lineno"> 541</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" 
href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;cell_state_tmp, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">cell_layer_norm_weights</a>(), &amp;cell_state_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>,</div><div class="line"><a name="l00542"></a><span class="lineno"> 542</span>&#160; <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00543"></a><span class="lineno"> 543</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;cell_state_tmp, cell_bias, &amp;cell_state_tmp, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00544"></a><span class="lineno"> 544</span>&#160; }</div><div class="line"><a name="l00545"></a><span class="lineno"> 545</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;cell_state_tmp, <span class="keyword">nullptr</span>, activation_info));</div><div class="line"><a name="l00546"></a><span class="lineno"> 546</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;cell_state_tmp, &amp;input_gate, &amp;cell_state_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00547"></a><span class="lineno"> 547</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;cell_state_tmp, &amp;forget_gate, &amp;cell_state_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00548"></a><span class="lineno"> 548</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;cell_state_tmp, 
&amp;cell_state_tmp, &amp;cell_state_tmp, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00549"></a><span class="lineno"> 549</span>&#160; <span class="keywordflow">if</span>(cell_threshold != 0.f)</div><div class="line"><a name="l00550"></a><span class="lineno"> 550</span>&#160; {</div><div class="line"><a name="l00551"></a><span class="lineno"> 551</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;cell_state_tmp, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU</a>, -cell_threshold,</div><div class="line"><a name="l00552"></a><span class="lineno"> 552</span>&#160; cell_threshold)));</div><div class="line"><a name="l00553"></a><span class="lineno"> 553</span>&#160; }</div><div class="line"><a name="l00554"></a><span class="lineno"> 554</span>&#160;</div><div class="line"><a name="l00555"></a><span class="lineno"> 555</span>&#160; <span class="comment">// Validate output gate tmp</span></div><div class="line"><a name="l00556"></a><span class="lineno"> 556</span>&#160; std::vector&lt;const ITensorInfo *&gt; in_out_weights;</div><div class="line"><a name="l00557"></a><span class="lineno"> 557</span>&#160; in_out_weights.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>);</div><div class="line"><a name="l00558"></a><span class="lineno"> 558</span>&#160; in_out_weights.emplace_back(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">recurrent_to_output_weights</a>);</div><div class="line"><a name="l00559"></a><span class="lineno"> 559</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_shape.xhtml">TensorShape</a> in_out_weights_concat_shape = <a class="code" href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a6100aeb494088632647c3e0d639c99ab">arm_compute::misc::shape_calculator::calculate_concatenate_shape</a>(in_out_weights, 0);</div><div class="line"><a name="l00560"></a><span class="lineno"> 560</span>&#160; <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a> in_out_gate_concat = <a class="code" href="classarm__compute_1_1_tensor_info.xhtml">TensorInfo</a>(in_out_weights_concat_shape, 1, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>-&gt;data_type());</div><div class="line"><a name="l00561"></a><span class="lineno"> 561</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a6e77b7a36830679af4f991604feab114">NEConcatenateLayer::validate</a>(in_out_weights, &amp;in_out_gate_concat, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>));</div><div class="line"><a name="l00562"></a><span class="lineno"> 
562</span>&#160;</div><div class="line"><a name="l00563"></a><span class="lineno"> 563</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">NEFullyConnectedLayer::validate</a>(<a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">input</a>, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">input_to_output_weights</a>, (lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>()) ? <span class="keyword">nullptr</span> : <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>, &amp;output_gate_tmp));</div><div class="line"><a name="l00564"></a><span class="lineno"> 564</span>&#160;</div><div class="line"><a name="l00565"></a><span class="lineno"> 565</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">has_peephole_opt</a>())</div><div class="line"><a name="l00566"></a><span class="lineno"> 566</span>&#160; {</div><div class="line"><a name="l00567"></a><span class="lineno"> 567</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;cell_state_tmp, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">cell_to_output_weights</a>(), &amp;output_gate_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>,</div><div class="line"><a name="l00568"></a><span class="lineno"> 568</span>&#160; <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00569"></a><span class="lineno"> 569</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;output_gate_tmp, &amp;output_gate_tmp, &amp;output_gate_tmp, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00570"></a><span class="lineno"> 570</span>&#160; }</div><div class="line"><a name="l00571"></a><span class="lineno"> 571</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">use_layer_norm</a>())</div><div class="line"><a name="l00572"></a><span class="lineno"> 572</span>&#160; {</div><div class="line"><a name="l00573"></a><span class="lineno"> 573</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" 
href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#a0a84b209b1d887a523005907e7028e2e">NEMeanStdDevNormalizationLayer::validate</a>(&amp;output_gate_tmp));</div><div class="line"><a name="l00574"></a><span class="lineno"> 574</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;output_gate_tmp, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">output_layer_norm_weights</a>(), &amp;output_gate_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>,</div><div class="line"><a name="l00575"></a><span class="lineno"> 575</span>&#160; <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00576"></a><span class="lineno"> 576</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">NEArithmeticAddition::validate</a>(&amp;output_gate_tmp, <a class="code" href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">output_gate_bias</a>, &amp;output_gate_tmp, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>));</div><div class="line"><a name="l00577"></a><span class="lineno"> 577</span>&#160; }</div><div class="line"><a name="l00578"></a><span class="lineno"> 578</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;output_gate_tmp, <span class="keyword">nullptr</span>, <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::ActivationFunction::LOGISTIC</a>)));</div><div class="line"><a name="l00579"></a><span class="lineno"> 579</span>&#160;</div><div class="line"><a name="l00580"></a><span class="lineno"> 580</span>&#160; <span class="comment">// Validate output state</span></div><div class="line"><a name="l00581"></a><span class="lineno"> 581</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(&amp;cell_state_tmp, &amp;cell_state_tmp, activation_info));</div><div class="line"><a name="l00582"></a><span class="lineno"> 582</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">NEPixelWiseMultiplicationKernel::validate</a>(&amp;cell_state_tmp, &amp;output_gate_tmp, 
&amp;output_gate_tmp, 1, <a class="code" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">ConvertPolicy::SATURATE</a>, <a class="code" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">RoundingPolicy::TO_ZERO</a>));</div><div class="line"><a name="l00583"></a><span class="lineno"> 583</span>&#160; <span class="keywordflow">if</span>(lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#a127009377712009a84cd0c48aa7e1edd">has_projection</a>())</div><div class="line"><a name="l00584"></a><span class="lineno"> 584</span>&#160; {</div><div class="line"><a name="l00585"></a><span class="lineno"> 585</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">NEFullyConnectedLayer::validate</a>(&amp;output_gate_tmp, lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#ab1b3d5364f11bca8cacef026c8038dba">projection_weights</a>(), lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#ad676992a90d193409fa6a28a001af6c8">projection_bias</a>(), output_state_out));</div><div class="line"><a name="l00586"></a><span class="lineno"> 586</span>&#160; <span class="keywordflow">if</span>(projection_threshold != 0.f)</div><div class="line"><a name="l00587"></a><span class="lineno"> 587</span>&#160; {</div><div class="line"><a name="l00588"></a><span class="lineno"> 588</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">NEActivationLayerKernel::validate</a>(output_state_out, output_state_out,</div><div class="line"><a name="l00589"></a><span class="lineno"> 589</span>&#160; <a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml">ActivationLayerInfo</a>(<a class="code" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU</a>, -projection_threshold, projection_threshold)));</div><div class="line"><a name="l00590"></a><span class="lineno"> 590</span>&#160; }</div><div class="line"><a name="l00591"></a><span class="lineno"> 591</span>&#160; }</div><div class="line"><a name="l00592"></a><span class="lineno"> 592</span>&#160;</div><div class="line"><a name="l00593"></a><span class="lineno"> 593</span>&#160; <span class="comment">// Validate copy kernel</span></div><div class="line"><a name="l00594"></a><span class="lineno"> 594</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_copy_kernel.xhtml#ace5d816c012fffa933058b4e6c63253d">NECopyKernel::validate</a>(&amp;cell_state_tmp, cell_state_out));</div><div class="line"><a name="l00595"></a><span class="lineno"> 595</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_copy_kernel.xhtml#ace5d816c012fffa933058b4e6c63253d">NECopyKernel::validate</a>(output_state_out, output));</div><div class="line"><a name="l00596"></a><span class="lineno"> 596</span>&#160;</div><div class="line"><a 
name="l00597"></a><span class="lineno"> 597</span>&#160; <span class="comment">// Validate scratch concatenation</span></div><div class="line"><a name="l00598"></a><span class="lineno"> 598</span>&#160; std::vector&lt;ITensorInfo *&gt; inputs_vector_info_raw;</div><div class="line"><a name="l00599"></a><span class="lineno"> 599</span>&#160; <span class="keywordflow">if</span>(!lstm_params.<a class="code" href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">has_cifg_opt</a>())</div><div class="line"><a name="l00600"></a><span class="lineno"> 600</span>&#160; {</div><div class="line"><a name="l00601"></a><span class="lineno"> 601</span>&#160; inputs_vector_info_raw.push_back(&amp;input_gate);</div><div class="line"><a name="l00602"></a><span class="lineno"> 602</span>&#160; }</div><div class="line"><a name="l00603"></a><span class="lineno"> 603</span>&#160; inputs_vector_info_raw.push_back(&amp;cell_state_tmp);</div><div class="line"><a name="l00604"></a><span class="lineno"> 604</span>&#160; inputs_vector_info_raw.push_back(&amp;forget_gate);</div><div class="line"><a name="l00605"></a><span class="lineno"> 605</span>&#160; inputs_vector_info_raw.push_back(&amp;output_gate_tmp);</div><div class="line"><a name="l00606"></a><span class="lineno"> 606</span>&#160;</div><div class="line"><a name="l00607"></a><span class="lineno"> 607</span>&#160; <a class="code" href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a>(<a class="code" href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a6e77b7a36830679af4f991604feab114">NEConcatenateLayer::validate</a>(inputs_vector_info_raw, scratch_buffer, <a class="code" href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">Window::DimX</a>));</div><div class="line"><a name="l00608"></a><span class="lineno"> 608</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classarm__compute_1_1_status.xhtml">Status</a>{};</div><div class="line"><a name="l00609"></a><span class="lineno"> 609</span>&#160;}</div><div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a1f4e725b8e1ea36b30e09dc08ae6961d"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">arm_compute::ITensorInfo::num_dimensions</a></div><div class="ttdeci">virtual size_t num_dimensions() const =0</div><div class="ttdoc">The number of dimensions of the tensor (rank)</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_ab1b3d5364f11bca8cacef026c8038dba"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#ab1b3d5364f11bca8cacef026c8038dba">arm_compute::LSTMParams::projection_weights</a></div><div class="ttdeci">const T * projection_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00150">LSTMParams.h:150</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_afa54d4a35e697cb14a38359616709681"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#afa54d4a35e697cb14a38359616709681">arm_compute::LSTMParams::input_to_input_weights</a></div><div class="ttdeci">const T * input_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00120">LSTMParams.h:120</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a507bd7e4d98cb3e45d3e820d8bac422a"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a507bd7e4d98cb3e45d3e820d8bac422a">arm_compute::test::validation::output_gate_bias</a></div><div class="ttdeci">auto output_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00484">LSTMLayerQuantized.cpp:484</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_shape_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_shape.xhtml">arm_compute::TensorShape</a></div><div class="ttdoc">Shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_shape_8h_source.xhtml#l00039">TensorShape.h:39</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1misc_1_1shape__calculator_xhtml_a6100aeb494088632647c3e0d639c99ab"><div class="ttname"><a href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a6100aeb494088632647c3e0d639c99ab">arm_compute::misc::shape_calculator::calculate_concatenate_shape</a></div><div class="ttdeci">TensorShape calculate_concatenate_shape(const std::vector&lt; T * &gt; &amp;input, size_t axis)</div><div class="ttdoc">Calculate the concatenate output shape of the concatenate operation along a single axis.</div><div class="ttdef"><b>Definition:</b> <a href="_shape_calculator_8h_source.xhtml#l01315">ShapeCalculator.h:1315</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a2d615f651270885a1b996046e9902a3c"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a2d615f651270885a1b996046e9902a3c">arm_compute::LSTMParams::use_layer_norm</a></div><div class="ttdeci">bool use_layer_norm() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00195">LSTMParams.h:195</a></div></div>
<div class="ttc" id="classarm__compute_1_1_i_tensor_info_xhtml_a178f0d3d87f959e00a743328d95359d2"><div class="ttname"><a href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">arm_compute::ITensorInfo::dimension</a></div><div class="ttdeci">virtual size_t dimension(size_t index) const =0</div><div class="ttdoc">Return the size of the requested dimension.</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a23be92a19e0d7c174ed444e709518afd"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23be92a19e0d7c174ed444e709518afd">arm_compute::LSTMParams::has_peephole_opt</a></div><div class="ttdeci">bool has_peephole_opt() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00180">LSTMParams.h:180</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_a8f3ff7da485ff7e75dab07baadf5b4bd"><div class="ttname"><a href="_validate_8h.xhtml#a8f3ff7da485ff7e75dab07baadf5b4bd">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00545">Validate.h:545</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a55daaf57fb833fc416d779c28f7a3c85"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a55daaf57fb833fc416d779c28f7a3c85">arm_compute::test::validation::forget_gate_bias</a></div><div class="ttdeci">auto forget_gate_bias</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00482">LSTMLayerQuantized.cpp:482</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_aafb05bcc27f0879701152cd664c632ce"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#aafb05bcc27f0879701152cd664c632ce">arm_compute::LSTMParams::cell_to_input_weights</a></div><div class="ttdeci">const T * cell_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00130">LSTMParams.h:130</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a8a1e1c105f0bdaf37db408c7cfcb77a4"><div class="ttname"><a href="_error_8h.xhtml#a8a1e1c105f0bdaf37db408c7cfcb77a4">ARM_COMPUTE_RETURN_ON_ERROR</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ON_ERROR(status)</div><div class="ttdoc">Checks if a status contains an error and returns it.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00204">Error.h:204</a></div></div>
<div class="ttc" id="_validate_8h_xhtml_ae7eed178dac535c6e727061b1f5bc6eb"><div class="ttname"><a href="_validate_8h.xhtml#ae7eed178dac535c6e727061b1f5bc6eb">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(t, c,...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00792">Validate.h:792</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">arm_compute::Format::F32</a></div><div class="ttdoc">1 channel, 1 F32 per channel</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_aae040c52316d86a4df2c7cdf179049bf"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#aae040c52316d86a4df2c7cdf179049bf">arm_compute::LSTMParams::has_cifg_opt</a></div><div class="ttdeci">bool has_cifg_opt() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00190">LSTMParams.h:190</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a934af5defc72f38841ce8955e2151473"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a934af5defc72f38841ce8955e2151473">arm_compute::LSTMParams::cell_to_output_weights</a></div><div class="ttdeci">const T * cell_to_output_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00145">LSTMParams.h:145</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac62dfdcc14798598d953342789c9927e"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac62dfdcc14798598d953342789c9927e">arm_compute::test::validation::recurrent_to_forget_weights</a></div><div class="ttdeci">auto recurrent_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00478">LSTMLayerQuantized.cpp:478</a></div></div>
<div class="ttc" id="classarm__compute_1_1_status_xhtml"><div class="ttname"><a href="classarm__compute_1_1_status.xhtml">arm_compute::Status</a></div><div class="ttdoc">Status class.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00052">Error.h:52</a></div></div>
<div class="ttc" id="_error_8h_xhtml_a206d6e247e0957ac3dee45d27756fc25"><div class="ttname"><a href="_error_8h.xhtml#a206d6e247e0957ac3dee45d27756fc25">ARM_COMPUTE_RETURN_ERROR_ON</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON(cond)</div><div class="ttdoc">If the condition is true, an error is returned.</div><div class="ttdef"><b>Definition:</b> <a href="_error_8h_source.xhtml#l00296">Error.h:296</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml">arm_compute::ActivationLayerInfo</a></div><div class="ttdoc">Activation Layer Information class.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l01615">Types.h:1615</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94"><div class="ttname"><a href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">arm_compute::Format::F16</a></div><div class="ttdoc">1 channel, 1 F16 per channel</div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac547a66fe26967afb94760061ee0d0d1"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac547a66fe26967afb94760061ee0d0d1">arm_compute::test::validation::input_to_cell_weights</a></div><div class="ttdeci">auto input_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00475">LSTMLayerQuantized.cpp:475</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1misc_1_1shape__calculator_xhtml_a69cb11b5b37f94a6bea9eaad9d13cccf"><div class="ttname"><a href="namespacearm__compute_1_1misc_1_1shape__calculator.xhtml#a69cb11b5b37f94a6bea9eaad9d13cccf">arm_compute::misc::shape_calculator::compute_transposed_shape</a></div><div class="ttdeci">TensorShape compute_transposed_shape(const ITensorInfo &amp;input)</div><div class="ttdoc">Calculate the transposed shape of a tensor.</div><div class="ttdef"><b>Definition:</b> <a href="_shape_calculator_8h_source.xhtml#l00426">ShapeCalculator.h:426</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a8fcf2ddd9a1d58b1b280f5c0aed71845"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a8fcf2ddd9a1d58b1b280f5c0aed71845">arm_compute::test::validation::input</a></div><div class="ttdeci">auto input</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">LSTMLayerQuantized.cpp:487</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_aab02df8a9ee45153f2fd76e934407fbd"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#aab02df8a9ee45153f2fd76e934407fbd">arm_compute::test::validation::recurrent_to_output_weights</a></div><div class="ttdeci">auto recurrent_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00480">LSTMLayerQuantized.cpp:480</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_addition_xhtml_a5e951bf3e414ddcd908245bcf284b08f"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_addition.xhtml#a5e951bf3e414ddcd908245bcf284b08f">arm_compute::NEArithmeticAddition::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEArithmeticAddition.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_addition_8cpp_source.xhtml#l00040">NEArithmeticAddition.cpp:40</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a35e4b6311397e1f9532fb37560aa9996"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a35e4b6311397e1f9532fb37560aa9996">arm_compute::LSTMParams::recurrent_to_input_weights</a></div><div class="ttdeci">const T * recurrent_to_input_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00125">LSTMParams.h:125</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ace4dd633420fa8d8aa71f60ff730f01f"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ace4dd633420fa8d8aa71f60ff730f01f">arm_compute::test::validation::input_to_output_weights</a></div><div class="ttdeci">auto input_to_output_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00476">LSTMLayerQuantized.cpp:476</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_ad676992a90d193409fa6a28a001af6c8"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#ad676992a90d193409fa6a28a001af6c8">arm_compute::LSTMParams::projection_bias</a></div><div class="ttdeci">const T * projection_bias() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00155">LSTMParams.h:155</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_g_e_m_m_xhtml_a3493ba7d1f2057740ff5931fa00a44ac"><div class="ttname"><a href="classarm__compute_1_1_n_e_g_e_m_m.xhtml#a3493ba7d1f2057740ff5931fa00a44ac">arm_compute::NEGEMM::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *a, const ITensorInfo *b, const ITensorInfo *c, const ITensorInfo *output, float alpha, float beta, const GEMMInfo &amp;gemm_info=GEMMInfo())</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEGEMM.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_g_e_m_m_8cpp_source.xhtml#l00172">NEGEMM.cpp:172</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel_xhtml_a705182fc799dce8ee017368eea0ca539"><div class="ttname"><a href="classarm__compute_1_1_n_e_pixel_wise_multiplication_kernel.xhtml#a705182fc799dce8ee017368eea0ca539">arm_compute::NEPixelWiseMultiplicationKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, float scale, ConvertPolicy overflow_policy, RoundingPolicy rounding_policy)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEPixelWiseMultiplicatio...</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_pixel_wise_multiplication_kernel_8cpp_source.xhtml#l00709">NEPixelWiseMultiplicationKernel.cpp:709</a></div></div>
<div class="ttc" id="classarm__compute_1_1_window_xhtml_aa96e81276ee4f87ab386cd05a5539a7d"><div class="ttname"><a href="classarm__compute_1_1_window.xhtml#aa96e81276ee4f87ab386cd05a5539a7d">arm_compute::Window::DimX</a></div><div class="ttdeci">static constexpr size_t DimX</div><div class="ttdoc">Alias for dimension 0 also known as X dimension.</div><div class="ttdef"><b>Definition:</b> <a href="_window_8h_source.xhtml#l00043">Window.h:43</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a477486a9c5189cff8af1cdd9d7e8d573"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a477486a9c5189cff8af1cdd9d7e8d573">arm_compute::LSTMParams::cell_to_forget_weights</a></div><div class="ttdeci">const T * cell_to_forget_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00140">LSTMParams.h:140</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_abd478bedc7c65b72ead0d05cbd16d437"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#abd478bedc7c65b72ead0d05cbd16d437">arm_compute::LSTMParams::cell_layer_norm_weights</a></div><div class="ttdeci">const T * cell_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00170">LSTMParams.h:170</a></div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_ac2236dfe2a3fc5fa4e125348829cbeb2"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#ac2236dfe2a3fc5fa4e125348829cbeb2">arm_compute::test::validation::recurrent_to_cell_weights</a></div><div class="ttdeci">auto recurrent_to_cell_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00479">LSTMLayerQuantized.cpp:479</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a23361ca1393c0dc196fbf4e627e07119"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a23361ca1393c0dc196fbf4e627e07119">arm_compute::LSTMParams::input_layer_norm_weights</a></div><div class="ttdeci">const T * input_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00160">LSTMParams.h:160</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml_a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">arm_compute::ActivationLayerInfo::ActivationFunction::LOGISTIC</a></div><div class="ttdoc">Logistic ( )</div></div>
<div class="ttc" id="_validate_8h_xhtml_aff911654521523937ff24372a870b89f"><div class="ttname"><a href="_validate_8h.xhtml#aff911654521523937ff24372a870b89f">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a></div><div class="ttdeci">#define ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(...)</div><div class="ttdef"><b>Definition:</b> <a href="_validate_8h_source.xhtml#l00163">Validate.h:163</a></div></div>
<div class="ttc" id="classarm__compute_1_1_activation_layer_info_xhtml_a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a"><div class="ttname"><a href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">arm_compute::ActivationLayerInfo::ActivationFunction::LU_BOUNDED_RELU</a></div><div class="ttdoc">Lower and Upper Bounded Rectifier ( )</div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a127009377712009a84cd0c48aa7e1edd"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a127009377712009a84cd0c48aa7e1edd">arm_compute::LSTMParams::has_projection</a></div><div class="ttdeci">bool has_projection() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00185">LSTMParams.h:185</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_activation_layer_kernel_xhtml_aa37e2d0b4cd4f835bfa2a2df4a0bdd2c"><div class="ttname"><a href="classarm__compute_1_1_n_e_activation_layer_kernel.xhtml#aa37e2d0b4cd4f835bfa2a2df4a0bdd2c">arm_compute::NEActivationLayerKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output, const ActivationLayerInfo &amp;act_info)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEActivationLayerKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_activation_layer_kernel_8cpp_source.xhtml#l00778">NEActivationLayerKernel.cpp:778</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_copy_kernel_xhtml_ace5d816c012fffa933058b4e6c63253d"><div class="ttname"><a href="classarm__compute_1_1_n_e_copy_kernel.xhtml#ace5d816c012fffa933058b4e6c63253d">arm_compute::NECopyKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output, const PaddingList &amp;padding=PaddingList())</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NECopyKernel.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_copy_kernel_8cpp_source.xhtml#l00102">NECopyKernel.cpp:102</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_fully_connected_layer_xhtml_a8da875051f2d75a497fb2de9cdd2e6cb"><div class="ttname"><a href="classarm__compute_1_1_n_e_fully_connected_layer.xhtml#a8da875051f2d75a497fb2de9cdd2e6cb">arm_compute::NEFullyConnectedLayer::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *weights, const ITensorInfo *biases, const ITensorInfo *output, FullyConnectedLayerInfo fc_info=FullyConnectedLayerInfo())</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEFullyConnectedLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00275">NEFullyConnectedLayer.cpp:275</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_concatenate_layer_xhtml_a6e77b7a36830679af4f991604feab114"><div class="ttname"><a href="classarm__compute_1_1_n_e_concatenate_layer.xhtml#a6e77b7a36830679af4f991604feab114">arm_compute::NEConcatenateLayer::validate</a></div><div class="ttdeci">static Status validate(const std::vector&lt; ITensorInfo * &gt; &amp;inputs_vector, const ITensorInfo *output, size_t axis)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEConcatenateLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_concatenate_layer_8cpp_source.xhtml#l00059">NEConcatenateLayer.cpp:59</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a208874b46a667263fa309537c5355318"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a208874b46a667263fa309537c5355318">arm_compute::LSTMParams::output_layer_norm_weights</a></div><div class="ttdeci">const T * output_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00175">LSTMParams.h:175</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a29a7a1636c6a8fd9e423d55c36e991a0"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a29a7a1636c6a8fd9e423d55c36e991a0">arm_compute::LSTMParams::input_gate_bias</a></div><div class="ttdeci">const T * input_gate_bias() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00135">LSTMParams.h:135</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer_xhtml_a0a84b209b1d887a523005907e7028e2e"><div class="ttname"><a href="classarm__compute_1_1_n_e_mean_std_dev_normalization_layer.xhtml#a0a84b209b1d887a523005907e7028e2e">arm_compute::NEMeanStdDevNormalizationLayer::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input, const ITensorInfo *output=nullptr, float epsilon=1e-8f)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEMeanStdDevNormalizatio...</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_mean_std_dev_normalization_layer_8cpp_source.xhtml#l00038">NEMeanStdDevNormalizationLayer.cpp:38</a></div></div>
<div class="ttc" id="classarm__compute_1_1_tensor_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_tensor_info.xhtml">arm_compute::TensorInfo</a></div><div class="ttdoc">Store the tensor's metadata.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_info_8h_source.xhtml#l00045">TensorInfo.h:45</a></div></div>
<div class="ttc" id="classarm__compute_1_1_g_e_m_m_info_xhtml"><div class="ttname"><a href="classarm__compute_1_1_g_e_m_m_info.xhtml">arm_compute::GEMMInfo</a></div><div class="ttdoc">GEMM information class.</div><div class="ttdef"><b>Definition:</b> <a href="arm__compute_2core_2_types_8h_source.xhtml#l01983">Types.h:1983</a></div></div>
<div class="ttc" id="namespacearm__compute_xhtml_a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86"><div class="ttname"><a href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">arm_compute::ConvertPolicy::SATURATE</a></div><div class="ttdoc">Saturate.</div></div>
<div class="ttc" id="namespacearm__compute_xhtml_add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06"><div class="ttname"><a href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">arm_compute::RoundingPolicy::TO_ZERO</a></div><div class="ttdoc">Truncates the least significant values that are lost in operations.</div></div>
<div class="ttc" id="namespacearm__compute_1_1test_1_1validation_xhtml_a3b793c410cba57a1395184692a018356"><div class="ttname"><a href="namespacearm__compute_1_1test_1_1validation.xhtml#a3b793c410cba57a1395184692a018356">arm_compute::test::validation::input_to_forget_weights</a></div><div class="ttdeci">auto input_to_forget_weights</div><div class="ttdef"><b>Definition:</b> <a href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00474">LSTMLayerQuantized.cpp:474</a></div></div>
<div class="ttc" id="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel_xhtml_a5e951bf3e414ddcd908245bcf284b08f"><div class="ttname"><a href="classarm__compute_1_1_n_e_arithmetic_subtraction_kernel.xhtml#a5e951bf3e414ddcd908245bcf284b08f">arm_compute::NEArithmeticSubtractionKernel::validate</a></div><div class="ttdeci">static Status validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output, ConvertPolicy policy)</div><div class="ttdoc">Static function to check if given info will lead to a valid configuration of NEArithmeticSubtractionK...</div><div class="ttdef"><b>Definition:</b> <a href="_n_e_arithmetic_subtraction_kernel_8cpp_source.xhtml#l00542">NEArithmeticSubtractionKernel.cpp:542</a></div></div>
<div class="ttc" id="classarm__compute_1_1_l_s_t_m_params_xhtml_a213908108c07594027bc2b829fe7ee4a"><div class="ttname"><a href="classarm__compute_1_1_l_s_t_m_params.xhtml#a213908108c07594027bc2b829fe7ee4a">arm_compute::LSTMParams::forget_layer_norm_weights</a></div><div class="ttdeci">const T * forget_layer_norm_weights() const</div><div class="ttdef"><b>Definition:</b> <a href="_l_s_t_m_params_8h_source.xhtml#l00165">LSTMParams.h:165</a></div></div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="_error_8h_source.xhtml#l00296">ARM_COMPUTE_RETURN_ERROR_ON</a>, <a class="el" href="_validate_8h_source.xhtml#l00792">ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN</a>, <a class="el" href="_validate_8h_source.xhtml#l00545">ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES</a>, <a class="el" href="_validate_8h_source.xhtml#l00163">ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR</a>, <a class="el" href="_error_8h_source.xhtml#l00204">ARM_COMPUTE_RETURN_ON_ERROR</a>, <a class="el" href="_shape_calculator_8h_source.xhtml#l01315">arm_compute::misc::shape_calculator::calculate_concatenate_shape()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00170">LSTMParams&lt; T &gt;::cell_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00140">LSTMParams&lt; T &gt;::cell_to_forget_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00130">LSTMParams&lt; T &gt;::cell_to_input_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00145">LSTMParams&lt; T &gt;::cell_to_output_weights()</a>, <a class="el" href="_shape_calculator_8h_source.xhtml#l00426">arm_compute::misc::shape_calculator::compute_transposed_shape()</a>, <a class="el" href="classarm__compute_1_1_i_tensor_info.xhtml#a178f0d3d87f959e00a743328d95359d2">ITensorInfo::dimension()</a>, <a class="el" href="_window_8h_source.xhtml#l00043">Window::DimX</a>, <a class="el" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a56d8353718e6fdc78b8d69078a2cdb94">arm_compute::F16</a>, <a class="el" href="namespacearm__compute.xhtml#ab4e88c89b3b7ea1735996cc4def22d58a44ad4ef5a76e6aa6fb3e3fa079a54fda">arm_compute::F32</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00482">arm_compute::test::validation::forget_gate_bias</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00165">LSTMParams&lt; T &gt;::forget_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00190">LSTMParams&lt; T &gt;::has_cifg_opt()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00180">LSTMParams&lt; T &gt;::has_peephole_opt()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00185">LSTMParams&lt; T &gt;::has_projection()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00487">arm_compute::test::validation::input</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00135">LSTMParams&lt; T &gt;::input_gate_bias()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00160">LSTMParams&lt; T &gt;::input_layer_norm_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00475">arm_compute::test::validation::input_to_cell_weights</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00474">arm_compute::test::validation::input_to_forget_weights</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00120">LSTMParams&lt; T &gt;::input_to_input_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00476">arm_compute::test::validation::input_to_output_weights</a>, <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaa72ee60fba0509af07cbbd91398d8db9d">ActivationLayerInfo::LOGISTIC</a>, <a class="el" href="classarm__compute_1_1_activation_layer_info.xhtml#a56297e0f7b215eea46c818cb7528d9eaaab1d4411a9e7f5e82002512cddfdc33a">ActivationLayerInfo::LU_BOUNDED_RELU</a>, <a class="el" 
href="classarm__compute_1_1_i_tensor_info.xhtml#a1f4e725b8e1ea36b30e09dc08ae6961d">ITensorInfo::num_dimensions()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00484">arm_compute::test::validation::output_gate_bias</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00175">LSTMParams&lt; T &gt;::output_layer_norm_weights()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00155">LSTMParams&lt; T &gt;::projection_bias()</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00150">LSTMParams&lt; T &gt;::projection_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00479">arm_compute::test::validation::recurrent_to_cell_weights</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00478">arm_compute::test::validation::recurrent_to_forget_weights</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00125">LSTMParams&lt; T &gt;::recurrent_to_input_weights()</a>, <a class="el" href="_c_l_2_l_s_t_m_layer_quantized_8cpp_source.xhtml#l00480">arm_compute::test::validation::recurrent_to_output_weights</a>, <a class="el" href="namespacearm__compute.xhtml#a82b8ac759c804bc1fb4e2d21e178fb6fa4729d95f983955f0d93a30179deb2b86">arm_compute::SATURATE</a>, <a class="el" href="namespacearm__compute.xhtml#add6426cbf2e057a195846d4ba09a50bea5631ad8e27788edfca7e13535d862c06">arm_compute::TO_ZERO</a>, <a class="el" href="_l_s_t_m_params_8h_source.xhtml#l00195">LSTMParams&lt; T &gt;::use_layer_norm()</a>, <a class="el" href="_n_e_arithmetic_addition_8cpp_source.xhtml#l00040">NEArithmeticAddition::validate()</a>, <a class="el" href="_n_e_mean_std_dev_normalization_layer_8cpp_source.xhtml#l00038">NEMeanStdDevNormalizationLayer::validate()</a>, <a class="el" href="_n_e_copy_kernel_8cpp_source.xhtml#l00102">NECopyKernel::validate()</a>, <a class="el" href="_n_e_activation_layer_kernel_8cpp_source.xhtml#l00778">NEActivationLayerKernel::validate()</a>, <a class="el" href="_n_e_concatenate_layer_8cpp_source.xhtml#l00059">NEConcatenateLayer::validate()</a>, <a class="el" href="_n_e_pixel_wise_multiplication_kernel_8cpp_source.xhtml#l00709">NEPixelWiseMultiplicationKernel::validate()</a>, <a class="el" href="_n_e_arithmetic_subtraction_kernel_8cpp_source.xhtml#l00542">NEArithmeticSubtractionKernel::validate()</a>, <a class="el" href="_n_e_g_e_m_m_8cpp_source.xhtml#l00172">NEGEMM::validate()</a>, and <a class="el" href="_n_e_fully_connected_layer_8cpp_source.xhtml#l00275">NEFullyConnectedLayer::validate()</a>.</p>
<p class="reference">Referenced by <a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml#l00056">NELSTMLayer::configure()</a>.</p>
</div>
</div>
<hr/>The documentation for this class was generated from the following files:<ul>
<li>arm_compute/runtime/NEON/functions/<a class="el" href="_n_e_l_s_t_m_layer_8h_source.xhtml">NELSTMLayer.h</a></li>
<li>src/runtime/NEON/functions/<a class="el" href="_n_e_l_s_t_m_layer_8cpp_source.xhtml">NELSTMLayer.cpp</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="namespacearm__compute.xhtml">arm_compute</a></li><li class="navelem"><a class="el" href="classarm__compute_1_1_n_e_l_s_t_m_layer.xhtml">NELSTMLayer</a></li>
<li class="footer">Generated on Thu Mar 5 2020 16:07:16 for Compute Library by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.15 </li>
</ul>
</div>
</body>
</html>