This document describes only the THNN API. For a thorough review of all modules present here, please refer to nn's docs.
Please remember that, because C doesn't support function overloading, functions taking different tensor types have different names. For example, for the Abs module there are actually two updateOutput functions:
void THNN_FloatAbs_updateOutput(...)
void THNN_DoubleAbs_updateOutput(...)
In these docs such functions will be referred to as void THNN_Abs_updateOutput(...), and it is up to the developer to add a type prefix. real is an alias for the scalar type selected by that prefix (float for Float functions, double for Double functions).
Some arguments have additional tags placed in square brackets:

[OUT] - the argument is an output of the function
[OPTIONAL] - the argument is optional and may be NULL
[BUFFER] - a temporary buffer; updateGradInput and accGradParameters should get the same buffers that were used in the updateOutput call
[MODIFIED] - some functions accept an inplace flag; if set to true, this argument might be modified (in addition to the output)

These are all modules implemented in THNN:
void THNN_Abs_updateOutput( THNNState *state, THTensor *input, THTensor *output);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] Abs output
void THNN_Abs_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradInput
- [OUT] gradient w.r.t. input
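
A minimal usage sketch for the two Abs calls above (not part of the original listing). It assumes the TH and THNN headers are available as <TH/TH.h> and <THNN/THNN.h>, and that the CPU implementation ignores the THNNState pointer, so NULL is passed for state; the tensor sizes and values are purely illustrative.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  THFloatTensor *input      = THFloatTensor_newWithSize1d(3);
  THFloatTensor *output     = THFloatTensor_new();          /* resized by the forward call */
  THFloatTensor *gradOutput = THFloatTensor_newWithSize1d(3);
  THFloatTensor *gradInput  = THFloatTensor_new();

  THFloatTensor_set1d(input, 0, -1.5f);
  THFloatTensor_set1d(input, 1,  0.0f);
  THFloatTensor_set1d(input, 2,  2.0f);
  THFloatTensor_fill(gradOutput, 1.0f);

  /* forward: output = |input| */
  THNN_FloatAbs_updateOutput(NULL, input, output);
  /* backward: gradInput = gradOutput * sign(input) */
  THNN_FloatAbs_updateGradInput(NULL, input, gradOutput, gradInput);

  THFloatTensor_free(input);
  THFloatTensor_free(output);
  THFloatTensor_free(gradOutput);
  THFloatTensor_free(gradInput);
  return 0;
}
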
void THNN_AbsCriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- tensor with target values
THTensor *output
- [OUT] a one-element tensor with loss
bool sizeAverage
- if true, the loss will be divided by batch size
void THNN_AbsCriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- tensor with target values
THTensor *gradInput
- [OUT] gradient w.r.t. input
bool sizeAverage
- if true, the gradient will be normalized by batch size
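
A hypothetical sketch of the criterion calling pattern using AbsCriterion, under the same assumptions as the Abs sketch above (NULL state, headers at <TH/TH.h> and <THNN/THNN.h>); the one-element output tensor holds the scalar loss.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  THFloatTensor *input     = THFloatTensor_newWithSize1d(4);
  THFloatTensor *target    = THFloatTensor_newWithSize1d(4);
  THFloatTensor *output    = THFloatTensor_newWithSize1d(1); /* [OUT] scalar loss */
  THFloatTensor *gradInput = THFloatTensor_new();

  THFloatTensor_fill(input, 0.5f);
  THFloatTensor_fill(target, 1.0f);

  /* forward: L1 loss, averaged because sizeAverage is true */
  THNN_FloatAbsCriterion_updateOutput(NULL, input, target, output, 1);
  float loss = THFloatTensor_get1d(output, 0);
  (void)loss;

  /* backward: gradient of the loss w.r.t. input, normalized the same way */
  THNN_FloatAbsCriterion_updateGradInput(NULL, input, target, gradInput, 1);

  THFloatTensor_free(input);
  THFloatTensor_free(target);
  THFloatTensor_free(output);
  THFloatTensor_free(gradInput);
  return 0;
}
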
void THNN_ClassNLLCriterion_updateOutput( THNNState *state, THTensor *input, THIndexTensor *target, THTensor *output, bool sizeAverage, THTensor *weights, THTensor *total_weight);
THNNState *state
- library's state
THTensor *input
- input tensor (1D/2D)
THIndexTensor *target
- tensor containing indexes of target classes
THTensor *output
- [OUT] a one-element tensor with loss
bool sizeAverage
- if true, the loss will be normalized by batch size and class weights
THTensor *weights
- [OPTIONAL] class weights
THTensor *total_weight
- [BUFFER]
void THNN_ClassNLLCriterion_updateGradInput( THNNState *state, THTensor *input, THIndexTensor *target, THTensor *gradInput, bool sizeAverage, THTensor *weights, THTensor *total_weight);
THNNState *state
- library's state
THTensor *input
- input tensor (1D/2D)
THIndexTensor *target
- tensor containing indexes of target classes
THTensor *gradInput
- [OUT] gradient w.r.t. input
bool sizeAverage
- if true, the loss will be normalized by batch size and class weights
THTensor *weights
- [OPTIONAL] class weights
THTensor *total_weight
- [BUFFER]
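
A sketch of ClassNLLCriterion over a 2D batch of log-probabilities (not from the original listing). It assumes NULL may be passed for the [OPTIONAL] class weights, that target is a THLongTensor holding 1-based class indices as in Lua torch, and that gradInput should be pre-sized by the caller; the same total_weight buffer is handed to both calls, as the [BUFFER] tag requires.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  const long batch = 2, classes = 3;
  THFloatTensor *input        = THFloatTensor_newWithSize2d(batch, classes); /* log-probabilities */
  THLongTensor  *target       = THLongTensor_newWithSize1d(batch);           /* THIndexTensor */
  THFloatTensor *output       = THFloatTensor_newWithSize1d(1);
  THFloatTensor *gradInput    = THFloatTensor_new();
  THFloatTensor *total_weight = THFloatTensor_newWithSize1d(1);              /* [BUFFER] */

  THFloatTensor_fill(input, -1.0986f);  /* log(1/3) for every class, just as sample data */
  THLongTensor_set1d(target, 0, 1);     /* class 1 for sample 0 (1-based, assumed) */
  THLongTensor_set1d(target, 1, 3);     /* class 3 for sample 1 */

  /* pre-size and zero gradInput; assumed necessary for this backward kernel */
  THFloatTensor_resizeAs(gradInput, input);
  THFloatTensor_zero(gradInput);

  /* forward and backward must share the same total_weight buffer */
  THNN_FloatClassNLLCriterion_updateOutput(NULL, input, target, output,
                                           1 /* sizeAverage */, NULL, total_weight);
  THNN_FloatClassNLLCriterion_updateGradInput(NULL, input, target, gradInput,
                                              1 /* sizeAverage */, NULL, total_weight);

  THFloatTensor_free(input);
  THLongTensor_free(target);
  THFloatTensor_free(output);
  THFloatTensor_free(gradInput);
  THFloatTensor_free(total_weight);
  return 0;
}
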
void THNN_DistKLDivCriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- target tensor
THTensor *output
- [OUT] a one-element tensor containing the loss
bool sizeAverage
- if true, the loss will be normalized by total number of elements
void THNN_DistKLDivCriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- target tensor
THTensor *gradInput
- [OUT] gradient w.r.t. input
bool sizeAverage
- if true, the loss will be normalized by total number of elements
void THNN_ELU_updateOutput( THNNState *state, THTensor *input, THTensor *output, real alpha);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] ELU output
real alpha
- the ELU alpha parameter (as in the paper)
void THNN_ELU_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output, real alpha);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradInput
- [OUT] gradient w.r.t. input
THTensor *output
- output from a forward pass
real alpha
- the ELU alpha parameter (as in the paper)
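
A minimal ELU sketch (illustrative, same NULL-state and header assumptions as above) showing that the backward call receives the tensor produced by the forward pass, as documented.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  const float alpha = 1.0f;                /* illustrative value */
  THFloatTensor *input      = THFloatTensor_newWithSize1d(3);
  THFloatTensor *output     = THFloatTensor_new();
  THFloatTensor *gradOutput = THFloatTensor_newWithSize1d(3);
  THFloatTensor *gradInput  = THFloatTensor_new();

  THFloatTensor_set1d(input, 0, -2.0f);
  THFloatTensor_set1d(input, 1,  0.0f);
  THFloatTensor_set1d(input, 2,  3.0f);
  THFloatTensor_fill(gradOutput, 1.0f);

  THNN_FloatELU_updateOutput(NULL, input, output, alpha);
  /* the saved forward output is reused to compute the gradient */
  THNN_FloatELU_updateGradInput(NULL, input, gradOutput, gradInput, output, alpha);

  THFloatTensor_free(input);
  THFloatTensor_free(output);
  THFloatTensor_free(gradOutput);
  THFloatTensor_free(gradInput);
  return 0;
}
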
void THNN_HardShrink_updateOutput( THNNState *state, THTensor *input, THTensor *output, real lambda);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] output tensor
real lambda
- HardShrink parameter
void THNN_HardShrink_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, real lambda);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. input
real lambda
- HardShrink parameter
void THNN_HardTanh_updateOutput( THNNState *state, THTensor *input, THTensor *output, real min_val, real max_val);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] output tensor
real min_val
- lower threshold
real max_val
- upper threshold
void THNN_HardTanh_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, real min_val, real max_val);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. the input
real min_val
- lower threshold
real max_val
- upper threshold
void THNN_L1Cost_updateOutput( THNNState *state, THTensor *input, THTensor *output);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] output tensor
void THNN_L1Cost_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. the input
void THNN_LeakyReLU_updateOutput( THNNState *state, THTensor *input, THTensor *output, real negval, bool inplace);
THNNState *state
- library's state
THTensor *input
- [MODIFIED] input tensor
THTensor *output
- [OUT] output tensor
real negval
- negative part slope
bool inplace
- if true, modifies the input tensor and sets the output tensor on it (no additional memory is allocated)
void THNN_LeakyReLU_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, real negval, bool inplace);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- [MODIFIED] gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. the input
real negval
- negative part slope
bool inplace
- if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
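
A sketch of the inplace flag on LeakyReLU (same NULL-state and header assumptions as the sketches above): with inplace set to true, the [MODIFIED] input tensor is overwritten by the forward pass and output aliases it, and likewise gradOutput is overwritten by the backward pass with gradInput set onto it, so the original values are no longer available afterwards.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  const float negval = 0.01f;              /* illustrative slope */
  THFloatTensor *input      = THFloatTensor_newWithSize1d(4);
  THFloatTensor *output     = THFloatTensor_new();
  THFloatTensor *gradOutput = THFloatTensor_newWithSize1d(4);
  THFloatTensor *gradInput  = THFloatTensor_new();

  THFloatTensor_fill(input, -1.0f);
  THFloatTensor_fill(gradOutput, 1.0f);

  /* forward, in place: input is [MODIFIED], output is set onto it */
  THNN_FloatLeakyReLU_updateOutput(NULL, input, output, negval, 1);
  /* backward, in place: gradOutput is [MODIFIED], gradInput is set onto it */
  THNN_FloatLeakyReLU_updateGradInput(NULL, input, gradOutput, gradInput, negval, 1);

  THFloatTensor_free(input);
  THFloatTensor_free(output);
  THFloatTensor_free(gradOutput);
  THFloatTensor_free(gradInput);
  return 0;
}
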
void THNN_LogSigmoid_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *buffer);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- output tensor
THTensor *buffer
- [BUFFER]
void THNN_LogSigmoid_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *buffer);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. input
THTensor *buffer
- [BUFFER]
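
A sketch of the [BUFFER] convention using LogSigmoid (same NULL-state and header assumptions as above): the buffer filled by updateOutput must be handed back, unchanged, to updateGradInput.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  THFloatTensor *input      = THFloatTensor_newWithSize1d(3);
  THFloatTensor *output     = THFloatTensor_new();
  THFloatTensor *buffer     = THFloatTensor_new();   /* [BUFFER], filled by the forward pass */
  THFloatTensor *gradOutput = THFloatTensor_newWithSize1d(3);
  THFloatTensor *gradInput  = THFloatTensor_new();

  THFloatTensor_fill(input, 0.5f);
  THFloatTensor_fill(gradOutput, 1.0f);

  THNN_FloatLogSigmoid_updateOutput(NULL, input, output, buffer);
  /* the backward pass reuses the intermediate values stored in buffer */
  THNN_FloatLogSigmoid_updateGradInput(NULL, input, gradOutput, gradInput, buffer);

  THFloatTensor_free(input);
  THFloatTensor_free(output);
  THFloatTensor_free(buffer);
  THFloatTensor_free(gradOutput);
  THFloatTensor_free(gradInput);
  return 0;
}
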
void THNN_LogSoftMax_updateOutput( THNNState *state, THTensor *input, THTensor *output);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *output
- [OUT] output tensor
void THNN_LogSoftMax_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. module's output
THTensor *gradInput
- [OUT] gradient w.r.t. input
THTensor *output
- module's output
void THNN_LookupTable_accGradParameters( THNNState *state, THIndexTensor *input, THTensor *gradOutput, THTensor *gradWeight, THIntegerTensor *count, THTensor *sorted, THTensor *indices, bool scaleGradByFreq, int paddingValue, real scale);
void THNN_MSECriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage);
void THNN_MSECriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage);
void THNN_MarginCriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage, real margin);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- target tensor (should contain only 1s and -1s)
THTensor *output
- [OUT] a one-element tensor containing the loss
bool sizeAverage
- if true, the loss is normalized by total number of elements
real margin
- a margin that is required for the loss to be 0
void THNN_MarginCriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage, real margin);
THNNState *state
- library's state
THTensor *input
- input tensor
THTensor *target
- target tensor (should contain only 1s and -1s)
THTensor *gradInput
- [OUT] gradient w.r.t. module's input
bool sizeAverage
- if true, the gradient is normalized by total number of elements
real margin
- a margin that is required for the loss to be 0
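
A sketch of MarginCriterion with the required +1/-1 targets (same NULL-state and header assumptions as above); the margin value is just an illustrative choice.

#include <TH/TH.h>
#include <THNN/THNN.h>

int main(void)
{
  const float margin = 1.0f;
  THFloatTensor *input     = THFloatTensor_newWithSize1d(2);
  THFloatTensor *target    = THFloatTensor_newWithSize1d(2);
  THFloatTensor *output    = THFloatTensor_newWithSize1d(1); /* [OUT] scalar loss */
  THFloatTensor *gradInput = THFloatTensor_new();

  THFloatTensor_set1d(input, 0,  0.3f);
  THFloatTensor_set1d(input, 1, -0.7f);
  THFloatTensor_set1d(target, 0,  1.0f);   /* targets must be +1 or -1 */
  THFloatTensor_set1d(target, 1, -1.0f);

  THNN_FloatMarginCriterion_updateOutput(NULL, input, target, output, 1, margin);
  THNN_FloatMarginCriterion_updateGradInput(NULL, input, target, gradInput, 1, margin);

  THFloatTensor_free(input);
  THFloatTensor_free(target);
  THFloatTensor_free(output);
  THFloatTensor_free(gradInput);
  return 0;
}
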
void THNN_MultiLabelMarginCriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage);
void THNN_MultiLabelMarginCriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage);
void THNN_MultiMarginCriterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage, int p, THTensor* weights);
void THNN_MultiMarginCriterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage, int p, THTensor *weights);
void THNN_PReLU_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THIndex_t nOutputPlane);
void THNN_PReLU_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THIndex_t nOutputPlane);
void THNN_PReLU_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *gradWeight, THTensor *gradWeightBuf, THTensor *gradWeightBuf2, THIndex_t nOutputPlane, real scale);
void THNN_RReLU_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *noise, real lower, real upper, bool train, bool inplace, THGenerator *generator);
void THNN_RReLU_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *noise, real lower, real upper, bool train, bool inplace);
void THNN_Sigmoid_updateOutput( THNNState *state, THTensor *input, THTensor *output);
void THNN_Sigmoid_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output);
void THNN_SmoothL1Criterion_updateOutput( THNNState *state, THTensor *input, THTensor *target, THTensor *output, bool sizeAverage);
void THNN_SmoothL1Criterion_updateGradInput( THNNState *state, THTensor *input, THTensor *target, THTensor *gradInput, bool sizeAverage);
void THNN_SoftMax_updateOutput( THNNState *state, THTensor *input, THTensor *output);
void THNN_SoftMax_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output);
void THNN_SoftPlus_updateOutput( THNNState *state, THTensor *input, THTensor *output, real beta, real threshold);
void THNN_SoftPlus_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output, real beta, real threshold);
void THNN_SoftShrink_updateOutput( THNNState *state, THTensor *input, THTensor *output, real lambda);
void THNN_SoftShrink_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, real lambda);
void THNN_SparseLinear_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *shardBuffer);
void THNN_SparseLinear_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight);
void THNN_SparseLinear_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *weight, THTensor *bias, real weightDecay, real scale);
void THNN_SparseLinear_zeroGradParameters( THNNState *state, THTensor *gradWeight, THTensor *gradBias, THTensor *lastInput);
void THNN_SparseLinear_updateParameters( THNNState *state, THTensor *weight, THTensor *bias, THTensor *gradWeight, THTensor *gradBias, THTensor *lastInput, real learningRate);
void THNN_SpatialAdaptiveMaxPooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *indices, int owidth, int oheight);
void THNN_SpatialAdaptiveMaxPooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *indices);
void THNN_SpatialAveragePooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad);
void THNN_SpatialAveragePooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad);
void THNN_SpatialBatchNormalization_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *running_mean, THTensor *running_var, THTensor *save_mean, THTensor *save_std, bool train, double momentum, double eps);
void THNN_SpatialBatchNormalization_backward( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *gradWeight, THTensor *gradBias, THTensor *weight, THTensor *save_mean, THTensor *save_std, double scale);
void THNN_SpatialConvolutionLocal_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight);
void THNN_SpatialConvolutionLocal_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight);
void THNN_SpatialConvolutionLocal_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, long inputWidth, long inputHeight, long outputWidth, long outputHeight, real scale);
void THNN_SpatialConvolutionMM_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
void THNN_SpatialConvolutionMM_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH);
void THNN_SpatialConvolutionMM_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, THTensor *fgradInput, int kW, int kH, int dW, int dH, int padW, int padH, real scale);
void THNN_SpatialConvolutionMap_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *output
- [OUT] convolution output
THTensor *weight
- 3D weight tensor (connTable:size(1) x kH x kW)
THTensor *bias
- 1D bias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
void THNN_SpatialConvolutionMap_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *bias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradInput
- [OUT] gradient w.r.t. input
THTensor *weight
- 3D weight tensor (connTable:size(1) x kH x kW)
THTensor *bias
- 1D bias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
void THNN_SpatialConvolutionMap_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH, real scale);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradWeight
- 3D gradWeight tensor (connTable:size(1) x kH x kW)
THTensor *gradBias
- 1D gradBias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
real scale
- scaling factor
void THNN_SpatialFractionalMaxPooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, int outputW, int outputH, int poolSizeW, int poolSizeH, THTensor *indices, THTensor *randomSamples);
void THNN_SpatialFractionalMaxPooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, int outputW, int outputH, int poolSizeW, int poolSizeH, THTensor *indices);
void THNN_SpatialFullConvolution_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *columns, THTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH);
void THNN_SpatialFullConvolution_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *gradColumns, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH);
void THNN_SpatialFullConvolution_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *columns, THTensor *ones, int kW, int kH, int dW, int dH, int padW, int padH, int adjW, int adjH, real scale);
void THNN_SpatialFullConvolutionMap_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *output
- [OUT] convolution output
THTensor *weight
- 3D weight tensor (connTable:size(1) x kH x kW)
THTensor *bias
- 1D bias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
void THNN_SpatialFullConvolutionMap_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *bias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradInput
- [OUT] gradient w.r.t. input
THTensor *weight
- 3D weight tensor (connTable:size(1) x kH x kW)
THTensor *bias
- 1D bias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
void THNN_SpatialFullConvolutionMap_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *connTable, int nInputPlane, int nOutputPlane, int dW, int dH, real scale);
THNNState *state
- library state
THTensor *input
- input tensor
THTensor *gradOutput
- gradient w.r.t. output
THTensor *gradWeight
- 3D gradWeight tensor (connTable:size(1) x kH x kW)
THTensor *gradBias
- 1D gradBias tensor (nOutputPlane)
THTensor *connTable
- connection table
int nInputPlane
- number of input planes
int nOutputPlane
- number of output planes
int dW, int dH
- stride
real scale
- scaling factor
void THNN_SpatialMaxPooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode);
void THNN_SpatialMaxPooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *indices, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode);
void THNN_SpatialMaxUnpooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *indices, int owidth, int oheight);
void THNN_SpatialMaxUnpooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *indices, int owidth, int oheight);
void THNN_SpatialSubSampling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, int kW, int kH, int dW, int dH);
void THNN_SpatialSubSampling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, int kW, int kH, int dW, int dH);
void THNN_SpatialSubSampling_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, int kW, int kH, int dW, int dH, real scale);
void THNN_SpatialUpSamplingNearest_updateOutput( THNNState *state, THTensor *input, THTensor *output, int scale_factor);
void THNN_SpatialUpSamplingNearest_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, int scale_factor);
void THNN_Sqrt_updateOutput( THNNState *state, THTensor *input, THTensor *output, real eps);
void THNN_Sqrt_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output);
void THNN_Square_updateOutput( THNNState *state, THTensor *input, THTensor *output);
void THNN_Square_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput);
void THNN_Tanh_updateOutput( THNNState *state, THTensor *input, THTensor *output);
void THNN_Tanh_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *output);
void THNN_Threshold_updateOutput( THNNState *state, THTensor *input, THTensor *output, real threshold, real val, bool inplace);
void THNN_Threshold_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, real threshold, bool inplace);
void THNN_VolumetricAveragePooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, int kT, int kW, int kH, int dT, int dW, int dH);
void THNN_VolumetricAveragePooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, int kT, int kW, int kH, int dT, int dW, int dH);
void THNN_VolumetricConvolution_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricConvolution_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricConvolution_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, THTensor *fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, real scale);
void THNN_VolumetricConvolutionMM_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricConvolutionMM_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, THTensor *fgradInput, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricConvolutionMM_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, real scale);
void THNN_VolumetricFullConvolution_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, THTensor *fgradInput, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricFullConvolution_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *finput, THTensor *fgradInput, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricFullConvolution_accGradParameters( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, THTensor *fgradInput, int dT, int dW, int dH, int pT, int pW, int pH, real scale);
void THNN_VolumetricMaxPooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *indices, int kT, int kW, int kH, int dT, int dW, int dH, int pT, int pW, int pH, bool ceilMode);
void THNN_VolumetricMaxPooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *indices, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricMaxUnpooling_updateOutput( THNNState *state, THTensor *input, THTensor *output, THTensor *indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH);
void THNN_VolumetricMaxUnpooling_updateGradInput( THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *indices, int oT, int oW, int oH, int dT, int dW, int dH, int pT, int pW, int pH);