Revert "Using accreal instead of real in the API"
diff --git a/generic/ELU.c b/generic/ELU.c
index ddcfb97..784a203 100644
--- a/generic/ELU.c
+++ b/generic/ELU.c
@@ -6,10 +6,9 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal alpha_,
+          real alpha,
           bool inplace)
-{
-  real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
+{  
   if(inplace) {
     TH_TENSOR_APPLY(real, input,
       if(*input_data <= 0) {
@@ -31,10 +30,9 @@
           THTensor *gradOutput,
           THTensor *gradInput,
           THTensor *output,
-          accreal alpha_,
+          real alpha,
           bool inplace)
 {
-  real alpha = TH_CONVERT_ACCREAL_TO_REAL(alpha_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   if(inplace) {
     TH_TENSOR_APPLY2(real, gradOutput, real, output,
diff --git a/generic/HardShrink.c b/generic/HardShrink.c
index aaae85b..50d272c 100644
--- a/generic/HardShrink.c
+++ b/generic/HardShrink.c
@@ -6,9 +6,8 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal lambda_)
+          real lambda)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THTensor_(resizeAs)(output, input);
 
   TH_TENSOR_APPLY2(real, output, real, input,
@@ -26,9 +25,8 @@
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal lambda_)
+          real lambda)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
   TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/generic/HardTanh.c b/generic/HardTanh.c
index b38a946..57ef1be 100644
--- a/generic/HardTanh.c
+++ b/generic/HardTanh.c
@@ -6,17 +6,15 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal min_val_,
-          accreal max_val_,
+          real min_val,
+          real max_val,
           bool inplace)
 {
-  real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
-  real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
   if (inplace)
     THTensor_(set)(output, input);
   else
     THTensor_(resizeAs)(output, input);
-
+  
   if (input->nDimension == 1 || !THTensor_(isContiguous)(input) || !THTensor_(isContiguous)(output))
   {
     if (inplace)
@@ -70,13 +68,10 @@
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal min_val_,
-          accreal max_val_,
+          real min_val,
+          real max_val,
           bool inplace)
 {
-  real min_val = TH_CONVERT_ACCREAL_TO_REAL(min_val_);
-  real max_val = TH_CONVERT_ACCREAL_TO_REAL(max_val_);
-
   THNN_CHECK_NELEMENT(input, gradOutput);
   if (inplace)
     THTensor_(set)(gradInput, gradOutput);
diff --git a/generic/LeakyReLU.c b/generic/LeakyReLU.c
index 074047d..a4d9677 100644
--- a/generic/LeakyReLU.c
+++ b/generic/LeakyReLU.c
@@ -6,10 +6,9 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal negval_,
+          real negval,
           bool inplace)
 {
-  real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
   if (inplace)
   {
     TH_TENSOR_APPLY(real, input,
@@ -32,10 +31,9 @@
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal negval_,
+          real negval,
           bool inplace)
 {
-  real negval = TH_CONVERT_ACCREAL_TO_REAL(negval_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   if (inplace)
   {
diff --git a/generic/Linear.c b/generic/Linear.c
index faef421..933bc4b 100644
--- a/generic/Linear.c
+++ b/generic/Linear.c
@@ -87,9 +87,8 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *addBuffer,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   long dim = THTensor_(nDimension)(input);
   if (dim == 1) {
     THTensor_(addr)(gradWeight,1,gradWeight,scale,gradOutput,input);
diff --git a/generic/LookupTable.c b/generic/LookupTable.c
index 46bc2c3..b460f38 100644
--- a/generic/LookupTable.c
+++ b/generic/LookupTable.c
@@ -32,9 +32,8 @@
           THIndexTensor *indices,
           bool scaleGradByFreq,
           int paddingValue,
-          accreal ascale)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(ascale);
   ptrdiff_t i;
   THInteger_t *count_data = NULL;
 
@@ -164,11 +163,9 @@
           THNNState *state,
           THIndexTensor *idx,
           THTensor *weight,
-          accreal maxNorm_,
-          accreal normType_)
+          real maxNorm,
+          real normType)
 {
-  real maxNorm = TH_CONVERT_ACCREAL_TO_REAL(maxNorm_);
-  real normType = TH_CONVERT_ACCREAL_TO_REAL(normType_);
   if (!THTensor_(isContiguous)(weight))
     THError("weight must be contiguous");
   if (!THIndexTensor_(isContiguous)(idx))
diff --git a/generic/MarginCriterion.c b/generic/MarginCriterion.c
index d6d9b60..1675860 100644
--- a/generic/MarginCriterion.c
+++ b/generic/MarginCriterion.c
@@ -8,11 +8,10 @@
           THTensor *target,
           THTensor *output,
           bool sizeAverage,
-          accreal margin_)
+          real margin)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
   THNN_CHECK_NELEMENT(input, target);
-  THNN_CHECK_DIM_SIZE(output, 1, 0, 1);
+  THNN_CHECK_DIM_SIZE(output, 1, 0, 1);  
   real sum = 0;
 
   TH_TENSOR_APPLY2(real, input, real, target,
@@ -32,10 +31,9 @@
           THTensor *target,
           THTensor *gradInput,
           bool sizeAverage,
-          accreal margin_)
+          real margin)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
-  THNN_CHECK_NELEMENT(input, target);
+  THNN_CHECK_NELEMENT(input, target);  
   real norm = (sizeAverage ? 1./((real)THTensor_(nElement)(input)) : 1.);
 
   THTensor_(resizeAs)(gradInput, input);
diff --git a/generic/MultiMarginCriterion.c b/generic/MultiMarginCriterion.c
index 2f8f8ff..af83e89 100644
--- a/generic/MultiMarginCriterion.c
+++ b/generic/MultiMarginCriterion.c
@@ -11,9 +11,8 @@
           bool sizeAverage,
           int p,
           THTensor *weights,
-          accreal margin_)
+          real margin)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
   real *input_data, *weights_data;
   THIndex_t *target_data;
   long nframe, dim;
@@ -91,9 +90,8 @@
           bool sizeAverage,
           int p,
           THTensor *weights,
-          accreal margin_)
+          real margin)
 {
-  real margin = TH_CONVERT_ACCREAL_TO_REAL(margin_);
   real *input_data;
   real *gradInput_data;
   THIndex_t *target_data;
diff --git a/generic/PReLU.c b/generic/PReLU.c
index 174f514..3d2ebfc 100644
--- a/generic/PReLU.c
+++ b/generic/PReLU.c
@@ -165,9 +165,8 @@
           THTensor *gradWeightBuf,
           THTensor *gradWeightBuf2,
           THIndex_t nOutputPlane,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   real *gradWeight_data = THTensor_(data)(gradWeight);
 
diff --git a/generic/RReLU.c b/generic/RReLU.c
index 8fd46d3..cdb9dca 100644
--- a/generic/RReLU.c
+++ b/generic/RReLU.c
@@ -7,14 +7,12 @@
           THTensor *input,
           THTensor *output,
           THTensor *noise,
-          accreal lower_,
-          accreal upper_,
+          real lower,
+          real upper,
           bool train,
           bool inplace,
           THGenerator *generator)
 {
-  real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
-  real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
   if (train)
   {
     // get default random generator
@@ -74,7 +72,7 @@
         *output_data = *input_data * r;
       );
     }
-  }
+  }  
 }
 
 void THNN_(RReLU_updateGradInput)(
@@ -83,13 +81,11 @@
           THTensor *gradOutput,
           THTensor *gradInput,
           THTensor *noise,
-          accreal lower_,
-          accreal upper_,
+          real lower,
+          real upper,
           bool train,
           bool inplace)
 {
-  real lower = TH_CONVERT_ACCREAL_TO_REAL(lower_);
-  real upper = TH_CONVERT_ACCREAL_TO_REAL(upper_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   if (train && upper - lower > 1E-6)    // e.g. if upper == lower, RReLU behaves like LeakyReLU
   {
@@ -103,10 +99,10 @@
     {
       THTensor_(resizeAs)(gradInput, input);
       THTensor_(cmul)(gradInput, gradOutput, noise);
-    }
+    }    
   }
   else
-  {
+  { 
     // use constant factor for negative input values
     const real negSlope = (lower + upper) / 2;
     if (inplace)
diff --git a/generic/SoftPlus.c b/generic/SoftPlus.c
index 6491e66..7305238 100644
--- a/generic/SoftPlus.c
+++ b/generic/SoftPlus.c
@@ -6,11 +6,9 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal beta_,
-          accreal threshold_)
+          real beta,
+          real threshold)
 {
-  real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
   THTensor_(resizeAs)(output, input);
 
   // f(x) = 1/beta * log(1 + exp(beta * x))
@@ -25,14 +23,12 @@
           THTensor *gradOutput,
           THTensor *gradInput,
           THTensor *output,
-          accreal beta_,
-          accreal threshold_)
+          real beta,
+          real threshold)
 {
-  real beta = TH_CONVERT_ACCREAL_TO_REAL(beta_);
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, output);
-
+  
   // d/dx[log(1+exp(k*x))/k] = exp(kx) / (exp(kx) + 1)
   // SINCE
   // y = (1/k)*log(1+exp(k*x)) --> x = (1/k)*log(exp(k*y)-1)
diff --git a/generic/SoftShrink.c b/generic/SoftShrink.c
index e779508..28dcce0 100644
--- a/generic/SoftShrink.c
+++ b/generic/SoftShrink.c
@@ -6,11 +6,10 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal lambda_)
+          real lambda)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THTensor_(resizeAs)(output, input);
-
+  
   TH_TENSOR_APPLY2(real, output, real, input,
     if ((*input_data) > lambda)
      *output_data = *input_data - lambda;
@@ -26,9 +25,8 @@
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal lambda_)
+          real lambda)
 {
-  real lambda = TH_CONVERT_ACCREAL_TO_REAL(lambda_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
   TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
diff --git a/generic/SparseLinear.c b/generic/SparseLinear.c
index 0c52541..807280e 100644
--- a/generic/SparseLinear.c
+++ b/generic/SparseLinear.c
@@ -167,11 +167,9 @@
           THTensor *gradBias,
           THTensor *weight,
           THTensor *bias,
-          accreal weightDecay_,
-          accreal scale_)
+          real weightDecay,
+          real scale)
 {
-  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   long h, i, col, hp0, hp1;
   long outDim = THTensor_(size)(weight, 0);
   long inDim = THTensor_(size)(weight, 1);
@@ -245,11 +243,9 @@
           THTensor *gradBias,
           THTensor *weight,
           THTensor *bias,
-          accreal weightDecay_,
-          accreal scale_)
+          real weightDecay,
+          real scale)
 {
-  real weightDecay = TH_CONVERT_ACCREAL_TO_REAL(weightDecay_);
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   long h, i;
   long outDim = THTensor_(size)(weight, 0);
   long inDim = THTensor_(size)(weight, 1);
@@ -312,9 +308,8 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *lastInput,
-          accreal learningRate_)
+          real learningRate)
 {
-  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
   long h, i;
   long outDim = weight->size[0];
   long inDim = weight->size[1];
@@ -386,9 +381,8 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *lastInput,
-          accreal learningRate_)
+          real learningRate)
 {
-  real learningRate = TH_CONVERT_ACCREAL_TO_REAL(learningRate_);
   long h, i;
   long outDim = weight->size[0];
   long inDim = weight->size[1];
diff --git a/generic/SpatialConvolutionLocal.c b/generic/SpatialConvolutionLocal.c
index 06b57f3..efba30e 100644
--- a/generic/SpatialConvolutionLocal.c
+++ b/generic/SpatialConvolutionLocal.c
@@ -4,8 +4,8 @@
 
 static inline void THNN_(SpatialConvolutionLocal_shapeCheck)(
 	THTensor *input, THTensor *gradOutput,
-	THTensor *weight, THTensor *bias,
-	int kH, int kW, int dH,
+	THTensor *weight, THTensor *bias, 
+	int kH, int kW, int dH, 
 	int dW, int padH, int padW,
 	long inputHeight, long inputWidth,
 	long outputHeight, long outputWidth) {
@@ -39,7 +39,7 @@
   }
 
   THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+  
   if (gradOutput != NULL) {
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -56,8 +56,8 @@
     long s1 = weight->size[0] * weight->size[1];
     long s2 = weight->size[2];
     long s3 = weight->size[3] * weight->size[4] * weight->size[5];
-    *_weight = THTensor_(newWithStorage3d)(weight->storage,
-					   weight->storageOffset,
+    *_weight = THTensor_(newWithStorage3d)(weight->storage, 
+					   weight->storageOffset, 
 					   s1, -1, s2, -1, s3, -1);
     return 1;
   }
@@ -75,8 +75,8 @@
   long i;
   THTensor *output3d, *finput3d;
 
-  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
-		       nInputPlane, inputWidth, inputHeight,
+  THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH, 
+		       nInputPlane, inputWidth, inputHeight, 
 		       outputWidth, outputHeight);
 
   THTensor_(copy)(output, bias);
@@ -86,7 +86,7 @@
      outputHeight * outputWidth, 1,
      nOutputPlane, outputHeight * outputWidth,
      1, nOutputPlane * outputHeight * outputWidth);
-
+  
   finput3d = THTensor_(newWithStorage3d)
     (finput->storage, finput->storageOffset,
      outputHeight * outputWidth, 1,
@@ -94,10 +94,10 @@
      1, kW * kH * nInputPlane * outputHeight * outputWidth);
 
   // weight:    oH*oW x nOutputPlane x nInputPlane*kH*kW
-  // finput3d:  oH*oW x nInputPlane*kH*kW x 1
+  // finput3d:  oH*oW x nInputPlane*kH*kW x 1  
   THTensor_(baddbmm)(output3d, 1.0, output3d, 1.0, weight, finput3d);
   // output3d:  oH*oW x nOutputPlane x 1
-
+  
   THTensor_(free)(output3d);
   THTensor_(free)(finput3d);
 }
@@ -120,10 +120,10 @@
 
   THNN_(SpatialConvolutionLocal_shapeCheck)
     (input, NULL, weight, bias, kH, kW, dH, dW, padH, padW,
-     inputHeight, inputWidth, outputHeight, outputWidth);
+     inputHeight, inputWidth, outputHeight, outputWidth);					    
 
   input = THTensor_(newContiguous)(input);
-
+  
   long nInputPlane = THTensor_(size)(weight, 2)/ (kW * kH);
   long nOutputPlane = THTensor_(size)(weight, 1);
 
@@ -174,7 +174,7 @@
 static void THNN_(SpatialConvolutionLocal_updateGradInput_frame)
      (THTensor *gradInput, THTensor *gradOutput,
       THTensor *weight, THTensor *fgradInput,
-      int kW, int kH, int dW, int dH, int padW, int padH,
+      int kW, int kH, int dW, int dH, int padW, int padH, 
       long nInputPlane, long inputWidth, long inputHeight,
       long nOutputPlane, long outputWidth, long outputHeight)
 {
@@ -188,17 +188,17 @@
                                              kW*kH*nInputPlane, outputHeight*outputWidth,
                                              1, kW*kH*nInputPlane*outputHeight*outputWidth);
   // weight:        oH*oW x nInputPlane*kH*kW x nOutputPlane
-  // gradOutput3d:  oH*oW x nOutputPlane x 1
+  // gradOutput3d:  oH*oW x nOutputPlane x 1         
   THTensor_(baddbmm)(fgradInput3d, 0.0, fgradInput3d, 1.0, weight, gradOutput3d);
-  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1
-
+  // fgradInput3d:  oH*oW x nInputPlane*kH*kW x 1  
+  
   THTensor_(free)(gradOutput3d);
   THTensor_(free)(fgradInput3d);
-
+  
   THTensor_(zero)(gradInput);
-
-  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH,
-		      nInputPlane, inputWidth, inputHeight,
+  
+  THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH, padW, padH, 
+		      nInputPlane, inputWidth, inputHeight, 
 		      outputWidth, outputHeight);
 
 }
@@ -235,8 +235,8 @@
   if(input->nDimension == 3)
   {
     THNN_(SpatialConvolutionLocal_updateGradInput_frame)
-      (gradInput, gradOutput, weight,
-       fgradInput, kW, kH, dW, dH, padW, padH,
+      (gradInput, gradOutput, weight, 
+       fgradInput, kW, kH, dW, dH, padW, padH, 
        nInputPlane, inputWidth, inputHeight,
        nOutputPlane, outputWidth, outputHeight);
   }
@@ -253,8 +253,8 @@
       THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
 
       THNN_(SpatialConvolutionLocal_updateGradInput_frame)
-	(gradInput_t, gradOutput_t, weight, fgradInput_t,
-	 kW, kH, dW, dH, padW, padH,
+	(gradInput_t, gradOutput_t, weight, fgradInput_t, 
+	 kW, kH, dW, dH, padW, padH, 
 	 nInputPlane, inputWidth, inputHeight,
 	 nOutputPlane, outputWidth, outputHeight);
 
@@ -275,12 +275,12 @@
 
 static void THNN_(SpatialConvolutionLocal_accGradParameters_frame)
      (THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
-      THTensor *finput, real scale,
-      int kW, int kH, int dW, int dH, int padW, int padH,
+      THTensor *finput, real scale, 
+      int kW, int kH, int dW, int dH, int padW, int padH, 
       long nInputPlane, long inputWidth, long inputHeight,
       long nOutputPlane, long outputWidth, long outputHeight)
 {
-
+   
   THTensor *gradOutput3d, *finput3d;
   gradOutput3d = THTensor_(newWithStorage3d)(gradOutput->storage, gradOutput->storageOffset,
                                              outputHeight*outputWidth, 1,
@@ -290,7 +290,7 @@
                                          outputHeight*outputWidth, 1,
                                          1, kW*kH*nInputPlane*outputHeight*outputWidth,
                                          kW*kH*nInputPlane, outputHeight*outputWidth);
-  // gradOutput3d:  oH*oW x nOutputPlane x 1
+  // gradOutput3d:  oH*oW x nOutputPlane x 1  
   // finput3d:      oH*oW x 1 x kW*kH*nInputPlane
   THTensor_(baddbmm)(gradWeight, 1.0, gradWeight, scale, gradOutput3d, finput3d);
   // gradWeight:    oH*oW x nOutputPlane x kW*kH*nInputPlane
@@ -314,9 +314,9 @@
     int padW, int padH,
     long inputWidth, long inputHeight,
     long outputWidth, long outputHeight,
-    accreal scale_)
+    real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
+
   int freeWeight = THNN_(view_weight_local)(&gradWeight);
 
   THNN_(SpatialConvolutionLocal_shapeCheck)
@@ -332,7 +332,7 @@
   if(input->nDimension == 3)
   {
     THNN_(SpatialConvolutionLocal_accGradParameters_frame)
-      (gradOutput, gradWeight, gradBias, finput, scale,
+      (gradOutput, gradWeight, gradBias, finput, scale, 
        kW, kH, dW, dH, padW, padH,
        nInputPlane, inputWidth, inputHeight,
        nOutputPlane, outputWidth, outputHeight);
@@ -348,7 +348,7 @@
       THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
 
       THNN_(SpatialConvolutionLocal_accGradParameters_frame)
-	(gradOutput_t, gradWeight, gradBias, finput_t, scale,
+	(gradOutput_t, gradWeight, gradBias, finput_t, scale, 
 	 kW, kH, dW, dH, padW, padH,
 	 nInputPlane, inputWidth, inputHeight,
 	 nOutputPlane, outputWidth, outputHeight);
diff --git a/generic/SpatialConvolutionMM.c b/generic/SpatialConvolutionMM.c
index c9c22bc..83635c1 100644
--- a/generic/SpatialConvolutionMM.c
+++ b/generic/SpatialConvolutionMM.c
@@ -4,7 +4,7 @@
 
 static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
 	THTensor *input, THTensor *gradOutput,
-	THTensor *weight, THTensor *bias,
+	THTensor *weight, THTensor *bias, 
 	int kH, int kW, int dH, int dW, int padH, int padW) {
 
   THArgCheck(kW > 0 && kH > 0, 9,
@@ -45,7 +45,7 @@
 	    nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
 
   THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+  
   if (gradOutput != NULL) {
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -336,9 +336,8 @@
           int dH,
           int padW,
           int padH,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   int freeWeight = 0;
 
   if (gradWeight->nDimension == 4) {
diff --git a/generic/SpatialConvolutionMap.c b/generic/SpatialConvolutionMap.c
index 750b212..82886c2 100644
--- a/generic/SpatialConvolutionMap.c
+++ b/generic/SpatialConvolutionMap.c
@@ -175,18 +175,10 @@
 }
 
 void THNN_(SpatialConvolutionMap_accGradParameters)(
-          THNNState *state,
-          THTensor *input,
-          THTensor *gradOutput,
-          THTensor *gradWeight,
-          THTensor *gradBias,
-          THTensor *connTable,
-          int nInputPlane,
-          int nOutputPlane,
-          int dW, int dH,
-          accreal scale_)
+  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
+  THTensor *connTable, int nInputPlane, int nOutputPlane,
+  int dW, int dH, real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THArgCheck(
     gradWeight != NULL && gradWeight->nDimension == 3
     && connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/generic/SpatialDilatedConvolution.c b/generic/SpatialDilatedConvolution.c
index d345f7a..8b18910 100644
--- a/generic/SpatialDilatedConvolution.c
+++ b/generic/SpatialDilatedConvolution.c
@@ -289,9 +289,8 @@
     int dW, int dH,
     int padW, int padH,
     int dilationW, int dilationH,
-    accreal scale_)
+    real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(SpatialDilatedConvolution_shapeCheck)
     (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW,
      dilationH, dilationW);
diff --git a/generic/SpatialFullConvolution.c b/generic/SpatialFullConvolution.c
index e2a835d..4adcca6 100644
--- a/generic/SpatialFullConvolution.c
+++ b/generic/SpatialFullConvolution.c
@@ -59,7 +59,7 @@
 
 static inline void THNN_(SpatialFullConvolution_shapeCheck)(
 	THTensor *input, THTensor *gradOutput,
-	THTensor *weight, THTensor *bias,
+	THTensor *weight, THTensor *bias, 
 	int kH, int kW, int dH, int dW, int padH, int padW, int adjH, int adjW) {
 
   THArgCheck(kW > 0 && kH > 0, 9,
@@ -103,7 +103,7 @@
 	    nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
 
   THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
-
+  
   if (gradOutput != NULL) {
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
     THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
@@ -342,9 +342,8 @@
     int dW, int dH,
     int padW, int padH,
     int adjW, int adjH,
-    accreal scale_)
+    real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(SpatialFullConvolution_shapeCheck)
     (input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW, adjH, adjW);
 
diff --git a/generic/SpatialFullConvolutionMap.c b/generic/SpatialFullConvolutionMap.c
index e98dea0..1bd3455 100644
--- a/generic/SpatialFullConvolutionMap.c
+++ b/generic/SpatialFullConvolutionMap.c
@@ -147,18 +147,10 @@
 }
 
 void THNN_(SpatialFullConvolutionMap_accGradParameters)(
-  THNNState *state,
-  THTensor *input,
-  THTensor *gradOutput,
-  THTensor *gradWeight,
-  THTensor *gradBias,
-  THTensor *connTable,
-  int nInputPlane,
-  int nOutputPlane,
-  int dW, int dH,
-  accreal scale_)
+  THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias,
+  THTensor *connTable, int nInputPlane, int nOutputPlane,
+  int dW, int dH, real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THArgCheck(
     gradWeight != NULL && gradWeight->nDimension == 3
     && connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
diff --git a/generic/SpatialSubSampling.c b/generic/SpatialSubSampling.c
index 3f01540..3674f2c 100644
--- a/generic/SpatialSubSampling.c
+++ b/generic/SpatialSubSampling.c
@@ -40,7 +40,7 @@
     int kW, int kH,
     int dW, int dH)
 {
-
+  
   real *weight_data = THTensor_(data)(weight);
   real *bias_data = THTensor_(data)(bias);
   real *output_data;
@@ -76,11 +76,11 @@
     THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
   else
     THTensor_(resize4d)(output, input->size[0], nInputPlane, outputHeight, outputWidth);
-
+  
   input = THTensor_(newContiguous)(input);
   input_data = THTensor_(data)(input);
   output_data = THTensor_(data)(output);
-
+  
 #pragma omp parallel for private(k)
   for(k = 0; k < nInputPlane; k++)
   {
@@ -97,7 +97,7 @@
       long i;
       for(i = 0; i < outputWidth*outputHeight; i++)
         ptr_output[i] = z;
-
+      
       for(yy = 0; yy < outputHeight; yy++)
       {
         for(xx = 0; xx < outputWidth; xx++)
@@ -214,9 +214,8 @@
     THTensor *gradBias,
     int kW, int kH,
     int dW, int dH,
-    accreal scale_)
+    real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(SpatialSubSampling_shapeCheck)(input, gradOutput, gradWeight, kW, kH);
 
   long nbatch = 1;
diff --git a/generic/Sqrt.c b/generic/Sqrt.c
index 174884e..24cd51a 100644
--- a/generic/Sqrt.c
+++ b/generic/Sqrt.c
@@ -6,9 +6,8 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal eps_)
+          real eps)
 {
-  real eps = TH_CONVERT_ACCREAL_TO_REAL(eps_);
   THTensor_(resizeAs)(output, input);
   THTensor_(sqrt)(output, input);
 }
@@ -23,8 +22,8 @@
   THNN_CHECK_SHAPE(output, gradOutput);
   THTensor_(resizeAs)(gradInput, input);
 
-  if (output->nDimension == 1 ||
-      !THTensor_(isContiguous)(output) ||
+  if (output->nDimension == 1 || 
+      !THTensor_(isContiguous)(output) || 
       !THTensor_(isContiguous)(gradOutput) ||
       !THTensor_(isContiguous)(gradInput))
   {
diff --git a/generic/THNN.h b/generic/THNN.h
index d4b7a51..447289b 100644
--- a/generic/THNN.h
+++ b/generic/THNN.h
@@ -78,7 +78,7 @@
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *output,            // [OUT] ELU output
-          accreal alpha,               // an ELU parameter (as in paper)
+          real alpha,                  // an ELU parameter (as in paper)
           bool inplace);               // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
 TH_API void THNN_(ELU_updateGradInput)(
           THNNState *state,            // library's state
@@ -86,7 +86,7 @@
           THTensor *gradOutput,        // gradient w.r.t. output
           THTensor *gradInput,         // [OUT] gradient w.r.t. input
           THTensor *output,            // output from a forward pass
-          accreal alpha,               // an ELU parameter (as in paper)
+          real alpha,                  // an ELU parameter (as in paper)
           bool inplace);               // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
 
 TH_API void THNN_(DistKLDivCriterion_updateOutput)(
@@ -119,30 +119,30 @@
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *output,            // [OUT] output tensor
-          accreal lambda);             // HardShrink parameter
+          real lambda);                // HardShrink parameter
 TH_API void THNN_(HardShrink_updateGradInput)(
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *gradOutput,        // gradient w.r.t. module's output
           THTensor *gradInput,         // [OUT] gradient w.r.t. input
-          accreal lambda);             // HardShrink parameter
+          real lambda);                // HardShrink parameter
 
 // HardTanh clamps the values to the interval [min_val; max_val].
 TH_API void THNN_(HardTanh_updateOutput)(
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *output,            // [OUT] output tensor
-          accreal min_val,             // lower threshold
-          accreal max_val,             // upper threshold
-          bool inplace);
+          real min_val,                // lower threshold
+          real max_val,                // upper threshold
+          bool inplace);
 TH_API void THNN_(HardTanh_updateGradInput)(
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *gradOutput,        // gradient w.r.t. module's output
           THTensor *gradInput,         // [OUT] gradient w.r.t. the input
-          accreal min_val,             // lower threshold
-          accreal max_val,             // upper threshold
-          bool inplace);
+          real min_val,                // lower threshold
+          real max_val,                // upper threshold
+          bool inplace);
 
 TH_API void THNN_(L1Cost_updateOutput)(
           THNNState *state,            // library's state
@@ -158,14 +158,14 @@
           THNNState *state,            // library's state
           THTensor *input,             // [MODIFIED] input tensor
           THTensor *output,            // [OUT] output tensor
-          accreal negval,              // negative part slope
+          real negval,                 // negative part slope
           bool inplace);               // if true, modifies the input tensor and sets the output tensor on it (no additional memory is allocated)
 TH_API void THNN_(LeakyReLU_updateGradInput)(
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *gradOutput,        // [MODIFIED] gradient w.r.t. module's output
           THTensor *gradInput,         // [OUT] gradient w.r.t. the input
-          accreal negval,              // negative part slope
+          real negval,                 // negative part slope
           bool inplace);               // if true, modifies gradOutput and sets gradInput onto it (no additional memory is allocated)
 
 TH_API void THNN_(LogSigmoid_updateOutput)(
@@ -201,14 +201,14 @@
           THIndexTensor *indices,      // [OPTIONAL]
           bool scaleGradByFreq,
           int paddingValue,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(LookupTable_renorm)(
           THNNState *state,            // library's state
-          THIndexTensor *idx,          // vector containing row indices (modified in function)
+          THIndexTensor *idx,          // vector that contains row indices (modified in function)
           THTensor *weight,            // 2D tensor whose rows will be renormalized
-          accreal maxNorm,             // maximum norm
-          accreal normType);           // the norm type (e.g., normType=2, then it's 2-norm)
+          real maxNorm,                // maximum norm
+          real normType);              // the norm type (e.g., normType=2, then it's 2-norm)
 
 TH_API void THNN_(MarginCriterion_updateOutput)(
           THNNState *state,            // library's state
@@ -216,15 +216,14 @@
           THTensor *target,            // target tensor (should contain only 1s and -1s)
           THTensor *output,            // [OUT] a one-element tensor containing the loss
           bool sizeAverage,            // if true, the loss is normalized by **total number of elements**
-          accreal margin);             // a margin that is required for the loss to be 0
-
+          real margin);                // a margin that is required for the loss to be 0
 TH_API void THNN_(MarginCriterion_updateGradInput)(
           THNNState *state,            // library's state
           THTensor *input,             // input tensor
           THTensor *target,            // target tensor (should contain only 1s and -1s)
           THTensor *gradInput,         // [OUT] gradient w.r.t. module's input
           bool sizeAverage,            // if true, the gradient is normalized by **total number of elements**
-          accreal margin);             // a margin that is required for the loss to be 0
+          real margin);                // a margin that is required for the loss to be 0
 
 TH_API void THNN_(SoftMarginCriterion_updateOutput)(
           THNNState *state,
@@ -276,7 +275,7 @@
           bool sizeAverage,
           int p,
           THTensor* weights,      // [OPTIONAL]
-          accreal margin);
+          real margin);
 TH_API void THNN_(MultiMarginCriterion_updateGradInput)(
           THNNState *state,
           THTensor *input,
@@ -285,7 +284,7 @@
           bool sizeAverage,
           int p,
           THTensor *weights,      // [OPTIONAL]
-          accreal margin);
+          real margin);
 
 TH_API void THNN_(PReLU_updateOutput)(
           THNNState *state,
@@ -310,7 +309,7 @@
           THTensor *gradWeightBuf,
           THTensor *gradWeightBuf2,
           THIndex_t nOutputPlane,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(Linear_updateOutput)(
           THNNState *state,
@@ -335,15 +334,15 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *addBuffer,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(RReLU_updateOutput)(
           THNNState *state,
           THTensor *input,
           THTensor *output,
           THTensor *noise,
-          accreal lower,
-          accreal upper,
+          real lower,
+          real upper,
           bool train,
           bool inplace,
           THGenerator *generator);
@@ -353,8 +352,8 @@
           THTensor *gradOutput,
           THTensor *gradInput,
           THTensor *noise,
-          accreal lower,
-          accreal upper,
+          real lower,
+          real upper,
           bool train,
           bool inplace);
 
@@ -397,28 +396,28 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal beta,
-          accreal threshold);
+          real beta,
+          real threshold);
 TH_API void THNN_(SoftPlus_updateGradInput)(
           THNNState *state,
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
           THTensor *output,
-          accreal beta,
-          accreal threshold);
+          real beta,
+          real threshold);
 
 TH_API void THNN_(SoftShrink_updateOutput)(
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal lambda);
+          real lambda);
 TH_API void THNN_(SoftShrink_updateGradInput)(
           THNNState *state,
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal lambda);
+          real lambda);
 
 TH_API void THNN_(SparseLinear_updateOutput)(
           THNNState *state,
@@ -434,8 +433,8 @@
           THTensor *gradBias,
           THTensor *weight,
           THTensor *bias,
-          accreal weightDecay,
-          accreal scale);
+          real weightDecay,
+          real scale);
 TH_API void THNN_(SparseLinear_zeroGradParameters)(
           THNNState *state,
           THTensor *gradWeight,
@@ -448,7 +447,7 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *lastInput,
-          accreal learningRate);
+          real learningRate);
 TH_API void THNN_(SparseLinear_legacyUpdateOutput)(
           THNNState *state,
           THTensor *input,
@@ -463,8 +462,8 @@
           THTensor *gradBias,
           THTensor *weight,
           THTensor *bias,
-          accreal weightDecay,
-          accreal scale);
+          real weightDecay,
+          real scale);
 TH_API void THNN_(SparseLinear_legacyZeroGradParameters)(
           THNNState *state,
           THTensor *gradWeight,
@@ -477,13 +476,13 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           THTensor *lastInput,
-          accreal learningRate);
+          real learningRate);
 
 TH_API void THNN_(Sqrt_updateOutput)(
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal eps);
+          real eps);
 TH_API void THNN_(Sqrt_updateGradInput)(
           THNNState *state,
           THTensor *input,
@@ -516,16 +515,16 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal threshold,
-          accreal val,
+          real threshold,
+          real val,
           bool inplace);
 TH_API void THNN_(Threshold_updateGradInput)(
           THNNState *state,
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal threshold,
-          accreal val,
+          real threshold,
+          real val,
           bool inplace);
 
 TH_API void THNN_(TemporalConvolution_updateOutput)(
@@ -551,7 +550,7 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           int kW, int dW,
-          accreal scale);
+          real scale);
 TH_API void THNN_(TemporalMaxPooling_updateOutput)(
           THNNState *state,
           THTensor *input,
@@ -587,7 +586,7 @@
           THTensor *gradWeight,
           THTensor *gradBias,
           int kW, int dW,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(BatchNormalization_updateOutput)(
           THNNState *state,
@@ -649,7 +648,7 @@
           int nInputPlane,        // number of input planes
           int nOutputPlane,       // number of output planes
           int dW, int dH,         // stride
-          accreal scale);         // scaling factor
+          real scale);            // scaling factor
 
 TH_API void THNN_(SpatialConvolutionMM_updateOutput)(
           THNNState *state,
@@ -684,7 +683,7 @@
           int kW, int kH,
           int dW, int dH,
           int padW, int padH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(SpatialConvolutionLocal_updateOutput)(
           THNNState *state,
@@ -725,7 +724,7 @@
           int padW, int padH,
           long inputWidth, long inputHeight,
           long outputWidth, long outputHeight,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(SpatialAdaptiveMaxPooling_updateOutput)(
           THNNState *state,
@@ -812,7 +811,7 @@
           int dW, int dH,
           int padW, int padH,
           int adjW, int adjH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(SpatialFullConvolutionMap_updateOutput)(
           THNNState *state,       // library state
@@ -845,7 +844,7 @@
           int nInputPlane,        // number of input planes
           int nOutputPlane,       // number of output planes
           int dW, int dH,         // stride
-          accreal scale);         // scaling factor
+          real scale);            // scaling factor
 
 TH_API void THNN_(SpatialDilatedConvolution_updateOutput)(
           THNNState *state,
@@ -884,7 +883,7 @@
           int dW, int dH,
           int padW, int padH,
           int dilationW, int dilationH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(SpatialMaxPooling_updateOutput)(
           THNNState *state,
@@ -966,7 +965,7 @@
           THTensor *gradBias,
           int kW, int kH,
           int dW, int dH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(SpatialUpSamplingNearest_updateOutput)(
           THNNState *state,
@@ -1059,7 +1058,7 @@
           THTensor *fgradInput,
           int dT, int dW, int dH,
           int pT, int pW, int pH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(VolumetricConvolutionMM_updateOutput)(
           THNNState *state,
@@ -1092,7 +1091,7 @@
           int kT, int kW, int kH,
           int dT, int dW, int dH,
           int pT, int pW, int pH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(VolumetricFullConvolution_updateOutput)(
           THNNState *state,         // library state
@@ -1127,7 +1126,7 @@
           int dT, int dW, int dH,   // stride
           int pT, int pW, int pH,   // padding
           int aT, int aW, int aH,   // extra output adjustment
-          accreal scale);           // scaling factor
+          real scale);              // scaling factor
 
 TH_API void THNN_(VolumetricDilatedConvolution_updateOutput)(
           THNNState *state,
@@ -1166,7 +1165,7 @@
           int dT, int dW, int dH,
           int padT, int padW, int padH,
           int dilationT, int dilationW, int dilationH,
-          accreal scale);
+          real scale);
 
 TH_API void THNN_(VolumetricMaxPooling_updateOutput)(
           THNNState *state,
@@ -1274,4 +1273,5 @@
           int pleft, int pright,
           int ptop, int pbottom,
           int pfront, int pback);
+
 #endif
diff --git a/generic/TemporalConvolution.c b/generic/TemporalConvolution.c
index a107da2..14297ad 100644
--- a/generic/TemporalConvolution.c
+++ b/generic/TemporalConvolution.c
@@ -48,11 +48,11 @@
   THTensor *outputWindow, *inputWindow;
   int nInputFrame, nOutputFrame;
   long k, i;
-
+  
   int dimS = 0; // sequence dimension
   int dimF = 1; // feature dimension
-
-  if (input->nDimension == 3)
+  
+  if (input->nDimension == 3) 
   {
     dimS = 1;
     dimF = 2;
@@ -93,7 +93,7 @@
                               nFrame, inputFrameStride*input->size[1],
                               kW*input->size[1], 1);
 
-      THTensor_(setStorage2d)(outputWindow, output->storage,
+      THTensor_(setStorage2d)(outputWindow, output->storage, 
                               output->storageOffset + k*output->size[1],
                               nFrame, outputFrameStride*output->size[1],
                               output->size[1], 1);
@@ -108,18 +108,18 @@
     THTensor *outputSample = THTensor_(new)();
     THTensor *inputSample = THTensor_(new)();
     int nBatchFrame = input->size[0];
-
+    
     THTensor_(resize3d)(output,
                         nBatchFrame,
                         nOutputFrame,
                         outputFrameSize);
-
+    
     for(i = 0; i < nBatchFrame; i++)
     {
       THTensor_(select)(outputSample, output, 0, i);
       THTensor_(select)(inputSample, input, 0, i);
       long nOutputSampleFrame = nOutputFrame;
-
+      
       /* bias first */
       for(k = 0; k < nOutputFrame; k++)
       {
@@ -140,7 +140,7 @@
                                 nFrame, inputFrameStride*inputSample->size[1],
                                 kW*inputSample->size[1], 1);
 
-        THTensor_(setStorage2d)(outputWindow, outputSample->storage,
+        THTensor_(setStorage2d)(outputWindow, outputSample->storage, 
                                 outputSample->storageOffset + k*outputSample->size[1],
                                 nFrame, outputFrameStride*outputSample->size[1],
                                 outputSample->size[1], 1);
@@ -175,11 +175,11 @@
   THTensor *gradOutputWindow;
   THTensor *gradInputWindow;
   long k, i;
-
+  
   int dimS = 0; // sequence dimension
   int dimF = 1; // feature dimension
-
-  if (gradOutput->nDimension == 3)
+  
+  if (gradOutput->nDimension == 3) 
   {
     dimS = 1;
     dimF = 2;
@@ -227,13 +227,13 @@
     THTensor *gradOutputSample = THTensor_(new)();
     THTensor *gradInputSample = THTensor_(new)();
     int nBatchFrame = input->size[0];
-
+    
     for(i = 0; i < nBatchFrame; i++)
     {
       THTensor_(select)(gradOutputSample, gradOutput, 0, i);
       THTensor_(select)(gradInputSample, gradInput, 0, i);
       int nOutputSampleFrame = nOutputFrame;
-
+      
       /* ouch */
       for(k = 0; nOutputSampleFrame > 0; k++)
       {
@@ -274,20 +274,19 @@
           THTensor *gradBias,
           int kW,
           int dW,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   long nInputFrame;
   long nOutputFrame;
 
   THTensor *gradOutputWindow;
   THTensor *inputWindow;
   long k, i;
-
+  
   int dimS = 0; // sequence dimension
   int dimF = 1; // feature dimension
-
-  if (gradOutput->nDimension == 3)
+  
+  if (gradOutput->nDimension == 3) 
   {
     dimS = 1;
     dimF = 2;
@@ -302,7 +301,7 @@
   gradOutput = THTensor_(newContiguous)(gradOutput);
   gradOutputWindow = THTensor_(new)();
   inputWindow = THTensor_(new)();
-
+  
   if (input->nDimension == 2)
   {
     /* bias first */
@@ -325,7 +324,7 @@
                               nFrame, inputFrameStride*input->size[1],
                               kW*input->size[1], 1);
 
-      THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage,
+      THTensor_(setStorage2d)(gradOutputWindow, gradOutput->storage, 
                               gradOutput->storageOffset + k*gradOutput->size[1],
                               nFrame, outputFrameStride*gradOutput->size[1],
                               gradOutput->size[1], 1);
@@ -340,13 +339,13 @@
     THTensor *gradOutputSample = THTensor_(new)();
     THTensor *inputSample = THTensor_(new)();
     int nBatchFrame = input->size[0];
-
+    
     for(i = 0; i < nBatchFrame; i++)
     {
       THTensor_(select)(gradOutputSample, gradOutput, 0, i);
       THTensor_(select)(inputSample, input, 0, i);
       int nOutputSampleFrame = nOutputFrame;
-
+      
       /* bias first */
       for(k = 0; k < nOutputFrame; k++)
       {
@@ -367,7 +366,7 @@
                                 nFrame, inputFrameStride*inputSample->size[1],
                                 kW*inputSample->size[1], 1);
 
-        THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage,
+        THTensor_(setStorage2d)(gradOutputWindow, gradOutputSample->storage, 
                                 gradOutputSample->storageOffset + k*gradOutputSample->size[1],
                                 nFrame, outputFrameStride*gradOutputSample->size[1],
                                 gradOutputSample->size[1], 1);
diff --git a/generic/TemporalSubSampling.c b/generic/TemporalSubSampling.c
index 8728d14..bfc7d30 100644
--- a/generic/TemporalSubSampling.c
+++ b/generic/TemporalSubSampling.c
@@ -51,7 +51,7 @@
   THTensor *outputFrame, *inputWindow;
   int nInputFrame, nOutputFrame;
   long k;
-
+  
   THNN_(TemporalSubSampling_shapeCheck)(state, input, NULL, kW, dW, &inputFrameSize);
 
   outputFrame = THTensor_(new)();
@@ -63,7 +63,7 @@
   THTensor_(resize2d)(output,
                       nOutputFrame,
                       inputFrameSize);
-
+  
   for(k = 0; k < nOutputFrame; k++)
   {
     THTensor_(narrow)(inputWindow, input, 0, k*dW, kW);
@@ -124,9 +124,8 @@
           THTensor *gradBias,
           int kW,
           int dW,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THTensor *gradOutputFrame;
   THTensor *inputWindow, *buffer;
   long k;
diff --git a/generic/Threshold.c b/generic/Threshold.c
index 949c7a0..dd2a698 100644
--- a/generic/Threshold.c
+++ b/generic/Threshold.c
@@ -6,12 +6,10 @@
           THNNState *state,
           THTensor *input,
           THTensor *output,
-          accreal threshold_,
-          accreal val_,
+          real threshold,
+          real val,
           bool inplace)
 {
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
-  real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
   if (inplace)
   {
     TH_TENSOR_APPLY(real, input,
@@ -34,12 +32,10 @@
           THTensor *input,
           THTensor *gradOutput,
           THTensor *gradInput,
-          accreal threshold_,
-          accreal val_,
+          real threshold,
+          real val,
           bool inplace)
 {
-  real threshold = TH_CONVERT_ACCREAL_TO_REAL(threshold_);
-  real val = TH_CONVERT_ACCREAL_TO_REAL(val_);
   THNN_CHECK_NELEMENT(input, gradOutput);
   if (inplace)
   {
diff --git a/generic/VolumetricConvolution.c b/generic/VolumetricConvolution.c
index bcd1a0f..4fd8ac3 100644
--- a/generic/VolumetricConvolution.c
+++ b/generic/VolumetricConvolution.c
@@ -170,9 +170,8 @@
           int pT,
           int pW,
           int pH,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THArgCheck(pT != 0 || pW != 0 || pH != 0, 9, "padding not supported by CPU backend");   // sharing signature with CUDA version
 
   THNN_ARGCHECK(gradWeight->nDimension == 5, 4, gradWeight,
diff --git a/generic/VolumetricConvolutionMM.c b/generic/VolumetricConvolutionMM.c
index f8d9eb2..4085e2b 100644
--- a/generic/VolumetricConvolutionMM.c
+++ b/generic/VolumetricConvolutionMM.c
@@ -575,9 +575,8 @@
           int kT, int kW, int kH,
           int dT, int dW, int dH,
           int pT, int pW, int pH,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   int freeWeight;
   int nOutputPlane = (int)gradWeight->size[0];
 
diff --git a/generic/VolumetricDilatedConvolution.c b/generic/VolumetricDilatedConvolution.c
index e31ff2b..d2d5c88 100644
--- a/generic/VolumetricDilatedConvolution.c
+++ b/generic/VolumetricDilatedConvolution.c
@@ -299,9 +299,8 @@
           int dT, int dW, int dH,
           int padT, int padW, int padH,
           int dilationT, int dilationW, int dilationH,
-          accreal scale_)
+          real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   THNN_(VolumetricDilatedConvolution_shapeCheck)(
         input, gradOutput, gradWeight, gradBias,
         kT, kH, kW, dT, dH, dW, padT, padH, padW,
diff --git a/generic/VolumetricFullConvolution.c b/generic/VolumetricFullConvolution.c
index 61c3a44..b6ef1cd 100644
--- a/generic/VolumetricFullConvolution.c
+++ b/generic/VolumetricFullConvolution.c
@@ -402,9 +402,8 @@
   int dT, int dW, int dH,   // stride
   int pT, int pW, int pH,   // padding
   int aT, int aW, int aH,   // extra output adjustment
-  accreal scale_)
+  real scale)
 {
-  real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
   // number of input & output planes and kernel size is indirectly defined by the gradWeight tensor
   THNN_(VolumetricFullConvolution_shapeCheck)(
         input, gradOutput, gradWeight, gradBias,