Finish removal of AT_CHECK and officially deprecate the macro. (#20600)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/20600

This converts the remaining AT_CHECK call sites to TORCH_CHECK and re-enables the C10_DEPRECATED_MESSAGE annotation on deprecated_AT_CHECK, so all future uses of AT_CHECK will fail our CI.
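
For reference, a minimal before/after sketch of the change at a typical call site (the enclosing function is made up for illustration; only the macro name changes, the arguments and semantics are identical):

    #include <cstdint>
    #include <c10/util/Exception.h>

    // Hypothetical call site, for illustration only.
    void check_positive(int64_t n) {
      // Before (deprecated; any new use now fails CI):
      //   AT_CHECK(n > 0, "expected a positive value, but got ", n);

      // After: same condition plus variadic message parts; throws
      // c10::Error with the concatenated message when the condition fails.
      TORCH_CHECK(n > 0, "expected a positive value, but got ", n);
    }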

Reviewed By: jerryzh168

Differential Revision: D15375397

fbshipit-source-id: 5582664d6c7c4f1a56ae45647eb1bca49fed2866
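
The deprecation works because the AT_CHECK macro's expansion calls the inline no-op deprecated_AT_CHECK(), which now carries C10_DEPRECATED_MESSAGE (see the c10/util/Exception.h hunk below); every remaining call site therefore emits a -Wdeprecated-declarations warning, which CI treats as an error, while runtime behavior is unchanged. A self-contained sketch of the same pattern, with illustrative names rather than the real definitions:

    #include <cstdio>

    [[deprecated("MY_CHECK is deprecated, use MY_CHECK_V2 instead.")]]
    inline void deprecated_MY_CHECK() {}

    // Calling the deprecated no-op inside the expansion makes every use
    // of the macro warn at compile time without changing what it does.
    #define MY_CHECK(cond)                                        \
      do {                                                        \
        deprecated_MY_CHECK();                                    \
        if (!(cond)) {                                            \
          std::fprintf(stderr, "check failed: %s\n", #cond);      \
        }                                                         \
      } while (false)

    int main() {
      MY_CHECK(2 + 2 == 4); // warning: 'deprecated_MY_CHECK' is deprecated
      return 0;
    }
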
diff --git a/aten/src/ATen/TensorUtils.cpp b/aten/src/ATen/TensorUtils.cpp
index 832e923..742089f 100644
--- a/aten/src/ATen/TensorUtils.cpp
+++ b/aten/src/ATen/TensorUtils.cpp
@@ -270,7 +270,7 @@
     int64_t dim_size,
     int64_t size) {
   /* Check dimension size of a tensor */
-  AT_CHECK(
+  TORCH_CHECK(
       tensor.dim() == dim && tensor.size(dim_size) == size,
       "Expected a tensor of dimension ",
       dim,
diff --git a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
index 1c875c3..38ae3d5 100644
--- a/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
+++ b/aten/src/ATen/native/AdaptiveAveragePooling3d.cpp
@@ -81,7 +81,7 @@
     Tensor const& input,
     IntArrayRef output_size) {
   for (int64_t i = 0; i < input.ndimension(); i++) {
-    AT_CHECK(
+    TORCH_CHECK(
         input.size(i) > 0,
         "adaptive_avg_pool3d(): expected input to have non-empty spatial dimensions, "
         "but input has sizes ",
@@ -92,7 +92,7 @@
         "empty");
   }
 
-  AT_CHECK(
+  TORCH_CHECK(
       (input.ndimension() == 4 || input.ndimension() == 5),
       "non-empty 4D or 5D (batch mode) tensor expected for input");
 
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 7bd33cb..330cad0 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -121,7 +121,7 @@
   // check that result type and dtype match if provided
   for (const Tensor *t: {&result1, &result2}) {
     const Tensor& result = *t;
-    AT_CHECK(
+    TORCH_CHECK(
         !result.defined() || result.type().scalarType() == dtype,
         name, ": provided dtype must match dtype of result. Got ",
         toString(result.type().scalarType()),
@@ -648,10 +648,10 @@
 
 static std::tuple<Tensor&,Tensor&> std_var_mean_out(const char* fname, Tensor &result1, Tensor &result2, const Tensor &self, IntArrayRef dim, bool unbiased, bool keepdim, bool take_sqrt) {
   AT_ASSERT(result1.defined() && result2.defined());
-  AT_CHECK(self.type().backend() == Backend::CPU || self.type().backend() == Backend::CUDA,
+  TORCH_CHECK(self.type().backend() == Backend::CPU || self.type().backend() == Backend::CUDA,
            fname, " only support CPU and CUDA backend, got: ", toString(self.type().backend()));
-  AT_CHECK(at::isFloatingType(self.type().scalarType()), fname, " only support floating-point dtypes");
-  AT_CHECK(result1.type().scalarType() == result2.type().scalarType(),
+  TORCH_CHECK(at::isFloatingType(self.type().scalarType()), fname, " only support floating-point dtypes");
+  TORCH_CHECK(result1.type().scalarType() == result2.type().scalarType(),
            "provided by result1 dtype must match dtype of result2. Got ",
            toString(result1.type().scalarType()),
            " and ",
diff --git a/aten/src/ATen/native/TensorIterator.cpp b/aten/src/ATen/native/TensorIterator.cpp
index 98a6d3c..5a954ad 100644
--- a/aten/src/ATen/native/TensorIterator.cpp
+++ b/aten/src/ATen/native/TensorIterator.cpp
@@ -508,14 +508,14 @@
 std::unique_ptr<TensorIterator> TensorIterator::reduce_op(Tensor& out1, Tensor& out2, const Tensor& a) {
   AT_ASSERT(out1.defined());
   AT_ASSERT(out2.defined());
-  AT_CHECK((!a.is_cuda() && !out1.is_cuda() && !out2.is_cuda()) || (a.device() == out1.device() && out1.device() == out2.device()),
+  TORCH_CHECK((!a.is_cuda() && !out1.is_cuda() && !out2.is_cuda()) || (a.device() == out1.device() && out1.device() == out2.device()),
       "reduce_op(): expected input and both outputs to be on same device, but input is on ", a.device(),
       ", output1 is on ", out1.device(), " and output2 is on", out2.device());
-  AT_CHECK(out1.dim() == out2.dim(), "reduce_op(): expected both outputs to have same number of dims, but output1 has ", out1.dim(),
+  TORCH_CHECK(out1.dim() == out2.dim(), "reduce_op(): expected both outputs to have same number of dims, but output1 has ", out1.dim(),
       " and output2 has ", out2.dim());
-  AT_CHECK(out1.sizes() == out2.sizes(), "reduce_op(): expected both outputs to have same sizes, but output1 has ", out1.sizes(),
+  TORCH_CHECK(out1.sizes() == out2.sizes(), "reduce_op(): expected both outputs to have same sizes, but output1 has ", out1.sizes(),
       " and output2 has ", out2.sizes());
-  AT_CHECK(out1.strides() == out2.strides(), "reduce_op(): expected both outputs to have same strides, but output1 has ", out1.strides(),
+  TORCH_CHECK(out1.strides() == out2.strides(), "reduce_op(): expected both outputs to have same strides, but output1 has ", out1.strides(),
            " and output2 has ", out2.strides());
   auto builder = TensorIterator::Builder();
   builder.add_output(out1);
diff --git a/aten/src/ATen/native/cuda/AdaptiveAveragePooling3d.cu b/aten/src/ATen/native/cuda/AdaptiveAveragePooling3d.cu
index da3652f..a088dcc 100644
--- a/aten/src/ATen/native/cuda/AdaptiveAveragePooling3d.cu
+++ b/aten/src/ATen/native/cuda/AdaptiveAveragePooling3d.cu
@@ -331,19 +331,19 @@
   checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg});
 
   for (int64_t i = 0; i < input_.ndimension(); i++) {
-    AT_CHECK(
+    TORCH_CHECK(
         input_.size(i) > 0,
         "adaptive_avg_pool3d_cuda(): expected input to have non-empty spatial dimensions, "
         "but input has sizes ", input_.sizes(),
         " with dimension ", i, " being empty");
   }
 
-  AT_CHECK(
+  TORCH_CHECK(
       (input_.ndimension() == 4 || input_.ndimension() == 5),
       "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   // the jit sometimes passes output_size.size() == 1
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 1 || output_size.size() == 3,
       "adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3");
 
diff --git a/aten/src/ATen/native/cuda/Distributions.cu b/aten/src/ATen/native/cuda/Distributions.cu
index 7addcc5..0aec588 100644
--- a/aten/src/ATen/native/cuda/Distributions.cu
+++ b/aten/src/ATen/native/cuda/Distributions.cu
@@ -407,10 +407,10 @@
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "uniform_cuda", [&] {
     auto from = static_cast<scalar_t>(from_);
     auto to = static_cast<scalar_t>(to_);
-    AT_CHECK(from <= to,
+    TORCH_CHECK(from <= to,
       "uniform_ expects to return a [from, to) range, but found from=", from,
       " > to=", to);
-    AT_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
+    TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
           "uniform_ expects to-from <= std::numeric_limits<", toString(iter.dtype()),
           ">::max(), but found to=", to, " and from=", from,
           " which result in to-from to exceed the limit");
diff --git a/aten/src/ATen/native/cuda/UpSample.cuh b/aten/src/ATen/native/cuda/UpSample.cuh
index c0842f8..ff9d959 100644
--- a/aten/src/ATen/native/cuda/UpSample.cuh
+++ b/aten/src/ATen/native/cuda/UpSample.cuh
@@ -25,7 +25,7 @@
     int nchannels,
     int input_width,
     int output_width) {
-  AT_CHECK(
+  TORCH_CHECK(
       input_width > 0 && output_width > 0,
       "input and output sizes should be greater than 0, but got input (W: ",
       input_width,
@@ -34,7 +34,7 @@
       ")");
 
   if (input.defined()) {
-    AT_CHECK(
+    TORCH_CHECK(
         input.numel() != 0 && input.dim() == 3,
         "non-empty 3D input tensor expected but got a tensor with sizes ",
         input.sizes());
@@ -54,7 +54,7 @@
     int input_width,
     int output_height,
     int output_width) {
-  AT_CHECK(
+  TORCH_CHECK(
       input_height > 0 && input_width > 0 && output_height > 0 &&
           output_width > 0,
       "input and output sizes should be greater than 0,"
@@ -69,7 +69,7 @@
       ")");
 
   if (input.defined()) {
-    AT_CHECK(
+    TORCH_CHECK(
         input.numel() != 0 && input.dim() == 4,
         "non-empty 4D input tensor expected but got a tensor with sizes ",
         input.sizes());
@@ -92,7 +92,7 @@
     int output_depth,
     int output_height,
     int output_width) {
-  AT_CHECK(
+  TORCH_CHECK(
       input_depth > 0 && input_height > 0 && input_width > 0 &&
           output_depth > 0 && output_height > 0 && output_width > 0,
       "Input and output sizes should be greater than 0, but got input (D: ",
@@ -110,7 +110,7 @@
       ")");
 
   if (input.defined()) {
-    AT_CHECK(
+    TORCH_CHECK(
         input.numel() != 0 && input.dim() == 5,
         "Non-empty 5D data tensor expected but got a tensor with sizes ",
         input.sizes());
diff --git a/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu b/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu
index d3b02a0..443e88e 100644
--- a/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleBicubic2d.cu
@@ -165,7 +165,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_bicubic2d_out", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
@@ -243,12 +243,12 @@
       "upsample_bicubic2d_backward_out_cuda",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 4,
       "It is expected input_size equals to 4, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
index 0bb523a..d4e8d1b 100644
--- a/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleBilinear2d.cu
@@ -152,7 +152,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_bilinear2d_out_cuda", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
@@ -222,12 +222,12 @@
       "upsample_bilinear2d_backward_out_cuda",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 4,
       "It is expected input_size equals to 4, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleLinear1d.cu b/aten/src/ATen/native/cuda/UpSampleLinear1d.cu
index c5a3142..0f70b57 100644
--- a/aten/src/ATen/native/cuda/UpSampleLinear1d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleLinear1d.cu
@@ -119,7 +119,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_linear1d_out_cuda", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 1,
       "It is expected output_size equals to 1, but got size ",
       output_size.size());
@@ -174,12 +174,12 @@
   checkAllSameGPU(
       "upsample_linear1d_backward_out_cuda", {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 1,
       "It is expected output_size equals to 1, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 3,
       "It is expected input_size equals to 3, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleNearest1d.cu b/aten/src/ATen/native/cuda/UpSampleNearest1d.cu
index 831c37d..2218d27 100644
--- a/aten/src/ATen/native/cuda/UpSampleNearest1d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleNearest1d.cu
@@ -99,7 +99,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_nearest1d_out_cuda", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 1,
       "It is expected output_size equals to 1, but got size ",
       output_size.size());
@@ -151,12 +151,12 @@
       "upsample_nearest1d_backward_out_cuda_template",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 1,
       "It is expected output_size equals to 1, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 3,
       "It is expected input_size equals to 3, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleNearest2d.cu b/aten/src/ATen/native/cuda/UpSampleNearest2d.cu
index f09c3c7..f8d9960 100644
--- a/aten/src/ATen/native/cuda/UpSampleNearest2d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleNearest2d.cu
@@ -118,7 +118,7 @@
   checkAllSameGPU(
       "upsample_nearest2d_out_cuda_template", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
@@ -181,12 +181,12 @@
       "upsample_nearest2d_backward_out_cuda",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 2,
       "It is expected output_size equals to 2, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 4,
       "It is expected input_size equals to 4, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleNearest3d.cu b/aten/src/ATen/native/cuda/UpSampleNearest3d.cu
index 26cc987..39590bb 100644
--- a/aten/src/ATen/native/cuda/UpSampleNearest3d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleNearest3d.cu
@@ -131,7 +131,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_nearest3d_out_cuda", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 3,
       "It is expected output_size equals to 3, but got size ",
       output_size.size());
@@ -202,12 +202,12 @@
       "upsample_nearest3d_backward_out_cuda",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 3,
       "It is expected output_size equals to 3, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 5,
       "It is expected input_size equals to 5, but got size ",
       input_size.size());
diff --git a/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu b/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu
index 2b94c85..683860e 100644
--- a/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu
+++ b/aten/src/ATen/native/cuda/UpSampleTrilinear3d.cu
@@ -198,7 +198,7 @@
   TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2};
   checkAllSameGPU("upsample_trilinear3d_out_cuda", {input_arg, output_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 3,
       "It is expected output_size equals to 3, but got size ",
       output_size.size());
@@ -284,12 +284,12 @@
       "upsample_trilinear3d_backward_out_cuda",
       {grad_output_arg, grad_input_arg});
 
-  AT_CHECK(
+  TORCH_CHECK(
       output_size.size() == 3,
       "It is expected output_size equals to 3, but got size ",
       output_size.size());
 
-  AT_CHECK(
+  TORCH_CHECK(
       input_size.size() == 5,
       "It is expected input_size equals to 5, but got size ",
       input_size.size());
diff --git a/aten/src/TH/THTensor.hpp b/aten/src/TH/THTensor.hpp
index 1ebaf90..a9c89f2 100644
--- a/aten/src/TH/THTensor.hpp
+++ b/aten/src/TH/THTensor.hpp
@@ -34,7 +34,7 @@
   // for the first time (providing the necessary type).  It is an ERROR to
   // invoke any PyTorch operations on such a half-constructed storage,
   // and this check tests for that case.
-  AT_CHECK(tensor->storage(), "Cannot use PyTorch operations on a half-constructed "
+  TORCH_CHECK(tensor->storage(), "Cannot use PyTorch operations on a half-constructed "
            "tensor.  If this tensor came from Caffe2, please call GetMutableData on "
            "it first; otherwise, this is a bug, please report it.");
   return tensor->storage().unsafeGetStorageImpl();
diff --git a/c10/util/Exception.h b/c10/util/Exception.h
index 346c6cb..dfd99a2 100644
--- a/c10/util/Exception.h
+++ b/c10/util/Exception.h
@@ -163,7 +163,7 @@
 // Assuming no bugs in PyTorch, the conditions tested by this macro should
 // always be true; e.g., it should be possible to disable all of these
 // conditions without changing observable user behavior.  If you would like to
-// do error reporting for user input, please use AT_CHECK instead.
+// do error reporting for user input, please use TORCH_CHECK instead.
 //
 // NOTE: It is SAFE to use this macro in production code; on failure, this
 // simply raises an exception, it does NOT unceremoniously quit the process
@@ -290,10 +290,7 @@
 */
 inline void deprecated_AT_WARN() {}
 
-/*
-// Deprecation disabled until we fix sites in our codebase
 C10_DEPRECATED_MESSAGE("AT_CHECK is deprecated, use TORCH_CHECK instead.")
-*/
 inline void deprecated_AT_CHECK() {}
 
 /*
diff --git a/torch/csrc/autograd/record_function.cpp b/torch/csrc/autograd/record_function.cpp
index e2e7d8e..9ecae6d 100644
--- a/torch/csrc/autograd/record_function.cpp
+++ b/torch/csrc/autograd/record_function.cpp
@@ -20,7 +20,7 @@
   if (std::abs(prob - 1.0) < kEps) {
     is_sampled_callbacks = false;
   } else {
-    AT_CHECK(prob > -kEps && prob < 1.0);
+    TORCH_CHECK(prob > -kEps && prob < 1.0);
     is_sampled_callbacks = true;
   }
   sampling_prob = prob;