[BE] Use `C10_UNUSED` (#111439)

### <samp>🤖 Generated by Copilot at 21e87dc</samp>

> _We're sailing on the sea of code, with warnings to avoid_
> _We use the `C10_UNUSED` macro for variables unexploited_
> _We heave and ho and pull and push, and make the code more neat_
> _We sing this shanty as we go, to keep us in good spirits_

Pull Request resolved: https://github.com/pytorch/pytorch/pull/111439
Approved by: https://github.com/huydhn
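
This PR replaces the manual `(void)x; // Suppress unused variable warning` idiom with the `C10_UNUSED` attribute macro from `c10/macros/Macros.h`, moving the suppression from a statement in the function body onto the declaration itself. Below is a minimal, self-contained sketch of the before/after pattern; the macro definition here is an illustrative stand-in (the real one is compiler-specific), and the function names are invented for the example:

```cpp
#include <cstdio>
#include <initializer_list>

// Illustrative stand-in for the real macro in c10/macros/Macros.h; the
// actual definition varies by compiler, but on GCC/Clang it amounts to
// the GNU "unused" attribute.
#if defined(__GNUC__) || defined(__clang__)
#define C10_UNUSED __attribute__((__unused__))
#else
#define C10_UNUSED
#endif

// Before: a cast-to-void statement in the body silences the warning.
void before(int sort_strides) {
  (void)sort_strides; // Suppress unused variable warning
  std::puts("before");
}

// After: the annotation sits on the parameter declaration itself, so no
// extra statement is needed in the body.
void after(C10_UNUSED int sort_strides) {
  std::puts("after");
}

int main() {
  before(0);
  after(0);
  // The same annotation works on a range-for induction variable that
  // exists only to run the body N times, as in the c10::irange loops
  // throughout the diff below.
  for (C10_UNUSED const auto i : {0, 1, 2}) {
    std::puts("tick");
  }
  return 0;
}
```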
diff --git a/aten/src/ATen/CPUApplyUtils.h b/aten/src/ATen/CPUApplyUtils.h
index 0e87b09..225390a 100644
--- a/aten/src/ATen/CPUApplyUtils.h
+++ b/aten/src/ATen/CPUApplyUtils.h
@@ -67,9 +67,10 @@
   strided_tensor_iter_fixed(strided_tensor_iter_fixed const&) = delete;
   void operator=(strided_tensor_iter_fixed const& x) = delete;
   strided_tensor_iter_fixed(strided_tensor_iter_fixed&&) = default;
-  strided_tensor_iter_fixed(Tensor& tensor, bool sort_strides = false)
+  strided_tensor_iter_fixed(
+      Tensor& tensor,
+      C10_UNUSED bool sort_strides = false)
       : data_(tensor.data_ptr<T>()) {
-    (void)sort_strides; // Suppress unused variable warning
     std::memset(counter_, 0, sizeof(int64_t) * N);
     if (tensor.dim() > 0) {
       std::memcpy(
diff --git a/aten/src/ATen/FunctionalTensorWrapper.cpp b/aten/src/ATen/FunctionalTensorWrapper.cpp
index 938ceee..a05c06a 100644
--- a/aten/src/ATen/FunctionalTensorWrapper.cpp
+++ b/aten/src/ATen/FunctionalTensorWrapper.cpp
@@ -500,8 +500,7 @@
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_tensor.size() == other.size());
   auto functional_tensor_it = functional_tensor.begin();
   auto other_it = other.begin();
-  for (const auto i : c10::irange(functional_tensor.size())) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(functional_tensor.size())) {
     replace_(*functional_tensor_it++, *other_it++);
   }
 }
@@ -518,8 +517,7 @@
   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(functional_tensor.size() == other.size());
   auto functional_tensor_it = functional_tensor.begin();
   auto other_it = other.begin();
-  for (const auto i : c10::irange(functional_tensor.size())) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(functional_tensor.size())) {
     propagate_xla_data(*functional_tensor_it++, *other_it++);
   }
 }
diff --git a/aten/src/ATen/code_template.h b/aten/src/ATen/code_template.h
index 41aff6c..14ac2fa 100644
--- a/aten/src/ATen/code_template.h
+++ b/aten/src/ATen/code_template.h
@@ -207,8 +207,7 @@
   // or trailing newlines. It's the responsibility of the calling function
   // to indent correctly in the context.
   void emitIndent(std::ostream& out, size_t indent) const {
-    for (const auto i : c10::irange(indent)) {
-      (void)i; // Suppress unused variable warning
+    for (C10_UNUSED const auto i : c10::irange(indent)) {
       out << " ";
     }
   }
diff --git a/aten/src/ATen/core/class_type.h b/aten/src/ATen/core/class_type.h
index e2a09b7..310ed33 100644
--- a/aten/src/ATen/core/class_type.h
+++ b/aten/src/ATen/core/class_type.h
@@ -390,8 +390,7 @@
       std::string doc_string = "",
       std::vector<std::string> unresolved_class_attributes = {});
 
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     const auto& n = name().value();
     return n.qualifiedName();
   }
diff --git a/aten/src/ATen/core/enum_type.h b/aten/src/ATen/core/enum_type.h
index efb1819..bd60c1e 100644
--- a/aten/src/ATen/core/enum_type.h
+++ b/aten/src/ATen/core/enum_type.h
@@ -88,8 +88,7 @@
         cu_(std::move(cu)) {}
 
   std::string annotation_str_impl(
-      TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+      C10_UNUSED TypePrinter printer = nullptr) const override {
     const auto& n = name().value();
     return n.qualifiedName();
   }
diff --git a/aten/src/ATen/core/function.h b/aten/src/ATen/core/function.h
index f872cde..b4f8271 100644
--- a/aten/src/ATen/core/function.h
+++ b/aten/src/ATen/core/function.h
@@ -54,8 +54,7 @@
 
   virtual c10::intrusive_ptr<c10::ivalue::Future> runAsync(
       Stack& /*stack*/,
-      TaskLauncher taskLauncher = at::launch) {
-    (void)taskLauncher; // Suppress unused variable warning
+      C10_UNUSED TaskLauncher taskLauncher = at::launch) {
     TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false);
     return {};
   }
diff --git a/aten/src/ATen/core/jit_type.h b/aten/src/ATen/core/jit_type.h
index 133e0a0..3f0d797 100644
--- a/aten/src/ATen/core/jit_type.h
+++ b/aten/src/ATen/core/jit_type.h
@@ -1278,8 +1278,7 @@
  protected:
   NumberType(TypeKind kind = TypeKind::NumberType) : Type(kind) {}
 
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "number"; // technically not a valid python type, but
                      // we need to use it when parsing back in annotations
                      // for implicit conversions
@@ -1306,8 +1305,7 @@
 
  private:
   FloatType() : NumberType(TypeKind::FloatType) {}
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "float";
   }
 };
@@ -1332,8 +1330,7 @@
 
  private:
   ComplexType() : NumberType(TypeKind::ComplexType) {}
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "complex";
   }
 };
@@ -1422,8 +1419,7 @@
 
  private:
   IntType() : NumberType(TypeKind::IntType) {}
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "int";
   }
 };
@@ -1457,8 +1453,7 @@
     // we only use "str" (not "string") in both FunctionSchema and script
     return annotation_str();
   }
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "str";
   }
   static const TypeKind Kind = TypeKind::StringType;
@@ -1478,8 +1473,7 @@
   std::string str() const override {
     return annotation_str();
   }
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return "Storage";
   }
   static const TypeKind Kind = TypeKind::StorageType;
@@ -1514,8 +1508,7 @@
 
  private:
   FunctionType(torch::jit::Function* function);
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     const auto& n = name().value();
     return n.qualifiedName();
   }
@@ -2206,8 +2199,7 @@
       const InterfaceType& rhs,
       std::ostream* why_not);
 
-  std::string annotation_str_impl(TypePrinter printer = nullptr) const override {
-    (void)printer; // Suppress unused variable warning
+  std::string annotation_str_impl(C10_UNUSED TypePrinter printer = nullptr) const override {
     return name()->qualifiedName();
   }
 
diff --git a/aten/src/ATen/cpu/vec/vec_base.h b/aten/src/ATen/cpu/vec/vec_base.h
index 77402ad..683bff4 100644
--- a/aten/src/ATen/cpu/vec/vec_base.h
+++ b/aten/src/ATen/cpu/vec/vec_base.h
@@ -1048,8 +1048,7 @@
 #ifndef _MSC_VER
 # pragma unroll
 #endif
-  for (const auto i : c10::irange(n)) {
-    (void)i; //Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(n)) {
     *dst = c10::convert<dst_T>(c10::load(src));
     src++;
     dst++;
diff --git a/aten/src/ATen/detail/CUDAHooksInterface.h b/aten/src/ATen/detail/CUDAHooksInterface.h
index ea746d3..6c9db29 100644
--- a/aten/src/ATen/detail/CUDAHooksInterface.h
+++ b/aten/src/ATen/detail/CUDAHooksInterface.h
@@ -73,8 +73,7 @@
     TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
   }
 
-  virtual const Generator& getDefaultCUDAGenerator(DeviceIndex device_index = -1) const {
-    (void)device_index; // Suppress unused variable warning
+  virtual const Generator& getDefaultCUDAGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
     TORCH_CHECK(false, "Cannot get default CUDA generator without ATen_cuda library. ", CUDA_HELP);
   }
 
diff --git a/aten/src/ATen/detail/XPUHooksInterface.h b/aten/src/ATen/detail/XPUHooksInterface.h
index bc40bae..f212a1b 100644
--- a/aten/src/ATen/detail/XPUHooksInterface.h
+++ b/aten/src/ATen/detail/XPUHooksInterface.h
@@ -67,13 +67,11 @@
         XPU_HELP);
   }
 
-  virtual Generator getXPUGenerator(DeviceIndex device_index = -1) const {
-    (void)device_index; // Suppress unused variable warning
+  virtual Generator getXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
     TORCH_CHECK(false, "Cannot get XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
   }
 
-  virtual const Generator& getDefaultXPUGenerator(DeviceIndex device_index = -1) const {
-    (void)device_index; // Suppress unused variable warning
+  virtual const Generator& getDefaultXPUGenerator(C10_UNUSED DeviceIndex device_index = -1) const {
     TORCH_CHECK(false, "Cannot get default XPU generator without Intel Extension for Pytorch. ", XPU_HELP);
   }
 
diff --git a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
index bdd4615..35f2f2f 100644
--- a/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
+++ b/aten/src/ATen/functorch/PyTorchOperatorHacks.cpp
@@ -176,8 +176,7 @@
   sizes.reserve(input.dim());
   sizes.push_back(input_sizes[0]);
   sizes.push_back(input_sizes[1]);
-  for (const auto i : c10::irange(2, input.dim())) {
-    (void)i; //Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(2, input.dim())) {
     sizes.push_back(1);
   }
   // NB: THIS WAS CHANGED FROM THE ORIGINAL
diff --git a/aten/src/ATen/native/Dropout.cpp b/aten/src/ATen/native/Dropout.cpp
index e3baae8..20a1164 100644
--- a/aten/src/ATen/native/Dropout.cpp
+++ b/aten/src/ATen/native/Dropout.cpp
@@ -35,8 +35,7 @@
   sizes.reserve(input.dim());
   sizes.push_back(input_sizes[0]);
   sizes.push_back(input_sizes[1]);
-  for (const auto i : c10::irange(2, input.dim())) {
-    (void)i; //Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(2, input.dim())) {
     sizes.push_back(1);
   }
   return input.new_empty_symint(sizes);
diff --git a/aten/src/ATen/native/LossMultiLabelMargin.cpp b/aten/src/ATen/native/LossMultiLabelMargin.cpp
index 6ea42e2..e39b02e 100644
--- a/aten/src/ATen/native/LossMultiLabelMargin.cpp
+++ b/aten/src/ATen/native/LossMultiLabelMargin.cpp
@@ -77,8 +77,7 @@
 
     accscalar_t sum = 0;
 
-    for (const auto t : c10::irange(nframe)) {
-      (void)t; //Suppress unused variable warning
+    for (C10_UNUSED const auto t : c10::irange(nframe)) {
       sum += multilabel_margin_loss_forward_inner_sum_cpu(
           input_data, target_data, is_target_data, dim);
 
@@ -178,8 +177,7 @@
       reduction == Reduction::Mean ? 1. / (nframe * dim) : 1. / dim);
 
   scalar_t* grad_input_row_data = grad_input.mutable_data_ptr<scalar_t>();
-  for (const auto t : c10::irange(nframe)) {
-    (void)t; //Suppress unused variable warning
+  for (C10_UNUSED const auto t : c10::irange(nframe)) {
     for (const auto dt : c10::irange(dim)) {
       int64_t target_idx = target_data[dt];
       if (target_idx < 0) {
diff --git a/aten/src/ATen/native/PackedSequence.cpp b/aten/src/ATen/native/PackedSequence.cpp
index 629b0c2..9b4e06f 100644
--- a/aten/src/ATen/native/PackedSequence.cpp
+++ b/aten/src/ATen/native/PackedSequence.cpp
@@ -188,8 +188,7 @@
     }
     int64_t dec = prev_batch_size - batch_size;
     if (dec > 0) {
-      for (const auto j : c10::irange(dec)) {
-        (void)j; //Suppress unused variable warning
+      for (C10_UNUSED const auto j : c10::irange(dec)) {
         (*lengths--) = i;
       }
     }
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index d460512..e478a72 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -928,8 +928,7 @@
   bool is_kBool = (self.dtype() == at::kBool);
   n = n >= self.size(dim) ? self.size(dim) : n;
 
-  for (const auto i : c10::irange(n)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(n)) {
     if (is_kBool) {
       result = at::logical_xor(at::narrow(result, dim, 1, out_len), at::narrow(result, dim, 0, out_len));
     } else {
@@ -2104,8 +2103,7 @@
       }
       char* self_data = data[0];
       char* other_data = data[1];
-      for (const auto i : c10::irange(dim_size)) {
-        (void)i; //Suppress unused variable warning
+      for (C10_UNUSED const auto i : c10::irange(dim_size)) {
         if (c10::load<scalar_t>(self_data) != c10::load<scalar_t>(other_data)) {
           result = false;
           return;
diff --git a/aten/src/ATen/native/TensorAdvancedIndexing.cpp b/aten/src/ATen/native/TensorAdvancedIndexing.cpp
index 9875d9b..82d1b6e 100644
--- a/aten/src/ATen/native/TensorAdvancedIndexing.cpp
+++ b/aten/src/ATen/native/TensorAdvancedIndexing.cpp
@@ -2312,8 +2312,7 @@
 
         for (const auto i : c10::irange(n2)) {
           const char* ptr = data[0] + i * strides[1];
-          for (const auto j : c10::irange(n1)) {
-            (void)j; //Suppress unused variable warning
+          for (C10_UNUSED const auto j : c10::irange(n1)) {
             const auto& val = c10::load<scalar_t>(ptr);
             // If nonzero, write index
             if (val != scalar_t(0)) {
diff --git a/aten/src/ATen/native/TensorAdvancedIndexingUtils.h b/aten/src/ATen/native/TensorAdvancedIndexingUtils.h
index cd62a18..7b9d144 100644
--- a/aten/src/ATen/native/TensorAdvancedIndexingUtils.h
+++ b/aten/src/ATen/native/TensorAdvancedIndexingUtils.h
@@ -48,8 +48,7 @@
       }
     }
   }
-  for (const auto i : c10::irange(num_ind, self.ndimension())) {
-    (void)i; //Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(num_ind, self.ndimension())) {
     mask = mask.unsqueeze(-1);
   }
   return std::make_tuple(true, mask);
diff --git a/aten/src/ATen/native/cuda/linalg/CusolverDnHandlePool.cpp b/aten/src/ATen/native/cuda/linalg/CusolverDnHandlePool.cpp
index 2b5c75d..bdb0d26 100644
--- a/aten/src/ATen/native/cuda/linalg/CusolverDnHandlePool.cpp
+++ b/aten/src/ATen/native/cuda/linalg/CusolverDnHandlePool.cpp
@@ -11,7 +11,6 @@
 }
 
 void destroyCusolverDnHandle(cusolverDnHandle_t handle) {
-  (void)handle; // Suppress unused variable warning
 // this is because of something dumb in the ordering of
 // destruction. Sometimes atexit, the cuda context (or something)
 // would already be destroyed by the time this gets destroyed. It
@@ -19,6 +18,7 @@
 // the handle as a workaround.
 //   - Comments of @soumith copied from cuDNN handle pool implementation
 #ifdef NO_CUDNN_DESTROY_HANDLE
+  (void)handle; // Suppress unused variable warning
 #else
     cusolverDnDestroy(handle);
 #endif
diff --git a/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp b/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp
index 8c62af8..91ddfef 100644
--- a/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp
+++ b/aten/src/ATen/native/quantized/cpu/UpSampleNearest3d.cpp
@@ -71,8 +71,7 @@
         const auto* pos1 = &i_p[d1 * input_height * input_width + h1 * input_width + w1];
         auto* pos2 = &o_p[d2 * output_height * output_width + h2 * output_width + w2];
 
-        for (const auto c : c10::irange(channels)) {
-          (void)c; //Suppress unused variable warning
+        for (C10_UNUSED const auto c : c10::irange(channels)) {
           pos2[0] = pos1[0];
           pos1 += input_depth * input_height * input_width;
           pos2 += output_depth * output_height * output_width;
diff --git a/aten/src/ATen/native/quantized/cpu/conv_serialization.h b/aten/src/ATen/native/quantized/cpu/conv_serialization.h
index 6ba9e5a..37d1596 100644
--- a/aten/src/ATen/native/quantized/cpu/conv_serialization.h
+++ b/aten/src/ATen/native/quantized/cpu/conv_serialization.h
@@ -143,8 +143,7 @@
       config_vals.push_back(dilation[0].item<int16_t>());
     }
     // output_padding does not exist in v1, so we fill in a default value
-    for (const auto i : c10::irange(kSpatialDim)) {
-      (void)i; // Suppress unused variable
+    for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
       config_vals.push_back(0);
     }
     config_vals.push_back(groups[0].item<int16_t>());
@@ -304,23 +303,19 @@
   torch::List<int64_t> stride, padding, output_padding, dilation;
   // skip kSpatialDim
   int idx = 1;
-  for (const auto i : c10::irange(kSpatialDim)) {
-    (void)i; // Suppress unused variable
+  for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
     stride.emplace_back(config_vals.at(idx));
     idx++;
   }
-  for (const auto i : c10::irange(kSpatialDim)) {
-    (void)i; // Suppress unused variable
+  for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
     padding.emplace_back(config_vals.at(idx));
     idx++;
   }
-  for (const auto i : c10::irange(kSpatialDim)) {
-    (void)i; // Suppress unused variable
+  for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
     dilation.emplace_back(config_vals.at(idx));
     idx++;
   }
-  for (const auto i : c10::irange(kSpatialDim)) {
-    (void)i; // Suppress unused variable
+  for (C10_UNUSED const auto i : c10::irange(kSpatialDim)) {
     TORCH_INTERNAL_ASSERT(idx < static_cast<int64_t>(config_vals.size()),
         "Unexpected index = ", idx, " for config_vals of size ",
         config_vals.size());
diff --git a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
index 12c3b30..479b023 100644
--- a/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
+++ b/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp
@@ -2307,8 +2307,7 @@
       int64_t b{0}, h2{0}, w2{0};
       data_index_init(begin, b, nbatch, h2, output_height, w2, output_width);
 
-      for (const auto i : c10::irange(begin, end)) {
-        (void)i; //Suppress unused variable warning
+      for (C10_UNUSED const auto i : c10::irange(begin, end)) {
         auto* i_p = reinterpret_cast<typename scalar_t::underlying*>(
             idata + b * input_height * input_width * channels);
         auto* o_p = reinterpret_cast<typename scalar_t::underlying*>(
diff --git a/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp b/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp
index 25ab60a..e25ee3b 100644
--- a/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qconv_prepack.cpp
@@ -633,8 +633,7 @@
       int64_t groups) {
     torch::List<int64_t> output_padding;
     output_padding.reserve(kSpatialDim);
-    for (const auto idx : c10::irange(kSpatialDim)) {
-      (void)idx; //Suppress unused variable warning
+    for (C10_UNUSED const auto idx : c10::irange(kSpatialDim)) {
       output_padding.push_back((int64_t)0);
     }
     return _run(weight, bias, stride, padding, output_padding, dilation, groups,
diff --git a/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp b/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp
index e214ab6..4269f87 100644
--- a/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp
+++ b/aten/src/ATen/native/quantized/cudnn/ConvPrepack.cpp
@@ -138,8 +138,7 @@
       int64_t groups) {
     torch::List<int64_t> output_padding;
     output_padding.reserve(kSpatialDim);
-    for (const auto idx : c10::irange(kSpatialDim)) {
-      (void)idx; //Suppress unused variable warning
+    for (C10_UNUSED const auto idx : c10::irange(kSpatialDim)) {
       output_padding.push_back((int64_t)0);
     }
     return _run(weight, bias, stride, padding, output_padding, dilation, groups,
diff --git a/aten/src/ATen/native/sparse/SparseMatMul.cpp b/aten/src/ATen/native/sparse/SparseMatMul.cpp
index 04ab1eb..39e2d82 100644
--- a/aten/src/ATen/native/sparse/SparseMatMul.cpp
+++ b/aten/src/ATen/native/sparse/SparseMatMul.cpp
@@ -159,8 +159,7 @@
       }
     }
 
-    for (const auto jj : c10::irange(length)) {
-      (void)jj; //Suppress unused variable warning
+    for (C10_UNUSED const auto jj : c10::irange(length)) {
 
       // NOTE: the linked list that encodes col indices
       // is not guaranteed to be sorted.
diff --git a/aten/src/ATen/test/basic.cpp b/aten/src/ATen/test/basic.cpp
index 3b4bb07..f102bb6 100644
--- a/aten/src/ATen/test/basic.cpp
+++ b/aten/src/ATen/test/basic.cpp
@@ -90,8 +90,7 @@
 void TestZeros(DeprecatedTypeProperties& type) {
   auto begin = std::chrono::high_resolution_clock::now();
   Tensor a = zeros({1024, 1024}, type);
-  for (const auto i : c10::irange(1, 1000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(1, 1000)) {
     a = zeros({128, 128}, type);
   }
   auto end = std::chrono::high_resolution_clock::now();
@@ -109,8 +108,7 @@
   auto begin = std::chrono::high_resolution_clock::now();
   Tensor d = ones({3, 4}, type);
   Tensor r = zeros({3, 4}, type);
-  for (const auto i : c10::irange(1000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(1000)) {
     add_out(r, r, d);
   }
   auto end = std::chrono::high_resolution_clock::now();
@@ -127,8 +125,7 @@
   auto begin = std::chrono::high_resolution_clock::now();
   Tensor d = ones({3, 4}, type);
   Tensor r = zeros({3, 4}, type);
-  for (const auto i : c10::irange(1000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(1000)) {
     r = add(r, d);
   }
   auto end = std::chrono::high_resolution_clock::now();
diff --git a/aten/src/ATen/test/cpu_generator_test.cpp b/aten/src/ATen/test/cpu_generator_test.cpp
index 1a2413e..f24ff69 100644
--- a/aten/src/ATen/test/cpu_generator_test.cpp
+++ b/aten/src/ATen/test/cpu_generator_test.cpp
@@ -161,8 +161,7 @@
   // So if you want to skip 8 values, offset would
   // be 2, since 2*4=8.
   at::Philox4_32 engine2(123, 1, 2);
-  for (const auto i : c10::irange(8)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(8)) {
     // Note: instead of using the engine() call 8 times
     // we could have achieved the same functionality by
     // calling the incr() function twice.
@@ -223,16 +222,14 @@
   // test with zero seed
   at::mt19937 engine1(0);
   std::mt19937 engine2(0);
-  for (const auto i : c10::irange(10000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(10000)) {
     ASSERT_EQ(engine1(), engine2());
   }
 
   // test with large seed
   engine1 = at::mt19937(2147483647);
   engine2 = std::mt19937(2147483647);
-  for (const auto i : c10::irange(10000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(10000)) {
     ASSERT_EQ(engine1(), engine2());
   }
 
@@ -241,8 +238,7 @@
   auto seed = rd();
   engine1 = at::mt19937(seed);
   engine2 = std::mt19937(seed);
-  for (const auto i : c10::irange(10000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(10000)) {
     ASSERT_EQ(engine1(), engine2());
   }
 
diff --git a/aten/src/ATen/test/legacy_vmap_test.cpp b/aten/src/ATen/test/legacy_vmap_test.cpp
index 5ca827d..cbf7ca6 100644
--- a/aten/src/ATen/test/legacy_vmap_test.cpp
+++ b/aten/src/ATen/test/legacy_vmap_test.cpp
@@ -170,8 +170,7 @@
   {
     // ActualDim on kVmapMaxTensorDims sized underlying tensor
     auto tensor = ones({});
-    for (const auto i : c10::irange(kVmapMaxTensorDims)) {
-      (void)i; // Suppress unused variable warning
+    for (C10_UNUSED const auto i : c10::irange(kVmapMaxTensorDims)) {
       tensor = tensor.unsqueeze(0);
     }
     ASSERT_EQ(tensor.dim(), kVmapMaxTensorDims);
diff --git a/aten/src/ATen/test/thread_init_test.cpp b/aten/src/ATen/test/thread_init_test.cpp
index 498e5e2..5c2b903 100644
--- a/aten/src/ATen/test/thread_init_test.cpp
+++ b/aten/src/ATen/test/thread_init_test.cpp
@@ -14,8 +14,7 @@
   ASSERT_TRUE(given_num_threads >= 0);
   ASSERT_EQ(at::get_num_threads(), given_num_threads);
   auto t_sum = t.sum();
-  for (const auto i : c10::irange(1000)) {
-    (void)i; // Suppress unused variable warning
+  for (C10_UNUSED const auto i : c10::irange(1000)) {
     t_sum = t_sum + t.sum();
   }
 }
diff --git a/aten/src/ATen/test/vec_test_all_types.cpp b/aten/src/ATen/test/vec_test_all_types.cpp
index b990a9b..85e9b64 100644
--- a/aten/src/ATen/test/vec_test_all_types.cpp
+++ b/aten/src/ATen/test/vec_test_all_types.cpp
@@ -1126,8 +1126,7 @@
         float minv = static_cast<float>(static_cast<double>(min_val) * 2.0);
         float maxv = static_cast<float>(static_cast<double>(max_val) * 2.0);
         ValueGen<float> gen(minv, maxv, seed.add(2));
-        for (const auto i : c10::irange(trials)) {
-            (void)i; // Suppress unused variable warning
+        for (C10_UNUSED const auto i : c10::irange(trials)) {
             float scale = generator_sc.get();
             float inv_scale = 1.0f / static_cast<float>(scale);
             auto zero_point_val = generator_zp.get();
@@ -1173,8 +1172,7 @@
       float minv = static_cast<float>(static_cast<double>(min_val) * 2.0);
       float maxv = static_cast<float>(static_cast<double>(max_val) * 2.0);
       ValueGen<float> gen(minv, maxv, seed.add(2));
-      for (const auto i : c10::irange(trials)) {
-        (void)i; // Suppress unused variable warning
+      for (C10_UNUSED const auto i : c10::irange(trials)) {
         float scale = generator_sc.get();
         float inv_scale = 1.0f / static_cast<float>(scale);
         auto zero_point_val = generator_zp.get();
@@ -1233,8 +1231,7 @@
         ValueGen<int> generator(min_val, max_val, seed.add(1));
         //scale
         ValueGen<float> generator_sc(1.f, 15.f, seed.add(2));
-        for (const auto i : c10::irange(trials)) {
-            (void)i; // Suppress unused variable warning
+        for (C10_UNUSED const auto i : c10::irange(trials)) {
             float scale = generator_sc.get();
             int32_t zero_point_val = generator.get();
             float scale_zp_premul = -(scale * zero_point_val);
@@ -1281,8 +1278,7 @@
         ValueGen<int32_t> generator(min_val, max_val, seed);
         //scale
         ValueGen<float> generator_sc(1.f, 15.f, seed.add(1));
-        for (const auto i : c10::irange(trials)) {
-            (void)i; // Suppress unused variable warning
+        for (C10_UNUSED const auto i : c10::irange(trials)) {
             float multiplier = 1.f / (generator_sc.get());
             auto zero_point_val = generator.get();
             int index = 0;
@@ -1319,8 +1315,7 @@
         typename vec::int_vec_return_type  expected_int_ret;
         auto seed = TestSeed();
         ValueGen<underlying> generator(min_val, max_val, seed);
-        for (const auto i : c10::irange(trials)) {
-            (void)i; // Suppress unused variable warning
+        for (C10_UNUSED const auto i : c10::irange(trials)) {
             //generate vals
             for (int j = 0; j < vec::size(); j++) {
                 qint_vals[j] = generator.get();
diff --git a/aten/src/ATen/test/vec_test_all_types.h b/aten/src/ATen/test/vec_test_all_types.h
index 9a37e51..d915e6b 100644
--- a/aten/src/ATen/test/vec_test_all_types.h
+++ b/aten/src/ATen/test/vec_test_all_types.h
@@ -949,8 +949,7 @@
         UVT start = dmn_argc > 0 ? dmn.ArgsDomain[0].start : default_start;
         UVT end = dmn_argc > 0 ? dmn.ArgsDomain[0].end : default_end;
         ValueGen<VT> generator(start, end, seed.add(changeSeedBy));
-        for (const auto trial : c10::irange(trialCount)) {
-            (void)trial; // Suppress unused variable warning
+        for (C10_UNUSED const auto trial : c10::irange(trialCount)) {
             for (const auto k : c10::irange(el_count)) {
                 vals[k] = generator.get();
                 call_filter(filter, vals[k]);
@@ -1009,8 +1008,7 @@
         UVT end1 = dmn_argc > 1 ? dmn.ArgsDomain[1].end : default_end;
         ValueGen<VT> generator0(start0, end0, seed.add(changeSeedBy));
         ValueGen<VT> generator1(start1, end1, seed.add(changeSeedBy + 1));
-        for (const auto trial : c10::irange(trialCount)) {
-            (void)trial; // Suppress unused variable warning
+        for (C10_UNUSED const auto trial : c10::irange(trialCount)) {
             for (const auto k : c10::irange(el_count)) {
                 vals0[k] = generator0.get();
                 vals1[k] = generator1.get();
@@ -1075,8 +1073,7 @@
         ValueGen<VT> generator1(start1, end1, seed.add(changeSeedBy + 1));
         ValueGen<VT> generator2(start2, end2, seed.add(changeSeedBy + 2));
 
-        for (const auto trial : c10::irange(trialCount)) {
-            (void)trial; // Suppress unused variable warning
+        for (C10_UNUSED const auto trial : c10::irange(trialCount)) {
             for (const auto k : c10::irange(el_count)) {
                 vals0[k] = generator0.get();
                 vals1[k] = generator1.get();