Fix some code issues (#92760)

This PR addresses a batch of clang-tidy findings and related small cleanups:

- enable the `misc-unused-alias-decls` and `misc-unused-using-decls` checks and remove the unused alias/using declarations they flag
- drop stale `NOLINTNEXTLINE` suppressions that no longer apply
- replace `NULL` with `nullptr` and default-initialize previously uninitialized locals
- mark the `CUDAEvent` constructors and move operations `noexcept`, and guard the move assignment against self-assignment
- close the `/dev/urandom` descriptor before the read check in `readURandomLong`, so a failed read no longer leaks the file descriptor
- take sink parameters (embedding options, logging/usage callbacks, profiler sizes and strides) by value and `std::move` them into place
- replace `c10::guts::if_constexpr` with C++17 `if constexpr` in the profiler and use a structured binding for the CUDA stream priority range

The short sketches below illustrate the main patterns; they use illustrative stand-in names, not the actual PyTorch types.

Fixes #ISSUE_NUMBER
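
A minimal sketch of the move-semantics change in `aten/src/ATen/cuda/CUDAEvent.h`: the default and flags constructors become `noexcept`, the move operations are `noexcept`, and the move assignment checks for self-assignment before delegating to the move helper. `Event`, `moveHelper`, and the members here are hypothetical stand-ins, not the real CUDA event state.

```cpp
#include <utility>

class Event {
 public:
  Event() noexcept = default;
  explicit Event(unsigned int flags) noexcept : flags_{flags} {}

  Event(const Event&) = delete;
  Event& operator=(const Event&) = delete;

  Event(Event&& other) noexcept { moveHelper(std::move(other)); }
  Event& operator=(Event&& other) noexcept {
    if (this != &other) {  // self-move would otherwise clobber live state
      moveHelper(std::move(other));
    }
    return *this;
  }

 private:
  void moveHelper(Event&& other) noexcept {
    flags_ = std::exchange(other.flags_, 0u);
    created_ = std::exchange(other.created_, false);
  }

  unsigned int flags_{0};
  bool created_{false};
};

int main() {
  Event a(1u);
  Event b(std::move(a));  // noexcept move construction
  b = std::move(b);       // self-move is a no-op thanks to the guard
}
```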
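
The `readURandomLong` change in `c10/core/GeneratorImpl.cpp` zero-initializes the output value and closes the descriptor before the read is validated, so a short read no longer leaks the fd when the check throws. Below is a rough POSIX-only standalone version of the same shape, with `std::runtime_error` standing in for `TORCH_CHECK`.

```cpp
#include <cstdint>
#include <cstdio>
#include <fcntl.h>
#include <stdexcept>
#include <sys/types.h>
#include <unistd.h>

static uint64_t readURandomLong() {
  int randDev = open("/dev/urandom", O_RDONLY);
  if (randDev < 0) {
    throw std::runtime_error("Unable to open /dev/urandom");
  }
  uint64_t randValue{};  // zero-initialized instead of left indeterminate
  ssize_t readBytes = read(randDev, &randValue, sizeof(randValue));
  close(randDev);        // closed before the check: no leak if we throw below
  if (readBytes < static_cast<ssize_t>(sizeof(randValue))) {
    throw std::runtime_error("Unable to read from /dev/urandom");
  }
  return randValue;
}

int main() {
  std::printf("%llu\n", static_cast<unsigned long long>(readURandomLong()));
}
```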
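
Several signatures (`EmbeddingImpl`/`EmbeddingBagImpl` options, the logging callbacks, `TensorMetadata` sizes and strides) switch from `const&` to pass-by-value plus `std::move`, the usual sink-argument idiom. A generic sketch of that idiom, with `Options` and `Widget` as made-up names:

```cpp
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct Options {
  std::vector<int64_t> sizes;
  std::string name;
};

class Widget {
 public:
  // Take the sink argument by value and move it into the member: rvalue
  // callers pay one move, lvalue callers pay one copy plus one move.
  explicit Widget(Options options) : options_(std::move(options)) {}

 private:
  Options options_;
};

int main() {
  Options opts{{1, 2, 3}, "emb"};
  Widget a(opts);             // caller keeps opts: one copy, one move
  Widget b(std::move(opts));  // caller gives up opts: moves only
}
```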
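
The profiler change in `torch/csrc/profiler/collection.cpp` replaces the lambda-based `c10::guts::if_constexpr` helper with plain C++17 `if constexpr` when picking the per-event timestamp conversion. A self-contained sketch of that pattern, with `BackendEvent`/`TorchOpEvent` as hypothetical stand-ins for the `ExtraFields` specializations:

```cpp
#include <cstdint>
#include <iostream>
#include <type_traits>

struct BackendEvent { int64_t start_time_us_; };  // stores microseconds
struct TorchOpEvent { int64_t start_time_ns_; };  // already in nanoseconds

template <typename Event>
int64_t start_time_ns(const Event& e) {
  // The branch not taken is discarded at compile time, so each specialization
  // only ever references the member that actually exists on its event type.
  if constexpr (std::is_same_v<Event, BackendEvent>) {
    return e.start_time_us_ * 1000;  // convert us -> ns
  } else {
    return e.start_time_ns_;         // pass through unchanged
  }
}

int main() {
  std::cout << start_time_ns(BackendEvent{42}) << '\n';    // 42000
  std::cout << start_time_ns(TorchOpEvent{7000}) << '\n';  // 7000
}
```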

Pull Request resolved: https://github.com/pytorch/pytorch/pull/92760
Approved by: https://github.com/Skylion007, https://github.com/albanD
diff --git a/.clang-tidy b/.clang-tidy
index ec43eca..9f30945 100644
--- a/.clang-tidy
+++ b/.clang-tidy
@@ -26,6 +26,8 @@
 -facebook-hte-RelativeInclude,
 hicpp-exception-baseclass,
 hicpp-avoid-goto,
+misc-unused-alias-decls,
+misc-unused-using-decls,
 modernize-*,
 -modernize-concat-nested-namespaces,
 -modernize-return-braced-init-list,
diff --git a/aten/src/ATen/code_template.h b/aten/src/ATen/code_template.h
index c84165e..e7ee6cb 100644
--- a/aten/src/ATen/code_template.h
+++ b/aten/src/ATen/code_template.h
@@ -18,9 +18,7 @@
 // in the top level environment, and then recurses into a parent
 // environment if the key is not found.)
 struct TemplateEnv {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   TemplateEnv() : parent(nullptr) {}
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
   TemplateEnv(TemplateEnv& parent) : parent(&parent) {}
 
   using string_list = std::vector<std::string>;
diff --git a/aten/src/ATen/cuda/CUDAEvent.h b/aten/src/ATen/cuda/CUDAEvent.h
index 1c3c679..467970b 100644
--- a/aten/src/ATen/cuda/CUDAEvent.h
+++ b/aten/src/ATen/cuda/CUDAEvent.h
@@ -28,8 +28,8 @@
 struct TORCH_CUDA_CPP_API CUDAEvent {
   // Constructors
   // Default value for `flags` is specified below - it's cudaEventDisableTiming
-  CUDAEvent() {}
-  CUDAEvent(unsigned int flags) : flags_{flags} {}
+  CUDAEvent() noexcept = default;
+  CUDAEvent(unsigned int flags) noexcept : flags_{flags} {}
 
   CUDAEvent(
       DeviceIndex device_index, const cudaIpcEventHandle_t* handle) {
@@ -58,9 +58,11 @@
   CUDAEvent(const CUDAEvent&) = delete;
   CUDAEvent& operator=(const CUDAEvent&) = delete;
 
-  CUDAEvent(CUDAEvent&& other) { moveHelper(std::move(other)); }
-  CUDAEvent& operator=(CUDAEvent&& other) {
-    moveHelper(std::move(other));
+  CUDAEvent(CUDAEvent&& other) noexcept { moveHelper(std::move(other)); }
+  CUDAEvent& operator=(CUDAEvent&& other) noexcept {
+    if (this != &other) {
+      moveHelper(std::move(other));
+    }
     return *this;
   }
 
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/src/fc-prepack.cc b/aten/src/ATen/native/quantized/cpu/qnnpack/src/fc-prepack.cc
index c772630..2b2922d 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/src/fc-prepack.cc
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/src/fc-prepack.cc
@@ -37,7 +37,7 @@
   output_channels_ = output_channels;
   packed_weights_ =
       malloc(n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
-  if (packed_weights_ == NULL) {
+  if (packed_weights_ == nullptr) {
     pytorch_qnnp_log_error(
         "failed to allocate %zu bytes for packed weights",
         n_stride * (k_stride * sizeof(uint8_t) + sizeof(int32_t)));
diff --git a/c10/core/CPUAllocator.cpp b/c10/core/CPUAllocator.cpp
index 60b76ed..4d0a1f1 100644
--- a/c10/core/CPUAllocator.cpp
+++ b/c10/core/CPUAllocator.cpp
@@ -71,7 +71,6 @@
 class DefaultMobileCPUAllocator final : public at::Allocator {
  public:
   DefaultMobileCPUAllocator() = default;
-  // NOLINTNEXTLINE(modernize-use-override)
   ~DefaultMobileCPUAllocator() override = default;
 
   static void deleter(void* const pointer) {
diff --git a/c10/core/GeneratorImpl.cpp b/c10/core/GeneratorImpl.cpp
index e2876bf..487bb27 100644
--- a/c10/core/GeneratorImpl.cpp
+++ b/c10/core/GeneratorImpl.cpp
@@ -46,14 +46,13 @@
 #if !defined(_WIN32)
 static uint64_t readURandomLong() {
   int randDev = open("/dev/urandom", O_RDONLY);
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  uint64_t randValue;
   TORCH_CHECK(randDev >= 0, "Unable to open /dev/urandom");
+  uint64_t randValue{};
   ssize_t readBytes = read(randDev, &randValue, sizeof(randValue));
+  close(randDev);
   TORCH_CHECK(
       readBytes >= (ssize_t)sizeof(randValue),
       "Unable to read from /dev/urandom");
-  close(randDev);
   return randValue;
 }
 #endif // _WIN32
diff --git a/c10/core/TensorImpl.cpp b/c10/core/TensorImpl.cpp
index 743e80f..a8b4e25 100644
--- a/c10/core/TensorImpl.cpp
+++ b/c10/core/TensorImpl.cpp
@@ -104,7 +104,6 @@
 // the Python and PythonTLSSnapshot dispatch keys will be set and all is well.
 // The point is to delay the dispatch key setting until that point.
 
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 TensorImpl::TensorImpl(
     ImplType type,
     Storage&& storage,
@@ -129,7 +128,6 @@
     c10::optional<c10::Device> device_opt)
     : TensorImpl({}, key_set, data_type, device_opt) {}
 
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 TensorImpl::TensorImpl(
     Storage&& storage,
     DispatchKeySet key_set,
diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
index fe74e49..40b85f8 100644
--- a/c10/util/Logging.cpp
+++ b/c10/util/Logging.cpp
@@ -32,7 +32,7 @@
 } // namespace
 
 void SetStackTraceFetcher(std::function<string(void)> fetcher) {
-  *GetFetchStackTrace() = fetcher;
+  *GetFetchStackTrace() = std::move(fetcher);
 }
 
 void ThrowEnforceNotMet(
@@ -113,13 +113,13 @@
 
 void SetAPIUsageLogger(std::function<void(const std::string&)> logger) {
   TORCH_CHECK(logger);
-  *GetAPIUsageLogger() = logger;
+  *GetAPIUsageLogger() = std::move(logger);
 }
 
 void SetPyTorchDDPUsageLogger(
     std::function<void(const DDPLoggingData&)> logger) {
   TORCH_CHECK(logger);
-  *GetDDPUsageLogger() = logger;
+  *GetDDPUsageLogger() = std::move(logger);
 }
 
 void LogAPIUsage(const std::string& event) try {
diff --git a/c10/util/Type_demangle.cpp b/c10/util/Type_demangle.cpp
index 8b2e626..435e7cf 100644
--- a/c10/util/Type_demangle.cpp
+++ b/c10/util/Type_demangle.cpp
@@ -24,8 +24,7 @@
       abi::__cxa_demangle(
           name,
           /*__output_buffer=*/nullptr,
-          // NOLINTNEXTLINE(modernize-use-nullptr)
-          /*__length=*/0,
+          /*__length=*/nullptr,
           &status),
       /*deleter=*/free);
 
diff --git a/caffe2/operators/pow_op.cc b/caffe2/operators/pow_op.cc
index 159757b..97ede3f 100644
--- a/caffe2/operators/pow_op.cc
+++ b/caffe2/operators/pow_op.cc
@@ -13,8 +13,7 @@
   template <int b_is_scalar, typename T1, typename T2, typename R>
   inline void
   Run(size_t n, const T1* a, const T2* b, T2 e, R* out, CPUContext*) {
-    // NOLINTNEXTLINE(modernize-use-nullptr)
-    if (b == NULL) {
+    if (b == nullptr) {
       EigenVectorArrayMap<R>(out, n) =
           EIGEN_POW((ConstEigenVectorArrayMap<T1>(a, n)), (e));
     } else {
diff --git a/caffe2/share/contrib/nnpack/conv_op.cc b/caffe2/share/contrib/nnpack/conv_op.cc
index 6eafe4e..db1f124 100644
--- a/caffe2/share/contrib/nnpack/conv_op.cc
+++ b/caffe2/share/contrib/nnpack/conv_op.cc
@@ -161,8 +161,7 @@
   ConvPoolOpBase<CPUContext>::SetOutputSize(X, Y, filter.dim32(0));
   const int oH = Y->dim32(2), oW = Y->dim32(3);
 
-  // NOLINTNEXTLINE(modernize-use-nullptr)
-  const float* biasData = NULL;
+  const float* biasData = nullptr;
   if (InputSize() == 3) {
     /* Convolution with bias */
     auto& bias = Input(2);
diff --git a/torch/csrc/Device.h b/torch/csrc/Device.h
index 665c38b..5b45e39 100644
--- a/torch/csrc/Device.h
+++ b/torch/csrc/Device.h
@@ -5,7 +5,6 @@
 
 #include <ATen/Device.h>
 
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct TORCH_API THPDevice {
   PyObject_HEAD at::Device device;
 };
diff --git a/torch/csrc/api/include/torch/nn/modules/embedding.h b/torch/csrc/api/include/torch/nn/modules/embedding.h
index 3bf305c..60b8305 100644
--- a/torch/csrc/api/include/torch/nn/modules/embedding.h
+++ b/torch/csrc/api/include/torch/nn/modules/embedding.h
@@ -32,7 +32,7 @@
  public:
   EmbeddingImpl(int64_t num_embeddings, int64_t embedding_dim)
       : EmbeddingImpl(EmbeddingOptions(num_embeddings, embedding_dim)) {}
-  explicit EmbeddingImpl(const EmbeddingOptions& options_);
+  explicit EmbeddingImpl(EmbeddingOptions options_);
 
   void reset() override;
 
@@ -110,7 +110,7 @@
  public:
   EmbeddingBagImpl(int64_t num_embeddings, int64_t embedding_dim)
       : EmbeddingBagImpl(EmbeddingBagOptions(num_embeddings, embedding_dim)) {}
-  explicit EmbeddingBagImpl(const EmbeddingBagOptions& options_);
+  explicit EmbeddingBagImpl(EmbeddingBagOptions options_);
 
   void reset() override;
 
diff --git a/torch/csrc/api/src/nn/modules/batchnorm.cpp b/torch/csrc/api/src/nn/modules/batchnorm.cpp
index 8032001..105bd16 100644
--- a/torch/csrc/api/src/nn/modules/batchnorm.cpp
+++ b/torch/csrc/api/src/nn/modules/batchnorm.cpp
@@ -11,8 +11,6 @@
 #include <utility>
 #include <vector>
 
-namespace F = torch::nn::functional;
-
 namespace torch {
 namespace nn {
 
diff --git a/torch/csrc/api/src/nn/modules/embedding.cpp b/torch/csrc/api/src/nn/modules/embedding.cpp
index 5354cef..9598248 100644
--- a/torch/csrc/api/src/nn/modules/embedding.cpp
+++ b/torch/csrc/api/src/nn/modules/embedding.cpp
@@ -13,8 +13,8 @@
 
 namespace torch {
 namespace nn {
-EmbeddingImpl::EmbeddingImpl(const EmbeddingOptions& options_)
-    : options(options_) { // NOLINT(modernize-pass-by-value)
+EmbeddingImpl::EmbeddingImpl(EmbeddingOptions options_)
+    : options(std::move(options_)) {
   // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
   reset();
 }
@@ -89,8 +89,8 @@
       options.sparse());
 }
 
-EmbeddingBagImpl::EmbeddingBagImpl(const EmbeddingBagOptions& options_)
-    : options(options_) { // NOLINT(modernize-pass-by-value)
+EmbeddingBagImpl::EmbeddingBagImpl(EmbeddingBagOptions options_)
+    : options(std::move(options_)) {
   // NOLINTNEXTLINE(clang-analyzer-optin.cplusplus.VirtualCall)
   reset();
 }
diff --git a/torch/csrc/api/src/nn/modules/instancenorm.cpp b/torch/csrc/api/src/nn/modules/instancenorm.cpp
index a7eb318..99ab1d7 100644
--- a/torch/csrc/api/src/nn/modules/instancenorm.cpp
+++ b/torch/csrc/api/src/nn/modules/instancenorm.cpp
@@ -1,8 +1,6 @@
 #include <torch/nn/functional/instancenorm.h>
 #include <torch/nn/modules/instancenorm.h>
 
-namespace F = torch::nn::functional;
-
 namespace torch {
 namespace nn {
 
diff --git a/torch/csrc/autograd/functions/accumulate_grad.cpp b/torch/csrc/autograd/functions/accumulate_grad.cpp
index ec0dbf0..e25ee10 100644
--- a/torch/csrc/autograd/functions/accumulate_grad.cpp
+++ b/torch/csrc/autograd/functions/accumulate_grad.cpp
@@ -10,8 +10,6 @@
 #include <stdexcept>
 #include <utility>
 
-using at::Tensor;
-
 namespace torch {
 namespace autograd {
 
diff --git a/torch/csrc/autograd/profiler_kineto.cpp b/torch/csrc/autograd/profiler_kineto.cpp
index 2f7fd18..ce1a5ab 100644
--- a/torch/csrc/autograd/profiler_kineto.cpp
+++ b/torch/csrc/autograd/profiler_kineto.cpp
@@ -258,7 +258,7 @@
       std::set<torch::profiler::impl::ActivityType> activities)
       : ProfilerStateBase(config),
         start_time_(getTimeUs()),
-        record_queue_(config, activities) {}
+        record_queue_(config, std::move(activities)) {}
   ~KinetoThreadLocalState() override = default;
 
   static KinetoThreadLocalState* get(bool global) {
diff --git a/torch/csrc/cuda/Event.cpp b/torch/csrc/cuda/Event.cpp
index 72b740c..8f3cb83 100644
--- a/torch/csrc/cuda/Event.cpp
+++ b/torch/csrc/cuda/Event.cpp
@@ -43,9 +43,7 @@
     return nullptr;
   }
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   THCPEvent* self = (THCPEvent*)ptr.get();
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   unsigned int flags = (blocking ? cudaEventBlockingSync : cudaEventDefault) |
       (enable_timing ? cudaEventDefault : cudaEventDisableTiming) |
       (interprocess ? cudaEventInterprocess : cudaEventDefault);
@@ -88,7 +86,6 @@
   if (!ptr) {
     return nullptr;
   }
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   THCPEvent* self = (THCPEvent*)ptr.get();
 
   // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
diff --git a/torch/csrc/cuda/Stream.cpp b/torch/csrc/cuda/Stream.cpp
index bb7be99..560fb68 100644
--- a/torch/csrc/cuda/Stream.cpp
+++ b/torch/csrc/cuda/Stream.cpp
@@ -66,7 +66,6 @@
       : at::cuda::getStreamFromPool(
             /* isHighPriority */ priority < 0 ? true : false);
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   THCPStream* self = (THCPStream*)ptr.get();
   self->stream_id = static_cast<int64_t>(stream.id());
   self->device_index = static_cast<int64_t>(stream.device_index());
@@ -104,9 +103,7 @@
     PyObject* _unused,
     PyObject* noargs) {
   HANDLE_TH_ERRORS
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  int least_priority, greatest_priority;
-  std::tie(least_priority, greatest_priority) =
+  auto [least_priority, greatest_priority] =
       at::cuda::CUDAStream::priority_range();
   return Py_BuildValue("(ii)", least_priority, greatest_priority);
   END_HANDLE_TH_ERRORS
diff --git a/torch/csrc/cuda/Stream.h b/torch/csrc/cuda/Stream.h
index 9b7197d..6175ac2 100644
--- a/torch/csrc/cuda/Stream.h
+++ b/torch/csrc/cuda/Stream.h
@@ -5,7 +5,6 @@
 #include <torch/csrc/Stream.h>
 #include <torch/csrc/python_headers.h>
 
-// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
 struct THCPStream : THPStream {
   at::cuda::CUDAStream cuda_stream;
 };
diff --git a/torch/csrc/distributed/autograd/utils.cpp b/torch/csrc/distributed/autograd/utils.cpp
index 4167d3b..3de6e1e 100644
--- a/torch/csrc/distributed/autograd/utils.cpp
+++ b/torch/csrc/distributed/autograd/utils.cpp
@@ -20,7 +20,6 @@
 using torch::distributed::rpc::Message;
 using torch::distributed::rpc::MessageType;
 using torch::distributed::rpc::RpcAgent;
-using torch::distributed::rpc::RpcCommandBase;
 using torch::distributed::rpc::WorkerInfo;
 
 void addSendRpcBackward(
diff --git a/torch/csrc/distributed/rpc/script_resp.cpp b/torch/csrc/distributed/rpc/script_resp.cpp
index dcc253f..28ede36 100644
--- a/torch/csrc/distributed/rpc/script_resp.cpp
+++ b/torch/csrc/distributed/rpc/script_resp.cpp
@@ -9,13 +9,6 @@
 namespace distributed {
 namespace rpc {
 
-namespace {
-
-using torch::jit::Pickler;
-using torch::jit::Unpickler;
-
-} // namespace
-
 ScriptResp::ScriptResp(at::IValue&& value) : value_(value) {}
 
 const at::IValue& ScriptResp::value() {
diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
index 9f45f30..5d054ba 100644
--- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
+++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
@@ -2061,10 +2061,8 @@
         const char shape_err[] = "ShapeInferenceError";
         // NOLINTNEXTLINE(modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
         const char type_err[] = "TypeInferenceError";
-        // NOLINTNEXTLINE(modernize-use-nullptr)
-        if ((strstr(ex.what(), shape_err) == NULL) &&
-            // NOLINTNEXTLINE(modernize-use-nullptr)
-            (strstr(ex.what(), type_err) == NULL)) {
+        if ((strstr(ex.what(), shape_err) == nullptr) &&
+            (strstr(ex.what(), type_err) == nullptr)) {
           throw;
         }
       }
diff --git a/torch/csrc/profiler/collection.cpp b/torch/csrc/profiler/collection.cpp
index ccf4cf9..7480da9 100644
--- a/torch/csrc/profiler/collection.cpp
+++ b/torch/csrc/profiler/collection.cpp
@@ -52,13 +52,13 @@
 
 TensorMetadata::TensorMetadata(
     const RawTensorMetadata& r,
-    const std::vector<int64_t>& sizes,
-    const std::vector<int64_t>& strides)
+    std::vector<int64_t> sizes,
+    std::vector<int64_t> strides)
     : RawTensorMetadataBase(r),
       weak_self_{r.weak_self_.value_or(WeakTensor(at::Tensor()))},
       device_{r.device_type_, r.device_index_},
-      sizes_{sizes},
-      strides_{strides} {
+      sizes_{std::move(sizes)},
+      strides_{std::move(strides)} {
   SOFT_ASSERT(r.weak_self_.has_value());
 }
 
@@ -1129,12 +1129,16 @@
     auto& queue = *subqueue_it.second;
     auto materialize = [&](auto& events) {
       for (auto& i : events) {
+        time_t start_time_ns;
+        if constexpr (std::is_same<
+                          std::remove_reference_t<decltype(i)>,
+                          ExtraFields<EventType::Backend>>::value) {
+          start_time_ns = i.start_time_us_ * 1000;
+        } else {
+          start_time_ns = converter(i.start_time_);
+        }
         out.emplace_back(Result::create(
-            /*start_time_ns_=*/c10::guts::if_constexpr<std::is_same<
-                typename std::remove_reference<decltype(i)>::type,
-                ExtraFields<EventType::Backend>>::value>(
-                [&](auto _) { return _(i).start_time_us_ * 1000; },
-                [&](auto _) { return converter(_(i).start_time_); }),
+            /*start_time_ns_=*/start_time_ns,
             /*start_tid_=*/queue.tid(),
             /*kineto_info_=*/queue.kineto_info(),
             /*extra_fields_=*/std::move(i)));
diff --git a/torch/csrc/profiler/collection.h b/torch/csrc/profiler/collection.h
index 7326899..764839e 100644
--- a/torch/csrc/profiler/collection.h
+++ b/torch/csrc/profiler/collection.h
@@ -68,8 +68,8 @@
 struct TORCH_API TensorMetadata : public RawTensorMetadataBase {
   TensorMetadata(
       const RawTensorMetadata& r,
-      const std::vector<int64_t>& sizes,
-      const std::vector<int64_t>& strides);
+      std::vector<int64_t> sizes,
+      std::vector<int64_t> strides);
 
   TensorImplAddress impl() const {
     return weak_self_.get();
diff --git a/torch/csrc/profiler/orchestration/python_tracer.cpp b/torch/csrc/profiler/orchestration/python_tracer.cpp
index 8f63163..64db126 100644
--- a/torch/csrc/profiler/orchestration/python_tracer.cpp
+++ b/torch/csrc/profiler/orchestration/python_tracer.cpp
@@ -9,7 +9,7 @@
 
 struct NoOpPythonTracer : public PythonTracerBase {
   NoOpPythonTracer() = default;
-  ~NoOpPythonTracer() = default;
+  ~NoOpPythonTracer() override = default;
 
   void stop() override {}
   std::vector<std::shared_ptr<Result>> getEvents(
diff --git a/torch/csrc/serialization.cpp b/torch/csrc/serialization.cpp
index d30bfff..e090d77 100644
--- a/torch/csrc/serialization.cpp
+++ b/torch/csrc/serialization.cpp
@@ -226,9 +226,7 @@
     bool save_size,
     uint64_t element_size) {
   c10::DeviceGuard guard(self->device());
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  uint8_t* data;
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
+  uint8_t* data{};
   at::Tensor cpu_tensor;
   int64_t size_bytes = self->nbytes();
   int64_t numel = size_bytes / element_size;
@@ -251,8 +249,7 @@
         torch::utils::THPByteOrder::THP_LITTLE_ENDIAN)
       doWrite(fd, &numel, sizeof(int64_t));
     else {
-      // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-      int64_t nsize; // convert big endian cpu to little endian storage
+      int64_t nsize{}; // convert big endian cpu to little endian storage
       torch::utils::THP_encodeInt64Buffer(
           (uint8_t*)&nsize,
           (const int64_t*)&numel,
@@ -269,7 +266,6 @@
   } else {
     int64_t buffer_size = std::min(numel, (int64_t)5000);
     // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::unique_ptr<uint8_t[]> le_buffer(
         new uint8_t[buffer_size * element_size]);
     for (int64_t i = 0; i < numel; i += buffer_size) {
@@ -319,16 +315,11 @@
   if (storage.defined()) {
     guard.reset_device(storage->device());
   }
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  uint8_t* data;
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-  int64_t size;
+  int64_t size{};
   doRead(file, &size, sizeof(int64_t));
   if (torch::utils::THP_nativeByteOrder() ==
       torch::utils::THPByteOrder::THP_BIG_ENDIAN) {
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
-    int64_t tsize; // convert little endian storage to big endian cpu
-    tsize = size;
+    int64_t tsize = size; // convert little endian storage to big endian cpu
     torch::utils::THP_decodeInt64Buffer(
         &size, (const uint8_t*)&tsize, torch::utils::THP_nativeByteOrder(), 1);
   }
@@ -348,9 +339,9 @@
         _storage_nbytes);
   }
 
-  // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
   std::unique_ptr<char[]> cpu_data;
 
+  uint8_t* data{};
   if (storage->device_type() == at::kCPU) {
     data = storage->data<uint8_t>();
   } else {
@@ -366,7 +357,6 @@
   } else {
     int64_t buffer_size = std::min(size, (int64_t)5000);
     // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
-    // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
     std::unique_ptr<uint8_t[]> le_buffer(
         new uint8_t[buffer_size * element_size]);