[Reland] [1/N] Fixes clang-tidy warnings in header files (#114668)

Reland of #113608 after fixing the parts that caused the original revert.
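
The fixes are mechanical applications of a few recurring clang-tidy checks, all visible in the diff below: by-value sink parameters are `std::move`'d into their destination, parameters that are only read are taken by const reference, redundant `std::move` calls on trivially copyable arguments are dropped, defaulted move operations are marked `noexcept`, members are initialized in the member-initializer list rather than the constructor body, and missing `<utility>` includes are added. As a minimal sketch of the "fixed" form of each pattern, using a hypothetical `Widget` type that is not part of the PyTorch sources:

```cpp
#include <cstddef>
#include <string>
#include <utility>
#include <vector>

// Hypothetical type used only to illustrate the warning categories touched by
// this patch; it does not appear in the PyTorch sources.
struct Widget {
  // cppcoreguidelines-pro-type-member-init: initialize members in the
  // member-initializer list instead of assigning inside the constructor body.
  explicit Widget(std::size_t n) : size_(n) {
    values_.reserve(n);
  }

  // performance-noexcept-move-constructor: defaulted move operations are
  // marked noexcept so containers can move elements instead of copying them.
  Widget(Widget&&) noexcept = default;
  Widget& operator=(Widget&&) noexcept = default;

  // performance-unnecessary-value-param: a sink parameter stays by value and
  // is moved into the member it initializes ...
  void set_name(std::string name) {
    name_ = std::move(name);
  }

  // ... while a parameter that is only read is taken by const reference.
  void set_tag(const std::string& tag) {
    tag_ = tag;
  }

 private:
  std::size_t size_;
  std::string name_;
  std::string tag_;
  std::vector<int> values_;
};
```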

Pull Request resolved: https://github.com/pytorch/pytorch/pull/114668
Approved by: https://github.com/huydhn
diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h
index b041cb0..6f5fd03 100644
--- a/aten/src/ATen/TensorIndexing.h
+++ b/aten/src/ATen/TensorIndexing.h
@@ -224,7 +224,8 @@
       return self;
     }
   }
-  return self.slice_symint(dim, start, stop, std::move(step));
+  return self.slice_symint(
+      dim, std::move(start), std::move(stop), std::move(step));
 }
 
 static inline Tensor applySelect(
@@ -258,7 +259,7 @@
   // if the index is negative, do not normalize it because that would fix the
   // index on the current tensor size in the tracer. aten::select also works on
   // negative indices
-  return self.select_symint(dim, index);
+  return self.select_symint(dim, std::move(index));
 }
 
 static inline Tensor boolToIndexingTensorCPUOrCUDA(
@@ -534,7 +535,7 @@
         /*original_tensor=*/self,
         /*index=*/obj,
         /*dim=*/&dim,
-        /*specified_dims=*/&specified_dims,
+        /*specified_dims_ptr=*/&specified_dims,
         /*real_dim=*/i,
         /*outIndices=*/outIndices,
         /*disable_slice_optimization=*/disable_slice_optimization,
diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h
index c48e9b0..b381878 100644
--- a/aten/src/ATen/core/function_schema.h
+++ b/aten/src/ATen/core/function_schema.h
@@ -10,6 +10,7 @@
 #include <ATen/core/operator_name.h>
 #include <ATen/core/dispatch/OperatorOptions.h>
 #include <unordered_map>
+#include <utility>
 
 namespace c10 {
 
@@ -27,12 +28,12 @@
 struct Argument {
   Argument(
       std::string name = "",
-      TypePtr type = nullptr,
+      const TypePtr& type = nullptr,
       c10::optional<int32_t> N = c10::nullopt,
       c10::optional<IValue> default_value = c10::nullopt,
       bool kwarg_only = false,
       c10::optional<AliasInfo> alias_info = c10::nullopt)
-    : Argument(name, type, type, N, default_value, kwarg_only, alias_info) {}
+    : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
 
   Argument(
       std::string name,
@@ -45,7 +46,7 @@
       : name_(std::move(name)),
         type_(fake_type ? std::move(fake_type) : TensorType::get()),
         real_type_(real_type ? std::move(real_type) : type_),
-        N_(std::move(N)),
+        N_(N),
         default_value_(std::move(default_value)),
         alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
         kwarg_only_(kwarg_only) {
diff --git a/torch/csrc/api/include/torch/ordered_dict.h b/torch/csrc/api/include/torch/ordered_dict.h
index 46e8e3e..31a2ab6 100644
--- a/torch/csrc/api/include/torch/ordered_dict.h
+++ b/torch/csrc/api/include/torch/ordered_dict.h
@@ -36,8 +36,8 @@
   // values. I tried to make this noexcept (conditional on the move constructors
   // of index_ and items_ being noexcept) but the obvious spelling didn't
   // compile on Windows.
-  OrderedDict(OrderedDict&& other) = default;
-  OrderedDict& operator=(OrderedDict&& other) = default;
+  OrderedDict(OrderedDict&& other) noexcept = default;
+  OrderedDict& operator=(OrderedDict&& other) noexcept = default;
 
   ~OrderedDict() = default;
 
diff --git a/torch/csrc/autograd/custom_function.h b/torch/csrc/autograd/custom_function.h
index a60102b..52fc900 100644
--- a/torch/csrc/autograd/custom_function.h
+++ b/torch/csrc/autograd/custom_function.h
@@ -305,8 +305,8 @@
     outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
   }
 
-  _jvp_fn_t jvp_fn = [](variable_list inputs,
-                        variable_list gI) -> variable_list {
+  _jvp_fn_t jvp_fn = [](const variable_list& inputs,
+                        const variable_list& gI) -> variable_list {
     TORCH_CHECK(
         false,
         "jvp is not implemented for the c++ API of custom Function yet.",
diff --git a/torch/csrc/jit/ir/ir.h b/torch/csrc/jit/ir/ir.h
index ec8e1c9..4781b15 100644
--- a/torch/csrc/jit/ir/ir.h
+++ b/torch/csrc/jit/ir/ir.h
@@ -446,7 +446,7 @@
     return callstack_;
   }
   void setCallStack(InlinedCallStackPtr cs) {
-    callstack_ = cs;
+    callstack_ = std::move(cs);
   }
 
   // NB: This returns an ArrayRef; that means that it will
diff --git a/torch/csrc/jit/mobile/module.h b/torch/csrc/jit/mobile/module.h
index 8d7e426..5e5d87f 100644
--- a/torch/csrc/jit/mobile/module.h
+++ b/torch/csrc/jit/mobile/module.h
@@ -5,6 +5,8 @@
 #include <torch/csrc/jit/mobile/method.h>
 #include <torch/csrc/jit/mobile/quantization.h>
 
+#include <utility>
+
 namespace torch {
 namespace jit {
 namespace mobile {
@@ -137,7 +139,7 @@
   }
 
   void set_delete_memory(std::shared_ptr<char> delete_mem) {
-    mem_to_delete_ = delete_mem;
+    mem_to_delete_ = std::move(delete_mem);
   }
 
   void set_min_operator_version(int64_t version) {
diff --git a/torch/csrc/jit/runtime/argument_spec.h b/torch/csrc/jit/runtime/argument_spec.h
index 7840b9a..06c77ed 100644
--- a/torch/csrc/jit/runtime/argument_spec.h
+++ b/torch/csrc/jit/runtime/argument_spec.h
@@ -72,10 +72,10 @@
     "ArgumentInfo is expected to be a 32-bit struct");
 
 struct ArgumentSpec {
-  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
-  ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs) {
-    hash_code =
-        c10::hash_combine(num_flat_tensor_inputs, num_flat_optional_inputs);
+  ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs)
+      : hash_code(c10::hash_combine(
+            num_flat_tensor_inputs,
+            num_flat_optional_inputs)) {
     tensor_args.reserve(num_flat_tensor_inputs);
     optional_presence.reserve(num_flat_optional_inputs);
   }
diff --git a/torch/csrc/jit/serialization/pickler.h b/torch/csrc/jit/serialization/pickler.h
index d1caf17..4f553b6 100644
--- a/torch/csrc/jit/serialization/pickler.h
+++ b/torch/csrc/jit/serialization/pickler.h
@@ -327,8 +327,8 @@
 // Register function pointer of Tensor BackendMetadata for serialization.
 TORCH_API inline void TensorBackendMetaRegistry(
     c10::DeviceType t,
-    BackendMetaPtr get_fptr,
-    BackendMetaPtr set_fptr) {
+    const BackendMetaPtr& get_fptr,
+    const BackendMetaPtr& set_fptr) {
   // allowlist verification
   // Only if the devicetype is in the allowlist,
   // we allow the serialization extension to be registered for backendmeta data.
@@ -416,7 +416,7 @@
 // NOTE: This overload is required by unpickler.cpp
 inline void setTensorMetadata(
     const at::Tensor& t,
-    c10::Dict<c10::IValue, c10::IValue> metadata_idict) {
+    const c10::Dict<c10::IValue, c10::IValue>& metadata_idict) {
   std::unordered_map<std::string, bool> metadata;
   for (auto& pair : metadata_idict) {
     auto key = *pair.key().toString();
diff --git a/torch/csrc/jit/serialization/source_range_serialization.h b/torch/csrc/jit/serialization/source_range_serialization.h
index 577a383..bbfd533 100644
--- a/torch/csrc/jit/serialization/source_range_serialization.h
+++ b/torch/csrc/jit/serialization/source_range_serialization.h
@@ -37,7 +37,7 @@
 class SourceRangeDeserializer {
  public:
   SourceRangeDeserializer() = default;
-  explicit SourceRangeDeserializer(c10::IValue text_table) {
+  explicit SourceRangeDeserializer(const c10::IValue& text_table) {
     for (const auto& x : text_table.toTuple()->elements()) {
       text_table_.emplace_back(std::make_shared<std::string>(x.toStringRef()));
     }
diff --git a/torch/custom_class.h b/torch/custom_class.h
index a7f5dd5..a556ae6 100644
--- a/torch/custom_class.h
+++ b/torch/custom_class.h
@@ -103,7 +103,7 @@
         "__init__",
         std::move(func),
         std::move(doc_string),
-        std::move(default_args));
+        default_args);
     return *this;
   }
 
@@ -126,7 +126,7 @@
         "__init__",
         std::move(init_lambda_wrapper),
         std::move(doc_string),
-        std::move(default_args));
+        default_args);
 
     return *this;
   }
@@ -160,7 +160,7 @@
         std::move(name),
         std::move(wrapped_f),
         std::move(doc_string),
-        std::move(default_args));
+        default_args);
     return *this;
   }
 
@@ -257,7 +257,7 @@
   /// This is an unsafe method registration API added for adding custom JIT
   /// backend support via custom C++ classes. It is not for general purpose use.
   class_& _def_unboxed(
-      std::string name,
+      const std::string& name,
       std::function<void(jit::Stack&)> func,
       c10::FunctionSchema schema,
       std::string doc_string = "") {