Revert "[1/N] Fixes clang-tidy warnings in header files (#113608)"
This reverts commit cab039fe9b9466f09f98318a11d2dcafef235426.
Reverted https://github.com/pytorch/pytorch/pull/113608 on behalf of https://github.com/huydhn due to "Sorry for reverting your change, but it is failing an internal build when -Wpessimizing-move is used" ([comment](https://github.com/pytorch/pytorch/pull/113608#issuecomment-1815424448))
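For context on the failure: clang's -Wpessimizing-move fires where a std::move actively prevents copy elision, typically on a return of a local variable or on the initialization of a variable from a temporary. A minimal sketch of the pattern the warning flags (illustrative only, not the code from the failing internal build):

```cpp
#include <string>
#include <utility>

std::string make_name() {
  std::string s = "tensor";
  // clang: "moving a local object in a return statement prevents copy elision"
  // [-Wpessimizing-move]; a plain `return s;` allows NRVO.
  return std::move(s);
}

// Also flagged: moving a temporary prevents copy elision of the prvalue.
std::string n = std::move(make_name());
```

In a build that treats warnings as errors, either instance is fatal, which is presumably why the added moves come out wholesale rather than being suppressed case by case.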
diff --git a/aten/src/ATen/TensorIndexing.h b/aten/src/ATen/TensorIndexing.h
index 6f5fd03..b041cb0 100644
--- a/aten/src/ATen/TensorIndexing.h
+++ b/aten/src/ATen/TensorIndexing.h
@@ -224,8 +224,7 @@
return self;
}
}
- return self.slice_symint(
- dim, std::move(start), std::move(stop), std::move(step));
+ return self.slice_symint(dim, start, stop, std::move(step));
}
static inline Tensor applySelect(
@@ -259,7 +258,7 @@
// if the index is negative, do not normalize it because that would fix the
// index on the current tensor size in the tracer. aten::select also works on
// negative indices
- return self.select_symint(dim, std::move(index));
+ return self.select_symint(dim, index);
}
static inline Tensor boolToIndexingTensorCPUOrCUDA(
@@ -535,7 +534,7 @@
/*original_tensor=*/self,
/*index=*/obj,
/*dim=*/&dim,
- /*specified_dims_ptr=*/&specified_dims,
+ /*specified_dims=*/&specified_dims,
/*real_dim=*/i,
/*outIndices=*/outIndices,
/*disable_slice_optimization=*/disable_slice_optimization,
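The `/*specified_dims=*/` hunk above tracks clang-tidy's bugprone-argument-comment check, which verifies that a `/*name=*/` comment matches the declared parameter name. A sketch of what the check enforces, using a hypothetical declaration rather than the real signature from this header:

```cpp
// Hypothetical function, for illustration only.
void applyImpl(int dim, int* specified_dims, bool disable_slice_optimization);

void caller(int* dims) {
  // OK: each comment matches the corresponding parameter name.
  applyImpl(/*dim=*/0, /*specified_dims=*/dims,
            /*disable_slice_optimization=*/false);

  // clang-tidy bugprone-argument-comment: argument name 'specified_dims_ptr'
  // in comment does not match parameter name 'specified_dims'.
  applyImpl(/*dim=*/0, /*specified_dims_ptr=*/dims,
            /*disable_slice_optimization=*/false);
}
```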
diff --git a/aten/src/ATen/core/function_schema.h b/aten/src/ATen/core/function_schema.h
index b381878..c48e9b0 100644
--- a/aten/src/ATen/core/function_schema.h
+++ b/aten/src/ATen/core/function_schema.h
@@ -10,7 +10,6 @@
#include <ATen/core/operator_name.h>
#include <ATen/core/dispatch/OperatorOptions.h>
#include <unordered_map>
-#include <utility>
namespace c10 {
@@ -28,12 +27,12 @@
struct Argument {
Argument(
std::string name = "",
- const TypePtr& type = nullptr,
+ TypePtr type = nullptr,
c10::optional<int32_t> N = c10::nullopt,
c10::optional<IValue> default_value = c10::nullopt,
bool kwarg_only = false,
c10::optional<AliasInfo> alias_info = c10::nullopt)
- : Argument(std::move(name), type, type, N, std::move(default_value), kwarg_only, std::move(alias_info)) {}
+ : Argument(name, type, type, N, default_value, kwarg_only, alias_info) {}
Argument(
std::string name,
@@ -46,7 +45,7 @@
: name_(std::move(name)),
type_(fake_type ? std::move(fake_type) : TensorType::get()),
real_type_(real_type ? std::move(real_type) : type_),
- N_(N),
+ N_(std::move(N)),
default_value_(std::move(default_value)),
alias_info_(alias_info ? std::make_unique<AliasInfo>(std::move(*alias_info)) : nullptr),
kwarg_only_(kwarg_only) {
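The second Argument constructor above (`name_(std::move(name))`, ...) is the classic sink-parameter idiom: take the argument by value, then std::move it into the member, so rvalue callers pay one move and lvalue callers one copy. A self-contained sketch with illustrative names:

```cpp
#include <string>
#include <utility>

struct ArgSketch {
  // Sink parameter: by-value `name` binds to both lvalues and rvalues;
  // std::move then turns the local copy into a cheap member-wise move.
  explicit ArgSketch(std::string name) : name_(std::move(name)) {}
  std::string name_;
};

void demo() {
  ArgSketch a{std::string("x")};  // rvalue: one move, no copy
  std::string s = "y";
  ArgSketch b{s};                 // lvalue: one copy into the parameter, one move
}
```

When the callee always stores the argument, the by-value form is generally preferred over `const&`, which would force an unavoidable copy at the point of storage.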
diff --git a/torch/csrc/api/include/torch/ordered_dict.h b/torch/csrc/api/include/torch/ordered_dict.h
index 31a2ab6..46e8e3e 100644
--- a/torch/csrc/api/include/torch/ordered_dict.h
+++ b/torch/csrc/api/include/torch/ordered_dict.h
@@ -36,8 +36,8 @@
// values. I tried to make this noexcept (conditional on the move constructors
// of index_ and items_ being noexcept) but the obvious spelling didn't
// compile on Windows.
- OrderedDict(OrderedDict&& other) noexcept = default;
- OrderedDict& operator=(OrderedDict&& other) noexcept = default;
+ OrderedDict(OrderedDict&& other) = default;
+ OrderedDict& operator=(OrderedDict&& other) = default;
~OrderedDict() = default;
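The comment above the OrderedDict moves mentions that the "obvious spelling" of a conditional noexcept did not compile on Windows. Presumably that spelling looked roughly like the sketch below; the member types are stand-ins, since only index_ and items_ are named in the comment:

```cpp
#include <cstddef>
#include <type_traits>
#include <unordered_map>
#include <vector>

template <typename Key, typename Value>
struct OrderedDictSketch {
  std::unordered_map<Key, size_t> index_;  // stand-in for index_
  std::vector<Value> items_;               // stand-in for items_

  OrderedDictSketch() = default;
  // The "obvious spelling": noexcept conditioned on the members' moves.
  // It matches the implicitly generated specification on conforming
  // compilers, but per the comment in the header, it failed on Windows.
  OrderedDictSketch(OrderedDictSketch&& other) noexcept(
      std::is_nothrow_move_constructible<decltype(index_)>::value &&
      std::is_nothrow_move_constructible<decltype(items_)>::value) = default;
};
```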
diff --git a/torch/csrc/autograd/custom_function.h b/torch/csrc/autograd/custom_function.h
index 52fc900..a60102b 100644
--- a/torch/csrc/autograd/custom_function.h
+++ b/torch/csrc/autograd/custom_function.h
@@ -305,8 +305,8 @@
outputs = T::forward(&node->ctx_, std::forward<Args>(args)...);
}
- _jvp_fn_t jvp_fn = [](const variable_list& inputs,
- const variable_list& gI) -> variable_list {
+ _jvp_fn_t jvp_fn = [](variable_list inputs,
+ variable_list gI) -> variable_list {
TORCH_CHECK(
false,
"jvp is not implemented for the c++ API of custom Function yet.",
diff --git a/torch/csrc/jit/api/module.h b/torch/csrc/jit/api/module.h
index 5c7aea7..83d46e0 100644
--- a/torch/csrc/jit/api/module.h
+++ b/torch/csrc/jit/api/module.h
@@ -90,8 +90,6 @@
Module() = default;
Module(const Module&) = default;
Module& operator=(const Module&) = default;
- Module(Module&&) noexcept = default;
- Module& operator=(Module&&) noexcept = default;
Module(
c10::QualifiedName,
std::shared_ptr<CompilationUnit> cu,
@@ -279,7 +277,7 @@
// torch.jit.load. It only works on tensors and lists/dicts of tensors
// because tracing is only supported by these types.
void store_traced_inputs(std::string func_name, std::vector<IValue> inputs) {
- if (inputs.empty()) {
+ if (inputs.size() == 0) {
return;
}
auto c10_inputs = c10::impl::GenericList(AnyType::get());
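Dropping Module's defaulted moves is more than cosmetic: Module declares copy operations, and a user-declared copy constructor suppresses the implicit move operations, so without those two lines std::vector<Module> copies elements on reallocation. A self-contained illustration of that mechanic (names hypothetical):

```cpp
#include <vector>

struct M {
  M() = default;
  M(const M&) = default;
  M& operator=(const M&) = default;
  // Without the two lines below (the state this revert returns to), the
  // user-declared copy operations suppress the implicit moves, and vector
  // reallocation falls back to copying via std::move_if_noexcept.
  M(M&&) noexcept = default;
  M& operator=(M&&) noexcept = default;
};

void grow() {
  std::vector<M> v(16);
  v.resize(1024);  // moves elements only because M's move ctor is noexcept
}
```

The same reasoning applies to the jit::Object hunk that follows.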
diff --git a/torch/csrc/jit/api/object.h b/torch/csrc/jit/api/object.h
index 7ccacf3..6a3d38a 100644
--- a/torch/csrc/jit/api/object.h
+++ b/torch/csrc/jit/api/object.h
@@ -25,8 +25,6 @@
Object() = default;
Object(const Object&) = default;
Object& operator=(const Object&) = default;
- Object(Object&&) noexcept = default;
- Object& operator=(Object&&) noexcept = default;
Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {}
Object(std::shared_ptr<CompilationUnit> cu, const c10::ClassTypePtr& type);
Object(
diff --git a/torch/csrc/jit/ir/ir.h b/torch/csrc/jit/ir/ir.h
index 4781b15..ec8e1c9 100644
--- a/torch/csrc/jit/ir/ir.h
+++ b/torch/csrc/jit/ir/ir.h
@@ -446,7 +446,7 @@
return callstack_;
}
void setCallStack(InlinedCallStackPtr cs) {
- callstack_ = std::move(cs);
+ callstack_ = cs;
}
// NB: This returns an ArrayRef; that means that it will
diff --git a/torch/csrc/jit/mobile/module.h b/torch/csrc/jit/mobile/module.h
index 5e5d87f..8d7e426 100644
--- a/torch/csrc/jit/mobile/module.h
+++ b/torch/csrc/jit/mobile/module.h
@@ -5,8 +5,6 @@
#include <torch/csrc/jit/mobile/method.h>
#include <torch/csrc/jit/mobile/quantization.h>
-#include <utility>
-
namespace torch {
namespace jit {
namespace mobile {
@@ -139,7 +137,7 @@
}
void set_delete_memory(std::shared_ptr<char> delete_mem) {
- mem_to_delete_ = std::move(delete_mem);
+ mem_to_delete_ = delete_mem;
}
void set_min_operator_version(int64_t version) {
diff --git a/torch/csrc/jit/runtime/argument_spec.h b/torch/csrc/jit/runtime/argument_spec.h
index 06c77ed..7840b9a 100644
--- a/torch/csrc/jit/runtime/argument_spec.h
+++ b/torch/csrc/jit/runtime/argument_spec.h
@@ -72,10 +72,10 @@
"ArgumentInfo is expected to be a 32-bit struct");
struct ArgumentSpec {
- ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs)
- : hash_code(c10::hash_combine(
- num_flat_tensor_inputs,
- num_flat_optional_inputs)) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
+ ArgumentSpec(size_t num_flat_tensor_inputs, size_t num_flat_optional_inputs) {
+ hash_code =
+ c10::hash_combine(num_flat_tensor_inputs, num_flat_optional_inputs);
tensor_args.reserve(num_flat_tensor_inputs);
optional_presence.reserve(num_flat_optional_inputs);
}
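The ArgumentSpec hunk swaps an initializer-list computation of hash_code for an assignment in the constructor body, which is why the NOLINT for cppcoreguidelines-pro-type-member-init returns: hash_code is formally uninitialized when the body starts. For reference, c10::hash_combine is a boost-style combiner; a simplified stand-in (the exact shape in c10/util/hash.h may differ):

```cpp
#include <cstddef>

// Boost-style hash combiner, simplified; assumed shape of c10::hash_combine.
inline size_t hash_combine(size_t seed, size_t value) {
  return seed ^ (value + 0x9e3779b9 + (seed << 6) + (seed >> 2));
}

struct SpecSketch {
  size_t hash_code;  // mirrors ArgumentSpec::hash_code (assumption)

  // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
  SpecSketch(size_t num_tensors, size_t num_optionals) {
    // Assigned in the body, as in the reverted code; the member-init lint
    // cannot see that every path initializes hash_code before use.
    hash_code = hash_combine(num_tensors, num_optionals);
  }
};
```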
diff --git a/torch/csrc/jit/serialization/pickler.h b/torch/csrc/jit/serialization/pickler.h
index 4f553b6..d1caf17 100644
--- a/torch/csrc/jit/serialization/pickler.h
+++ b/torch/csrc/jit/serialization/pickler.h
@@ -327,8 +327,8 @@
// Register function pointer of Tensor BackendMetadata for serialization.
TORCH_API inline void TensorBackendMetaRegistry(
c10::DeviceType t,
- const BackendMetaPtr& get_fptr,
- const BackendMetaPtr& set_fptr) {
+ BackendMetaPtr get_fptr,
+ BackendMetaPtr set_fptr) {
// allowlist verification
// Only if the devicetype is in the allowlist,
// we allow the serialization extension to be registered for backendmeta data.
@@ -416,7 +416,7 @@
// NOTE: This overload is required by unpickler.cpp
inline void setTensorMetadata(
const at::Tensor& t,
- const c10::Dict<c10::IValue, c10::IValue>& metadata_idict) {
+ c10::Dict<c10::IValue, c10::IValue> metadata_idict) {
std::unordered_map<std::string, bool> metadata;
for (auto& pair : metadata_idict) {
auto key = *pair.key().toString();
diff --git a/torch/csrc/jit/serialization/source_range_serialization.h b/torch/csrc/jit/serialization/source_range_serialization.h
index bbfd533..577a383 100644
--- a/torch/csrc/jit/serialization/source_range_serialization.h
+++ b/torch/csrc/jit/serialization/source_range_serialization.h
@@ -37,7 +37,7 @@
class SourceRangeDeserializer {
public:
SourceRangeDeserializer() = default;
- explicit SourceRangeDeserializer(const c10::IValue& text_table) {
+ explicit SourceRangeDeserializer(c10::IValue text_table) {
for (const auto& x : text_table.toTuple()->elements()) {
text_table_.emplace_back(std::make_shared<std::string>(x.toStringRef()));
}
diff --git a/torch/custom_class.h b/torch/custom_class.h
index a556ae6..a7f5dd5 100644
--- a/torch/custom_class.h
+++ b/torch/custom_class.h
@@ -103,7 +103,7 @@
"__init__",
std::move(func),
std::move(doc_string),
- default_args);
+ std::move(default_args));
return *this;
}
@@ -126,7 +126,7 @@
"__init__",
std::move(init_lambda_wrapper),
std::move(doc_string),
- default_args);
+ std::move(default_args));
return *this;
}
@@ -160,7 +160,7 @@
std::move(name),
std::move(wrapped_f),
std::move(doc_string),
- default_args);
+ std::move(default_args));
return *this;
}
@@ -257,7 +257,7 @@
/// This is an unsafe method registration API added for adding custom JIT
/// backend support via custom C++ classes. It is not for general purpose use.
class_& _def_unboxed(
- const std::string& name,
+ std::string name,
std::function<void(jit::Stack&)> func,
c10::FunctionSchema schema,
std::string doc_string = "") {