Re-sent the reverted PR: Add register_frozenpython.cpp to the torch::deploy interpreter library in the OSS build (#67303)
Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/67303
Test Plan: Imported from OSS
Reviewed By: suo
Differential Revision: D32016061
Pulled By: shunting314
fbshipit-source-id: 9460c90dd4f630f4c81dbfbbd772446ddffbabd0
diff --git a/tools/linter/clang_tidy/__main__.py b/tools/linter/clang_tidy/__main__.py
index bba57cb..fa6403a 100644
--- a/tools/linter/clang_tidy/__main__.py
+++ b/tools/linter/clang_tidy/__main__.py
@@ -11,6 +11,7 @@
from tools.linter.clang_tidy.run import run
from tools.linter.clang_tidy.generate_build_files import generate_build_files
from tools.linter.install.clang_tidy import INSTALLATION_PATH
+from tools.linter.install.download_bin import PYTORCH_ROOT
def clang_search_dirs() -> List[str]:
@@ -46,6 +47,17 @@
elif append_path:
search_paths.append(line.strip())
+ # There are source files that include <torch/cuda.h>, <torch/torch.h> etc.
+ # under the torch/csrc/api/include folder. Since torch/csrc/api/include is not
+ # a search path for clang-tidy, there will be clang-diagnostic errors
+ # complaining that those header files cannot be found. Changing the source code
+ # to include the full path like torch/csrc/api/include/torch/torch.h does not
+ # work well since torch/torch.h includes torch/all.h which in turn includes more.
+ # We would need to recursively change multiple files.
+ # Adding the include path to the lint script should be a better solution.
+ search_paths.append(
+ os.path.join(PYTORCH_ROOT, "torch/csrc/api/include"),
+ )
return search_paths
diff --git a/torch/csrc/api/include/torch/detail/TensorDataContainer.h b/torch/csrc/api/include/torch/detail/TensorDataContainer.h
index 4f740e2..ccc7ff5 100644
--- a/torch/csrc/api/include/torch/detail/TensorDataContainer.h
+++ b/torch/csrc/api/include/torch/detail/TensorDataContainer.h
@@ -91,6 +91,7 @@
// NOTE: For tensors with zero-size dimensions (e.g. `torch::tensor({{}, {}})`),
// the innermost empty braced-init-list `{}` matches the default constructor of
// the innermost `TensorDataContainer`.
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
TensorDataContainer() :
sizes_({0}),
// NOTE: In Python, the dtype of tensors with zero-size dimensions (e.g. `torch.tensor([[], []])`)
@@ -103,9 +104,12 @@
scalar_type_(at::k##S), \
type_(TensorDataContainerType::Scalar), \
scalar_(value) {}
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
TensorDataContainer(std::initializer_list<TensorDataContainer> init_list) :
sizes_(),
scalar_type_(init_list.begin()->scalar_type()),
@@ -144,7 +148,9 @@
tensor_ = at::tensor(values, at::dtype(scalar_type_).device(at::kCPU)); \
} \
}
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR)
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
@@ -161,7 +167,9 @@
// ArrayRef<bool> cannot be constructed from a std::vector<bool> bitfield.
#define TENSOR(T, S) \
TensorDataContainer(const std::vector<T>& values) : TensorDataContainer(at::ArrayRef<T>(values)) {}
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_SCALAR_TYPES_AND2(Half, BFloat16, TENSOR)
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
AT_FORALL_COMPLEX_TYPES(TENSOR)
#undef TENSOR
diff --git a/torch/csrc/autograd/profiler_legacy.h b/torch/csrc/autograd/profiler_legacy.h
index 363a42d..040693a 100644
--- a/torch/csrc/autograd/profiler_legacy.h
+++ b/torch/csrc/autograd/profiler_legacy.h
@@ -357,6 +357,7 @@
// a std::vector resize from taking a large amount of time inside
// a profiling event
struct RangeEventList {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init,modernize-use-equals-default)
RangeEventList() {
events_.reserve(kReservedCapacity);
}
@@ -369,6 +370,7 @@
std::vector<LegacyEvent> consolidate() {
std::lock_guard<std::mutex> lock(mutex_);
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<LegacyEvent> result;
result.insert(
result.begin(),
diff --git a/torch/csrc/autograd/saved_variable.h b/torch/csrc/autograd/saved_variable.h
index c887aa7..d231f17 100644
--- a/torch/csrc/autograd/saved_variable.h
+++ b/torch/csrc/autograd/saved_variable.h
@@ -18,6 +18,7 @@
/// A snapshot of a variable at a certain version. A `SavedVariable` stores
/// enough information to reconstruct a variable from a certain point in time.
+// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
class TORCH_API SavedVariable {
public:
SavedVariable() = default;
diff --git a/torch/csrc/deploy/interpreter/CMakeLists.txt b/torch/csrc/deploy/interpreter/CMakeLists.txt
index ed4763b..7da73d1 100644
--- a/torch/csrc/deploy/interpreter/CMakeLists.txt
+++ b/torch/csrc/deploy/interpreter/CMakeLists.txt
@@ -88,6 +88,7 @@
set(INTERPRETER_LIB_SOURCES
${INTERPRETER_DIR}/interpreter_impl.cpp
${INTERPRETER_DIR}/builtin_registry.cpp
+ ${INTERPRETER_DIR}/register_frozenpython.cpp
${INTERPRETER_DIR}/import_find_sharedfuncptr.cpp
${FROZEN_FILES}
${LINKER_SCRIPT}
diff --git a/torch/csrc/deploy/test_deploy_gpu.cpp b/torch/csrc/deploy/test_deploy_gpu.cpp
index dc4474d..940aff5 100644
--- a/torch/csrc/deploy/test_deploy_gpu.cpp
+++ b/torch/csrc/deploy/test_deploy_gpu.cpp
@@ -36,6 +36,7 @@
auto M = model.acquireSession();
M.self.attr("to")({"cuda"});
}
+ // NOLINTNEXTLINE(cppcoreguidelines-init-variables)
std::vector<at::IValue> inputs;
{
auto I = p.acquireSession();
diff --git a/torch/csrc/jit/frontend/tracer.h b/torch/csrc/jit/frontend/tracer.h
index 5e1cc90..e0c6967 100644
--- a/torch/csrc/jit/frontend/tracer.h
+++ b/torch/csrc/jit/frontend/tracer.h
@@ -190,10 +190,12 @@
};
struct WithNestedTracingFrame {
+ // NOLINTNEXTLINE(modernize-use-equals-default)
WithNestedTracingFrame() {
getTracingState()->enterFrame();
}
+ // NOLINTNEXTLINE(modernize-use-equals-default)
~WithNestedTracingFrame() {
getTracingState()->leaveFrame();
}