turn on -Werror=unused-function in our Bazel CPU build
Summary:
We also fix the existing unused-function violations that the flag uncovers.
Note that we only do this for the CPU build: nvcc is considered a C++
toolchain, but it does not support the same warning flags, so adding them
to the GPU build causes nvcc errors.
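As an illustration (a minimal, hypothetical example.cpp, not part of this
change), this is the pattern the flag now rejects under GCC/Clang: a static
function that is defined but never used in its translation unit.

    // example.cpp (hypothetical)
    // -Werror=unused-function promotes -Wunused-function to an error,
    // so this unused static definition fails the build.
    static int helper_never_called(int x) {
      return x + 1;
    }

    int main() {
      return 0;  // helper_never_called has no callers -> build error
    }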
Test Plan: Built locally; relying on CI to confirm.
Reviewers: malfet
Pull Request resolved: https://github.com/pytorch/pytorch/pull/79154
Approved by: https://github.com/seemethere, https://github.com/osalpekar, https://github.com/albanD
diff --git a/.bazelrc b/.bazelrc
index 60d6fda..6b97431 100644
--- a/.bazelrc
+++ b/.bazelrc
@@ -49,5 +49,19 @@
# On the bright side, this means we don't have to more broadly apply
# the exceptions to an entire target.
build \
- --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
- --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits
+ --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=type-limits \
+ --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=type-limits \
+ --per_file_copt='^//.*\.(cpp|cc)$'@-Werror=unused-function \
+ --per_file_copt=^//.*\.cu$@--compiler-options=-Werror=unused-function
+
+build \
+ --per_file_copt=//:aten/src/ATen/RegisterCompositeExplicitAutograd.cpp@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterCompositeImplicitAutograd.cpp@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterMkldnnCPU.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterNestedTensorCPU.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterQuantizedCPU.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterSparseCPU.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterSparseCsrCPU.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:aten/src/ATen/RegisterZeroTensor.cpp$@-Wno-error=unused-function \
+ --per_file_copt=//:torch/csrc/lazy/generated/RegisterAutogradLazy.cpp@-Wno-error=unused-function \
+ --per_file_copt=//:torch/csrc/lazy/generated/RegisterLazy.cpp@-Wno-error=unused-function
diff --git a/aten/src/ATen/NamedTensorUtils.cpp b/aten/src/ATen/NamedTensorUtils.cpp
index 31ff41e..ca38f7b 100644
--- a/aten/src/ATen/NamedTensorUtils.cpp
+++ b/aten/src/ATen/NamedTensorUtils.cpp
@@ -260,33 +260,6 @@
return outnames;
}
-// tensor_dotted_dim and other_dotted_dim are the dimensions of the two
-// tensors that we contract together. Usually other_dotted_dim is 0
-// and tensor_dotted_dim is the last dim of tensor, but there are some special
-// cases like einsum and tensordot where one can contract arbitrary dims.
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static std::vector<Dimname> compute_dot_product_outnames(
- DimnameList tensor_names,
- int64_t tensor_dotted_dim,
- DimnameList other_names,
- int64_t other_dotted_dim) {
- int64_t num_outnames = tensor_names.size() + other_names.size() - 2;
- if (num_outnames == 0) {
- return {};
- }
- std::vector<Dimname> outnames(num_outnames, Dimname::wildcard());
- int64_t index = 0;
- for (const auto j : c10::irange(static_cast<int64_t>(tensor_names.size()))) {
- if (j == tensor_dotted_dim) continue;
- outnames[index++] = tensor_names[j];
- }
- for (const auto j : c10::irange(static_cast<int64_t>(other_names.size()))) {
- if (j == other_dotted_dim) continue;
- outnames[index++] = other_names[j];
- }
- return outnames;
-}
-
static void check_feature_names_are_distinct(
DimnameList self_names,
DimnameList other_names,
@@ -306,36 +279,6 @@
". Please rename the input tensors with `Tensor.rename` to prevent this.");
}
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList batch_dims(DimnameList names) {
- if (names.size() <= 2) {
- return {};
- }
- return DimnameList(names.begin(), names.end() - 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static DimnameList feature_dims(DimnameList names) {
- if (names.size() <= 2) {
- return names;
- }
- return DimnameList(names.end() - 2, 2);
-}
-
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-static bool are_distinct(DimnameList batch_dims, DimnameList feature_dims) {
- for (const auto& target : feature_dims) {
- if (target.isWildcard()) {
- continue;
- }
- if (std::any_of(batch_dims.begin(), batch_dims.end(),
- [&](const Dimname& dim) { return target == dim; })) {
- return false;
- }
- }
- return true;
-}
-
static int64_t num_batch_dims(DimnameList names) {
if (names.size() <= 2) {
return 0;
diff --git a/aten/src/ATen/native/BinaryOps.cpp b/aten/src/ATen/native/BinaryOps.cpp
index 5a4800e..8071700 100644
--- a/aten/src/ATen/native/BinaryOps.cpp
+++ b/aten/src/ATen/native/BinaryOps.cpp
@@ -12,26 +12,6 @@
#include <torch/library.h>
namespace at {
-namespace native {
-
-// These are still needed because we don't have C++ conversions from number
-// types (int, float, etc.) to Tensor (only to Scalar). They're not exposed
-// to Python.
-
-static void check_convert(const Scalar& scalar, ScalarType scalarType) {
- // Validate that is possible to convert scalar to tensor dtype without
- // overflow
- AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND4(
- at::ScalarType::Bool,
- at::ScalarType::BFloat16,
- at::ScalarType::Half,
- at::ScalarType::ComplexHalf,
- scalarType,
- "check_convert",
- [&] { scalar.to<scalar_t>(); });
-}
-
-} // namespace native
namespace meta {
diff --git a/aten/src/ATen/native/ReduceOps.cpp b/aten/src/ATen/native/ReduceOps.cpp
index 2015c16..a62c898 100644
--- a/aten/src/ATen/native/ReduceOps.cpp
+++ b/aten/src/ATen/native/ReduceOps.cpp
@@ -1111,18 +1111,6 @@
return at::native::nansum_out(self, dim, keepdim, dtype, result);
}
-static Tensor& prod_out_impl(Tensor& result, const Tensor& self, IntArrayRef dim,
- bool keepdim, c10::optional<ScalarType> opt_dtype) {
- ScalarType dtype = get_dtype_from_result(result, opt_dtype);
- auto iter = make_reduction("prod", result, self, dim, keepdim, dtype);
- if (iter.numel() == 0) {
- result.fill_(1);
- } else {
- prod_stub(iter.device_type(), iter);
- }
- return result;
-}
-
// NOTE: this could be implemented via diag and sum, but this has perf problems,
// see https://github.com/pytorch/pytorch/pull/47305,
Tensor trace_cpu(const Tensor& self) {
diff --git a/aten/src/ATen/native/quantized/AffineQuantizer.cpp b/aten/src/ATen/native/quantized/AffineQuantizer.cpp
index 0e15534..e2fa8f6 100644
--- a/aten/src/ATen/native/quantized/AffineQuantizer.cpp
+++ b/aten/src/ATen/native/quantized/AffineQuantizer.cpp
@@ -35,11 +35,6 @@
return;
}
-void checkCPUTensor(const std::string& fn_name, const Tensor& t) {
- TORCH_CHECK(
- t.device().type() == kCPU, fn_name, " only supports CPU device type.");
-}
-
void checkFloatTensor(const std::string& fn_name, const Tensor& t) {
TORCH_CHECK(
t.scalar_type() == kFloat, fn_name, " expects a Float Tensor, got ",
diff --git a/c10/test/util/exception_test.cpp b/c10/test/util/exception_test.cpp
index af06b4c..0fc7abe 100644
--- a/c10/test/util/exception_test.cpp
+++ b/c10/test/util/exception_test.cpp
@@ -5,9 +5,6 @@
using c10::Error;
namespace {
-bool throw_func() {
- throw std::runtime_error("I'm throwing...");
-}
template <class Functor>
inline void expectThrowsEq(Functor&& functor, const char* expectedMessage) {
@@ -26,9 +23,10 @@
#ifdef NDEBUG
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false));
- // Does nothing - `throw_func()` should not be evaluated
+ // Does nothing - `throw ...` should not be evaluated
// NOLINTNEXTLINE(cppcoreguidelines-avoid-goto,hicpp-avoid-goto)
- ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(throw_func()));
+ ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(
+ (throw std::runtime_error("I'm throwing..."), true)));
#else
ASSERT_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(false), c10::Error);
ASSERT_NO_THROW(TORCH_INTERNAL_ASSERT_DEBUG_ONLY(true));
diff --git a/caffe2/ideep/operators/adam_op.cc b/caffe2/ideep/operators/adam_op.cc
index a8d43f4..8228e7d 100644
--- a/caffe2/ideep/operators/adam_op.cc
+++ b/caffe2/ideep/operators/adam_op.cc
@@ -4,31 +4,6 @@
namespace {
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-void adam_ideep_update(
- int N,
- const float* g,
- const float* m,
- const float* v,
- float* ng,
- float* nm,
- float* nv,
- float beta1,
- float beta2,
- float eps_hat,
- float correction,
- const float* lr) {
-#ifdef _OPENMP
- #pragma omp parallel for schedule(static)
-#endif
- for (auto i = 0; i < N; ++i) {
- float gi = g[i];
- float mi = nm[i] = m[i] * beta1 + gi * (1 - beta1);
- float vi = nv[i] = v[i] * beta2 + gi * gi * (1 - beta2);
- ng[i] = lr[0] * correction * mi / (std::sqrt(vi) + eps_hat);
- }
-}
-
void adam_ideep_compute(
int N,
const float* w,
diff --git a/caffe2/opt/onnxifi_transformer.cc b/caffe2/opt/onnxifi_transformer.cc
index 2af9088..0ccbc5e 100644
--- a/caffe2/opt/onnxifi_transformer.cc
+++ b/caffe2/opt/onnxifi_transformer.cc
@@ -31,28 +31,6 @@
return shape_map;
}
-// NOLINTNEXTLINE(clang-diagnostic-unused-function)
-uint64_t onnxifiDataType(caffe2::TensorProto::DataType t) {
-#define CAFFE2_TO_ONNXIFI_TYPE(x, y) \
- case (caffe2::TensorProto::x): \
- return y
- switch (t) {
- CAFFE2_TO_ONNXIFI_TYPE(FLOAT, ONNXIFI_DATATYPE_FLOAT32);
- CAFFE2_TO_ONNXIFI_TYPE(INT8, ONNXIFI_DATATYPE_INT8);
- CAFFE2_TO_ONNXIFI_TYPE(UINT8, ONNXIFI_DATATYPE_UINT8);
- CAFFE2_TO_ONNXIFI_TYPE(INT16, ONNXIFI_DATATYPE_INT16);
- CAFFE2_TO_ONNXIFI_TYPE(UINT16, ONNXIFI_DATATYPE_UINT16);
- CAFFE2_TO_ONNXIFI_TYPE(INT32, ONNXIFI_DATATYPE_INT32);
- CAFFE2_TO_ONNXIFI_TYPE(INT64, ONNXIFI_DATATYPE_INT64);
- CAFFE2_TO_ONNXIFI_TYPE(FLOAT16, ONNXIFI_DATATYPE_FLOAT16);
- default:
- LOG(WARNING) << "Unsupported Caffe2 tensor type: " << t
- << ", fallback to FLOAT";
- return ONNXIFI_DATATYPE_FLOAT32;
- }
-#undef CAFFE2_TO_ONNXIFI_TYPE
-}
-
std::vector<::ONNX_NAMESPACE::ValueInfoProto> convertToValueInfo(
const std::vector<std::string>& names,
const std::unordered_map<std::string, TensorShape>& shape_hints,
diff --git a/test/cpp/jit/torch_python_test.cpp b/test/cpp/jit/torch_python_test.cpp
index 74b9343..14193f6 100644
--- a/test/cpp/jit/torch_python_test.cpp
+++ b/test/cpp/jit/torch_python_test.cpp
@@ -34,29 +34,30 @@
AT_ASSERT(module.attr("dropout").toModule().is_training());
}
-void testSerializationInterop() {
- if (isSandcastle()) {
- // The module file to load is not generated in Sandcastle
- return;
- }
+// TODO: this test never ran before and is broken.
+// void testSerializationInterop() {
+// if (isSandcastle()) {
+// // The module file to load is not generated in Sandcastle
+// return;
+// }
- // This should be generated by `test/cpp/jit/tests_setup.py`
- std::ifstream input_stream("ivalue.pt");
- std::vector<char> input;
- input.insert(
- input.begin(),
- std::istream_iterator<char>(input_stream),
- std::istream_iterator<char>());
- IValue ivalue = pickle_load(input);
+// // This should be generated by `test/cpp/jit/tests_setup.py`
+// std::ifstream input_stream("ivalue.pt");
+// std::vector<char> input;
+// input.insert(
+// input.begin(),
+// std::istream_iterator<char>(input_stream),
+// std::istream_iterator<char>());
+// IValue ivalue = pickle_load(input);
- auto elements = ivalue.toTupleRef().elements();
- auto ones = torch::ones({2, 2});
- AT_ASSERT(ones.equal(elements.at(0).toTensor()));
+// auto elements = ivalue.toTupleRef().elements();
+// auto ones = torch::ones({2, 2});
+// AT_ASSERT(ones.equal(elements.at(0).toTensor()));
- // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
- auto twos = torch::ones({3, 5}) * 2;
- AT_ASSERT(twos.equal(elements.at(1).toTensor()));
-}
+// // NOLINTNEXTLINE(cppcoreguidelines-avoid-magic-numbers)
+// auto twos = torch::ones({3, 5}) * 2;
+// AT_ASSERT(twos.equal(elements.at(1).toTensor()));
+// }
void testTorchSaveError() {
if (isSandcastle()) {
diff --git a/test/cpp/tensorexpr/test_conv.cpp b/test/cpp/tensorexpr/test_conv.cpp
index cf458af..e723038 100644
--- a/test/cpp/tensorexpr/test_conv.cpp
+++ b/test/cpp/tensorexpr/test_conv.cpp
@@ -12,14 +12,14 @@
namespace te = torch::jit::tensorexpr;
namespace F = torch::nn::functional;
+#ifdef TORCH_ENABLE_LLVM
+
// Generate test data with few bits of precision, to minimize error
// accumulation from floating-point reordering.
static at::Tensor genTestData(c10::IntArrayRef args) {
return at::trunc(at::randn(args) * 256.0f) / 256.0f;
}
-#ifdef TORCH_ENABLE_LLVM
-
TEST(Conv, DepthwiseConv2D) {
constexpr int N = 1, C = 72, H = 56, W = 56;
constexpr int K = 72, R = 3, S = 3;
diff --git a/test/cpp/tensorexpr/tutorial.cpp b/test/cpp/tensorexpr/tutorial.cpp
index e34d980..3f4c32a 100644
--- a/test/cpp/tensorexpr/tutorial.cpp
+++ b/test/cpp/tensorexpr/tutorial.cpp
@@ -54,9 +54,13 @@
using namespace torch::jit::tensorexpr;
+#ifdef TORCH_ENABLE_LLVM
+
// Helper function to print a snippet from a big multi-line string
static void printLinesToFrom(const std::string& input_str, int from, int to);
+#endif
+
int main(int argc, char* argv[]) {
std::cout << "*** Structure of tensor expressions and statements ***"
<< std::endl;
diff --git a/torch/csrc/DynamicTypes.cpp b/torch/csrc/DynamicTypes.cpp
index 3ae7b84..c929f38 100644
--- a/torch/csrc/DynamicTypes.cpp
+++ b/torch/csrc/DynamicTypes.cpp
@@ -28,22 +28,6 @@
std::array<THPLayout*, static_cast<int>(at::Layout::NumOptions)> layout_registry = {};
-at::Backend get_backend(bool is_cuda, bool is_sparse) {
- if (is_cuda) {
- if (is_sparse){
- return at::Backend::SparseCUDA;
- } else {
- return at::Backend::CUDA;
- }
- } else {
- if (is_sparse){
- return at::Backend::SparseCPU;
- } else {
- return at::Backend::CPU;
- }
- }
-}
-
at::DeprecatedTypeProperties* get_type_properties(at::DeviceType device_type, at::ScalarType scalarType) {
at::Backend backend;
if (device_type == at::kCPU) {
diff --git a/torch/csrc/Storage.cpp b/torch/csrc/Storage.cpp
index 8de5ae5..1df0fa1 100644
--- a/torch/csrc/Storage.cpp
+++ b/torch/csrc/Storage.cpp
@@ -337,20 +337,6 @@
END_HANDLE_TH_ERRORS
}
-static PyObject * THPStorage_dtype(THPStorage *self, void *unused)
-{
- HANDLE_TH_ERRORS
- return torch::autograd::utils::wrap(
- torch::getTHPDtype(at::typeMetaToScalarType(
-#ifdef THQUANTIZED
- caffe2::TypeMeta::Make<quantized_t>()
-#else
- caffe2::TypeMeta::Make<uint8_t>()
-#endif
- )));
- END_HANDLE_TH_ERRORS
-}
-
typedef PyObject *(*getter)(PyObject *, void *);
// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/torch/csrc/autograd/init.cpp b/torch/csrc/autograd/init.cpp
index f6c1bc7..fb333d2 100644
--- a/torch/csrc/autograd/init.cpp
+++ b/torch/csrc/autograd/init.cpp
@@ -480,18 +480,6 @@
END_HANDLE_TH_ERRORS
}
-static const char* scalarTypeName(const at::ScalarType type) {
- switch (type) {
-#define DEFINE_CASE(ctype, name) \
- case at::ScalarType::name: \
- return #ctype;
- AT_FORAUTOCAST_SCALAR_TYPES(DEFINE_CASE)
-#undef DEFINE_CASE
- default:
- throw std::runtime_error("unknown scalar type for autocast");
- }
-}
-
static PyObject * get_autocast_gpu_dtype(PyObject* _unused, PyObject *arg){
HANDLE_TH_ERRORS
at::ScalarType current_dtype = at::autocast::get_autocast_gpu_dtype();
diff --git a/torch/csrc/jit/codegen/cuda/codegen.cpp b/torch/csrc/jit/codegen/cuda/codegen.cpp
index 9aee0cc..70d6c30 100644
--- a/torch/csrc/jit/codegen/cuda/codegen.cpp
+++ b/torch/csrc/jit/codegen/cuda/codegen.cpp
@@ -27,12 +27,6 @@
return ss.str();
}
-std::string refType(DataType dt) {
- std::stringstream ss;
- ss << dt << "&";
- return ss.str();
-}
-
//! Utility class to build an argument list
class ArgumentBuilder {
public:
diff --git a/torch/csrc/jit/codegen/cuda/index_compute.cpp b/torch/csrc/jit/codegen/cuda/index_compute.cpp
index a000dca..93f9e3c 100644
--- a/torch/csrc/jit/codegen/cuda/index_compute.cpp
+++ b/torch/csrc/jit/codegen/cuda/index_compute.cpp
@@ -2407,19 +2407,6 @@
return contig_id_infos;
}
-IterDomain* getMappedReferenceDomain(
- IterDomain* id,
- const ReferenceTensor& reference) {
- // Partially overlaps with getPredicateContigIds()
- auto concrete_id = GpuLower::current()->caMap()->getConcreteMappedID(
- id, IdMappingMode::EXACT);
- auto it = reference.concrete_to_id.find(concrete_id);
- if (it == reference.concrete_to_id.end()) {
- return nullptr;
- }
- return it->second;
-}
-
std::vector<PredicateDomainInfo> getNonDivisibleConsumerDomainsToPredicate(
TensorView* consumer_tv) {
const auto& non_divisible_split_info =
diff --git a/torch/csrc/jit/codegen/cuda/ir_nodes.cpp b/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
index f2c366e..9bd5381 100644
--- a/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
+++ b/torch/csrc/jit/codegen/cuda/ir_nodes.cpp
@@ -1351,18 +1351,6 @@
contiguity_(src->contiguity()),
has_nontrivial_reduction_(src->has_nontrivial_reduction_) {}
-namespace {
-std::vector<IterDomain*> lowerIterDomains(
- const std::vector<fuser::cuda::IterDomain*>& domains) {
- std::vector<IterDomain*> lowered_domains;
- lowered_domains.reserve(domains.size());
- for (const auto iter_domain : domains) {
- lowered_domains.push_back(iter_domain);
- }
- return lowered_domains;
-};
-} // namespace
-
bool TensorDomain::hasBlockBroadcast() const {
return std::any_of(domain_.begin(), domain_.end(), [](IterDomain* id) {
return id->isBroadcast() && id->isThreadDim();
diff --git a/torch/csrc/jit/codegen/cuda/kernel_cache.cpp b/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
index 9d75362..f4585e9 100644
--- a/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
+++ b/torch/csrc/jit/codegen/cuda/kernel_cache.cpp
@@ -42,11 +42,6 @@
return index;
}
-// TODO: temporary hack to resolve my is_constructible issue;
-std::vector<size_t> toVector(const at::DimVector& small_vec) {
- return std::vector<size_t>(small_vec.begin(), small_vec.end());
-}
-
void encodeBuffer(size_t value, std::string& buffer) {
const char* v = reinterpret_cast<char*>(&value);
for (const auto i : c10::irange(sizeof(size_t))) {
diff --git a/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp b/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
index 281fa05..ecf7320 100644
--- a/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
+++ b/torch/csrc/jit/codegen/cuda/lower_expr_sort.cpp
@@ -555,39 +555,41 @@
}
// Debug function that prints the current state of the sorter.
-std::string ExprSegmentationSorter::toString(int verbosity) const {
- std::stringstream ss;
- ss << "{\n";
- for (auto& group : groups_) {
- ss << " " << group.get() << "\n";
+//
+// Uncomment if needed.
+// std::string ExprSegmentationSorter::toString(int verbosity) const {
+// std::stringstream ss;
+// ss << "{\n";
+// for (auto& group : groups_) {
+// ss << " " << group.get() << "\n";
- if (verbosity > 1) {
- if (group->producerEdges().size() > 0) {
- ss << "Produced by groups with edges: { \n";
- for (auto producer_edge : group->producerEdges()) {
- ss << producer_edge->producer_val_ << " -> "
- << producer_edge->consumer_val_ << "\n";
- }
- ss << " }"
- << "\n";
- }
- }
+// if (verbosity > 1) {
+// if (group->producerEdges().size() > 0) {
+// ss << "Produced by groups with edges: { \n";
+// for (auto producer_edge : group->producerEdges()) {
+// ss << producer_edge->producer_val_ << " -> "
+// << producer_edge->consumer_val_ << "\n";
+// }
+// ss << " }"
+// << "\n";
+// }
+// }
- if (verbosity > 1) {
- if (group->consumerEdges().size() > 0) {
- ss << "Consumed by groups with edges: { \n";
- for (auto consumer_edge : group->consumerEdges()) {
- ss << consumer_edge->producer_val_ << " -> "
- << consumer_edge->consumer_val_ << "\n";
- }
- ss << " }"
- << "\n";
- }
- }
- }
- ss << "}\n";
- return ss.str();
-}
+// if (verbosity > 1) {
+// if (group->consumerEdges().size() > 0) {
+// ss << "Consumed by groups with edges: { \n";
+// for (auto consumer_edge : group->consumerEdges()) {
+// ss << consumer_edge->producer_val_ << " -> "
+// << consumer_edge->consumer_val_ << "\n";
+// }
+// ss << " }"
+// << "\n";
+// }
+// }
+// }
+// ss << "}\n";
+// return ss.str();
+// }
namespace {
diff --git a/torch/csrc/jit/codegen/cuda/parser.cpp b/torch/csrc/jit/codegen/cuda/parser.cpp
index 169d41b..c98d4eb 100644
--- a/torch/csrc/jit/codegen/cuda/parser.cpp
+++ b/torch/csrc/jit/codegen/cuda/parser.cpp
@@ -386,10 +386,6 @@
}
};
-bool operator==(const MemoryFormat& a, const MemoryFormat& b) {
- return a.permutation_ == b.permutation_;
-};
-
typedef std::map<MemoryFormat, CgValue, MemoryCompare> MemoryFormatMap;
MemoryFormat operator+(const MemoryFormat& a, const MemoryFormat& b) {
diff --git a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
index e6413ce..2bad08c 100644
--- a/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
+++ b/torch/csrc/jit/mobile/compatibility/backport_manager.cpp
@@ -77,21 +77,6 @@
}
}
-// Copy all content from reader to stringstream
-void get_model_stream(PyTorchStreamReader& reader, std::stringstream& out) {
- auto writer_func = [&](const void* buf, size_t nbytes) -> size_t {
- out.write(static_cast<const char*>(buf), nbytes);
- return !out ? 0 : nbytes;
- };
- PyTorchStreamWriter writer(writer_func);
-
- selective_copy(
- reader,
- writer,
- std::unordered_set<std::string>(),
- std::unordered_set<std::string>());
-}
-
// The write_archive_current function is used for bytecode from version v5 to
// v7 (the latest bytecode version). pre-v5 we serialized things differently.
// This write archive function may change in export_module.cpp, however we don't
diff --git a/torch/csrc/jit/mobile/module.cpp b/torch/csrc/jit/mobile/module.cpp
index 1483af2..2ef7c34 100644
--- a/torch/csrc/jit/mobile/module.cpp
+++ b/torch/csrc/jit/mobile/module.cpp
@@ -107,6 +107,7 @@
}
}
+#if defined(SYMBOLICATE_MOBILE_DEBUG_HANDLE)
std::string getTopModuleTypeName(const Module& m) {
std::string name;
if (m._ivalue()->type() && m._ivalue()->type()->name()) {
@@ -114,6 +115,8 @@
}
return name;
}
+#endif
+
} // namespace
const std::vector<at::Tensor> Module::parameters() const {
diff --git a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
index 0927198..d5dbfe8 100644
--- a/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
+++ b/torch/csrc/jit/passes/onnx/shape_type_inference.cpp
@@ -377,27 +377,6 @@
}
}
-// this function checks wheather the blocks of If node have the same return
-// type.
-bool IsBlockReturnTypeSame(Node* n) {
- TORCH_INTERNAL_ASSERT(n->kind() == ::c10::onnx::If);
- auto then_block = n->blocks()[0];
- auto else_block = n->blocks()[1];
- for (const auto i : c10::irange(n->outputs().size())) {
- // check the type
- auto then_block_type = then_block->outputs()[i]->type();
- auto else_block_type = else_block->outputs()[i]->type();
- if (then_block_type->cast<TensorType>() &&
- else_block_type->cast<TensorType>()) {
- if (then_block_type->castRaw<TensorType>()->scalarType() !=
- else_block_type->castRaw<TensorType>()->scalarType()) {
- return false;
- }
- }
- }
- return true;
-}
-
c10::optional<at::Tensor> ComputeConstantFolding(Node* n, int opset_version) {
if (n->inputs().size() == 0) {
return c10::nullopt;
diff --git a/torch/csrc/jit/runtime/static/ops.cpp b/torch/csrc/jit/runtime/static/ops.cpp
index cac0ebd..65f4777 100644
--- a/torch/csrc/jit/runtime/static/ops.cpp
+++ b/torch/csrc/jit/runtime/static/ops.cpp
@@ -2642,12 +2642,6 @@
});
}
-at::Tensor signed_log1p(const at::Tensor& input) {
- auto out = create_empty_from(input);
- signed_log1p_out(out, input);
- return out;
-}
-
} // namespace
// NOLINTNEXTLINE(cppcoreguidelines-avoid-non-const-global-variables)
diff --git a/torch/csrc/jit/serialization/export_module.cpp b/torch/csrc/jit/serialization/export_module.cpp
index d0bfc84..b4e1f62 100644
--- a/torch/csrc/jit/serialization/export_module.cpp
+++ b/torch/csrc/jit/serialization/export_module.cpp
@@ -345,17 +345,6 @@
}
}
-std::unordered_set<const FunctionSchema*> getInterfaceCalls(Graph& graph) {
- std::unordered_set<const FunctionSchema*> ret;
- auto nodes = findAllNodes(graph, c10::prim::CallMethod, true);
- for (Node* node : nodes) {
- if (auto iface = node->input(0)->type()->castRaw<InterfaceType>()) {
- ret.insert(iface->getMethod(node->s(attr::name)));
- }
- }
- return ret;
-}
-
struct ModuleMethod {
ModuleMethod(const Module& m, const GraphFunction& f, c10::QualifiedName n)
: module(m), function(f), exportName(std::move(n)) {}
@@ -364,28 +353,6 @@
c10::QualifiedName exportName;
};
-std::vector<ModuleMethod> getModuleInterfaceExports(
- const Module& module,
- const std::unordered_set<const FunctionSchema*>& schemas) {
- if (schemas.size() == 0) {
- return {};
- }
- std::unordered_set<std::string> names;
- for (auto schema : schemas) {
- names.insert(schema->name());
- }
- std::vector<ModuleMethod> ret;
- for (const auto& submodule : module.modules()) {
- for (const auto& method : submodule.get_methods()) {
- const auto& f = toGraphFunction(method.function());
- if (names.find(f.qualname().name()) != names.end()) {
- ret.emplace_back(submodule, f, f.qualname());
- }
- }
- }
- return ret;
-}
-
bool isLoweredModule(const Module& m) {
c10::QualifiedName type_name;
if (m.type()->name()) {
diff --git a/torch/csrc/utils/tensor_new.cpp b/torch/csrc/utils/tensor_new.cpp
index d11d7cd..2e19a5e 100644
--- a/torch/csrc/utils/tensor_new.cpp
+++ b/torch/csrc/utils/tensor_new.cpp
@@ -70,12 +70,6 @@
// options.
// TODO: Refactor this so we just pass everything in via options
-Tensor dispatch_ones(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) {
- maybe_initialize_cuda(options.device());
- pybind11::gil_scoped_release no_gil;
- return torch::ones(sizes, build_options(options, scalar_type, device));
-}
-
Tensor new_with_sizes(c10::TensorOptions options, at::ScalarType scalar_type, const optional<Device>& device, IntArrayRef sizes) {
maybe_initialize_cuda(options.device());
pybind11::gil_scoped_release no_gil;
@@ -469,14 +463,6 @@
throw std::runtime_error("new(): invalid arguments");
}
-Tensor legacy_sparse_tensor_ctor(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) {
- return legacy_sparse_tensor_generic_ctor_new(dispatch_key, scalar_type, args, kwargs, CtorOrNew::CTOR);
-}
-
-Tensor legacy_sparse_tensor_new(c10::DispatchKey dispatch_key, at::ScalarType scalar_type, PyObject* args, PyObject* kwargs) {
- return legacy_sparse_tensor_generic_ctor_new(dispatch_key, scalar_type, args, kwargs, CtorOrNew::NEW);
-}
-
// NB: device_idx here is NOT a DeviceIndex, but index into PythonArgs
c10::TensorOptions typeIdWithDefault(PythonArgs& r, int64_t device_idx, c10::DispatchKey dispatch_key) {
auto options = dispatchKeyToTensorOptions(dispatch_key);