[AutoAccept][Codemod][FBSourceClangFormatLinter] Daily `arc lint --take CLANGFORMAT`
Reviewed By: zertosh
Differential Revision: D33938055
fbshipit-source-id: 6c0643a18f09854e87e183341f252c66dd6395a6
(cherry picked from commit fd183aedbc0f015bd43a01a28930093ab94ab41e)
diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp
index 641333a..e6a6c08 100644
--- a/test/cpp/jit/test_misc.cpp
+++ b/test/cpp/jit/test_misc.cpp
@@ -2865,9 +2865,13 @@
auto ref1 = a * (a * b);
auto ref2 = a * ref1;
WithCPUFuser g(true);
- bool fusable_on_device = torch::jit::tensorexpr::getTEMustUseLLVMOnCPU();
+ bool fusable_on_device = torch::jit::tensorexpr::getTEMustUseLLVMOnCPU();
torch::jit::tensorexpr::getTEMustUseLLVMOnCPU() = false;
- FuseTensorExprs(graph, /*min_group_size*/2, /*add_composed_op*/true, /*fuse_to_dynamic_shapes*/true);
+ FuseTensorExprs(
+ graph,
+ /*min_group_size*/ 2,
+ /*add_composed_op*/ true,
+ /*fuse_to_dynamic_shapes*/ true);
Code code(graph, "");
InterpreterState interpreter{code};
std::vector<IValue> stack = {a, b};
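
The test above saves and toggles a global flag through the mutable reference returned by torch::jit::tensorexpr::getTEMustUseLLVMOnCPU(). A minimal sketch of that save/toggle/restore idiom, not part of the patch (the declaring header and the wrapper name are assumptions):

    #include <functional>
    // Assumed to declare `TORCH_API bool& getTEMustUseLLVMOnCPU();`
    #include <torch/csrc/jit/passes/tensorexpr_fuser.h>

    // Run `body` with the must-use-LLVM-on-CPU flag cleared, then restore it.
    void withTEMustUseLLVMOnCPUDisabled(const std::function<void()>& body) {
      bool saved = torch::jit::tensorexpr::getTEMustUseLLVMOnCPU();
      torch::jit::tensorexpr::getTEMustUseLLVMOnCPU() = false;
      body();
      torch::jit::tensorexpr::getTEMustUseLLVMOnCPU() = saved;
    }
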
diff --git a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
index ca55a0a..602f951 100644
--- a/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
+++ b/torch/csrc/jit/passes/symbolic_shape_runtime_fusion.cpp
@@ -164,7 +164,8 @@
// otherwise we defer to contiguous
// TODO: channels last 3d
// NNC Channels last permutation for outputs causes slowdown, disable
- if (c10::is_channels_last_strides_2d(sizes, strides) && !tt.device()->is_cpu()) {
+ if (c10::is_channels_last_strides_2d(sizes, strides) &&
+ !tt.device()->is_cpu()) {
return StrideInput::TENSOR_CONT_CHANNELS_LAST;
}
return StrideInput::TENSOR_CONT;
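
For context, the condition reflowed above keys off c10::is_channels_last_strides_2d(sizes, strides). A standalone sketch, assuming that (sizes, strides) signature, of what the helper reports for NCHW-shaped sizes with channels-last versus contiguous strides:

    #include <c10/core/MemoryFormat.h>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // NCHW sizes {2, 3, 4, 4}; channels-last (NHWC) strides expressed in
      // NCHW order put the channel stride at 1.
      std::vector<int64_t> sizes = {2, 3, 4, 4};
      std::vector<int64_t> channels_last = {48, 1, 12, 3};
      std::vector<int64_t> contiguous = {48, 16, 4, 1};
      std::cout << c10::is_channels_last_strides_2d(sizes, channels_last) // 1
                << c10::is_channels_last_strides_2d(sizes, contiguous)    // 0
                << std::endl;
    }
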
diff --git a/torch/csrc/jit/passes/tensorexpr_fuser.cpp b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
index 3195ddd..3335104 100644
--- a/torch/csrc/jit/passes/tensorexpr_fuser.cpp
+++ b/torch/csrc/jit/passes/tensorexpr_fuser.cpp
@@ -1047,7 +1047,8 @@
// Allow only if the node has a shape function defined.
// ListConstruct node is an exception since that is needed to fuse
// aten::cat, though it does not have a shape function.
- REQ(node->kind() == prim::ListConstruct || node->kind() == prim::TensorExprGroup ||
+ REQ(node->kind() == prim::ListConstruct ||
+ node->kind() == prim::TensorExprGroup ||
(node->maybeSchema() && shapeComputeGraphForSchema(node->schema())));
}
@@ -1234,13 +1235,15 @@
}
if (add_composed_op) {
- TORCH_INTERNAL_ASSERT(fuse_to_dynamic_shapes, "Fusing static shapes with composed op NYI");
+ TORCH_INTERNAL_ASSERT(
+ fuse_to_dynamic_shapes, "Fusing static shapes with composed op NYI");
}
// Get rid of dead code so that we don't waste effort fusing it.
EliminateDeadCode(graph);
- TensorExprFuser fuser(graph, min_group_size, add_composed_op, fuse_to_dynamic_shapes);
+ TensorExprFuser fuser(
+ graph, min_group_size, add_composed_op, fuse_to_dynamic_shapes);
fuser.run();
EliminateCommonSubexpression(graph);
@@ -1250,7 +1253,8 @@
}
Operation createTensorExprOp(const Node* node) {
- bool dynamic_shape_fusion_node = node->hasAttribute(attr::striding_inputs_desc);
+ bool dynamic_shape_fusion_node =
+ node->hasAttribute(attr::striding_inputs_desc);
if (!dynamic_shape_fusion_node) {
auto kernel =
std::make_shared<tensorexpr::TensorExprKernel>(node->g(attr::Subgraph));
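
All of the call sites reformatted in this file go through the same pass entry point. A hedged sketch of invoking it, with the argument order taken from the call sites above (the wrapper function is illustrative):

    #include <torch/csrc/jit/ir/ir.h>
    #include <torch/csrc/jit/passes/tensorexpr_fuser.h>

    // Fuse eligible nodes into TensorExprGroup subgraphs, generalized over
    // symbolic shapes; argument order matches the reformatted calls above.
    void fuseWithDynamicShapes(std::shared_ptr<torch::jit::Graph>& graph) {
      torch::jit::FuseTensorExprs(
          graph,
          /*min_group_size=*/2,
          /*add_composed_op=*/true,
          /*fuse_to_dynamic_shapes=*/true);
    }
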
diff --git a/torch/csrc/jit/python/init.cpp b/torch/csrc/jit/python/init.cpp
index aefb164..d00c56b 100644
--- a/torch/csrc/jit/python/init.cpp
+++ b/torch/csrc/jit/python/init.cpp
@@ -117,13 +117,13 @@
#include <pybind11/iostream.h>
#include <pybind11/operators.h>
+#include <torch/csrc/jit/runtime/profiling_graph_executor_impl.h>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <tuple>
#include <utility>
-#include <torch/csrc/jit/runtime/profiling_graph_executor_impl.h>
namespace torch {
namespace jit {
@@ -753,28 +753,38 @@
.def(
"_jit_set_bailout_depth",
[](size_t depth) {
- TORCH_WARN("Use _jit_set_fusion_strategy, bailout depth is deprecated. Setting to (STATIC, ", depth, ")");
+ TORCH_WARN(
+ "Use _jit_set_fusion_strategy, bailout depth is deprecated. Setting to (STATIC, ",
+ depth,
+ ")");
size_t old_depth = getBailoutDepth();
FusionStrategy strat = {{FusionBehavior::STATIC, depth}};
setFusionStrategy(strat);
return old_depth;
})
- .def("_jit_set_fusion_strategy",
+ .def(
+ "_jit_set_fusion_strategy",
[](std::vector<std::pair<std::string, size_t>> strategy) {
FusionStrategy vec_conv;
- for (const auto& pair: strategy) {
+ for (const auto& pair : strategy) {
if (pair.first == "STATIC") {
vec_conv.emplace_back(FusionBehavior::STATIC, pair.second);
} else if (pair.first == "DYNAMIC") {
vec_conv.emplace_back(FusionBehavior::DYNAMIC, pair.second);
- } else {
- TORCH_INTERNAL_ASSERT("FusionBehavior only supported 'STATIC' or 'DYNAMIC', got: ", pair.first);
+ } else {
+ TORCH_INTERNAL_ASSERT(
+ "FusionBehavior only supported 'STATIC' or 'DYNAMIC', got: ",
+ pair.first);
}
}
auto old_strategy = getFusionStrategy();
- auto strat = fmap(old_strategy, [](std::pair<FusionBehavior, size_t> behav) {
- return std::pair<std::string, size_t>(behav.first == FusionBehavior::STATIC ? "STATIC" : "DYNAMIC", behav.second);
- });
+ auto strat =
+ fmap(old_strategy, [](std::pair<FusionBehavior, size_t> behav) {
+ return std::pair<std::string, size_t>(
+ behav.first == FusionBehavior::STATIC ? "STATIC"
+ : "DYNAMIC",
+ behav.second);
+ });
setFusionStrategy(vec_conv);
return strat;
})
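
The _jit_set_fusion_strategy binding above is a thin wrapper over the native strategy setter. A minimal sketch of the equivalent C++ call, assuming FusionStrategy is the vector of (FusionBehavior, depth) pairs these hunks pass around:

    #include <torch/csrc/jit/runtime/profiling_graph_executor_impl.h>

    void useDefaultOssStrategy() {
      using torch::jit::FusionBehavior;
      using torch::jit::FusionStrategy;
      // Two static-shape specializations, then up to ten dynamic-shape ones.
      FusionStrategy strategy = {
          {FusionBehavior::STATIC, 2}, {FusionBehavior::DYNAMIC, 10}};
      // Returns the previous strategy, mirroring the Python binding.
      FusionStrategy previous = torch::jit::setFusionStrategy(strategy);
      (void)previous;
    }
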
diff --git a/torch/csrc/jit/runtime/graph_executor.cpp b/torch/csrc/jit/runtime/graph_executor.cpp
index b24fa49..09f6f7e 100644
--- a/torch/csrc/jit/runtime/graph_executor.cpp
+++ b/torch/csrc/jit/runtime/graph_executor.cpp
@@ -894,7 +894,7 @@
if (tensorExprFuserEnabled()) {
auto min_size = getFusionGroupInlining() ? 2 : 1;
auto dyn_shapes = tensorExprDynamicShapeFusionEnabled();
- FuseTensorExprs(graph, min_size, /*composed_op*/false, dyn_shapes);
+ FuseTensorExprs(graph, min_size, /*composed_op*/ false, dyn_shapes);
}
} else {
FuseGraph(graph, strict_fuser_check);
diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
index 3b7752e..c0fc02e 100644
--- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
+++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.cpp
@@ -74,7 +74,9 @@
#ifdef FBCODE_CAFFE2
static FusionStrategy fusion_strategy = {{FusionBehavior::STATIC, 20}};
#else
-static FusionStrategy fusion_strategy = {{FusionBehavior::STATIC, 2}, {FusionBehavior::DYNAMIC, 10}};
+static FusionStrategy fusion_strategy = {
+ {FusionBehavior::STATIC, 2},
+ {FusionBehavior::DYNAMIC, 10}};
#endif
FusionStrategy getFusionStrategy() {
@@ -113,7 +115,7 @@
size_t getBailoutDepth() {
// Initialize bailout_depth from command-line flag.
size_t depth = 0;
- for (const auto& pair: getFusionStrategy()) {
+ for (const auto& pair : getFusionStrategy()) {
depth += pair.second;
}
return depth;
@@ -375,7 +377,7 @@
FusionBehavior getCurrentBehavior(size_t remaining_depth) {
size_t curr_depth = 0;
auto curr_strategy = getFusionStrategy();
- for (int i = static_cast<int>(curr_strategy.size()) -1; i >= 0; i--) {
+ for (int i = static_cast<int>(curr_strategy.size()) - 1; i >= 0; i--) {
curr_depth += curr_strategy[i].second;
if (remaining_depth <= curr_depth) {
return curr_strategy[i].first;
@@ -386,7 +388,9 @@
return FusionBehavior::STATIC;
}
-void runNoGradOptimizations(std::shared_ptr<Graph>& graph, size_t remaining_bailout_depth) {
+void runNoGradOptimizations(
+ std::shared_ptr<Graph>& graph,
+ size_t remaining_bailout_depth) {
GRAPH_DEBUG(
"After customPostPasses (beginning of runNoGradOptimizations)\n", *graph);
// runNondiffOptimization
@@ -419,8 +423,9 @@
BatchMM(graph);
GRAPH_DEBUG("After BatchMM, before Fusion\n", *graph);
auto min_size = getFusionGroupInlining() ? 2 : 1;
- bool dyn_shapes = getCurrentBehavior(remaining_bailout_depth) == FusionBehavior::DYNAMIC;
- FuseTensorExprs(graph, min_size, /*composed_op*/false, dyn_shapes);
+ bool dyn_shapes = getCurrentBehavior(remaining_bailout_depth) ==
+ FusionBehavior::DYNAMIC;
+ FuseTensorExprs(graph, min_size, /*composed_op*/ false, dyn_shapes);
GRAPH_DEBUG(
"After Fusion, before RemoveTensorTypeSpecializations\n", *graph);
@@ -448,7 +453,8 @@
}
void ProfilingGraphExecutorImpl::runProfilingOptimizations(
- std::shared_ptr<Graph>& copy, size_t remaining_bailout_depth) {
+ std::shared_ptr<Graph>& copy,
+ size_t remaining_bailout_depth) {
GRAPH_DEBUG("Before runProfilingOptimizations:\n", *copy);
if (!getGraphExecutorOptimize()) {
runNooptPassPipeline(copy);
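
Worked example of the two loops reformatted above: with the OSS default {(STATIC, 2), (DYNAMIC, 10)}, getBailoutDepth() sums the depths to 12, and getCurrentBehavior() walks the entries from the back, so remaining depths 1-10 resolve to DYNAMIC and 11-12 to STATIC. A standalone re-derivation (loop logic copied from the hunk; everything else is illustrative):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    int main() {
      std::vector<std::pair<std::string, size_t>> strategy = {
          {"STATIC", 2}, {"DYNAMIC", 10}};
      for (size_t remaining : {12, 11, 10, 1}) {
        size_t curr_depth = 0;
        std::string behavior = "STATIC"; // fallback, as in the real function
        for (int i = static_cast<int>(strategy.size()) - 1; i >= 0; i--) {
          curr_depth += strategy[i].second;
          if (remaining <= curr_depth) {
            behavior = strategy[i].first;
            break;
          }
        }
        // Prints: 12 -> STATIC, 11 -> STATIC, 10 -> DYNAMIC, 1 -> DYNAMIC
        std::cout << remaining << " -> " << behavior << std::endl;
      }
    }
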
diff --git a/torch/csrc/jit/runtime/profiling_graph_executor_impl.h b/torch/csrc/jit/runtime/profiling_graph_executor_impl.h
index a3c6e0b..af9d1a2 100644
--- a/torch/csrc/jit/runtime/profiling_graph_executor_impl.h
+++ b/torch/csrc/jit/runtime/profiling_graph_executor_impl.h
@@ -26,7 +26,8 @@
// [3,4,5] are observed, then it is assumed that future inputs will have
// shapes [a,b,c] and [b,c,d] for some values of a,b,c,d.
//
-// In both cases, we also recompile on new striding behavior, device, or dtype.
+// In both cases, we also recompile on new striding behavior, device, or
+// dtype.
//
// Behavior - fallback functions & depth:
// When an input doesn't match the format required by the specialized compiled
@@ -56,7 +57,6 @@
// returns previous strategy
TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy);
-
struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase {
ProfilingGraphExecutorImpl(
const std::shared_ptr<Graph>& graph,
@@ -75,7 +75,7 @@
optimized_plan_.reset();
// prevent memory leaks
fallback_functions_.clear();
- remaining_bailout_depth_.reset();
+ remaining_bailout_depth_.reset();
}
bool isOptimized() const override {
@@ -87,7 +87,9 @@
Stack& stack,
size_t remaining_bailout_depth);
void runProfilingInsensitiveOptimizations(std::shared_ptr<Graph>& graph);
- void runProfilingOptimizations(std::shared_ptr<Graph>& graph, size_t remaining_depth);
+ void runProfilingOptimizations(
+ std::shared_ptr<Graph>& graph,
+ size_t remaining_depth);
void replaceFallbackGraphWithFallbackFunction(Block* b);
std::unique_ptr<ProfilingRecord> pr_;
c10::optional<ExecutionPlan>
diff --git a/torch/csrc/jit/runtime/static/fusion.cpp b/torch/csrc/jit/runtime/static/fusion.cpp
index b89c658..d9c2837 100644
--- a/torch/csrc/jit/runtime/static/fusion.cpp
+++ b/torch/csrc/jit/runtime/static/fusion.cpp
@@ -334,7 +334,7 @@
traced_graph,
/*min_group_size*/ 2,
/*add_composed_op*/ true,
- /*fuse_to_dynamic_shapes*/true);
+ /*fuse_to_dynamic_shapes*/ true);
graph->block()->clear();
graph->block()->cloneFrom(traced_graph->block(), nullptr);
GRAPH_DUMP("Graph after fusion: ", graph);
diff --git a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp
index f532514..e398333 100644
--- a/torch/csrc/jit/tensorexpr/cuda_codegen.cpp
+++ b/torch/csrc/jit/tensorexpr/cuda_codegen.cpp
@@ -1160,7 +1160,7 @@
std::vector<void*> extent_args;
size_t raw_args_size = raw_args.size();
extent_args.reserve(raw_args_size);
- for (size_t i = 0 ; i < raw_args_size; ++i) {
+ for (size_t i = 0; i < raw_args_size; ++i) {
if (arg_pos_in_extents_[i]) {
extent_args.push_back(raw_args[i]);
}
@@ -1177,7 +1177,8 @@
gpu_thread_extents_v[i] = immediateAs<int64_t>(gpu_thread_extents[i]);
continue;
}
- gpu_thread_extents_v[i] = thread_extents_eval_[i].value<int64_t>(extent_args);
+ gpu_thread_extents_v[i] =
+ thread_extents_eval_[i].value<int64_t>(extent_args);
}
// Skip launching the kernel if there are no elements to process.
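
The cuda_codegen hunks above reformat a masked copy: only the kernel arguments whose positions feed thread-extent expressions are forwarded to the extent evaluator. A minimal standalone sketch of that idiom (the free-function form and names are illustrative, mirroring the hunk):

    #include <cstddef>
    #include <vector>

    // Keep only the raw kernel args flagged as inputs to extent expressions.
    std::vector<void*> selectExtentArgs(
        const std::vector<void*>& raw_args,
        const std::vector<bool>& arg_pos_in_extents) {
      std::vector<void*> extent_args;
      extent_args.reserve(raw_args.size());
      for (size_t i = 0; i < raw_args.size(); ++i) {
        if (arg_pos_in_extents[i]) {
          extent_args.push_back(raw_args[i]);
        }
      }
      return extent_args;
    }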