Remove ProfiledType (#42570)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/42570
ProfiledType doesn't do anything and is not used at the moment; removing it.
Test Plan: CI
Reviewed By: ezyang
Differential Revision: D22938664
Pulled By: ilia-cher
fbshipit-source-id: 037c512938028f44258b702bbcde3f8c144f4aa0
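Note: ProfiledType was a set of generated per-operator wrappers registered under DispatchKey::Profiler; each wrapper only redispatched to the next key (see the deleted PROFILE_DISPATCH template below), and the boxed fallback in profiler.cpp made the key a no-op for everything else. Operator-level profiling is driven by RecordFunction callbacks instead, so all this bought was unused codegen plus an extra dispatch hop whenever the key was enabled.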
diff --git a/BUILD.bazel b/BUILD.bazel
index d87dd9b..0e4e656 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -186,12 +186,6 @@
"torch/csrc/autograd/generated/VariableType_3.cpp",
"torch/csrc/autograd/generated/VariableType_4.cpp",
# "torch/csrc/autograd/generated/VariableTypeEverything.cpp",
- "torch/csrc/autograd/generated/ProfiledType_0.cpp",
- "torch/csrc/autograd/generated/ProfiledType_1.cpp",
- "torch/csrc/autograd/generated/ProfiledType_2.cpp",
- "torch/csrc/autograd/generated/ProfiledType_3.cpp",
- "torch/csrc/autograd/generated/ProfiledType_4.cpp",
- # "torch/csrc/autograd/generated/ProfiledTypeEverything.cpp",
"torch/csrc/autograd/generated/TraceType_0.cpp",
"torch/csrc/autograd/generated/TraceType_1.cpp",
"torch/csrc/autograd/generated/TraceType_2.cpp",
diff --git a/c10/core/DispatchKey.cpp b/c10/core/DispatchKey.cpp
index de80833..4ce9ecd 100644
--- a/c10/core/DispatchKey.cpp
+++ b/c10/core/DispatchKey.cpp
@@ -50,8 +50,6 @@
return "Autocast";
case DispatchKey::TESTING_ONLY_GenericWrapper:
return "TESTING_ONLY_GenericWrapper";
- case DispatchKey::Profiler:
- return "Profiler";
case DispatchKey::Named:
return "Named";
case DispatchKey::Tracer:
diff --git a/c10/core/DispatchKey.h b/c10/core/DispatchKey.h
index 5605a07..1a7c63f 100644
--- a/c10/core/DispatchKey.h
+++ b/c10/core/DispatchKey.h
@@ -189,8 +189,6 @@
// the bulk of this logic.
Autograd,
- Profiler,
-
Tracer,
// Pre-autograd dispatch keys allow backends to override the autograd behavior
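Note: entries in this enum are ordered by dispatch priority, and a key only takes effect for a call when it is in the computed dispatch key set, e.g. after being added to the thread-local included set. A minimal sketch of how the removed key was toggled, using the same RAII guard types this diff deletes from the tests and from profiler.cpp:

    {
      // Route ops on this thread through the Profiler key
      // (restored automatically when the guard is destroyed).
      c10::impl::IncludeDispatchKeyGuard enable(c10::DispatchKey::Profiler);
    }
    {
      // Skip the Profiler key entirely on this thread.
      c10::impl::ExcludeDispatchKeyGuard disable(c10::DispatchKey::Profiler);
    }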
diff --git a/caffe2/CMakeLists.txt b/caffe2/CMakeLists.txt
index f778dcc..380a483 100644
--- a/caffe2/CMakeLists.txt
+++ b/caffe2/CMakeLists.txt
@@ -309,11 +309,6 @@
"${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_2.cpp"
"${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_3.cpp"
"${TORCH_SRC_DIR}/csrc/autograd/generated/VariableType_4.cpp"
- "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_0.cpp"
- "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_1.cpp"
- "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_2.cpp"
- "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_3.cpp"
- "${TORCH_SRC_DIR}/csrc/autograd/generated/ProfiledType_4.cpp"
"${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_0.cpp"
"${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_1.cpp"
"${TORCH_SRC_DIR}/csrc/autograd/generated/TraceType_2.cpp"
@@ -371,7 +366,6 @@
"${CMAKE_BINARY_DIR}/aten/src/ATen/Declarations.yaml"
"${TOOLS_PATH}/autograd/templates/VariableType.h"
"${TOOLS_PATH}/autograd/templates/VariableType.cpp"
- "${TOOLS_PATH}/autograd/templates/ProfiledType.cpp"
"${TOOLS_PATH}/autograd/templates/TraceType.cpp"
"${TOOLS_PATH}/autograd/templates/Functions.h"
"${TOOLS_PATH}/autograd/templates/Functions.cpp"
diff --git a/test/cpp/jit/test_misc.cpp b/test/cpp/jit/test_misc.cpp
index 8ec1681..aa4d2fb 100644
--- a/test/cpp/jit/test_misc.cpp
+++ b/test/cpp/jit/test_misc.cpp
@@ -821,8 +821,6 @@
}
void testRecordFunction() {
- // enable observers
- c10::impl::IncludeDispatchKeyGuard observer_guard(c10::DispatchKey::Profiler);
// disabling the inlining of method calls
GraphOptimizerEnabledGuard opt_guard(false);
@@ -1016,8 +1014,6 @@
ids.clear();
auto th = std::thread([&ids]() {
- c10::impl::IncludeDispatchKeyGuard observer_guard(
- c10::DispatchKey::Profiler);
addThreadLocalCallback(RecordFunctionCallback(
[&ids](const RecordFunction& fn) { ids.push_back(2); },
[](const RecordFunction&) {}));
@@ -1128,9 +1124,6 @@
}
void testThreadLocalDebugInfo() {
- // enable observers
- c10::impl::IncludeDispatchKeyGuard observer_guard(c10::DispatchKey::Profiler);
-
TORCH_CHECK(
c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::TEST_INFO) == nullptr);
auto debug_info = std::make_shared<TestThreadLocalDebugInfo>();
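Note: the guards could be dropped from these tests because RecordFunction observers no longer depend on the Profiler dispatch key being enabled; registering a callback is enough. A minimal sketch modeled on the addThreadLocalCallback call left in testRecordFunction above:

    // Register start/end observers for the current thread.
    addThreadLocalCallback(RecordFunctionCallback(
        [](const RecordFunction& fn) { /* runs when an op starts */ },
        [](const RecordFunction& fn) { /* runs when it finishes */ }));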
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index a244c22d..1ab895b 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -364,23 +364,6 @@
${return_type} ${api_name}(${declaration_formals}); // {"schema": "${schema_string}", "compound": "${compound}"}
""")
-# TODO(iliacher): remove Profile wrappers
-# ProfiledType templates
-# See NOTE[UnboxedOnly] in function_wrapper.py
-UNBOXED_PROFILE_DISPATCH = CodeTemplate("""\
-static auto op = c10::Dispatcher::singleton()
- .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
- .typed<${return_type} (${profiled_arg_types})>();
-return c10::Dispatcher::singleton().redispatch<${profiled_ret_and_arg_types}>(${profiled_dispatch_args});
-""")
-PROFILE_DISPATCH = CodeTemplate("""\
-static auto op = c10::Dispatcher::singleton()
- .findSchemaOrThrow("aten::${operator_name}", "${overload_name}")
- .typed<${return_type} (${profiled_arg_types})>();
-return c10::Dispatcher::singleton().redispatch<${profiled_ret_and_arg_types}>(${profiled_dispatch_args});
-""")
-
-
# TraceType templates
# TODO: change `redispatch` to `NoTracerDispatchMode` + regular `call`.
# See NOTE[UnboxedOnly] in function_wrapper.py
@@ -678,14 +661,11 @@
def gen_variable_type_shard(out, aten_declarations, template_path, suffix, header):
VARIABLE_TYPE_H = CodeTemplate.from_file(template_path + '/VariableType.h')
VARIABLE_TYPE_CPP = CodeTemplate.from_file(template_path + '/VariableType.cpp')
- PROFILED_TYPE_CPP = CodeTemplate.from_file(template_path + '/ProfiledType.cpp')
TRACE_TYPE_CPP = CodeTemplate.from_file(template_path + '/TraceType.cpp')
type_declarations = []
type_definitions = []
wrapper_registrations = []
- profiled_method_definitions = []
- profiled_wrapper_registrations = []
trace_method_definitions = []
trace_wrapper_registrations = []
@@ -708,19 +688,6 @@
# See Note [Manual catchAll kernels]
assert (declaration['name'] in MANUAL_CATCHALL) == declaration['manual_kernel_registration']
- # Emit ProfiledType code
- profiled_body = emit_profiled_body(declaration)
- profiled_method_definitions.append(METHOD_DEFINITION.substitute(
- declaration, type_definition_body=profiled_body))
-
- if declaration['use_c10_dispatcher'] == 'full':
- profiled_wrapper_registrations.append(WRAPPER_REGISTRATION.substitute(
- declaration, class_type='ProfiledType'))
- else:
- assert declaration['use_c10_dispatcher'] == 'with_codegenerated_unboxing_wrapper'
- profiled_wrapper_registrations.append(UNBOXEDONLY_WRAPPER_REGISTRATION.substitute(
- declaration, class_type='ProfiledType'))
-
# Emit TraceType code
if declaration['name'] not in MANUAL_TRACER:
trace_body = emit_trace_body(declaration)
@@ -738,8 +705,6 @@
'type_derived_method_declarations': type_declarations,
'type_derived_method_definitions': type_definitions,
'wrapper_registrations': wrapper_registrations,
- 'profiled_method_definitions': profiled_method_definitions,
- 'profiled_wrapper_registrations': profiled_wrapper_registrations,
'trace_method_definitions': trace_method_definitions,
'trace_wrapper_registrations': trace_wrapper_registrations,
}
@@ -747,64 +712,9 @@
write(out, 'VariableType.h', VARIABLE_TYPE_H, env)
else:
write(out, 'VariableType%s.cpp' % suffix, VARIABLE_TYPE_CPP, env)
- write(out, 'ProfiledType%s.cpp' % suffix, PROFILED_TYPE_CPP, env)
write(out, 'TraceType%s.cpp' % suffix, TRACE_TYPE_CPP, env)
-def emit_profiled_body(declaration):
- arguments = declaration['arguments']
- returns = declaration['returns']
- func = declaration['derivative']
- name = declaration['name']
- inplace = declaration['inplace']
- is_out_fn = name.endswith('_out')
- modifies_arguments = inplace or is_out_fn
- returns_void = len(returns) == 0
-
- processed_args = []
- for a in arguments:
- processed_args.append('{}'.format(a['name']))
-
- arg_types = ', '.join([a['type'] for a in declaration['arguments']])
- ret_and_arg_types = ', '.join([declaration['return_type']] + [a['type'] for a in declaration['arguments']])
- schema_order_arg_types = ', '.join([a['type'] for a in declaration['schema_order_arguments']])
- schema_order_ret_and_arg_types = ', '.join(
- [declaration['return_type']] + [a['type'] for a in declaration['schema_order_arguments']])
-
- def check_record_function_input_type(simple_type):
- return simple_type in ['Tensor', 'Scalar']
-
- def record_function_input_names():
- return ', '.join([
- arg['name'] for arg in declaration['arguments']
- if check_record_function_input_type(arg['simple_type'])])
-
- profiled_dispatch_args = ['op', 'c10::DispatchKey::Profiler'] + declaration['args']
- schema_order_profiled_dispatch_args = ['op', 'c10::DispatchKey::Profiler'] + declaration['schema_order_args']
-
- if declaration['use_c10_dispatcher'] == 'full':
- profiled_arg_types = schema_order_arg_types
- profiled_ret_and_arg_types = schema_order_ret_and_arg_types
- profiled_dispatch_args = schema_order_profiled_dispatch_args
- else:
- assert declaration['use_c10_dispatcher'] == 'with_codegenerated_unboxing_wrapper'
- profiled_arg_types = arg_types
- profiled_ret_and_arg_types = ret_and_arg_types
- profiled_dispatch_args = profiled_dispatch_args
-
- call = PROFILE_DISPATCH.substitute(
- declaration,
- name=name,
- input_names=record_function_input_names(),
- return_type=declaration['return_type'],
- profiled_arg_types=profiled_arg_types,
- profiled_ret_and_arg_types=profiled_ret_and_arg_types,
- profiled_dispatch_args=profiled_dispatch_args,
- )
-
- return [call]
-
-
def emit_trace_body(declaration):
returns = declaration['returns']
name = declaration['name']
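Note: the deleted UNBOXED_PROFILE_DISPATCH and PROFILE_DISPATCH templates had identical bodies, and both expanded to a pure redispatch. Roughly what one generated ProfiledType kernel looked like, for a hypothetical op with an abbreviated signature (a sketch, not real generated output):

    // Look the op up once, then redispatch past the Profiler key
    // without doing any profiling work of its own.
    Tensor add(const Tensor & self, const Tensor & other, Scalar alpha) {
      static auto op = c10::Dispatcher::singleton()
          .findSchemaOrThrow("aten::add", "Tensor")
          .typed<Tensor (const Tensor &, const Tensor &, Scalar)>();
      return c10::Dispatcher::singleton()
          .redispatch<Tensor, const Tensor &, const Tensor &, Scalar>(
              op, c10::DispatchKey::Profiler, self, other, alpha);
    }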
diff --git a/tools/autograd/templates/ProfiledType.cpp b/tools/autograd/templates/ProfiledType.cpp
deleted file mode 100644
index 1613d6e..0000000
--- a/tools/autograd/templates/ProfiledType.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-#include "torch/csrc/autograd/VariableTypeUtils.h"
-
-#include <ATen/TypeDefault.h>
-#include <torch/library.h>
-#include <ATen/core/op_registration/hacky_wrapper_for_legacy_signatures.h>
-
-#include "torch/csrc/autograd/function.h"
-
-#include "ATen/quantized/Quantizer.h"
-
-// ${generated_comment}
-
-// NOTE See [Sharded File] comment in VariableType
-
-using namespace at;
-using namespace torch::autograd::generated;
-using torch::autograd::Node;
-
-namespace torch {
-
-namespace ProfiledType {
-
-namespace {
-${profiled_method_definitions}
-} // namespace
-} // namespace ProfiledType
-
-namespace {
-
-TORCH_LIBRARY_IMPL(aten, Profiler, m) {
- ${profiled_wrapper_registrations};
-}
-
-} // namespace
-
-} // namespace torch
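Note: ${profiled_wrapper_registrations} expanded to one registration line per operator inside the TORCH_LIBRARY_IMPL(aten, Profiler, m) block above. A sketch of the expanded shape (hypothetical op; the exact registration form varied with use_c10_dispatcher):

    TORCH_LIBRARY_IMPL(aten, Profiler, m) {
      m.impl("add.Tensor", TORCH_FN(ProfiledType::add));
      // ... one registration per generated wrapper ...
    }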
diff --git a/tools/build_variables.bzl b/tools/build_variables.bzl
index 6e13c60..ae22532 100644
--- a/tools/build_variables.bzl
+++ b/tools/build_variables.bzl
@@ -10,11 +10,6 @@
"jit/generated/generated_unboxing_wrappers_0.cpp",
"jit/generated/generated_unboxing_wrappers_1.cpp",
"jit/generated/generated_unboxing_wrappers_2.cpp",
- "autograd/generated/ProfiledType_0.cpp",
- "autograd/generated/ProfiledType_1.cpp",
- "autograd/generated/ProfiledType_2.cpp",
- "autograd/generated/ProfiledType_3.cpp",
- "autograd/generated/ProfiledType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
@@ -38,11 +33,6 @@
"autograd/generated/VariableType_2.cpp",
"autograd/generated/VariableType_3.cpp",
"autograd/generated/VariableType_4.cpp",
- "autograd/generated/ProfiledType_0.cpp",
- "autograd/generated/ProfiledType_1.cpp",
- "autograd/generated/ProfiledType_2.cpp",
- "autograd/generated/ProfiledType_3.cpp",
- "autograd/generated/ProfiledType_4.cpp",
"autograd/generated/TraceType_0.cpp",
"autograd/generated/TraceType_1.cpp",
"autograd/generated/TraceType_2.cpp",
diff --git a/torch/csrc/autograd/profiler.cpp b/torch/csrc/autograd/profiler.cpp
index 7ec52cb..0c86902 100644
--- a/torch/csrc/autograd/profiler.cpp
+++ b/torch/csrc/autograd/profiler.cpp
@@ -663,12 +663,3 @@
}
}}}
-
-void profile_wrapper(const c10::OperatorHandle& op, torch::jit::Stack* stack) {
- c10::impl::ExcludeDispatchKeyGuard key_guard(c10::DispatchKey::Profiler);
- op.callBoxed(stack);
-}
-
-TORCH_LIBRARY_IMPL(_, Profiler, m) {
- m.fallback(torch::CppFunction::makeFromBoxedFunction<&profile_wrapper>());
-}
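Note: the ExcludeDispatchKeyGuard in the deleted fallback was load-bearing: without removing Profiler from the active key set first, op.callBoxed(stack) would dispatch straight back into the same fallback and recurse. The same self-exclusion pattern applies to any boxed fallback that re-enters the dispatcher.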
diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
index 4be26c9..18364ba 100644
--- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
+++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
@@ -205,12 +205,6 @@
.findSchemaOrThrow(unpack_fn.c_str(), "")
.typed<std::tuple<at::Tensor, c10::optional<at::Tensor>>(
at::Tensor)>();
- // Temporary hack: when the `Profiler` dispatch key is inserted, this call
- // will fail since the `unpack()` ops return multiple values, however the
- // boxing code currently does not support this. Instead, exclude the
- // Profiler dispatch key and go through unboxed dispatch, avoiding boxing
- // altogether
- c10::impl::ExcludeDispatchKeyGuard key_guard(c10::DispatchKey::Profiler);
std::tie(unpacked_weight, bias) = op.call(packed_weight);
}
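Note: this workaround existed only because the Profiler fallback forced calls through the boxed path, and the boxing code could not handle ops returning multiple values, such as the (Tensor, optional<Tensor>) tuple produced by the unpack ops. With the key gone, the typed op.call(packed_weight) above always takes the unboxed path, so the guard is unnecessary.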