s/callUnboxed/call/ (#37999)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/37999
Next step: make explicit type arguments less intrusive, or find a way to eliminate them entirely.
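
For context, a minimal sketch (not part of this patch) of what a call site looks like after the rename, modeled on the findSchemaOrThrow pattern used by the generated wrappers in the diff below; the operator name "aten::my_op" and the surrounding helper are hypothetical placeholders:

    // Illustrative only: callers switch from callUnboxed<...>() to call<...>(),
    // still spelling out the return and argument types explicitly -- the
    // "explicit type arguments" the summary above wants to make less intrusive.
    #include <ATen/core/dispatch/Dispatcher.h>

    at::Tensor call_my_op(const at::Tensor& self) {
      static c10::OperatorHandle op = c10::Dispatcher::singleton()
          .findSchemaOrThrow("aten::my_op", "");
      // Before this change: op.callUnboxed<at::Tensor, const at::Tensor&>(self);
      return op.call<at::Tensor, const at::Tensor&>(self);
    }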
Test Plan: Imported from OSS
Differential Revision: D21445646
Pulled By: bhosmer
fbshipit-source-id: 106b3381acea473ca686ab42b5ca610c89f5c531
diff --git a/aten/src/ATen/core/boxing/KernelFunction.h b/aten/src/ATen/core/boxing/KernelFunction.h
index b8ed0b2..6a59a73 100644
--- a/aten/src/ATen/core/boxing/KernelFunction.h
+++ b/aten/src/ATen/core/boxing/KernelFunction.h
@@ -66,16 +66,16 @@
*
* > KernelFunction func = KernelFunction::makeFromUnboxedLambda(
* > [] (Tensor a, bool b) -> Tensor {...});
- * > Tensor result = func.callUnboxed<Tensor, Tensor, bool>(tensor1, true);
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
*
* Or, with a boxed implementation:
*
* > void boxed_func(OperatorKernel*, Stack* stack) {...}
* > KernelFunction func = KernelFunction::makeFromBoxedFunction(&boxed_func);
- * > Tensor result = func.callUnboxed<Tensor, Tensor, bool>(tensor1, true);
+ * > Tensor result = func.call<Tensor, Tensor, bool>(tensor1, true);
*/
template<class Return, class... Args>
- Return callUnboxed(const OperatorHandle& opHandle, Args... args) const;
+ Return call(const OperatorHandle& opHandle, Args... args) const;
/**
* Create a KernelFunction from a boxed function.
@@ -104,9 +104,8 @@
/**
* Create a KernelFunction from an unboxed functor and prevent creation of an
- * unboxing-wrapper. This means that you can only call this KernelFunction
- * using KernelFunction::callUnboxedOnly(), not using KernelFunction::callBoxed()
- * or KernelFunction::callUnboxed().
+ * unboxing-wrapper. This means that you cannot call this KernelFunction
+ * using KernelFunction::callBoxed().
*
* This is necessary because our unboxing wrappers don't work for all types
* yet, so if you want to use one of these types as function arguments,
@@ -140,9 +139,8 @@
/**
* Create a KernelFunction from an unboxed function and prevent creation of an
- * unboxing-wrapper. This means that you can only call this KernelFunction
- * using KernelFunction::callUnboxedOnly(), not using KernelFunction::callBoxed()
- * or KernelFunction::callUnboxed().
+ * unboxing-wrapper. This means that you cannot call this KernelFunction
+ * using KernelFunction::callBoxed().
*
* This is necessary because our unboxing wrappers don't work for all types
* yet, so if you want to use one of these types as function arguments,
diff --git a/aten/src/ATen/core/boxing/KernelFunction_impl.h b/aten/src/ATen/core/boxing/KernelFunction_impl.h
index fbcd5ac..3459a10 100644
--- a/aten/src/ATen/core/boxing/KernelFunction_impl.h
+++ b/aten/src/ATen/core/boxing/KernelFunction_impl.h
@@ -37,7 +37,7 @@
TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::callBoxed() on an uninitialized KernelFunction.");
} else {
// TODO We want to introduce the invariant that all kernels must be callable in a boxed way, then this case should be impossible.
- TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed().");
+ TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call().");
}
}
@@ -45,7 +45,7 @@
}
template<class Return, class... Args>
-inline Return KernelFunction::callUnboxed(const OperatorHandle& opHandle, Args... args) const {
+inline Return KernelFunction::call(const OperatorHandle& opHandle, Args... args) const {
// note: Args above is intentionally not Args&&. We don't want perfect
// forwarding, which would require Args to be deduced, but instead we
// want callers to explicitly specify the Args.
@@ -56,7 +56,7 @@
return (*func)(functor_.get(), std::forward<Args>(args)...);
}
- TORCH_INTERNAL_ASSERT_DEBUG_ONLY(boxed_kernel_func_ != nullptr, "Tried to call KernelFunction::callUnboxed() on an uninitialized KernelFunction.");
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(boxed_kernel_func_ != nullptr, "Tried to call KernelFunction::call() on an uninitialized KernelFunction.");
return impl::boxAndCallBoxedFunc<Return, Args...>(boxed_kernel_func_, functor_.get(), opHandle, std::forward<Args>(args)...);
}
diff --git a/aten/src/ATen/core/boxing/KernelFunction_test.cpp b/aten/src/ATen/core/boxing/KernelFunction_test.cpp
index 0750de4..1b706d1 100644
--- a/aten/src/ATen/core/boxing/KernelFunction_test.cpp
+++ b/aten/src/ATen/core/boxing/KernelFunction_test.cpp
@@ -131,7 +131,7 @@
called_with_args = c10::nullopt;
OperatorHandle dummy = makeDummyOperatorHandle();
- int64_t result = func.callUnboxed<int64_t, int64_t, int64_t>(dummy, 3, 4);
+ int64_t result = func.call<int64_t, int64_t, int64_t>(dummy, 3, 4);
EXPECT_TRUE(called_with_args.has_value());
EXPECT_EQ((tuple<int64_t, int64_t>(3, 4)), *called_with_args);
@@ -142,7 +142,7 @@
called_with_args = c10::nullopt;
OperatorHandle dummy = makeDummyOperatorHandle();
- func.callUnboxed<void, int64_t, int64_t>(dummy, 3, 4);
+ func.call<void, int64_t, int64_t>(dummy, 3, 4);
EXPECT_TRUE(called_with_args.has_value());
EXPECT_EQ((tuple<int64_t, int64_t>(3, 4)), *called_with_args);
@@ -191,12 +191,12 @@
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_with_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_with_return>()));
- kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
+ kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withoutReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunctor<kernels::unboxed_functor_without_return>(std::unique_ptr<OperatorKernel>(std::make_unique<kernels::unboxed_functor_without_return>()));
- kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
+ kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunctor_withReturn_whenCallingUnboxed_thenWorks) {
@@ -231,12 +231,12 @@
TEST(KernelFunctionTest, givenUnboxedOnlyFunction_withReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunction<decltype(kernels::unboxed_function_with_return), &kernels::unboxed_function_with_return>();
- kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
+ kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunction_withoutReturn_whenCallingBoxed_thenFails) {
KernelFunction func = KernelFunction::makeFromUnboxedOnlyFunction<decltype(kernels::unboxed_function_without_return), &kernels::unboxed_function_without_return>();
- kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()");
+ kernels::expectBoxedCallingFailsWith(func, "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()");
}
TEST(KernelFunctionTest, givenUnboxedOnlyFunction_withReturn_whenCallingUnboxed_thenWorks) {
diff --git a/aten/src/ATen/core/boxing/impl/boxing.h b/aten/src/ATen/core/boxing/impl/boxing.h
index 62ed360..21a5765 100644
--- a/aten/src/ATen/core/boxing/impl/boxing.h
+++ b/aten/src/ATen/core/boxing/impl/boxing.h
@@ -37,7 +37,7 @@
template<class Result, class... Args>
Result boxAndCallBoxedFunc(KernelFunction::InternalBoxedKernelFunction* boxed_kernel_func, OperatorKernel* functor, const OperatorHandle& opHandle, Args... args, std::enable_if_t<!supports_boxing<Result, Args...>::value, int> = 0) {
- TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::callUnboxed() for a kernel that only has a boxed kernel and doesn't support calling from an unboxed API yet.");
+ TORCH_INTERNAL_ASSERT(false, "Tried to call KernelFunction::call() for a kernel that only has a boxed kernel and doesn't support calling from an unboxed API yet.");
}
// SFINAE version for ops with returns
@@ -64,7 +64,7 @@
(*boxed_kernel_func)(functor, opHandle, &stack);
- TORCH_INTERNAL_ASSERT(stack.size() == 0, "A boxed kernel returned a value but when we called it with KernelFunction::callUnboxed, we expected it to return void.");
+ TORCH_INTERNAL_ASSERT(stack.size() == 0, "A boxed kernel returned a value but when we called it with KernelFunction::call, we expected it to return void.");
}
}
diff --git a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp
index 880a505..94c01cc 100644
--- a/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp
+++ b/aten/src/ATen/core/boxing/impl/kernel_function_test.cpp
@@ -649,7 +649,7 @@
ASSERT_TRUE(op.has_value());
expectThrows<c10::Error>(
[&] {callOp(*op, dummyTensor(dispatch_key), "1", "2", 3);},
- "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::callUnboxed()."
+ "Tried to call KernelFunction::callBoxed() on a KernelFunction that can only be called with KernelFunction::call()."
);
}
diff --git a/aten/src/ATen/core/boxing/impl/test_helpers.h b/aten/src/ATen/core/boxing/impl/test_helpers.h
index e90825c..1ff65ea 100644
--- a/aten/src/ATen/core/boxing/impl/test_helpers.h
+++ b/aten/src/ATen/core/boxing/impl/test_helpers.h
@@ -42,13 +42,13 @@
template<class Result, class... Args>
inline Result callOpUnboxed(const c10::OperatorHandle& op, Args... args) {
return c10::Dispatcher::singleton()
- .template callUnboxed<Result, Args...>(op, std::forward<Args>(args)...);
+ .template call<Result, Args...>(op, std::forward<Args>(args)...);
}
template<class Result, class... Args>
inline Result callOpUnboxedWithDispatchKey(const c10::OperatorHandle& op, c10::DispatchKey dispatchKey, Args... args) {
return c10::Dispatcher::singleton()
- .template callUnboxedWithDispatchKey<Result, Args...>(op, dispatchKey, std::forward<Args>(args)...);
+ .template callWithDispatchKey<Result, Args...>(op, dispatchKey, std::forward<Args>(args)...);
}
inline void expectDoesntFindKernel(const char* op_name, c10::DispatchKey dispatch_key) {
diff --git a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
index 1057b8c..9abcd0b 100644
--- a/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
+++ b/aten/src/ATen/core/dispatch/DispatchKeyExtractor.h
@@ -31,7 +31,7 @@
// - If there is no operator registered for a backend whose fallback behavior
// is to fallthrough, we eliminate that backend from consideration (since
// we want to "fallthrough" to the next valid key.)
- // - If a user invokes with callUnboxedWithoutDispatchKey, the mask lets us
+ // - If a user invokes with redispatch, the mask lets us
// zero out the key the user asked us to stop.
//
// These excluded backends are NOT tracked in the TLS, but must be applied
diff --git a/aten/src/ATen/core/dispatch/Dispatcher.h b/aten/src/ATen/core/dispatch/Dispatcher.h
index a5a54bd..f794720 100644
--- a/aten/src/ATen/core/dispatch/Dispatcher.h
+++ b/aten/src/ATen/core/dispatch/Dispatcher.h
@@ -107,20 +107,20 @@
// ------------------------------------------------------------------------
template<class Return, class... Args>
- Return callUnboxed(const OperatorHandle& op, Args... args) const;
+ Return call(const OperatorHandle& op, Args... args) const;
- // Like callUnboxed, but override the default DispatchKey calculation code,
+ // Like call, but override the default DispatchKey calculation code,
// instead dispatching straight to the provided DispatchKey
template<class Return, class... Args>
- Return callUnboxedWithDispatchKey(const OperatorHandle& op, DispatchKey dispatchKey, Args... args) const;
+ Return callWithDispatchKey(const OperatorHandle& op, DispatchKey dispatchKey, Args... args) const;
- // Like callUnboxed, but intended for use in a redispatch: you are currently
+ // Like call, but intended for use in a redispatch: you are currently
// in some currentDispatchKey, you have finished processing the key and
// you now want to redispatch to the next dispatch key in the chain.
- // This will mask out the current key and all previous keys from the
- // eligible set, and redo the calculation.
+ // This will mask out the current key *and all previous keys* from the
+ // eligible set, and reinvoke the dispatcher.
template<class Return, class... Args>
- Return callUnboxedRedispatch(const OperatorHandle& op, DispatchKey currentDispatchKey, Args... args) const;
+ Return redispatch(const OperatorHandle& op, DispatchKey currentDispatchKey, Args... args) const;
// Invoke an operator via the boxed calling convention using an IValue stack
void callBoxed(const OperatorHandle& op, Stack* stack) const;
@@ -258,13 +258,13 @@
}
template<class Return, class... Args>
- Return callUnboxed(Args... args) const {
- return c10::Dispatcher::singleton().callUnboxed<Return, Args...>(*this, std::forward<Args>(args)...);
+ Return call(Args... args) const {
+ return c10::Dispatcher::singleton().call<Return, Args...>(*this, std::forward<Args>(args)...);
}
template<class Return, class... Args>
- Return callUnboxedWithDispatchKey(DispatchKey dispatchKey, Args... args) const {
- return c10::Dispatcher::singleton().callUnboxedWithDispatchKey<Return, Args...>(*this, dispatchKey, std::forward<Args>(args)...);
+ Return callWithDispatchKey(DispatchKey dispatchKey, Args... args) const {
+ return c10::Dispatcher::singleton().callWithDispatchKey<Return, Args...>(*this, dispatchKey, std::forward<Args>(args)...);
}
void callBoxed(Stack* stack) const {
@@ -284,23 +284,23 @@
}
template<class Return, class... Args>
-inline Return Dispatcher::callUnboxedWithDispatchKey(const OperatorHandle& op, DispatchKey dispatchKey, Args... args) const {
+inline Return Dispatcher::callWithDispatchKey(const OperatorHandle& op, DispatchKey dispatchKey, Args... args) const {
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
const auto& dispatchTable = op.operatorIterator_->op.dispatch_table();
const KernelFunction& kernel = dispatch_(dispatchTable, dispatchKey);
- return kernel.template callUnboxed<Return, Args...>(op, std::forward<Args>(args)...);
+ return kernel.template call<Return, Args...>(op, std::forward<Args>(args)...);
}
template<class Return, class... Args>
-inline Return Dispatcher::callUnboxed(const OperatorHandle& op, Args... args) const {
+inline Return Dispatcher::call(const OperatorHandle& op, Args... args) const {
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
const auto& dispatchTable = op.operatorIterator_->op.dispatch_table();
auto dispatchKey = dispatchTable.dispatchKeyExtractor().getDispatchKeyUnboxed<Args...>(backendsWithoutFallthrough_, DispatchKeySet::FULL, args...);
- return callUnboxedWithDispatchKey<Return, Args...>(op, dispatchKey, args...);
+ return callWithDispatchKey<Return, Args...>(op, dispatchKey, args...);
}
template<class Return, class... Args>
-inline Return Dispatcher::callUnboxedRedispatch(const OperatorHandle& op, DispatchKey currentDispatchKey, Args... args) const {
+inline Return Dispatcher::redispatch(const OperatorHandle& op, DispatchKey currentDispatchKey, Args... args) const {
detail::unused_arg_(args...); // workaround for a false-positive warning about unused parameters in gcc 5
const auto& dispatchTable = op.operatorIterator_->op.dispatch_table();
auto dispatchKey = dispatchTable.dispatchKeyExtractor().getDispatchKeyUnboxed<Args...>(
@@ -308,7 +308,7 @@
DispatchKeySet(DispatchKeySet::FULL_AFTER, currentDispatchKey),
args...);
const KernelFunction& kernel = dispatch_(dispatchTable, dispatchKey);
- return kernel.template callUnboxed<Return, Args...>(op, std::forward<Args>(args)...);
+ return kernel.template call<Return, Args...>(op, std::forward<Args>(args)...);
}
inline void Dispatcher::callBoxed(const OperatorHandle& op, Stack* stack) const {
diff --git a/aten/src/ATen/core/op_registration/op_registration_test.cpp b/aten/src/ATen/core/op_registration/op_registration_test.cpp
index f184a0e..727b4de 100644
--- a/aten/src/ATen/core/op_registration/op_registration_test.cpp
+++ b/aten/src/ATen/core/op_registration/op_registration_test.cpp
@@ -811,7 +811,7 @@
ASSERT_TRUE(op.has_value());
called_autograd = false;
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
EXPECT_TRUE(called_autograd);
}
@@ -824,7 +824,7 @@
ASSERT_TRUE(op.has_value());
called_nonautograd = called_autograd = false;
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
EXPECT_FALSE(called_nonautograd);
EXPECT_TRUE(called_autograd);
}
@@ -839,7 +839,7 @@
called_nonautograd = called_autograd = false;
at::AutoNonVariableTypeMode _var_guard(true);
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
EXPECT_TRUE(called_nonautograd);
EXPECT_FALSE(called_autograd);
}
@@ -853,7 +853,7 @@
ASSERT_TRUE(op.has_value());
called_nonautograd = called_autograd = false;
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU)); // note: all tensors have VariableTypeId set
EXPECT_FALSE(called_nonautograd);
EXPECT_TRUE(called_autograd);
}
@@ -868,7 +868,7 @@
called_nonautograd = called_autograd = false;
at::AutoNonVariableTypeMode _var_guard(true);
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
EXPECT_TRUE(called_nonautograd);
EXPECT_FALSE(called_autograd);
}
@@ -882,12 +882,12 @@
ASSERT_TRUE(op.has_value());
called_nonautograd = called_autograd = false;
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(c10::DispatchKeySet{DispatchKey::XLA, DispatchKey::XLAPreAutograd}));
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(c10::DispatchKeySet{DispatchKey::XLA, DispatchKey::XLAPreAutograd}));
EXPECT_TRUE(called_nonautograd);
EXPECT_FALSE(called_autograd);
called_nonautograd = called_autograd = false;
- c10::Dispatcher::singleton().callUnboxed<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
+ c10::Dispatcher::singleton().call<void, Tensor>(*op, dummyTensor(DispatchKey::CPU));
EXPECT_TRUE(called_autograd);
EXPECT_FALSE(called_nonautograd);
}
@@ -1565,7 +1565,7 @@
m.impl("fn", c10::DispatchKey::BackendSelect, [&](const Tensor& x) {
backend_generic_called = true;
auto op = c10::Dispatcher::singleton().findSchema({"test::fn", ""});
- return c10::Dispatcher::singleton().callUnboxedRedispatch<Tensor, const Tensor&>(*op, c10::DispatchKey::BackendSelect, x);
+ return c10::Dispatcher::singleton().redispatch<Tensor, const Tensor&>(*op, c10::DispatchKey::BackendSelect, x);
});
auto op = Dispatcher::singleton().findSchema({"test::fn", ""});
diff --git a/aten/src/ATen/function_wrapper.py b/aten/src/ATen/function_wrapper.py
index f036e5c..0b0834c 100644
--- a/aten/src/ATen/function_wrapper.py
+++ b/aten/src/ATen/function_wrapper.py
@@ -133,7 +133,7 @@
${static_dispatch_method_body}
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchemaOrThrow("aten::${operator_name}", "${overload_name}");
- return op.callUnboxed<${formals_types_with_return}>(${method_actuals});
+ return op.call<${formals_types_with_return}>(${method_actuals});
#endif
}
""")
@@ -158,7 +158,7 @@
#else
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchemaOrThrow("aten::${operator_name}", "${overload_name}");
- return op.callUnboxed<${formals_types_with_return}>(${native_actuals});
+ return op.call<${formals_types_with_return}>(${native_actuals});
#endif
}
""")
diff --git a/aten/src/ATen/gen_backend_select_register.py b/aten/src/ATen/gen_backend_select_register.py
index 38bb481..f50913b 100644
--- a/aten/src/ATen/gen_backend_select_register.py
+++ b/aten/src/ATen/gen_backend_select_register.py
@@ -36,7 +36,7 @@
Tensor ${function_name}(${method_formals}) {
static OperatorHandle OP = c10::Dispatcher::singleton().findSchemaOrThrow("aten::${name}", "${overload_name}");
${dispatch_key_init}
- return OP.callUnboxedWithDispatchKey<${formals_types}>(_dk, ${type_method_actuals});
+ return OP.callWithDispatchKey<${formals_types}>(_dk, ${type_method_actuals});
}
""")
diff --git a/test/mobile/op_deps/simple_ops.h b/test/mobile/op_deps/simple_ops.h
index feac4a9..cbead48 100644
--- a/test/mobile/op_deps/simple_ops.h
+++ b/test/mobile/op_deps/simple_ops.h
@@ -7,42 +7,42 @@
static inline Tensor call_AA_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::AA", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
static inline Tensor call_BB_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::BB", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
static inline Tensor call_CC_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::CC", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
static inline Tensor call_DD_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::DD", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
static inline Tensor call_EE_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::EE", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
static inline Tensor call_FF_op(const Tensor& self) {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::FF", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
}
diff --git a/test/mobile/op_deps/utils.cpp b/test/mobile/op_deps/utils.cpp
index 1fb0048..dd471f4 100644
--- a/test/mobile/op_deps/utils.cpp
+++ b/test/mobile/op_deps/utils.cpp
@@ -27,7 +27,7 @@
auto lambda = [&]() {
static c10::OperatorHandle op = c10::Dispatcher::singleton()
.findSchema({"_test::AA", ""}).value();
- return c10::Dispatcher::singleton().callUnboxed<Tensor, const Tensor&>(
+ return c10::Dispatcher::singleton().call<Tensor, const Tensor&>(
op, self, self);
};
return lambda();
diff --git a/tools/autograd/gen_variable_type.py b/tools/autograd/gen_variable_type.py
index 9a7b6c0..6d219a0 100644
--- a/tools/autograd/gen_variable_type.py
+++ b/tools/autograd/gen_variable_type.py
@@ -347,7 +347,7 @@
static auto op = c10::Dispatcher::singleton().findSchema({"aten::${operator_name}", "${overload_name}"});
TORCH_INTERNAL_ASSERT(op);
RECORD_FUNCTION("${name}", std::vector<c10::IValue>({${input_names}}), Node::peek_at_next_sequence_nr());
-return c10::Dispatcher::singleton().callUnboxedRedispatch<${ret_and_arg_types}>(${profiled_dispatch_args});
+return c10::Dispatcher::singleton().redispatch<${ret_and_arg_types}>(${profiled_dispatch_args});
""")
FACTORY_FUNCTION_NAMES = None
diff --git a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
index e2a4b48..d7e358d 100644
--- a/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
+++ b/torch/csrc/jit/passes/onnx/unpack_quantized_weights.cpp
@@ -23,7 +23,7 @@
// boxing code currently does not support this. Instead, exclude the Profiler
// dispatch key and go through unboxed dispatch, avoiding boxing altogether
c10::impl::ExcludeDispatchKeyGuard key_guard(c10::DispatchKey::Profiler);
- return c10::Dispatcher::singleton().template callUnboxed<Result, Args...>(
+ return c10::Dispatcher::singleton().template call<Result, Args...>(
op, std::forward<Args>(args)...);
}
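
For reference, a minimal sketch (not part of this patch) of the redispatch pattern under the new names, modeled on the BackendSelect lambda in op_registration_test.cpp above; the operator name "test::fn" and the wrapper function are placeholders:

    // Illustrative only: a backend-select style kernel that skips its own
    // dispatch key and forwards to the next eligible kernel via the renamed
    // Dispatcher::redispatch() (formerly callUnboxedRedispatch()).
    #include <ATen/core/dispatch/Dispatcher.h>

    at::Tensor backend_select_fn(const at::Tensor& x) {
      static auto op = c10::Dispatcher::singleton().findSchemaOrThrow("test::fn", "");
      // Masks out BackendSelect and all previous keys, then re-dispatches.
      return c10::Dispatcher::singleton().redispatch<at::Tensor, const at::Tensor&>(
          op, c10::DispatchKey::BackendSelect, x);
    }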