Turn default kernels into Meyer singletons (#60568)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/60568
https://github.com/pytorch/pytorch/pull/58661 induced a static
initialization order fiasco as flagged by ASAN strict_init_order=true.
On further inspection, it became clear that it was not necessary for
these to actually be globals initialized at module load time; so
I converted them into Meyer singletons, which guarantees they are
initialized on first use, exactly when another compilation unit
first requests them.
Signed-off-by: Edward Z. Yang <ezyang@fb.com>
Test Plan: Imported from OSS
Reviewed By: bdhirsh
Differential Revision: D29338019
Pulled By: ezyang
fbshipit-source-id: 282846118df6867277404a1830d0ce39fccaa769
diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.cpp b/aten/src/ATen/core/dispatch/OperatorEntry.cpp
index 9bb104b..14ee356 100644
--- a/aten/src/ATen/core/dispatch/OperatorEntry.cpp
+++ b/aten/src/ATen/core/dispatch/OperatorEntry.cpp
@@ -46,8 +46,16 @@
}
} // anonymous namespace
-const AnnotatedKernel OperatorEntry::ambiguousAutogradOtherKernel_ = AnnotatedKernel(
+const AnnotatedKernel& OperatorEntry::missingKernel() const {
+ static AnnotatedKernel kernel;
+ return kernel;
+}
+
+const AnnotatedKernel& OperatorEntry::ambiguousAutogradOtherKernel() const {
+ static AnnotatedKernel kernel(
c10::KernelFunction::makeAmbiguousAutogradOther(), nullptr, "ambiguous_autogradother");
+ return kernel;
+}
void OperatorEntry::registerSchema(FunctionSchema&& schema, std::string&& debug) {
TORCH_INTERNAL_ASSERT(!schema_.has_value());
@@ -190,7 +198,7 @@
// (2.2) Use kernel from DispatchKey::CompositeImplicitAutograd if available.
// For autograd keys, we only use kernel from CompositeImplicitAutograd when there's no direct registration
// to its corresponding backend key or CompositeExplicitAutograd. See Note [CompositeExplicitAutograd and CompositeImplicitAutograd].
- // For AutogradOther, we eagerly return ambiguousAutogradOtherKernel_ if there's registration to any of
+ // For AutogradOther, we eagerly return ambiguousAutogradOtherKernel() if there's registration to any of
// its backends and ask backend extender to request a decicated Autograd key for the backend.
// See Note [Ambiguity in AutogradOther kernel] for more details.
// A CompositeExplicitAutograd kernel prevents CompositeImplicitAutograd kernel being used for Autograd keys, but it doesn't
@@ -229,14 +237,14 @@
// 2.2. Use CompositeImplicitAutograd kernel if available. For autograd keys, we only use kernel from CompositeImplicitAutograd
// when there's no direct registration to its corresponding backend key or CompositeExplicitAutograd.
- // For AutogradOther, we return ambiguousAutogradOtherKernel_ if there's registration
+ // For AutogradOther, we return ambiguousAutogradOtherKernel() if there's registration
// to any of its backends.
// See Note [Undefined in dispatchTable_] for the special handling for Undefined.
if (dispatch_key == DispatchKey::Undefined || isIncludedInAlias(dispatch_key, DispatchKey::CompositeImplicitAutograd)) {
if (auto math_registration = getKernelForDispatchKey(DispatchKey::CompositeImplicitAutograd)) {
if (dispatch_key == DispatchKey::AutogradOther
&& hasKernelForAnyDispatchKey(c10::autogradother_backends)) {
- return {ambiguousAutogradOtherKernel_, "ambiguous autogradother"};
+ return {ambiguousAutogradOtherKernel(), "ambiguous autogradother"};
} else if (!has_backend_kernel) {
return {*math_registration.value(), "math kernel"};
}
@@ -257,7 +265,7 @@
}
// 4. Default to error
- return {missingKernel_, "missing"};
+ return {missingKernel(), "missing"};
}
// synchronizes the dispatch table entry for a given dispatch key
diff --git a/aten/src/ATen/core/dispatch/OperatorEntry.h b/aten/src/ATen/core/dispatch/OperatorEntry.h
index e5d4b53..cb225c3 100644
--- a/aten/src/ATen/core/dispatch/OperatorEntry.h
+++ b/aten/src/ATen/core/dispatch/OperatorEntry.h
@@ -217,8 +217,8 @@
// currently not high-pri.
ska::flat_hash_map<DispatchKey, std::list<AnnotatedKernel>> kernels_;
- AnnotatedKernel missingKernel_;
- static const AnnotatedKernel ambiguousAutogradOtherKernel_;
+ const AnnotatedKernel& missingKernel() const;
+ const AnnotatedKernel& ambiguousAutogradOtherKernel() const;
// cpp_signature_ stores function signature if any of
// the kernels was created in a way that allowed us to know the function