Remove the lock acquire in AllocateTensor, since the lock has already been acquired before the function is called. Add a TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) annotation to prevent future confusion.
PiperOrigin-RevId: 404219869
Change-Id: I6abd1d00b5e1709bbc64e75468b9eae4c9478dcf
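
For reviewers unfamiliar with the annotation: TF_EXCLUSIVE_LOCKS_REQUIRED (from tensorflow/core/platform/thread_annotations.h) wraps Clang's thread-safety attributes, so under -Wthread-safety the compiler statically checks that every caller holds mu_ before entering AllocateTensor. Note the removed code only took a tf_shared_lock, while the annotation documents the stronger contract this change relies on: the caller holds mu_ exclusively. Below is a minimal, self-contained sketch of the pattern; the class and member names are illustrative, not TensorFlow's, and the macros are hand-rolled stand-ins for the TF ones. Compile with: clang++ -std=c++17 -Wthread-safety demo.cc

    #include <mutex>

    // Stand-ins for the Clang thread-safety attribute macros that
    // TF_EXCLUSIVE_LOCKS_REQUIRED / TF_GUARDED_BY wrap.
    #if defined(__clang__)
    #define CAPABILITY(x) __attribute__((capability(x)))
    #define ACQUIRE(...) __attribute__((acquire_capability(__VA_ARGS__)))
    #define RELEASE(...) __attribute__((release_capability(__VA_ARGS__)))
    #define LOCKS_REQUIRED(...) \
      __attribute__((exclusive_locks_required(__VA_ARGS__)))
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #else
    #define CAPABILITY(x)
    #define ACQUIRE(...)
    #define RELEASE(...)
    #define LOCKS_REQUIRED(...)
    #define GUARDED_BY(x)
    #endif

    // Annotated mutex wrapper, as in the Clang thread-safety docs;
    // std::mutex itself carries no annotations.
    class CAPABILITY("mutex") Mutex {
     public:
      void Lock() ACQUIRE() { mu_.lock(); }
      void Unlock() RELEASE() { mu_.unlock(); }
     private:
      std::mutex mu_;
    };

    class FilterCacheDemo {
     public:
      // The annotation states the contract this change depends on: the
      // caller already holds mu_ exclusively, so the function must not
      // (and need not) take the lock again.
      void AllocateScratch(int n) LOCKS_REQUIRED(mu_) { cached_bytes_ = n; }

      void Cache(int n) {
        mu_.Lock();
        AllocateScratch(n);   // OK: analysis knows mu_ is held here.
        mu_.Unlock();
        // AllocateScratch(n);  // -Wthread-safety warning: mu_ not held.
      }

     private:
      Mutex mu_;
      int cached_bytes_ GUARDED_BY(mu_) = 0;
    };

Re-acquiring the lock inside the annotated function would be redundant at best (and a deadlock with a non-recursive exclusive lock), which is why the inner tf_shared_lock block below is dropped in favor of the static annotation.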
diff --git a/tensorflow/core/kernels/mkl/mkl_conv_ops.cc b/tensorflow/core/kernels/mkl/mkl_conv_ops.cc
index a1849cd..6063731 100644
--- a/tensorflow/core/kernels/mkl/mkl_conv_ops.cc
+++ b/tensorflow/core/kernels/mkl/mkl_conv_ops.cc
@@ -1173,7 +1173,8 @@
// descriptor (data format)
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,
Tensor** filter_tensor,
- const MklDnnShape* filter_mkl_shape) {
+ const MklDnnShape* filter_mkl_shape)
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
DCHECK(filter_tensor);
TensorShape filter_tf_shape;
filter_tf_shape.AddDim(
@@ -1195,11 +1196,8 @@
OP_REQUIRES_OK(context,
context->allocate_temp(DT_UINT8, cached_filter_md_shape,
&cached_filter_md_));
- {
- tf_shared_lock lock(mu_);
- *reinterpret_cast<memory::desc*>(cached_filter_md_.flat<uint8>().data()) =
- weights_desc;
- }
+ *reinterpret_cast<memory::desc*>(cached_filter_md_.flat<uint8>().data()) =
+ weights_desc;
}
void AllocateTensor(OpKernelContext* context, const ConvFwdPd& conv_prim_desc,