[XLA:GPU] Denylist a bad cuDNN convolution algorithm.

Disable cuDNN algorithm 0 (with and without tensor ops) for the listed f16
backward-filter convolution on compute capability 7.0 with cuDNN 8.2.1 and
cuBLAS 11402.

PiperOrigin-RevId: 383932719
Change-Id: I3a5b29ac56d621388cb430a52ce7879cdffe87e4
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
index 0be2573..7d74667 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
@@ -397,7 +397,7 @@
   const bool crash_on_checking_failure =
       debug_options.xla_gpu_crash_on_verification_failures();
 
-  const auto canonical_hlo =
+  std::string canonical_hlo =
       std::get<1>(AutotuneCacheKeyfromInstruction(instr, stream_exec_));
 
   string blas_version;
diff --git a/tensorflow/compiler/xla/service/gpu/hlo_algorithm_denylist.cc b/tensorflow/compiler/xla/service/gpu/hlo_algorithm_denylist.cc
index 4a0075f..7c80341 100644
--- a/tensorflow/compiler/xla/service/gpu/hlo_algorithm_denylist.cc
+++ b/tensorflow/compiler/xla/service/gpu/hlo_algorithm_denylist.cc
@@ -39,6 +39,14 @@
     algos { id: 7 tensor_ops: true }
     blas_version: "10201"
   }
+  entries {
+    hlo: "(f16[3,3,256,256]{2,1,0,3}, u8[0]{0}) custom-call(f16[2048,7,7,256]{3,2,1,0}, f16[2048,7,7,256]{3,2,1,0}), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target=\"__cudnn$convBackwardFilter\", backend_config=\"{\\\"algorithm\\\":\\\"0\\\",\\\"tensor_ops_enabled\\\":false,\\\"conv_result_scale\\\":1,\\\"activation_mode\\\":\\\"0\\\",\\\"side_input_scale\\\":0}\""
+    cc { major: 7 }
+    cudnn_version { major: 8 minor: 2 patch: 1 }
+    algos { id: 0 tensor_ops: true }
+    algos { id: 0 }
+    blas_version: "11402"
+  }
 )pb";
 
 absl::Span<const stream_executor::dnn::AlgorithmDesc> GetDisabledConvAlgorithms(
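
For context, a minimal self-contained sketch of the kind of lookup GetDisabledConvAlgorithms performs against entries like the one added above. The struct, key layout, and map below are illustrative assumptions for this note, not the actual XLA types; the real implementation parses the denylist textproto and matches on the canonical HLO string together with the compute capability, cuDNN version, and BLAS version.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <tuple>
#include <vector>

// Stand-in for stream_executor::dnn::AlgorithmDesc: an algorithm id plus
// whether tensor ops are enabled.
struct AlgorithmDesc {
  int64_t id;
  bool tensor_ops;
};

// Hypothetical key mirroring one denylist entry: canonical HLO string,
// compute-capability major version, cuDNN version, and cuBLAS version.
using DenylistKey = std::tuple<std::string, int, std::string, std::string>;

int main() {
  std::map<DenylistKey, std::vector<AlgorithmDesc>> denylist;

  // Entry corresponding to the one added in this change: on CC 7.0 with
  // cuDNN 8.2.1 and cuBLAS 11402, algorithm 0 is skipped both with and
  // without tensor ops.  The HLO key is abbreviated here; the real entry
  // uses the full canonicalized custom-call string shown in the diff.
  denylist[{"__cudnn$convBackwardFilter f16[3,3,256,256] ...", 7, "8.2.1",
            "11402"}] = {{0, /*tensor_ops=*/true}, {0, /*tensor_ops=*/false}};

  // Lookup as the autotuner would do it: if the current configuration has an
  // entry, every listed algorithm is excluded from the autotuning candidates.
  DenylistKey key{"__cudnn$convBackwardFilter f16[3,3,256,256] ...", 7,
                  "8.2.1", "11402"};
  if (auto it = denylist.find(key); it != denylist.end()) {
    for (const AlgorithmDesc& algo : it->second) {
      std::cout << "skipping algorithm " << algo.id
                << (algo.tensor_ops ? " (tensor ops)" : "") << "\n";
    }
  }
  return 0;
}

Because the key includes the full canonicalized HLO string, an entry like this one only affects the exact convolution shape and configuration that was observed to misbehave, leaving other convolutions free to use the same algorithm.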