Simplify the code. The tolerate_reduce condition is only used on one of the call paths, so there is no need to test it.
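
For illustration, a minimal, self-contained sketch of the pattern (hypothetical names and opcodes, not the XLA code itself): a defaulted flag that only a single caller ever sets to true can be dropped and its effect folded into the function body.

    // Before (sketch): the flag defaulted to false and was overridden
    // at exactly one call site:
    //   bool MayPreventVectorization(Op op, bool tolerate_reduce = false);
    //   ... MayPreventVectorization(op, /*tolerate_reduce=*/true) ...
    #include <cassert>

    enum class Op { kReduce, kSort, kElementwise };

    // After the simplification: kReduce no longer prevents vectorization,
    // which is what the lone tolerate_reduce=true caller always requested.
    bool MayPreventVectorization(Op op) {
      switch (op) {
        case Op::kReduce:
          return false;  // was `return !tolerate_reduce;`
        case Op::kSort:
          return true;   // lowers to loops that defeat vectorization
        default:
          return false;
      }
    }

    int main() {
      // The former `/*tolerate_reduce*/ true` call site now reads:
      assert(!MayPreventVectorization(Op::kReduce));
    }
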
diff --git a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
index 11912e0..177ce7b 100644
--- a/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
+++ b/tensorflow/compiler/xla/service/gpu/ir_emitter_unnested.cc
@@ -1865,14 +1865,13 @@
 
 // Returns true if the fusion contains any instruction that is likely
 // translated to complex LLVM IR, such as loops, and prevent vectorization.
-bool MayPreventVectorization(const HloInstruction& hlo,
-                             bool tolerate_reduce = false) {
+bool MayPreventVectorization(const HloInstruction& hlo) {
   if (hlo.opcode() == HloOpcode::kFusion) {
     return absl::c_any_of(hlo.fused_instructions_computation()->instructions(),
                           [&](const HloInstruction* instr) {
                             switch (instr->opcode()) {
                               case HloOpcode::kReduce:
-                                return !tolerate_reduce;
+                                return false;
                               case HloOpcode::kReduceWindow:
                               case HloOpcode::kSort:
                               case HloOpcode::kDot:
@@ -1900,7 +1899,7 @@
         return false;
     }
   } else if (hlo.opcode() == HloOpcode::kReduce) {
-    return !tolerate_reduce;
+    return false;
   }
   return true;
 }
@@ -3213,7 +3212,7 @@
     if (reduction_dimensions.dimensions[2] % 2 == 0 &&
         // As XLA unroll and suppose LLVM will vectorize,
         // disable the unroll for case that LLVM doesn't vectorize.
-        !MayPreventVectorization(*unnested_hlo, /*tolerate_reduce*/ true)) {
+        !MayPreventVectorization(*unnested_hlo)) {
       vector_size = 2;
     } else {
       indexing_order = kStridedIndexingX;
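
Note: with the flag gone, the even-width row-reduction case above takes the vector_size = 2 unroll path whenever MayPreventVectorization(*unnested_hlo) is false, which reductions now satisfy by default; fusions containing the opcodes the function still rejects (kReduceWindow, kSort, kDot, ...) continue to fall back to kStridedIndexingX.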