Use a more concise name for the ShapeUtil tuple-tree-compatibility comparison
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index edbc305..34ff2ed 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -1498,8 +1498,7 @@
void HloInstruction::SetupDerivedInstruction(
HloInstruction* derived_instruction) const {
if (sharding_ != nullptr &&
- ShapeUtil::CompatibleIgnoringElementTypeAndDimensions(
- shape_, derived_instruction->shape())) {
+ ShapeUtil::CompatibleKind(shape_, derived_instruction->shape())) {
// Only copy sharding if the tuple tree shape of the two instruction is
// compatible because copying it between differently shaped instructions
// can produce invalid shardings.
diff --git a/tensorflow/compiler/xla/shape_util.cc b/tensorflow/compiler/xla/shape_util.cc
index 00e9eea..e068794 100644
--- a/tensorflow/compiler/xla/shape_util.cc
+++ b/tensorflow/compiler/xla/shape_util.cc
@@ -654,7 +654,7 @@
.IgnoreLayout()(lhs, rhs);
}
-/* static */ bool ShapeUtil::CompatibleIgnoringElementTypeAndDimensions(
+/* static */ bool ShapeUtil::CompatibleKind(
const Shape& lhs, const Shape& rhs) {
return Shape::Equal().IgnoreElementType().IgnoreLayout().IgnoreDimensions()
.IgnoreDynamicDimension()(lhs, rhs);
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index ec9fbf5..2d0ec13 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -296,8 +296,7 @@
// Returns true if the tuple tree shapes are identical. Leaf dimensions,
// element type, and layout are ignored. Tuple elements are compared
// recursively for compatibility.
- static bool CompatibleIgnoringElementTypeAndDimensions(const Shape& lhs,
- const Shape& rhs);
+ static bool CompatibleKind(const Shape& lhs, const Shape& rhs);
// As Compatible, but allow one of lhs and rhs to be BF16 while the other
// being F32. Tuple elements are compared recursively for compatibility.