Update MLIR bridge phase 1 logging: emit messages only under VLOG(1)
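
The one-time pass-summary INFO logs in mlir_graph_optimization_pass.cc are now
wrapped in VLOG_IS_ON(1), and the TPU bridge status messages in
mlir_bridge_pass.cc use VLOG(1) directly, so phase 1 produces no log output
unless verbose logging is requested. A minimal sketch of the resulting pattern
(illustrative only, not part of the patch; LogBridgeStatus is a hypothetical
helper, the macros are the ones used in the diff):

    #include "tensorflow/core/platform/logging.h"

    void LogBridgeStatus(int num_enabled, int num_disabled, int total) {
      // One-time summary: skipped entirely unless verbose logging is on.
      if (VLOG_IS_ON(1)) {
        LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
                             << " Enabled: " << num_enabled
                             << ", Disabled: " << num_disabled
                             << ", Total: " << total;
      }
      // Per-invocation message: verbose-only, logged on every call.
      VLOG(1) << "Running MLIR TPU Bridge";
    }
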
PiperOrigin-RevId: 382492482
Change-Id: Ibbbb7ac062a610f20599637e99ac9ac1971a117a
diff --git a/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc b/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc
index 055743a..bcc6d21 100644
--- a/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc
+++ b/tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc
@@ -172,16 +172,21 @@
/*record_stats=*/true);
if (overall_state == MlirOptimizationPassState::Disabled) {
- LOG_FIRST_N(INFO, 1) << "None of the MLIR Optimization Passes are enabled "
- << "(registered " << registry_->passes().size() << ")";
+ if (VLOG_IS_ON(1)) {
+ LOG_FIRST_N(INFO, 1)
+ << "None of the MLIR Optimization Passes are enabled "
+ << "(registered " << registry_->passes().size() << ")";
+ }
return Status::OK();
}
- LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
- << " Enabled: " << num_passes_enabled
- << ", Disabled: " << num_passes_disabled
- << ", FallbackEnabled: " << num_passes_fallback_enabled
- << ", Total: " << registry_->passes().size();
+ if (VLOG_IS_ON(1)) {
+ LOG_FIRST_N(INFO, 1) << "MLIR Graph Optimization Passes."
+ << " Enabled: " << num_passes_enabled
+ << ", Disabled: " << num_passes_disabled
+ << ", FallbackEnabled: " << num_passes_fallback_enabled
+ << ", Total: " << registry_->passes().size();
+ }
GraphDebugInfo debug_info;
mlir::DialectRegistry registry;
diff --git a/tensorflow/compiler/tf2xla/mlir_bridge_pass.cc b/tensorflow/compiler/tf2xla/mlir_bridge_pass.cc
index a5ba2a9..82cabad 100644
--- a/tensorflow/compiler/tf2xla/mlir_bridge_pass.cc
+++ b/tensorflow/compiler/tf2xla/mlir_bridge_pass.cc
@@ -139,7 +139,7 @@
// based on the devices in the module.
if (GetPassState(/*device_set=*/nullptr, config_proto, graph,
function_library) == MlirOptimizationPassState::Disabled) {
- LOG_AT_LEAST_ONCE("Skipping MLIR TPU Bridge, session flag not enabled");
+ VLOG(1) << "Skipping MLIR TPU Bridge, session flag not enabled";
mlir_bridge_gauge_v2->GetCell()->Set(false);
return Status::OK();
}
@@ -151,7 +151,7 @@
return Status::OK();
}
- LOG_AT_LEAST_ONCE("Running MLIR TPU Bridge");
+ VLOG(1) << "Running MLIR TPU Bridge";
mlir_bridge_gauge_v2->GetCell()->Set(true);
TF_RETURN_IF_ERROR(
@@ -198,8 +198,7 @@
if (GetPassState(/*device_set=*/nullptr, options.session_options->config,
**options.graph,
*options.flib_def) == MlirOptimizationPassState::Disabled) {
- LOG_AT_LEAST_ONCE(
- "Skipping MLIR TPU Bridge V1 Compat, session flag not enabled");
+ VLOG(1) << "Skipping MLIR TPU Bridge V1 Compat, session flag not enabled";
mlir_bridge_gauge_v1->GetCell()->Set(false);
return Status::OK();
}
@@ -211,7 +210,7 @@
return Status::OK();
}
- LOG_AT_LEAST_ONCE("Running MLIR TPU Bridge V1 Compat");
+ VLOG(1) << "Running MLIR TPU Bridge V1 Compat";
mlir_bridge_gauge_v1->GetCell()->Set(true);
TF_RETURN_IF_ERROR(
diff --git a/tensorflow/compiler/tf2xla/xla_compiler.cc b/tensorflow/compiler/tf2xla/xla_compiler.cc
index 6d2703a..e94c52f 100644
--- a/tensorflow/compiler/tf2xla/xla_compiler.cc
+++ b/tensorflow/compiler/tf2xla/xla_compiler.cc
@@ -816,7 +816,7 @@
/*uses_uninitialized_resource_args=*/AnyUninitializedResourceArg(args));
}
if (policy == MlirBridgeRolloutPolicy::kEnabledByUser) {
- VLOG(1) << "Using MLIR bridge";
+ VLOG(1) << "Using MLIR bridge to compile the function";
GraphDebugInfo debug_info;
std::vector<std::string> valid_control_rets =
@@ -828,6 +828,7 @@
options.use_tuple_arg, /*analyse_graph=*/false, *options_.flib_def,
debug_info, options_.shape_representation_fn, result));
} else {
+ VLOG(1) << "Using the old bridge to compile the function";
TF_RETURN_IF_ERROR(
CompileGraph(options, function_id, std::move(graph), args, result));
}
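
After this change, phase 1 bridge logging is silent by default; the messages
above appear only when verbose logging is enabled, e.g. via the usual
TF_CPP_MIN_VLOG_LEVEL / TF_CPP_VMODULE controls (the program name below is a
placeholder):

    TF_CPP_MIN_VLOG_LEVEL=1 python your_program.py
    # or scope verbosity to just these translation units:
    TF_CPP_VMODULE=mlir_bridge_pass=1,mlir_graph_optimization_pass=1 python your_program.py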