Replace `tensorflow::Status::OK()` with `tensorflow::OkStatus()`.
PiperOrigin-RevId: 452145640
diff --git a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.cc b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.cc
index 8ffdf62..48bc009 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.cc
+++ b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.cc
@@ -168,7 +168,7 @@
return InvalidArgument("%s got bad device_id: %d (num_devices=%d).",
caller_name, device_id, device_count());
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
static Status CheckDataType(xla::PrimitiveType dtype) {
@@ -178,7 +178,7 @@
"64-bit data types are not yet supported on the TPU driver API. "
"Convert inputs to float32/int32_t before using.");
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
/* static */
@@ -303,7 +303,7 @@
if (host_value_) {
// The host value has already been requested or is available.
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
host_value->value = std::make_shared<Literal>(on_host_shape_);
@@ -350,7 +350,7 @@
}
});
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<std::shared_ptr<Literal>> PyTpuBuffer::ToLiteral() {
diff --git a/tensorflow/compiler/xla/python/tpu_driver/direct_tpu_driver.cc b/tensorflow/compiler/xla/python/tpu_driver/direct_tpu_driver.cc
index 76d7978..e7388d7 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/direct_tpu_driver.cc
+++ b/tensorflow/compiler/xla/python/tpu_driver/direct_tpu_driver.cc
@@ -35,7 +35,7 @@
xla::Status CreateXlaStatus(::TpuStatus* status) {
if (status->code == tensorflow::error::OK) {
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
} else {
return xla::Status(tensorflow::error::Code(status->code),
absl::StrFormat("%s", status->msg));
diff --git a/tensorflow/compiler/xla/python/tpu_driver/grpc_tpu_driver.cc b/tensorflow/compiler/xla/python/tpu_driver/grpc_tpu_driver.cc
index fe7f360..03aeb0b 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/grpc_tpu_driver.cc
+++ b/tensorflow/compiler/xla/python/tpu_driver/grpc_tpu_driver.cc
@@ -135,7 +135,7 @@
return status;
}
*program_shape = metadata_->program_shape();
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
std::shared_ptr<CompiledProgramMetadata> metadata() { return metadata_; }
@@ -569,7 +569,7 @@
if (it == events_.end()) {
// This event has already been marked as done and deleted. Assume success.
events_mutex_.Unlock();
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
if (!it->second.all_deps_done) {
@@ -597,7 +597,8 @@
return !events_.contains(id) || events_[id].done;
};
if (events_mutex_.AwaitWithTimeout(absl::Condition(&done), duration)) {
- auto status = events_.contains(id) ? events_[id].status : Status::OK();
+ auto status =
+ events_.contains(id) ? events_[id].status : ::tensorflow::OkStatus();
events_mutex_.Unlock();
return status;
}
@@ -710,7 +711,7 @@
Status(static_cast<tensorflow::error::Code>(entry.status().code()),
entry.status().message()));
} else {
- UpdateEventStatus(event_id, Status::OK());
+ UpdateEventStatus(event_id, ::tensorflow::OkStatus());
}
}
}
@@ -1050,7 +1051,7 @@
". Details: ", status.error_details()));
}
closed_ = true;
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
} // namespace
diff --git a/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc b/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
index a2112fa..9d865be 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
+++ b/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
@@ -94,7 +94,7 @@
for (auto& event : events_) {
TF_RETURN_IF_ERROR(event->Await());
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
absl::optional<xla::Status> AwaitWithTimeout(
@@ -109,7 +109,7 @@
TF_RETURN_IF_ERROR(status.value());
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
void AddCallback(std::function<void(Status)> callback)
@@ -344,7 +344,7 @@
for (auto& driver : drivers_) {
TF_RETURN_IF_ERROR(driver.second->Reset());
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
std::unique_ptr<BufferHandle> Allocate(
@@ -733,7 +733,7 @@
if (event == events_.end()) {
auto event_status = abnormal_event_status_.find(event_id);
if (event_status == abnormal_event_status_.end()) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
} else {
return event_status->second;
}
@@ -768,7 +768,7 @@
absl::MutexLock l(&mu_);
auto event_status = abnormal_event_status_.find(event_id);
if (event_status == abnormal_event_status_.end()) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
} else {
return event_status->second;
}
@@ -783,7 +783,7 @@
if (event == events_.end()) {
auto event_status = abnormal_event_status_.find(event_id);
if (event_status == abnormal_event_status_.end()) {
- fn(Status::OK());
+ fn(::tensorflow::OkStatus());
} else {
fn(event_status->second);
}