Replace `tensorflow::Status::OK()` with `tensorflow::OkStatus()`.

PiperOrigin-RevId: 452387972
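
This is a mechanical rename: `tensorflow::OkStatus()` is a free factory
function that returns the same OK status value as the deprecated
`Status::OK()` static method, mirroring the `absl::OkStatus()` naming.
Call sites can therefore be rewritten one-for-one with no behavior
change. A minimal sketch of the before/after pattern, assuming only the
standard `tensorflow/core/platform/status.h` header (the
`DoWorkBefore`/`DoWorkAfter` functions are hypothetical, for
illustration only):

    #include "tensorflow/core/platform/status.h"

    namespace tensorflow {

    // Before this change: OK status spelled via the static method.
    Status DoWorkBefore() { return Status::OK(); }

    // After this change: same OK value, spelled via the factory
    // function that matches the absl::OkStatus() convention.
    Status DoWorkAfter() { return OkStatus(); }

    }  // namespace tensorflow
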
diff --git a/tensorflow/core/distributed_runtime/coordination/coordination_service.cc b/tensorflow/core/distributed_runtime/coordination/coordination_service.cc
index df76e30..2ccdf5b 100644
--- a/tensorflow/core/distributed_runtime/coordination/coordination_service.cc
+++ b/tensorflow/core/distributed_runtime/coordination/coordination_service.cc
@@ -277,7 +277,7 @@
 void CoordinationServiceStandaloneImpl::TaskState::SetConnected(
     uint64_t task_incarnation) {
   state_ = State::CONNECTED;
-  status_ = Status::OK();
+  status_ = OkStatus();
   task_incarnation_ = task_incarnation;
   mutex_lock l(last_heartbeat_mu_);
   last_heartbeat_us_ = Env::Default()->NowMicros();
@@ -288,7 +288,7 @@
   disconnect_grace_period_us_ =
       Env::Default()->NowMicros() + grace_period_duration_us;
   state_ = State::DISCONNECTED;
-  status_ = Status::OK();
+  status_ = OkStatus();
 }
 
 void CoordinationServiceStandaloneImpl::TaskState::SetError(
@@ -308,7 +308,7 @@
   }
   mutex_lock l(last_heartbeat_mu_);
   last_heartbeat_us_ = Env::Default()->NowMicros();
-  return Status::OK();
+  return OkStatus();
 }
 
 int64_t
@@ -391,7 +391,7 @@
             }
           }
           // Heartbeat check.
-          Status status = Status::OK();
+          Status status = OkStatus();
           {
             mutex_lock l(state_mu_);
             for (const auto& [task_name, task_state] : cluster_state_) {
@@ -608,7 +608,7 @@
   }
 
   LOG(INFO) << task_name << " has disconnected from coordination service.";
-  return Status::OK();
+  return OkStatus();
 }
 
 const CoordinationServiceDeviceInfo&
@@ -637,13 +637,13 @@
     }
   }
   PropagateError(task, /*is_reported_by_task=*/true);
-  return Status::OK();
+  return OkStatus();
 }
 
 Status CoordinationServiceStandaloneImpl::RecordHeartbeat(
     const CoordinatedTask& task, uint64_t incarnation) {
   const std::string& task_name = GetTaskName(task);
-  Status s = Status::OK();
+  Status s = OkStatus();
   {
     mutex_lock l(state_mu_);
     if (!cluster_state_.contains(task_name)) {
@@ -816,7 +816,7 @@
     }
     get_cb_.erase(iter);
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 void CoordinationServiceStandaloneImpl::GetKeyValueAsync(
@@ -892,7 +892,7 @@
   if (iter != kv_store_.end()) {
     kv_store_.erase(iter);
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 void CoordinationServiceStandaloneImpl::SetTaskError(
@@ -1026,7 +1026,7 @@
     --barrier->num_pending_tasks;
 
     if (barrier->num_pending_tasks == 0) {
-      PassBarrier(barrier_id, Status::OK(), barrier);
+      PassBarrier(barrier_id, OkStatus(), barrier);
       return;
     }
   }
@@ -1054,7 +1054,7 @@
       "Barrier (", barrier_id, ") is cancelled by task: ", GetTaskName(task))));
   PassBarrier(barrier_id, cancelled, barrier);
 
-  return Status::OK();
+  return OkStatus();
 }
 
 // Mark barrier as passed.
diff --git a/tensorflow/core/distributed_runtime/coordination/coordination_service_agent.cc b/tensorflow/core/distributed_runtime/coordination/coordination_service_agent.cc
index 268efd0..cfaad76 100644
--- a/tensorflow/core/distributed_runtime/coordination/coordination_service_agent.cc
+++ b/tensorflow/core/distributed_runtime/coordination/coordination_service_agent.cc
@@ -134,7 +134,7 @@
   };
   mutable mutex state_mu_;
   State state_ TF_GUARDED_BY(state_mu_) = State::UNINITIALIZED;
-  Status status_ TF_GUARDED_BY(state_mu_) = Status::OK();
+  Status status_ TF_GUARDED_BY(state_mu_) = OkStatus();
   // Note: this set grows without bounds. For now, this is okay as most users
   // require < 100 barriers. If there is a use case that requires many barriers,
   // consider using a monotonic sequence number to track instead.
@@ -218,7 +218,7 @@
   }
   error_fn_ = error_fn;
   state_ = State::DISCONNECTED;
-  return Status::OK();
+  return OkStatus();
 }
 
 bool CoordinationServiceAgentImpl::IsInitialized() {
@@ -319,7 +319,7 @@
           }
         }
       }));
-  return Status::OK();
+  return OkStatus();
 }
 
 Status CoordinationServiceAgentImpl::WaitForAllTasks(
@@ -344,7 +344,7 @@
     return status;
   }
   cluster_devices_.MergeFrom(response.cluster_device_info());
-  return Status::OK();
+  return OkStatus();
 }
 
 const CoordinationServiceDeviceInfo&
@@ -398,11 +398,11 @@
     n.Notify();
   });
   n.WaitForNotification();
-  return Status::OK();
+  return OkStatus();
 }
 
 Status CoordinationServiceAgentImpl::Shutdown() {
-  Status status = Status::OK();
+  Status status = OkStatus();
   bool is_connected = false;
   {
     mutex_lock l(state_mu_);
@@ -631,7 +631,7 @@
     n.Notify();
   });
   n.WaitForNotification();
-  return Status::OK();
+  return OkStatus();
 }
 
 Status CoordinationServiceAgentImpl::UpdateKeyValue(const std::string& key,
@@ -747,7 +747,7 @@
   mutex_lock l(state_mu_);
   switch (state_) {
     case State::RUNNING:
-      return Status::OK();
+      return OkStatus();
 
     case State::UNINITIALIZED:
       return MakeCoordinationError(errors::FailedPrecondition(
diff --git a/tensorflow/core/distributed_runtime/coordination/coordination_service_agent_test.cc b/tensorflow/core/distributed_runtime/coordination/coordination_service_agent_test.cc
index d6c96a2..43b23bd 100644
--- a/tensorflow/core/distributed_runtime/coordination/coordination_service_agent_test.cc
+++ b/tensorflow/core/distributed_runtime/coordination/coordination_service_agent_test.cc
@@ -108,15 +108,15 @@
  public:
   void SetUp() override {
     ON_CALL(*client_, RegisterTaskAsync(_, _, _, _))
-        .WillByDefault(InvokeArgument<3>(Status::OK()));
+        .WillByDefault(InvokeArgument<3>(OkStatus()));
     ON_CALL(*client_, ShutdownTaskAsync(_, _, _, _))
-        .WillByDefault(InvokeArgument<3>(Status::OK()));
+        .WillByDefault(InvokeArgument<3>(OkStatus()));
     ON_CALL(*client_, ReportErrorToServiceAsync(_, _, _))
-        .WillByDefault(InvokeArgument<2>(Status::OK()));
+        .WillByDefault(InvokeArgument<2>(OkStatus()));
     ON_CALL(*client_, ResetTaskAsync(_, _, _))
-        .WillByDefault(InvokeArgument<2>(Status::OK()));
+        .WillByDefault(InvokeArgument<2>(OkStatus()));
     ON_CALL(*client_, BarrierAsync(_, _, _))
-        .WillByDefault(InvokeArgument<2>(Status::OK()));
+        .WillByDefault(InvokeArgument<2>(OkStatus()));
   }
 
   // Should be called after mocking service responses, before testing the agent.
@@ -155,7 +155,7 @@
   kv->set_value(test_value);
   ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
       .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
-                           InvokeArgument<3>(Status::OK())));
+                           InvokeArgument<3>(OkStatus())));
   // Initialize coordination agent.
   InitializeAgent();
 
@@ -175,7 +175,7 @@
   kv->set_value(test_value);
   ON_CALL(*GetClient(), GetKeyValueAsync(_, _, _, _))
       .WillByDefault(DoAll(SetArgPointee<2>(mocked_response),
-                           InvokeArgument<3>(Status::OK())));
+                           InvokeArgument<3>(OkStatus())));
   // Initialize coordination agent.
   InitializeAgent();
 
@@ -228,7 +228,7 @@
   auto kv = owned_response->mutable_kv();
   kv->set_key(test_key);
   kv->set_value(test_value);
-  owned_done(Status::OK());
+  owned_done(OkStatus());
   // No explicit test, but used to verify there is no stack-use-after-return
   // or other memory-related errors.
 }
@@ -260,7 +260,7 @@
                   auto kv = owned_response->mutable_kv();
                   kv->set_key(test_key);
                   kv->set_value(test_value);
-                  owned_done(Status::OK());
+                  owned_done(OkStatus());
                 }));
           }));
   InitializeAgent();
@@ -306,7 +306,7 @@
   kv->set_value(test_value);
   ON_CALL(*GetClient(), TryGetKeyValueAsync(_, _, _))
       .WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
-                           InvokeArgument<2>(Status::OK())));
+                           InvokeArgument<2>(OkStatus())));
 
   // Initialize coordination agent.
   InitializeAgent();
@@ -326,7 +326,7 @@
   *mocked_response.mutable_kv() = {test_values.begin(), test_values.end()};
   ON_CALL(*GetClient(), GetKeyValueDirAsync(_, _, _))
       .WillByDefault(DoAll(SetArgPointee<1>(mocked_response),
-                           InvokeArgument<2>(Status::OK())));
+                           InvokeArgument<2>(OkStatus())));
   // Initialize coordination agent.
   InitializeAgent();
 
@@ -386,7 +386,7 @@
   // Mock reset error failing for the first time.
   EXPECT_CALL(*GetClient(), ResetTaskAsync(_, _, _))
       .WillOnce(InvokeArgument<2>(errors::Internal("Reset error")))
-      .WillOnce(InvokeArgument<2>(Status::OK()));
+      .WillOnce(InvokeArgument<2>(OkStatus()));
   // Connect coordination agent and set it to error.
   InitializeAgent();
   TF_EXPECT_OK(agent_->Connect());
diff --git a/tensorflow/core/distributed_runtime/coordination/coordination_service_rpc_handler.cc b/tensorflow/core/distributed_runtime/coordination/coordination_service_rpc_handler.cc
index e5c6936..72b5da0 100644
--- a/tensorflow/core/distributed_runtime/coordination/coordination_service_rpc_handler.cc
+++ b/tensorflow/core/distributed_runtime/coordination/coordination_service_rpc_handler.cc
@@ -71,7 +71,7 @@
     return;
   }
   response->set_leader_incarnation(leader_incarnation);
-  done(Status::OK());
+  done(OkStatus());
 }
 
 void CoordinationServiceRpcHandler::WaitForAllTasksAsync(
@@ -139,7 +139,7 @@
                                ": ", request->error_message()));
   error = MakeCoordinationError(error, error_payload);
   agent_->SetError(error);
-  done(Status::OK());
+  done(OkStatus());
 }
 
 void CoordinationServiceRpcHandler::ReportErrorToServiceAsync(
@@ -229,7 +229,7 @@
       service->GetKeyValueDir(request->directory_key());
   *response->mutable_kv() = {std::make_move_iterator(results.begin()),
                              std::make_move_iterator(results.end())};
-  done(Status::OK());
+  done(OkStatus());
 }
 
 void CoordinationServiceRpcHandler::DeleteKeyValueAsync(
diff --git a/tensorflow/core/distributed_runtime/coordination/coordination_service_test.cc b/tensorflow/core/distributed_runtime/coordination/coordination_service_test.cc
index 4cac113..195fb39 100644
--- a/tensorflow/core/distributed_runtime/coordination/coordination_service_test.cc
+++ b/tensorflow/core/distributed_runtime/coordination/coordination_service_test.cc
@@ -69,7 +69,7 @@
   void RegisterTaskAsync(CallOptions* opts, const RegisterTaskRequest* request,
                          RegisterTaskResponse* response,
                          StatusCallback done) override {
-    done(Status::OK());
+    done(OkStatus());
   }
 
   void ReportErrorToTaskAsync(CallOptions* call_opts,
@@ -79,7 +79,7 @@
     mutex_lock l(mu_);
     status_ = Status(static_cast<errors::Code>(request->error_code()),
                      request->error_message());
-    done(Status::OK());
+    done(OkStatus());
   }
 
 #define UNIMPLEMENTED(method)                                         \
@@ -624,7 +624,7 @@
   CoordinatedTask task_2;
   task_2.set_job_name("worker");
   task_2.set_task_id(2);
-  Status status = Status::OK();
+  Status status = OkStatus();
   auto client_cache = std::make_unique<TestCoordinationClientCache>();
   std::unique_ptr<CoordinationServiceInterface> coord_service =
       CoordinationServiceInterface::EnableCoordinationService(
@@ -682,7 +682,7 @@
   CoordinatedTask task_2;
   task_2.set_job_name("worker");
   task_2.set_task_id(2);
-  Status status = Status::OK();
+  Status status = OkStatus();
   auto client_cache = std::make_unique<TestCoordinationClientCache>();
   std::unique_ptr<CoordinationServiceInterface> coord_service =
       CoordinationServiceInterface::EnableCoordinationService(
@@ -746,7 +746,7 @@
   CoordinatedTask task_1;
   task_1.set_job_name("worker");
   task_1.set_task_id(1);
-  Status status = Status::OK();
+  Status status = OkStatus();
   auto client_cache = std::make_unique<TestCoordinationClientCache>();
   std::unique_ptr<CoordinationServiceInterface> coord_service =
       CoordinationServiceInterface::EnableCoordinationService(
diff --git a/tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc b/tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc
index d6a6e16..3551859 100644
--- a/tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc
+++ b/tensorflow/core/distributed_runtime/eager/cluster_function_library_runtime.cc
@@ -250,7 +250,7 @@
             return;
           }
         }
-        done(Status::OK());
+        done(OkStatus());
       });
 }
 
diff --git a/tensorflow/core/distributed_runtime/eager/destroy_tensor_handle_node.h b/tensorflow/core/distributed_runtime/eager/destroy_tensor_handle_node.h
index 0df6286..c52d2d3 100644
--- a/tensorflow/core/distributed_runtime/eager/destroy_tensor_handle_node.h
+++ b/tensorflow/core/distributed_runtime/eager/destroy_tensor_handle_node.h
@@ -59,7 +59,7 @@
                    "remote tensors handles: "
                 << s.ToString();
           }
-          done(Status::OK());
+          done(OkStatus());
           delete response;
         });
   }
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
index 52f9e78..9e44c70 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl.cc
@@ -97,7 +97,7 @@
     }
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GetEagerOperationAndNumRetvals(const Operation& operation,
@@ -157,7 +157,7 @@
   const tensorflow::Tensor* t = nullptr;
   TF_RETURN_IF_ERROR(handle->Tensor(&t));
   t->AsProtoTensorContent(proto);
-  return Status::OK();
+  return OkStatus();
 }
 
 Status TensorHandleShape(TensorHandle* handle, TensorShapeProto* proto) {
@@ -174,7 +174,7 @@
     shape.AsProto(proto);
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status AddOpRetvalsToResponse(
@@ -350,7 +350,7 @@
                       new ServerContext(ctx, request->keep_alive_secs(), env_));
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::UpdateContext(const UpdateContextRequest* request,
@@ -382,7 +382,7 @@
     ctx->IncrementContextViewId();
     VLOG(1) << "Processing simplified UpdateContextRequest on "
             << ctx->HostCPU()->name();
-    return Status::OK();
+    return OkStatus();
   }
 
   auto session_name =
@@ -430,7 +430,7 @@
     *response->add_device_attributes() = da;
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::CreateMasterContext(
@@ -448,7 +448,7 @@
       ServerContext::CreateMasterContext(context, env_);
   mutex_lock l(contexts_mu_);
   contexts_.emplace(context_id, server_context);
-  return Status::OK();
+  return OkStatus();
 }
 
 void EagerServiceImpl::RunComponentFunction(
@@ -629,7 +629,7 @@
     }
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::WaitQueueDone(const WaitQueueDoneRequest* request,
@@ -654,7 +654,7 @@
 
   tensorflow::EagerContext* ctx = context->Context();
   response->set_context_view_id(ctx->GetContextViewId());
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::CloseContext(const CloseContextRequest* request,
@@ -664,7 +664,7 @@
   ServerContext* context = nullptr;
   if (!GetServerContext(request->context_id(), &context).ok()) {
     // Swallow the error here.
-    return Status::OK();
+    return OkStatus();
   }
   core::ScopedUnref context_unref(context);
 
@@ -674,7 +674,7 @@
               << request->context_view_id() << "  for context_id "
               << request->context_id() << ". The current context_view_id is "
               << context->Context()->GetContextViewId() << ".";
-    return Status::OK();
+    return OkStatus();
   }
 
   mutex_lock l(contexts_mu_);
@@ -685,7 +685,7 @@
   // we are releasing it from the map.
   context->Unref();
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::RegisterFunction(
@@ -700,7 +700,7 @@
 Status EagerServiceImpl::CleanupFunction(
     const CleanupFunctionOp& cleanup_function) {
   env_->rendezvous_mgr->Cleanup(cleanup_function.step_id());
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::SendTensor(const SendTensorOp& send_tensor,
@@ -727,7 +727,7 @@
 
   eager_context->RemoteMgr()->AddOperationOutputs(tensors, send_tensor.op_id());
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status EagerServiceImpl::SendPackedHandle(
@@ -773,7 +773,7 @@
 
   eager_context->RemoteMgr()->AddOperationOutputs({packed_handle},
                                                   send_packed_handle.op_id());
-  return Status::OK();
+  return OkStatus();
 }
 
 tensorflow::Status EagerServiceImpl::GetServerContext(
@@ -793,7 +793,7 @@
 
   (*server_context)->RecordAccess();
 
-  return Status::OK();
+  return OkStatus();
 }
 
 }  // namespace eager
diff --git a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
index 44797a2..a3979f0 100644
--- a/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
+++ b/tensorflow/core/distributed_runtime/eager/eager_service_impl_test.cc
@@ -52,7 +52,7 @@
     TF_RETURN_IF_ERROR(GetServerContext(context_id, &context));
     core::ScopedUnref context_unref(context);
     *ctx = context->Context();
-    return Status::OK();
+    return OkStatus();
   }
   Status GetTensorHandle(const uint64 context_id,
                          const RemoteTensorHandleInternal& remote_handle,
@@ -120,7 +120,7 @@
                    core::RefCountPtr<EagerClient>* client) override {
     client->reset(client_.get());
     client_->Ref();
-    return Status::OK();
+    return OkStatus();
   }
 
  private:
@@ -131,7 +131,7 @@
   Status GetEagerClientCache(
       std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
     eager_client_cache->reset(new DummyEagerClientCache);
-    return Status::OK();
+    return OkStatus();
   }
 
   void ListWorkers(std::vector<string>* workers) const override {
@@ -149,7 +149,7 @@
             [](const ServerDef& server_def,
                WorkerCacheInterface** worker_cache) {
               *worker_cache = new FakeCache;
-              return Status::OK();
+              return OkStatus();
             })) {
     worker_env_.env = Env::Default();
 
@@ -786,7 +786,7 @@
                                    const DeviceMgr* device_mgr,
                                    Rendezvous** r) {
           *r = worker_env_.rendezvous_mgr->Find(step_id);
-          return Status::OK();
+          return OkStatus();
         }});
   }
 
@@ -876,7 +876,7 @@
       std::move(tensor_args),
       [&inputs](const int i, RemoteTensorHandle* handle) -> Status {
         *handle = inputs.at(i);
-        return Status::OK();
+        return OkStatus();
       });
   eager_pflr_->Run(opts, handle, args, &outputs,
                    [&status, &done](const Status& s) {
@@ -979,7 +979,7 @@
       std::move(input_tensors),
       [&remote_handles](const int index, RemoteTensorHandle* handle) -> Status {
         *handle = remote_handles.at(index);
-        return Status::OK();
+        return OkStatus();
       });
   std::vector<FunctionRet> outputs;
 
@@ -1033,7 +1033,7 @@
       std::move(input_tensors),
       [&remote_handles](const int index, RemoteTensorHandle* handle) -> Status {
         *handle = remote_handles.at(index);
-        return Status::OK();
+        return OkStatus();
       });
   std::vector<FunctionRet> outputs;
 
diff --git a/tensorflow/core/distributed_runtime/eager/remote_copy_node.cc b/tensorflow/core/distributed_runtime/eager/remote_copy_node.cc
index 0288a11..b975f32 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_copy_node.cc
+++ b/tensorflow/core/distributed_runtime/eager/remote_copy_node.cc
@@ -214,7 +214,7 @@
           "Expect to receive a Tensor but got a TensorShape.");
     }
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 void RemoteCopyNode::RunRemoteRecv(EagerOperation* op, StatusCallback done) {
@@ -356,7 +356,7 @@
       return errors::InvalidArgument("Nested packed handles are not supported");
     }
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 void RemoteCopyNode::StartSendPackedHandle(StatusCallback done) {
@@ -477,7 +477,7 @@
 
 Status RemoteCopyNode::Prepare() {
   TF_RETURN_IF_ERROR(captured_state_->dst()->CopyInferenceShape(src_));
-  return Status::OK();
+  return OkStatus();
 }
 
 void RemoteCopyNode::RunAsync(StatusCallback done) {
diff --git a/tensorflow/core/distributed_runtime/eager/remote_mgr.cc b/tensorflow/core/distributed_runtime/eager/remote_mgr.cc
index 65c4537..10a2a00 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_mgr.cc
+++ b/tensorflow/core/distributed_runtime/eager/remote_mgr.cc
@@ -73,7 +73,7 @@
 
   *handle = iter->second;
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteMgr::GetTensorHandle(
@@ -102,7 +102,7 @@
 
   *handle = iter->second;
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteMgr::GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
@@ -118,7 +118,7 @@
         "Found two different tensor handles with the same op_id:", *op_id,
         " and output_num:", *output_num));
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteMgr::DeleteTensorHandle(
@@ -129,7 +129,7 @@
     if (iter != remote_tensor_handle_map_.end()) {
       iter->second->Unref();
       remote_tensor_handle_map_.erase(iter);
-      return Status::OK();
+      return OkStatus();
     }
   }
   {
@@ -137,7 +137,7 @@
     auto iter = mirrored_resource_shape_map_.find(remote_handle);
     if (iter != mirrored_resource_shape_map_.end()) {
       mirrored_resource_shape_map_.erase(iter);
-      return Status::OK();
+      return OkStatus();
     }
   }
   return WithErrorSourcePayload(errors::InvalidArgument(
@@ -173,7 +173,7 @@
       dtype_and_shape.shape.AsProto(dtype_and_shape_proto->mutable_shape());
     }
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteMgr::DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
@@ -211,7 +211,7 @@
     (*out)->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 EagerExecutor& RemoteMgr::GetOrCreateExecutorForStream(uint64 stream_id) {
diff --git a/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.cc b/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.cc
index 110a354..c2c54c9 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.cc
+++ b/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.cc
@@ -127,7 +127,7 @@
   tf_shared_lock l(mu_);
   *shape = shape_;
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteTensorHandleData::NumDims(int* num_dims) const {
@@ -136,7 +136,7 @@
   tf_shared_lock l(mu_);
   *num_dims = shape_.dims();
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteTensorHandleData::Dim(int dim_index, int64_t* dim) const {
@@ -145,7 +145,7 @@
   tf_shared_lock l(mu_);
   *dim = shape_.dim_size(dim_index);
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteTensorHandleData::NumElements(int64_t* num_elements) const {
@@ -154,7 +154,7 @@
   tf_shared_lock l(mu_);
   *num_elements = shape_.num_elements();
 
-  return Status::OK();
+  return OkStatus();
 }
 
 bool RemoteTensorHandleData::IsReady() const {
@@ -192,10 +192,10 @@
   if (!remote_task.empty()) {
     remote_task_ = remote_task;
   }
-  is_poisoned_ = Status::OK();
+  is_poisoned_ = OkStatus();
   is_ready_ = true;
 
-  return Status::OK();
+  return OkStatus();
 }
 
 string RemoteTensorHandleData::DebugString() const {
@@ -211,7 +211,7 @@
   }
   *op_id = op_id_;
   *output_num = output_num_;
-  return Status::OK();
+  return OkStatus();
 }
 
 Status RemoteTensorHandleData::WaitReady(const char* caller) const {
diff --git a/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h b/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h
index 97c6723..5c57a99 100644
--- a/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h
+++ b/tensorflow/core/distributed_runtime/eager/remote_tensor_handle_data.h
@@ -46,7 +46,7 @@
   Status NumDims(int* num_dims) const;
   Status Dim(int dim_index, int64_t* dim) const;
   Status NumElements(int64_t* num_elements) const;
-  Status Unprotect() { return Status::OK(); }
+  Status Unprotect() { return OkStatus(); }
 
   bool IsReady() const;
   Status WaitReady(const char* caller) const;
diff --git a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
index 7f194e3..afd43a7 100644
--- a/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
+++ b/tensorflow/core/distributed_runtime/rpc/eager/grpc_eager_client.cc
@@ -295,7 +295,7 @@
 
     it->second->Ref();
     client->reset(it->second.get());
-    return Status::OK();
+    return OkStatus();
   }
 
  private:
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc b/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
index 594c46a..da6322e 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_channel.cc
@@ -49,7 +49,7 @@
 Status ValidateHostPortPair(const string& host_port) {
   string bns_prefix = "/bns/";
   if (host_port.substr(0, bns_prefix.length()) == bns_prefix) {
-    return Status::OK();
+    return OkStatus();
   }
   uint32 port;
   auto colon_index = host_port.find_last_of(':');
@@ -58,7 +58,7 @@
     return errors::InvalidArgument("Could not interpret \"", host_port,
                                    "\" as a host-port pair.");
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 ::grpc::ChannelArguments* CreateDefaultChannelArguments() {
@@ -146,7 +146,7 @@
   ::grpc::ChannelArguments args = GetChannelArguments(rpc_options);
   *channel_pointer = ::grpc::CreateCustomChannel(
       "dns:///" + target, ::grpc::InsecureChannelCredentials(), args);
-  return Status::OK();
+  return OkStatus();
 }
 
 ChannelCreationFunction ConvertToChannelCreationFunction(
@@ -182,7 +182,7 @@
     TF_RETURN_IF_ERROR(ValidateHostPortPair(id_host_port.second));
   }
   host_ports_jobs_.emplace_back(job_id, host_ports);
-  return Status::OK();
+  return OkStatus();
 }
 
 namespace {
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc
index 8e7d80c..40dd07a 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_master_service.cc
@@ -206,7 +206,7 @@
           if (call->request.store_errors_in_response_body() && !status.ok()) {
             call->response.set_status_code(status.code());
             call->response.set_status_error_message(status.error_message());
-            call->SendResponse(ToGrpcStatus(Status::OK()));
+            call->SendResponse(ToGrpcStatus(OkStatus()));
           } else {
             call->SendResponse(ToGrpcStatus(status));
           }
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
index 099b40d..f6c550c 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
@@ -171,7 +171,7 @@
                             "\" was not defined in cluster");
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::Init(const GrpcServerOptions& opts) {
@@ -338,7 +338,7 @@
   LocalMaster::Register(target(), master_impl_.get(),
                         config.operation_timeout_in_ms());
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::ParseChannelSpec(const WorkerCacheFactoryOptions& options,
@@ -361,7 +361,7 @@
     }
     TF_RETURN_IF_ERROR(channel_spec->AddHostPortsJob(job.name(), host_ports));
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::WorkerCacheFactory(const WorkerCacheFactoryOptions& options,
@@ -403,7 +403,7 @@
   }
   *worker_cache = NewGrpcWorkerCacheWithLocalWorker(
       channel_cache, grpc_worker_env(), worker_impl(), name_prefix);
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::Start() {
@@ -436,11 +436,11 @@
 
       state_ = STARTED;
       LOG(INFO) << "Started server with target: " << target();
-      return Status::OK();
+      return OkStatus();
     }
     case STARTED:
       LOG(INFO) << "Server already started (target: " << target() << ")";
-      return Status::OK();
+      return OkStatus();
     case STOPPED:
       return errors::FailedPrecondition("Server has stopped.");
     default:
@@ -483,7 +483,7 @@
   master_env_.worker_cache = worker_cache;
   master_env_.collective_executor_mgr =
       worker_env_.collective_executor_mgr.get();
-  return Status::OK();
+  return OkStatus();
 }
 
 // TODO(haoyuzhang): Remove this method once we have a mechanism to directly set
@@ -493,7 +493,7 @@
   auto* coord_service =
       static_cast<GrpcCoordinationServiceImpl*>(coordination_service_);
   coord_service->SetCoordinationServiceAgentInstance(agent);
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::StopCoordinationService() {
@@ -506,7 +506,7 @@
   worker_env()->session_mgr->TeardownCoordinationServiceAgent();
   coordination_service_->Shutdown();
   worker_env()->session_mgr->TeardownCoordinationService();
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcServer::Stop() {
@@ -514,13 +514,13 @@
   switch (state_) {
     case NEW:
       state_ = STOPPED;
-      return Status::OK();
+      return OkStatus();
     case STARTED:
       return errors::Unimplemented(
           "Clean shutdown is not currently implemented");
     case STOPPED:
       LOG(INFO) << "Server already stopped (target: " << target() << ")";
-      return Status::OK();
+      return OkStatus();
     default:
       LOG(FATAL);
   }
@@ -532,7 +532,7 @@
     case NEW:
       // Prevent the server from being started subsequently.
       state_ = STOPPED;
-      return Status::OK();
+      return OkStatus();
     case STARTED:
     case STOPPED:
       master_thread_.reset();
@@ -541,7 +541,7 @@
       for (auto& thread : extra_service_threads_) {
         thread.reset();
       }
-      return Status::OK();
+      return OkStatus();
     default:
       LOG(FATAL);
   }
@@ -581,7 +581,7 @@
     return s;
   }
   *out_server = std::move(ret);
-  return Status::OK();
+  return OkStatus();
 }
 
 /* static */
@@ -599,7 +599,7 @@
     return s;
   }
   out_server->reset(dynamic_cast<GrpcServer*>(server.release()));
-  return Status::OK();
+  return OkStatus();
 }
 
 namespace {
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_session.cc b/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
index 9beb640..508dc64 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_session.cc
@@ -63,7 +63,7 @@
   }
   session->SetRemoteMaster(std::move(master));
   *out_session = std::move(session);
-  return Status::OK();
+  return OkStatus();
 }
 
 namespace {
@@ -109,7 +109,7 @@
     return errors::InvalidArgument("A session is not created yet....");
   }
   *out_handle = handle_;
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcSession::CreateImpl(CallOptions* call_options, GraphDef graph) {
@@ -281,7 +281,7 @@
     run_metadata->Swap(resp->mutable_metadata());
   }
 
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcSession::Run(const RunOptions& run_options,
@@ -335,7 +335,7 @@
   call_options.SetTimeout(options_.config.operation_timeout_in_ms());
   TF_RETURN_IF_ERROR(master_->PartialRunSetup(&call_options, &req, &resp));
   *handle = resp.partial_run_handle();
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcSession::PRun(const string& handle,
@@ -353,7 +353,7 @@
   {
     mutex_lock l(mu_);
     if (handle_.empty()) {
-      return Status::OK();
+      return OkStatus();
     }
     req.set_session_handle(handle_);
     handle_.clear();
@@ -398,7 +398,7 @@
   for (const auto& device_attr : resp.remote_device()) {
     response->emplace_back(device_attr);
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 void GrpcSession::SetRemoteMaster(std::unique_ptr<MasterInterface> master) {
@@ -435,7 +435,7 @@
   call_options.SetTimeout(options_.config.operation_timeout_in_ms());
   TF_RETURN_IF_ERROR(master_->MakeCallable(&call_options, &req, &resp));
   *out_handle = resp.handle();
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcSession::RunCallable(CallableHandle handle,
@@ -462,7 +462,7 @@
     }
     fetch_tensors->push_back(std::move(fetch_tensor));
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 Status GrpcSession::ReleaseCallable(CallableHandle handle) {
@@ -486,7 +486,7 @@
     std::unique_ptr<GrpcSession> session;
     TF_RETURN_IF_ERROR(GrpcSession::Create(options, &session));
     *out_session = session.release();
-    return Status::OK();
+    return OkStatus();
   }
 
   // Invokes the session specific static method to reset containers.
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_state.h b/tensorflow/core/distributed_runtime/rpc/grpc_state.h
index 3ccf374..b1770d2 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_state.h
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_state.h
@@ -527,7 +527,7 @@
     e = &exchanges_.GetFront();
     mu_.unlock();
 
-    e->Complete(Status::OK());
+    e->Complete(OkStatus());
 
     {
       mutex_lock l(mu_);
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_tensorflow_server.cc b/tensorflow/core/distributed_runtime/rpc/grpc_tensorflow_server.cc
index 00b32d3..807d7ec 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_tensorflow_server.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_tensorflow_server.cc
@@ -78,7 +78,7 @@
                                    " is invalid (job \"", options->job_name(),
                                    "\" contains ", my_num_tasks, " tasks");
   }
-  return Status::OK();
+  return OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_testlib.cc b/tensorflow/core/distributed_runtime/rpc/grpc_testlib.cc
index 7ffff94..ff21947 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_testlib.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_testlib.cc
@@ -83,7 +83,7 @@
   TF_RETURN_IF_ERROR(session->ListDevices(&ret->devices_));
 
   *out_cluster = std::move(ret);
-  return Status::OK();
+  return OkStatus();
 }
 
 TestCluster::~TestCluster() {
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_testlib_server.cc b/tensorflow/core/distributed_runtime/rpc/grpc_testlib_server.cc
index 9a6de2c..27a0596 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_testlib_server.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_testlib_server.cc
@@ -69,7 +69,7 @@
   ConfigProto* config = options->mutable_default_session_config();
   (*config->mutable_device_count())["CPU"] = num_cpus;
   (*config->mutable_device_count())["GPU"] = num_gpus;
-  return Status::OK();
+  return OkStatus();
 }
 
 }  // namespace
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_util.h b/tensorflow/core/distributed_runtime/rpc/grpc_util.h
index 5357c13..49f592e 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_util.h
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_util.h
@@ -108,7 +108,7 @@
 
 inline ::tensorflow::Status FromGrpcStatus(const ::grpc::Status& s) {
   if (s.ok()) {
-    return Status::OK();
+    return OkStatus();
   } else {
     ::tensorflow::Status converted;
     // Convert "UNKNOWN" stream removed errors into unavailable, to allow
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc
index d89ecda..45d2015 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.cc
@@ -78,14 +78,14 @@
   Status GetEagerClientCache(
       std::unique_ptr<eager::EagerClientCache>* eager_client_cache) override {
     eager_client_cache->reset(eager::NewGrpcEagerClientCache(channel_cache_));
-    return Status::OK();
+    return OkStatus();
   }
 
   Status GetCoordinationClientCache(std::unique_ptr<CoordinationClientCache>*
                                         coordination_client_cache) override {
     coordination_client_cache->reset(
         NewGrpcCoordinationClientCache(channel_cache_));
-    return Status::OK();
+    return OkStatus();
   }
 
   void SetLogging(bool v) override { logger_.SetLogging(v); }
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
index b650e3a..2648a2a 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_worker_service.cc
@@ -730,7 +730,7 @@
       }
     }
   }
-  done(Status::OK());
+  done(OkStatus());
 }
 
 void GrpcWorker::CleanupGraphAsync(const CleanupGraphRequest* request,
diff --git a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
index 7c1fc38..5f89302 100644
--- a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
+++ b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr.cc
@@ -84,7 +84,7 @@
     resp_.Clear();
     {
       mutex_lock l(mu_);
-      status_ = Status::OK();
+      status_ = OkStatus();
     }
     done_ = nullptr;
   }
diff --git a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc
index e275aea..543f752 100644
--- a/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc
+++ b/tensorflow/core/distributed_runtime/rpc/rpc_rendezvous_mgr_test.cc
@@ -63,7 +63,7 @@
       // RPC call objects.
       const int64_t t_us = random::New64() % 100 * 1000;
       Env::Default()->SleepForMicroseconds(t_us);
-      done(Status::OK());
+      done(OkStatus());
     });
   }
 };
@@ -103,7 +103,7 @@
   class FakeDevice : public Device {
    public:
     explicit FakeDevice(const DeviceAttributes& attr) : Device(nullptr, attr) {}
-    Status Sync() override { return Status::OK(); }
+    Status Sync() override { return OkStatus(); }
     Allocator* GetAllocator(AllocatorAttributes) override { return nullptr; }
   };
   DeviceAttributes attr;
@@ -326,7 +326,7 @@
     int num_requests = 10000;
     Tensor val(DT_STRING);
     mutex mu_;
-    Status status = Status::OK();
+    Status status = OkStatus();
     BlockingCounter counter(num_requests);
 
     for (int i = 0; i < num_requests; i++) {