Roll back a broken change that added extra-service support to GrpcServer.
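
This reverts the addition of the ExtraServices() hook to GrpcServer. The
rolled-back code let subclasses return a map of extra AsyncServiceInterface
instances, took ownership of the raw pointers, ran each service's
HandleRPCsLoop() on its own thread alongside the master/worker/eager
services, and tore the services and their threads down with the server. The
VLOG(3) dump of the ServerDef that came in with the same change is removed
as well.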

PiperOrigin-RevId: 372639951
Change-Id: Iec845d729e2405b98f2320618ad2e500e5c7be6b
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
index ac7712e..3e076e4 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.cc
@@ -102,10 +102,6 @@
   delete worker_service_;
   delete eager_service_;
 
-  for (auto& [_, service] : extra_services_) {
-    delete service;
-  }
-
   // TODO(mrry): Refactor the *Env classes so that it is less fiddly
   // to destroy them.
 
@@ -183,7 +179,6 @@
   TF_RETURN_IF_ERROR(GetHostAndPort(server_def_, &host_name_, &requested_port));
 
   SessionOptions sess_opts;
-  VLOG(3) << "Grpc Server Init Definition: " << server_def_.DebugString();
   ConfigProto config = server_def_.default_session_config();
   sess_opts.config = config;
 
@@ -258,9 +253,6 @@
   profiler_service_ = profiler::CreateProfilerService();
   builder.RegisterService(profiler_service_.get());
 
-  // Add any extra services to be started.
-  extra_services_ = ExtraServices();
-
   // extra service:
   if (opts.service_func != nullptr) {
     opts.service_func(&worker_env_, &builder);
@@ -419,16 +411,6 @@
       eager_thread_.reset(
           env_->StartThread(ThreadOptions(), "TF_eager_service",
                             [this] { eager_service_->HandleRPCsLoop(); }));
-
-      for (const auto& [service_name, service] : extra_services_) {
-        std::unique_ptr<Thread> extra_service_thread;
-        extra_service_thread.reset(env_->StartThread(
-            ThreadOptions(), service_name,
-            [service = service] { service->HandleRPCsLoop(); }));
-        extra_service_threads_.push_back(std::move(extra_service_thread));
-        VLOG(3) << "Started extra service: " << service_name;
-      }
-
       state_ = STARTED;
       LOG(INFO) << "Started server with target: " << target();
       return Status::OK();
@@ -516,9 +498,6 @@
       master_thread_.reset();
       worker_thread_.reset();
       eager_thread_.reset();
-      for (auto& thread : extra_service_threads_) {
-        thread.reset();
-      }
       return Status::OK();
     default:
       LOG(FATAL);
diff --git a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
index a3bd633..d4087e7 100644
--- a/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
+++ b/tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h
@@ -128,18 +128,6 @@
   virtual Status WorkerCacheFactory(const WorkerCacheFactoryOptions& options,
                                     WorkerCacheInterface** worker_cache);
 
-  // Override to return extra services to be brought up and managed along with
-  // the standard {master, worker, eager} services. The map key is an arbitrary
-  // string and the value is a pointer to the service to be brought up.
-  // Ownership of the pointer is transferred to GrpcServer after this call
-  // returns, and the service will be destroyed during the destruction of
-  // GrpcServer. Each service will have its HandleRPCsLoop called in a separate
-  // thread. An example usage would be to add an RDMA-based partial worker
-  // service to offload tensor and data buffer transfers.
-  virtual std::map<std::string, AsyncServiceInterface*> ExtraServices() {
-    return {};
-  }
-
   // Parses a WorkerCacheFactoryOptions into a GrpcChannelSpec.
   Status ParseChannelSpec(const WorkerCacheFactoryOptions& options,
                           GrpcChannelSpec* channel_spec);
@@ -185,10 +173,6 @@
   AsyncServiceInterface* master_service_ = nullptr;
   std::unique_ptr<Thread> master_thread_ TF_GUARDED_BY(mu_);
 
-  std::map<std::string, AsyncServiceInterface*> extra_services_;
-  std::vector<std::unique_ptr<Thread>> extra_service_threads_
-      TF_GUARDED_BY(mu_);
-
   // Implementation of a TensorFlow worker, and RPC polling thread.
   WorkerEnv worker_env_;
   std::unique_ptr<const DeviceMgr> owned_device_manager_;
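
For reference, the rolled-back ExtraServices() hook was meant to be used
roughly as in the sketch below. This is a minimal illustration reconstructed
from the deleted header comment, not code from this change: RdmaGrpcServer,
RdmaWorkerService, and the "TF_rdma_service" key are hypothetical names, and
it assumes AsyncServiceInterface exposes HandleRPCsLoop() and Shutdown().

#include <map>
#include <string>

#include "tensorflow/core/distributed_runtime/rpc/async_service_interface.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_server_lib.h"

namespace tensorflow {

// Hypothetical service, named for illustration only; assumes
// AsyncServiceInterface requires HandleRPCsLoop() and Shutdown() overrides.
class RdmaWorkerService : public AsyncServiceInterface {
 public:
  void HandleRPCsLoop() override {
    // Drain this service's RPC completion queue until Shutdown() is called.
  }
  void Shutdown() override {}
};

// Hypothetical GrpcServer subclass using the removed hook. A real subclass
// would also need to forward GrpcServer's constructor or Create() factory.
class RdmaGrpcServer : public GrpcServer {
 protected:
  // Under the rolled-back change, GrpcServer took ownership of these raw
  // pointers, ran each service's HandleRPCsLoop() on its own thread, and
  // deleted the services in its destructor.
  std::map<std::string, AsyncServiceInterface*> ExtraServices() override {
    return {{"TF_rdma_service", new RdmaWorkerService}};
  }
};

}  // namespace tensorflow

The raw-pointer ownership transfer and the per-service threads shown above
are exactly the pieces this rollback removes from GrpcServer's destructor,
its Start path, and its teardown.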