Rollback of PR #48087: Initialize allowed_devices

Rollback due to crashes and timeouts.
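
For context, the rolled-back change restricted XLA local-client creation to the
op's own GPU rather than every visible device. A minimal sketch of that pattern
follows; GetClientForDevice is a hypothetical helper and the "CUDA" platform
name is an assumption, while the actual reverted code appears in the hunk below:

    #include <set>
    #include "tensorflow/compiler/xla/client/client_library.h"
    #include "tensorflow/compiler/xla/client/local_client.h"
    #include "tensorflow/compiler/xla/service/platform_util.h"
    #include "tensorflow/compiler/xla/statusor.h"

    // Sketch only: build a local client that initializes a single device.
    xla::StatusOr<xla::LocalClient*> GetClientForDevice(int device_ordinal) {
      auto platform = xla::PlatformUtil::GetPlatform("CUDA");  // assumed name
      if (!platform.ok()) return platform.status();
      xla::LocalClientOptions client_options;
      client_options.set_platform(platform.ValueOrDie());
      // Leaving allowed_devices unset makes GetStreamExecutors initialize
      // every visible device; pinning it to one ordinal avoids that work
      // and the OOMs it can cause.
      client_options.set_allowed_devices(std::set<int>{device_ordinal});
      return xla::ClientLibrary::GetOrCreateLocalClient(client_options);
    }
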

PiperOrigin-RevId: 366312107
Change-Id: I2ec67d23ec8b0674fa1b0bec57872dd28553c806
diff --git a/tensorflow/compiler/jit/xla_platform_info.cc b/tensorflow/compiler/jit/xla_platform_info.cc
index ab95727..cfd4de0 100644
--- a/tensorflow/compiler/jit/xla_platform_info.cc
+++ b/tensorflow/compiler/jit/xla_platform_info.cc
@@ -60,15 +60,6 @@
   client_options.set_platform(platform.ValueOrDie());
   client_options.set_intra_op_parallelism_threads(
       device->tensorflow_cpu_worker_threads()->num_threads);
-  // Initialize allowed_devices to the selected incoming device.
-  // Without this, allowed_devices is empty, resulting in the device
-  // initialization in GetStreamExecutors to initialize _all_ visible
-  // devices. Not only is that wasteful, in can result in OOMs.
-  auto dev_info = device->tensorflow_gpu_device_info();
-  if (dev_info != nullptr) {
-    std::set<int> allowed_device = {dev_info->gpu_id};
-    client_options.set_allowed_devices(allowed_device);
-  }
   auto client = xla::ClientLibrary::GetOrCreateLocalClient(client_options);
   if (!client.ok()) {
     return client.status();