[TF-TRT] s/absl::make_unique/std::make_unique

diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
index fa82050..430e7f1 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
@@ -1599,22 +1599,22 @@
         DeviceFactory::NewDevice("GPU", {}, "/job:a/replica:0/task:0"));
     Device* device_ptr = device_.get();
 
-    device_mgr_ = absl::make_unique<StaticDeviceMgr>(std::move(device_));
+    device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(device_));
 
-    managed_allocator_ = absl::make_unique<GpuManagedAllocator>();
+    managed_allocator_ = std::make_unique<GpuManagedAllocator>();
     Allocator* allocator = managed_allocator_.get();
     step_container_ =
-        absl::make_unique<ScopedStepContainer>(0, [](const string&) {});
+        std::make_unique<ScopedStepContainer>(0, [](const string&) {});
     slice_reader_cache_wrapper_ =
-        absl::make_unique<checkpoint::TensorSliceReaderCacheWrapper>();
+        std::make_unique<checkpoint::TensorSliceReaderCacheWrapper>();
 
-    flib_def_ = absl::make_unique<FunctionLibraryDefinition>(
+    flib_def_ = std::make_unique<FunctionLibraryDefinition>(
         OpRegistry::Global(), FunctionDefLibrary{});
 
     thread_pool_ =
-        absl::make_unique<thread::ThreadPool>(Env::Default(), "default",
-                                              /*num_threads=*/1);
+        std::make_unique<thread::ThreadPool>(Env::Default(), "default",
+                                             /*num_threads=*/1);
-    pflr_ = absl::make_unique<ProcessFunctionLibraryRuntime>(
+    pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>(
         device_mgr_.get(), Env::Default(), /*config=*/nullptr,
         TF_GRAPH_DEF_VERSION, flib_def_.get(), OptimizerOptions(),
         thread_pool_.get());
@@ -1646,7 +1646,7 @@
     params_.slice_reader_cache = slice_reader_cache_wrapper_.get();
     params_.op_device_context = device_context;
 
-    context_ = absl::make_unique<OpKernelContext>(&params_);
+    context_ = std::make_unique<OpKernelContext>(&params_);
 
     // Outputs.
     *kernel = op_kernel_.get();
diff --git a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
index 3f284ba..5b19c6b 100644
--- a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
+++ b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_op.cc
@@ -1040,7 +1040,7 @@
     // Store an empty engine in the cache for these input shapes so we don't try
     // to build the same failing engine again.
     cache_resource->cache_.emplace(input_concrete_shapes,
-                                   absl::make_unique<EngineContext>());
+                                   std::make_unique<EngineContext>());
     return status;
   }
   return engine;
@@ -1101,7 +1101,7 @@
       TF_RETURN_IF_ERROR(cache_res->profiles_.CreateExecutionContexts(
           static_engine.get(), &exec_contexts));
       cache.emplace(input_concrete_shapes,
-                    absl::make_unique<EngineContext>(std::move(static_engine),
-                                                     std::move(exec_contexts)));
+                    std::make_unique<EngineContext>(std::move(static_engine),
+                                                    std::move(exec_contexts)));
       VLOG(1) << "Added new engine to cache of " << name()
               << ". Cache size: " << cache.size();
@@ -1119,7 +1119,7 @@
         // Store an empty engine in the cache so we don't try to load the same
         // failing engine again.
         cache.emplace(input_concrete_shapes,
-                      absl::make_unique<EngineContext>());
+                      std::make_unique<EngineContext>());
         return std::pair<EngineContext*, int>(&empty_context, 0);
       }
       if (segment_graph_def_.node().empty()) {
@@ -1151,7 +1151,7 @@
     // TODO(laigd): here we assume engine_input_shapes matches the actual input
     // shapes of the engine, we should verify that.
     cache.emplace(engine_input_shapes,
-                  absl::make_unique<EngineContext>(std::move(static_engine),
-                                                   std::move(context)));
+                  std::make_unique<EngineContext>(std::move(static_engine),
+                                                  std::move(context)));
     // Runtime is safe to delete after engine creation
     VLOG(1) << "Size of serialized TRT engine: "
@@ -1193,7 +1193,7 @@
           << "The native segment will be used instead.";
       // Store an empty engine in the cache for these input shapes so we don't
       // try to build the same failing engine again.
-      cache.emplace(input_concrete_shapes, absl::make_unique<EngineContext>());
+      cache.emplace(input_concrete_shapes, std::make_unique<EngineContext>());
       return std::pair<EngineContext*, int>(&empty_context, 0);
     }
 
@@ -1211,7 +1211,7 @@
     TF_RETURN_IF_ERROR(cache_res->profiles_.CreateExecutionContexts(
         engine.get(), &exec_contexts));
     cache.emplace(input_concrete_shapes,
-                  absl::make_unique<EngineContext>(std::move(engine),
-                                                   std::move(exec_contexts)));
+                  std::make_unique<EngineContext>(std::move(engine),
+                                                  std::move(exec_contexts)));
     VLOG(1) << "Added new engine to cache of " << name()
             << ". Cache size: " << cache.size();
@@ -1227,7 +1227,7 @@
 // possible.
 Status TRTEngineOp::AllocateCalibrationResources(
     OpKernelContext* ctx, TRTEngineCacheResource* cache_res) {
-  cache_res->calib_ctx_ = absl::make_unique<CalibrationContext>();
+  cache_res->calib_ctx_ = std::make_unique<CalibrationContext>();
   auto* cres = cache_res->calib_ctx_.get();
 
   // Get the input shapes.
@@ -1326,13 +1326,13 @@
         auto calib_result = cache_res->profiles_.CreateExecutionContexts(
             cres->engine_.get(), &exec_contexts);
         cache_res->cache_.emplace(
-            shapes, absl::make_unique<EngineContext>(std::move(cres->engine_),
-                                                     std::move(exec_contexts)));
+            shapes, std::make_unique<EngineContext>(std::move(cres->engine_),
+                                                    std::move(exec_contexts)));
       } else {
         ExecutionContext context =
             ExecutionContext::Create(cres->engine_.get());
         cache_res->cache_.emplace(
-            shapes, absl::make_unique<EngineContext>(std::move(cres->engine_),
-                                                     std::move(context)));
+            shapes, std::make_unique<EngineContext>(std::move(cres->engine_),
+                                                    std::move(context)));
       }
     }
diff --git a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops.cc b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops.cc
index d9e3f09..fdb8004 100644
--- a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops.cc
+++ b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops.cc
@@ -115,7 +115,7 @@
     // Parse the serialized engines and add them to the cache.
     std::unique_ptr<RandomAccessFile> file;
     OP_REQUIRES_OK(ctx, ctx->env()->NewRandomAccessFile(filename, &file));
-    auto reader = absl::make_unique<io::RecordReader>(file.get());
+    auto reader = std::make_unique<io::RecordReader>(file.get());
 
     uint64 offset = 0;
     int num_loaded_engine = 0;
@@ -156,7 +156,7 @@
         ctx_vec.push_back(ExecutionContext::Create(raw_engine));
       }
       resource->cache_.emplace(engine_input_shapes,
-                               absl::make_unique<EngineContext>(
+                               std::make_unique<EngineContext>(
                                    std::move(engine), std::move(ctx_vec)));
       ++num_loaded_engine;
     } while (1);
@@ -207,7 +207,7 @@
     // Serialize the engines and write them to file.
     std::unique_ptr<WritableFile> file;
     OP_REQUIRES_OK(ctx, ctx->env()->NewWritableFile(filename, &file));
-    auto writer = absl::make_unique<io::RecordWriter>(file.get());
+    auto writer = std::make_unique<io::RecordWriter>(file.get());
 
     int num_serialized_engines = 0;
     if (save_gpu_specific_engines_) {
diff --git a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
index 40163de..dfa248d 100644
--- a/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc
@@ -286,7 +286,7 @@
   }
   resource->cache_.emplace(
       engine_input_shape,
-      absl::make_unique<EngineContext>(std::move(engine), std::move(context)));
+      std::make_unique<EngineContext>(std::move(engine), std::move(context)));
   // Check that the resource has multiple references before it is unregistered
   // from the resource manager.
   EXPECT_FALSE(resource->RefCountIsOne());
@@ -322,7 +322,7 @@
   // Verify the file for the serialized engine.
   std::unique_ptr<RandomAccessFile> file;
   TF_ASSERT_OK(env->NewRandomAccessFile(filename, &file));
-  auto reader = absl::make_unique<io::RecordReader>(file.get());
+  auto reader = std::make_unique<io::RecordReader>(file.get());
   uint64 offset = 0;
   tstring record;
   TF_ASSERT_OK(reader->ReadRecord(&offset, &record));