[TF] [NFC] Replace absl::optional with std::optional in SE and compiler/
PiperOrigin-RevId: 452557276
diff --git a/tensorflow/compiler/xla/pjrt/cpu_device_test.cc b/tensorflow/compiler/xla/pjrt/cpu_device_test.cc
index b7da137..9273adb 100644
--- a/tensorflow/compiler/xla/pjrt/cpu_device_test.cc
+++ b/tensorflow/compiler/xla/pjrt/cpu_device_test.cc
@@ -30,7 +30,7 @@
auto client = *GetCpuClient(true);
auto* device = client->devices()[0];
auto buffer = *client->BufferFromHostBuffer(
- &data, PrimitiveType::F32, {4}, absl::nullopt,
+ &data, PrimitiveType::F32, {4}, std::nullopt,
PjRtClient::HostBufferSemantics::kZeroCopy, {}, device);
auto literal = *buffer->ToLiteralSync();
}
diff --git a/tensorflow/compiler/xla/pjrt/gpu_device.cc b/tensorflow/compiler/xla/pjrt/gpu_device.cc
index f5beee2..45c1571 100644
--- a/tensorflow/compiler/xla/pjrt/gpu_device.cc
+++ b/tensorflow/compiler/xla/pjrt/gpu_device.cc
@@ -202,8 +202,8 @@
// Builds an xla::LocalClient for the GPU platform.
StatusOr<LocalClient*> GetGpuXlaClient(
- const absl::optional<std::string>& platform_name,
- const absl::optional<std::set<int>>& allowed_devices) {
+ const std::optional<std::string>& platform_name,
+ const std::optional<std::set<int>>& allowed_devices) {
TF_ASSIGN_OR_RETURN(
se::Platform * platform,
PlatformUtil::GetPlatform(platform_name ? *platform_name : "gpu"));
@@ -470,8 +470,8 @@
StatusOr<std::unique_ptr<PjRtClient>> GetGpuClient(
bool asynchronous, const GpuAllocatorConfig& allocator_config,
std::shared_ptr<DistributedRuntimeClient> distributed_client, int node_id,
- const absl::optional<std::set<int>>& allowed_devices,
- absl::optional<std::string> platform_name) {
+ const std::optional<std::set<int>>& allowed_devices,
+ std::optional<std::string> platform_name) {
TF_ASSIGN_OR_RETURN(LocalClient * xla_client,
GetGpuXlaClient(platform_name, allowed_devices));
TF_ASSIGN_OR_RETURN(
diff --git a/tensorflow/compiler/xla/pjrt/gpu_device.h b/tensorflow/compiler/xla/pjrt/gpu_device.h
index 587e578..f5bd7d7 100644
--- a/tensorflow/compiler/xla/pjrt/gpu_device.h
+++ b/tensorflow/compiler/xla/pjrt/gpu_device.h
@@ -69,8 +69,8 @@
StatusOr<std::unique_ptr<PjRtClient>> GetGpuClient(
bool asynchronous, const GpuAllocatorConfig& allocator_config,
std::shared_ptr<DistributedRuntimeClient> distributed_client, int node_id,
- const absl::optional<std::set<int>>& allowed_devices = absl::nullopt,
- absl::optional<std::string> platform_name = absl::nullopt);
+ const std::optional<std::set<int>>& allowed_devices = std::nullopt,
+ std::optional<std::string> platform_name = std::nullopt);
} // namespace xla
diff --git a/tensorflow/compiler/xla/pjrt/gpu_multistream_test.cc b/tensorflow/compiler/xla/pjrt/gpu_multistream_test.cc
index 51fedd6..ca26344 100644
--- a/tensorflow/compiler/xla/pjrt/gpu_multistream_test.cc
+++ b/tensorflow/compiler/xla/pjrt/gpu_multistream_test.cc
@@ -74,21 +74,21 @@
auto dummy_buffer,
client->BufferFromHostBuffer(
dummy_inputs.data(), S32, dummy_shape.dimensions(),
- /*byte_strides=*/absl::nullopt,
+ /*byte_strides=*/std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
/*on_done_with_host_buffer=*/nullptr, device));
TF_ASSERT_OK_AND_ASSIGN(
auto in_buffer0,
client->BufferFromHostBuffer(
inputs.data(), S32, shape.dimensions(),
- /*byte_strides=*/absl::nullopt,
+ /*byte_strides=*/std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
/*on_done_with_host_buffer=*/nullptr, device));
TF_ASSERT_OK_AND_ASSIGN(
auto in_buffer1,
client->BufferFromHostBuffer(
inputs.data(), S32, shape.dimensions(),
- /*byte_strides=*/absl::nullopt,
+ /*byte_strides=*/std::nullopt,
PjRtClient::HostBufferSemantics::kImmutableUntilTransferCompletes,
/*on_done_with_host_buffer=*/nullptr, device));
// The execution may be enqueued before the transfers complete, requiring
diff --git a/tensorflow/compiler/xla/pjrt/local_device_state.h b/tensorflow/compiler/xla/pjrt/local_device_state.h
index fb3cbcc..c6227fb 100644
--- a/tensorflow/compiler/xla/pjrt/local_device_state.h
+++ b/tensorflow/compiler/xla/pjrt/local_device_state.h
@@ -195,7 +195,7 @@
// Callback map pairs callback stream with a device stream and is used for
// running short host-side callbacks after device side events, without
// preventing the device-side stream from doing useful work.
- absl::optional<absl::flat_hash_map<se::Stream*, std::unique_ptr<se::Stream>>>
+ std::optional<absl::flat_hash_map<se::Stream*, std::unique_ptr<se::Stream>>>
callback_stream_map_;
// A worker thread, used for replicated computation launches.
diff --git a/tensorflow/compiler/xla/pjrt/lru_cache.h b/tensorflow/compiler/xla/pjrt/lru_cache.h
index 00c0896..358ef0a 100644
--- a/tensorflow/compiler/xla/pjrt/lru_cache.h
+++ b/tensorflow/compiler/xla/pjrt/lru_cache.h
@@ -101,7 +101,7 @@
// pointer stability for keys.
const Key* key;
LRUCache* container;
- absl::optional<Value> value;
+ std::optional<Value> value;
};
// We use `node_hash_map` because we want to guarantee pointer stability for
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.cc b/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.cc
index fbe8951..f6ef388 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.cc
+++ b/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.cc
@@ -41,7 +41,7 @@
PjRtCApiClient::~PjRtCApiClient() { delete wrapped_; }
-StatusOr<absl::optional<std::string>> PjRtCApiClient::ExecutableFingerprint(
+StatusOr<std::optional<std::string>> PjRtCApiClient::ExecutableFingerprint(
const PjRtExecutable& executable) const {
return wrapped_->ExecutableFingerprint(
*PjRtCApiExecutable::GetWrapped(&executable));
@@ -86,7 +86,7 @@
PjRtCApiExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
std::vector<std::vector<PjRtBuffer*>> wrapped_args;
for (const std::vector<PjRtBuffer*>& args : argument_handles) {
wrapped_args.push_back(PjRtCApiBuffer::GetWrappedVector(args));
@@ -108,7 +108,7 @@
PjRtCApiExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
std::vector<PjRtBuffer*> wrapped_args =
PjRtCApiBuffer::GetWrappedVector(argument_handles);
@@ -127,7 +127,7 @@
PjRtCApiExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
std::vector<PjRtBuffer*> wrapped_args =
PjRtCApiBuffer::GetWrappedVector(argument_handles);
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.h b/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.h
index 72fbf33..7b8795f 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.h
+++ b/tensorflow/compiler/xla/pjrt/pjrt_c_api_client.h
@@ -155,7 +155,7 @@
return WrapExecutable(wrapped_->Compile(module, options));
}
- StatusOr<absl::optional<std::string>> ExecutableFingerprint(
+ StatusOr<std::optional<std::string>> ExecutableFingerprint(
const PjRtExecutable& executable) const override;
StatusOr<std::string> SerializeExecutable(
@@ -181,7 +181,7 @@
StatusOr<std::unique_ptr<PjRtBuffer>> BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer,
PjRtDevice* device) override {
@@ -394,19 +394,19 @@
StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>> Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
override;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
void Delete() override { return wrapped_->Delete(); }
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_client.h b/tensorflow/compiler/xla/pjrt/pjrt_client.h
index 90c8a67..56b3675 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_client.h
+++ b/tensorflow/compiler/xla/pjrt/pjrt_client.h
@@ -220,7 +220,7 @@
struct CompileOptions {
// The layouts of the arguments that the computation should expect.
- absl::optional<std::vector<Shape>> argument_layouts;
+ std::optional<std::vector<Shape>> argument_layouts;
// If true, the supplied computation expects its arguments to be wrapped in a
// tuple and passed as a single parameter.
@@ -350,7 +350,7 @@
// num_replicas_per_slice is going to be "num_replicas / num_slices".
// TODO(zhangqiaorjc): Convert this to pure virtual and push down.
virtual StatusOr<DeviceAssignment> GetDefaultDeviceAssignment(
- int num_replicas, absl::optional<int> num_replicas_per_slice,
+ int num_replicas, std::optional<int> num_replicas_per_slice,
int num_partitions, const MultiSliceConfig* multi_slice_config) const {
return Unimplemented("Multi slice device assignment is not supported.");
}
@@ -366,8 +366,8 @@
virtual StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
mlir::ModuleOp module, CompileOptions options) = 0;
- // Generates a unique fingerprint for `executable`, may be absl::nullopt.
- virtual StatusOr<absl::optional<std::string>> ExecutableFingerprint(
+ // Generates a unique fingerprint for `executable`, may be std::nullopt.
+ virtual StatusOr<std::optional<std::string>> ExecutableFingerprint(
const PjRtExecutable& executable) const = 0;
// Returns a platform-specific serialization of `executable`. The
@@ -524,7 +524,7 @@
// with dimensions in major-to-minor order.
virtual StatusOr<std::unique_ptr<PjRtBuffer>> BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer, PjRtDevice* device) = 0;
@@ -952,15 +952,14 @@
// The caller is *NOT* required to ensure that PjRtExecutable stays alive
// until futures are ready.
virtual StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>>
- Execute(
- absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
- const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures) = 0;
+ Execute(absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
+ const ExecuteOptions& options,
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures) = 0;
// Convenience wrapper for Execute that never returns futures.
StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>> Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options) {
- absl::optional<std::vector<PjRtFuture<Status>>> returned_futures;
+ std::optional<std::vector<PjRtFuture<Status>>> returned_futures;
return Execute(std::move(argument_handles), options, returned_futures);
}
@@ -977,13 +976,12 @@
virtual StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
- bool fill_future) = 0;
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) = 0;
// Convenience wrapper for ExecuteSharded that always returns a future.
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future) {
+ std::optional<PjRtFuture<Status>>& returned_future) {
return ExecuteSharded(std::move(argument_handles), device, options,
returned_future, /*fill_future=*/true);
}
@@ -991,7 +989,7 @@
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options) {
- absl::optional<PjRtFuture<Status>> returned_future;
+ std::optional<PjRtFuture<Status>> returned_future;
return ExecuteSharded(std::move(argument_handles), device, options,
returned_future, /*fill_future=*/false);
}
@@ -1009,13 +1007,12 @@
virtual StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
- bool fill_future) = 0;
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) = 0;
// Convenience wrapper for ExecutePortable that always returns a future.
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future) {
+ std::optional<PjRtFuture<Status>>& returned_future) {
return ExecutePortable(std::move(argument_handles), device, options,
returned_future, /*fill_future=*/true);
}
@@ -1023,7 +1020,7 @@
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options) {
- absl::optional<PjRtFuture<Status>> returned_future;
+ std::optional<PjRtFuture<Status>> returned_future;
return ExecutePortable(std::move(argument_handles), device, options,
returned_future, /*fill_future=*/false);
}
@@ -1039,7 +1036,7 @@
// combining the result buffers with a future that becomes ready when the
// execution completes.
struct Result {
- absl::optional<PjRtFuture<Status>> future;
+ std::optional<PjRtFuture<Status>> future;
std::vector<std::unique_ptr<PjRtBuffer>> buffers;
};
};
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.cc b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.cc
index 703b2a6..40d4576 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.cc
+++ b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.cc
@@ -689,7 +689,7 @@
StatusOr<std::unique_ptr<PjRtBuffer>>
PjRtStreamExecutorClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer, PjRtDevice* device) {
tensorflow::profiler::TraceMe traceme(
@@ -1022,9 +1022,8 @@
buffers.push_back(std::move(buffer));
}
- TF_RETURN_IF_ERROR(
- EnqueueCrossHostReceive(buffers, std::move(definition_event),
- std::move(notifier), absl::nullopt));
+ TF_RETURN_IF_ERROR(EnqueueCrossHostReceive(
+ buffers, std::move(definition_event), std::move(notifier), std::nullopt));
return buffers;
}
@@ -1852,7 +1851,7 @@
client_->client()->backend().transfer_manager();
// Lift tuple_handle outside the conditional so that the event it returns is
// not destroyed until after the loop below that waits on events.
- absl::optional<TupleHandle> tuple_handle;
+ std::optional<TupleHandle> tuple_handle;
if (parameter_is_tupled_arguments_ && !options.arguments_are_tupled) {
TF_ASSIGN_OR_RETURN(
tuple_handle,
@@ -2173,7 +2172,7 @@
}
}
- absl::optional<PjRtFuture<Status>> future;
+ std::optional<PjRtFuture<Status>> future;
if (fill_future) {
auto promise = PjRtFuture<Status>::CreatePromise();
future = PjRtFuture<Status>(promise);
@@ -2197,7 +2196,7 @@
PjRtStreamExecutorExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
if (device_assignment_ == nullptr) {
return InvalidArgument("Execute expects a non-null device_assignment");
}
@@ -2322,7 +2321,7 @@
PjRtStreamExecutorExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
if (device_assignment_ == nullptr) {
return InvalidArgument("ExecuteShard expects a non-null device_assignment");
}
@@ -2351,7 +2350,7 @@
PjRtStreamExecutorExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
if (device_assignment_ != nullptr) {
return InvalidArgument("ExecutePortable gets a non-portable executable");
}
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
index 36f8a0d..3e09e50 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
+++ b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
@@ -189,9 +189,9 @@
StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
mlir::ModuleOp mlir_module, CompileOptions options) override;
- StatusOr<absl::optional<std::string>> ExecutableFingerprint(
+ StatusOr<std::optional<std::string>> ExecutableFingerprint(
const PjRtExecutable& executable) const override {
- return absl::optional<std::string>();
+ return std::optional<std::string>();
}
StatusOr<std::string> SerializeExecutable(
@@ -226,7 +226,7 @@
StatusOr<std::unique_ptr<PjRtBuffer>> BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer,
PjRtDevice* device) override;
@@ -290,7 +290,7 @@
absl::Span<const std::unique_ptr<PjRtBuffer>> buffers,
std::shared_ptr<BufferSequencingEvent> definition_event,
PjRtCrossHostRecvNotifier notifier,
- absl::optional<std::vector<GatherDetails>> gather_details) const {
+ std::optional<std::vector<GatherDetails>> gather_details) const {
return Unimplemented("Cross host receives not implemented.");
}
@@ -754,21 +754,21 @@
StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>> Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
override;
using PjRtExecutable::ExecuteSharded;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
using PjRtExecutable::ExecutePortable;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
void Delete() override { executables_.clear(); }
diff --git a/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.cc b/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.cc
index db699f3..1d04541 100644
--- a/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.cc
+++ b/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.cc
@@ -210,9 +210,9 @@
return absl::make_unique<HloCostAnalysis>(cpu::CpuExecutable::ShapeSizeBytes);
}
-StatusOr<absl::optional<std::string>> TfrtCpuClient::ExecutableFingerprint(
+StatusOr<std::optional<std::string>> TfrtCpuClient::ExecutableFingerprint(
const PjRtExecutable& executable) const {
- return absl::optional<std::string>();
+ return std::optional<std::string>();
}
static StatusOr<std::unique_ptr<xla::Executable>> JitCompile(
@@ -227,7 +227,7 @@
std::unique_ptr<HloModuleConfig> hlo_module_config,
CreateModuleConfig(program_shape, argument_layouts, &execution_options,
execution_options.num_replicas(),
- /*num_threads=*/absl::nullopt,
+ /*num_threads=*/std::nullopt,
/*aot_options=*/nullptr));
// Unoptimized HloModule.
@@ -461,7 +461,7 @@
StatusOr<std::unique_ptr<PjRtBuffer>> TfrtCpuClient::BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer, PjRtDevice* device) {
tensorflow::profiler::TraceMe traceme("TfrtCpuClient::BufferFromHostBuffer");
@@ -1353,8 +1353,8 @@
bool TfrtCpuExecutable::IsDeleted() { return false; }
-StatusOr<absl::optional<std::string>> TfrtCpuExecutable::Fingerprint() const {
- return absl::optional<std::string>();
+StatusOr<std::optional<std::string>> TfrtCpuExecutable::Fingerprint() const {
+ return std::optional<std::string>();
}
Status TfrtCpuExecutable::SetUpDonation(bool tuple_inputs) {
@@ -1629,7 +1629,7 @@
buffer_pointers.data(), &status,
nullptr);
- absl::optional<absl::string_view> error_message =
+ std::optional<absl::string_view> error_message =
xla::CustomCallStatusGetMessage(&status);
if (error_message) {
return InternalError("Generated function failed: %s", *error_message);
@@ -1680,7 +1680,7 @@
nullptr, buffer_pointers.data(),
&status, nullptr);
- absl::optional<absl::string_view> error_message =
+ std::optional<absl::string_view> error_message =
xla::CustomCallStatusGetMessage(&status);
if (error_message) {
// CPU computation fails with an error.
@@ -1736,7 +1736,7 @@
result_shape, std::move(tracked_device_buffer), client_, device);
res.push_back(std::move(tfrt_output_buffer));
}
- absl::optional<PjRtFuture<Status>> future;
+ std::optional<PjRtFuture<Status>> future;
if (fill_future) {
auto done_event = tfrt::MakeUnconstructedAsyncValueRef<Status>();
execute_event.AndThen(
@@ -1757,7 +1757,7 @@
TfrtCpuExecutable::Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures) {
tensorflow::profiler::TraceMe traceme("TfrtCpuExecutable::Execute");
if (device_assignment_ == nullptr) {
return InvalidArgument("Execute expects a non-null device_assignment");
@@ -1871,7 +1871,7 @@
TfrtCpuExecutable::ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
tensorflow::profiler::TraceMe traceme("TfrtCpuExecutable::ExecuteSharded");
if (device_assignment_ == nullptr) {
return InvalidArgument("ExecuteShard expects a non-null device_assignment");
@@ -1902,7 +1902,7 @@
TfrtCpuExecutable::ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
+ std::optional<PjRtFuture<Status>>& returned_future, bool fill_future) {
tensorflow::profiler::TraceMe traceme("TfrtCpuExecutable::ExecutePortable");
if (device_assignment_ != nullptr) {
return InvalidArgument("ExecutePortable gets a non-portable executable");
diff --git a/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h b/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h
index 973dd54..98a810a 100644
--- a/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h
+++ b/tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h
@@ -153,7 +153,7 @@
StatusOr<std::unique_ptr<PjRtExecutable>> Compile(
mlir::ModuleOp module, CompileOptions options) override;
- StatusOr<absl::optional<std::string>> ExecutableFingerprint(
+ StatusOr<std::optional<std::string>> ExecutableFingerprint(
const PjRtExecutable& executable) const override;
StatusOr<std::string> SerializeExecutable(
@@ -179,7 +179,7 @@
StatusOr<std::unique_ptr<PjRtBuffer>> BufferFromHostBuffer(
const void* data, PrimitiveType type, absl::Span<int64_t const> dims,
- absl::optional<absl::Span<int64_t const>> byte_strides,
+ std::optional<absl::Span<int64_t const>> byte_strides,
HostBufferSemantics host_buffer_semantics,
std::function<void()> on_done_with_host_buffer,
PjRtDevice* device) override;
@@ -506,7 +506,7 @@
}
StatusOr<tfrt::AsyncValueRef<Literal>> CopyToHostAsyncInternal(
- bool discard_cached_copy, absl::optional<xla::Layout> layout);
+ bool discard_cached_copy, std::optional<xla::Layout> layout);
// Requires holds_[kDonation] == 0 (i.e., WaitForOutstandingDonationHolds()
// must be called first.)
@@ -608,28 +608,28 @@
StatusOr<std::vector<std::vector<std::unique_ptr<PjRtBuffer>>>> Execute(
absl::Span<const std::vector<PjRtBuffer*>> argument_handles,
const ExecuteOptions& options,
- absl::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
+ std::optional<std::vector<PjRtFuture<Status>>>& returned_futures)
override;
using PjRtExecutable::ExecuteSharded;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecuteSharded(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
using PjRtExecutable::ExecutePortable;
StatusOr<std::vector<std::unique_ptr<PjRtBuffer>>> ExecutePortable(
absl::Span<PjRtBuffer* const> argument_handles, PjRtDevice* device,
const ExecuteOptions& options,
- absl::optional<PjRtFuture<Status>>& returned_future,
+ std::optional<PjRtFuture<Status>>& returned_future,
bool fill_future) override;
void Delete() override;
bool IsDeleted() override;
- StatusOr<absl::optional<std::string>> Fingerprint() const;
+ StatusOr<std::optional<std::string>> Fingerprint() const;
private:
friend class TfrtCpuClient;
diff --git a/tensorflow/compiler/xla/pjrt/tpu_client.cc b/tensorflow/compiler/xla/pjrt/tpu_client.cc
index 6b95aa1..7b96362 100644
--- a/tensorflow/compiler/xla/pjrt/tpu_client.cc
+++ b/tensorflow/compiler/xla/pjrt/tpu_client.cc
@@ -133,7 +133,7 @@
num_partitions);
}
-StatusOr<absl::optional<std::string>> PjRtTpuClient::ExecutableFingerprint(
+StatusOr<std::optional<std::string>> PjRtTpuClient::ExecutableFingerprint(
const PjRtExecutable& executable) const {
if (executable.client() != this) {
return InvalidArgument(
@@ -151,7 +151,7 @@
&executable)
->executables()[0]
->executable());
- return absl::optional<std::string>(tpu_executable->fingerprint());
+ return std::optional<std::string>(tpu_executable->fingerprint());
}
StatusOr<std::string> PjRtTpuClient::SerializeExecutable(
diff --git a/tensorflow/compiler/xla/pjrt/tpu_client.h b/tensorflow/compiler/xla/pjrt/tpu_client.h
index ae1be64..1cfe9d3 100644
--- a/tensorflow/compiler/xla/pjrt/tpu_client.h
+++ b/tensorflow/compiler/xla/pjrt/tpu_client.h
@@ -83,7 +83,7 @@
bool EnqueueD2DTransfersOnSrcStream() const override { return false; }
- StatusOr<absl::optional<std::string>> ExecutableFingerprint(
+ StatusOr<std::optional<std::string>> ExecutableFingerprint(
const PjRtExecutable& executable) const override;
StatusOr<std::string> SerializeExecutable(
diff --git a/tensorflow/compiler/xla/pjrt/utils.cc b/tensorflow/compiler/xla/pjrt/utils.cc
index 92a88cc..99af379 100644
--- a/tensorflow/compiler/xla/pjrt/utils.cc
+++ b/tensorflow/compiler/xla/pjrt/utils.cc
@@ -149,7 +149,7 @@
const XlaComputation& computation,
std::function<StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function,
- absl::optional<std::vector<Shape>>& argument_layouts,
+ std::optional<std::vector<Shape>>& argument_layouts,
ExecutableBuildOptions* build_options,
std::vector<const Shape*>* argument_layout_pointers) {
TF_ASSIGN_OR_RETURN(ProgramShape program_shape,
diff --git a/tensorflow/compiler/xla/pjrt/utils.h b/tensorflow/compiler/xla/pjrt/utils.h
index d891527..5c469c9 100644
--- a/tensorflow/compiler/xla/pjrt/utils.h
+++ b/tensorflow/compiler/xla/pjrt/utils.h
@@ -41,7 +41,7 @@
const XlaComputation& computation,
std::function<StatusOr<Shape>(Shape)>
choose_compact_layout_for_shape_function,
- absl::optional<std::vector<Shape>>& argument_layouts,
+ std::optional<std::vector<Shape>>& argument_layouts,
ExecutableBuildOptions* build_options,
std::vector<const Shape*>* argument_layout_pointers);