[XLA] Replace TF macros with ABSL macros
Also, remove some references to tensorflow::uint/int types that were left behind.
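
For reference, a minimal sketch of what the swapped annotations mean; the Counter class below is illustrative only and is not part of this change. The pairs touched in the diff are ABSL_GUARDED_BY / TF_GUARDED_BY, ABSL_EXCLUSIVE_LOCKS_REQUIRED / TF_EXCLUSIVE_LOCKS_REQUIRED, ABSL_LOCKS_EXCLUDED / TF_LOCKS_EXCLUDED, ABSL_MUST_USE_RESULT / TF_MUST_USE_RESULT, ABSL_FALLTHROUGH_INTENDED / TF_FALLTHROUGH_INTENDED, ABSL_PREDICT_FALSE / TF_PREDICT_FALSE, and ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY / TF_ATTRIBUTE_NO_SANITIZE_MEMORY. Both families should expand to the same Clang attributes where supported (absl/base/thread_annotations.h vs. tensorflow/core/platform/thread_annotations.h, pulled in via macros.h), so swapping the spelling is not expected to change the generated code or the -Wthread-safety analysis.

    // Illustrative only -- not part of this change.
    #include <cstdint>

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      void Increment() ABSL_LOCKS_EXCLUDED(mu_) {  // or TF_LOCKS_EXCLUDED(mu_)
        absl::MutexLock lock(&mu_);
        ++value_;
      }

     private:
      absl::Mutex mu_;
      int64_t value_ ABSL_GUARDED_BY(mu_) = 0;     // or TF_GUARDED_BY(mu_)
    };
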
PiperOrigin-RevId: 418306144
Change-Id: Ibf4dd8ebcd3ad6d81e0d3082a0a6d034596c8976
diff --git a/tensorflow/compiler/xla/BUILD b/tensorflow/compiler/xla/BUILD
index 2c0e44d..fc5eed5 100644
--- a/tensorflow/compiler/xla/BUILD
+++ b/tensorflow/compiler/xla/BUILD
@@ -481,6 +481,7 @@
":xla_data_proto_cc",
"//tensorflow/core:lib",
"@com_google_absl//absl/base",
+ "@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
diff --git a/tensorflow/compiler/xla/array.h b/tensorflow/compiler/xla/array.h
index b11af58..b4976db 100644
--- a/tensorflow/compiler/xla/array.h
+++ b/tensorflow/compiler/xla/array.h
@@ -34,6 +34,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/array2d.h b/tensorflow/compiler/xla/array2d.h
index b56c713..96ff77e 100644
--- a/tensorflow/compiler/xla/array2d.h
+++ b/tensorflow/compiler/xla/array2d.h
@@ -30,6 +30,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/array3d.h b/tensorflow/compiler/xla/array3d.h
index cc0fbf8..8d534e2 100644
--- a/tensorflow/compiler/xla/array3d.h
+++ b/tensorflow/compiler/xla/array3d.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/array.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/array4d.h b/tensorflow/compiler/xla/array4d.h
index f251814..9b4049f 100644
--- a/tensorflow/compiler/xla/array4d.h
+++ b/tensorflow/compiler/xla/array4d.h
@@ -32,6 +32,7 @@
#include "tensorflow/compiler/xla/array2d.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/client/BUILD b/tensorflow/compiler/xla/client/BUILD
index bedea98..f844f23 100644
--- a/tensorflow/compiler/xla/client/BUILD
+++ b/tensorflow/compiler/xla/client/BUILD
@@ -229,6 +229,7 @@
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_evaluator",
"//tensorflow/compiler/xla/service:hlo_proto_cc",
+ "//tensorflow/core:tflite_portable_logging",
"//tensorflow/core/platform:errors",
"//tensorflow/stream_executor/lib",
"@com_google_absl//absl/container:flat_hash_map",
diff --git a/tensorflow/compiler/xla/client/client.h b/tensorflow/compiler/xla/client/client.h
index afcc953..be5c745 100644
--- a/tensorflow/compiler/xla/client/client.h
+++ b/tensorflow/compiler/xla/client/client.h
@@ -29,6 +29,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/client/client_library.h b/tensorflow/compiler/xla/client/client_library.h
index 719d75f0b..e2cfeca 100644
--- a/tensorflow/compiler/xla/client/client_library.h
+++ b/tensorflow/compiler/xla/client/client_library.h
@@ -34,6 +34,7 @@
#include "tensorflow/compiler/xla/service/local_service.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -133,10 +134,10 @@
tensorflow::mutex service_mutex_; // Guards the singleton creation state.
std::unordered_map<se::Platform::Id, std::unique_ptr<LocalInstance>>
- local_instances_ ABSL_GUARDED_BY(service_mutex_);
+ local_instances_ TF_GUARDED_BY(service_mutex_);
std::unordered_map<se::Platform::Id, std::unique_ptr<CompileOnlyInstance>>
- compile_only_instances_ ABSL_GUARDED_BY(service_mutex_);
+ compile_only_instances_ TF_GUARDED_BY(service_mutex_);
ClientLibrary(const ClientLibrary&) = delete;
ClientLibrary& operator=(const ClientLibrary&) = delete;
diff --git a/tensorflow/compiler/xla/client/global_data.h b/tensorflow/compiler/xla/client/global_data.h
index 49066b7..e5924eb 100644
--- a/tensorflow/compiler/xla/client/global_data.h
+++ b/tensorflow/compiler/xla/client/global_data.h
@@ -23,6 +23,7 @@
#include "tensorflow/compiler/xla/service_interface.h"
#include "tensorflow/compiler/xla/xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/client/value_inference.cc b/tensorflow/compiler/xla/client/value_inference.cc
index 639719a..6a5c9e8 100644
--- a/tensorflow/compiler/xla/client/value_inference.cc
+++ b/tensorflow/compiler/xla/client/value_inference.cc
@@ -292,7 +292,7 @@
// A postorder dfs node can be visited once its dependency requests are all
// fulfilled.
-struct ABSL_MUST_USE_RESULT PostorderDFSNode {
+struct TF_MUST_USE_RESULT PostorderDFSNode {
PostorderDFSNode& AddDependency(int64_t handle, PostorderDFSNodeType type,
InferenceContext context,
std::string annotation = "") {
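
Both spellings of MUST_USE_RESULT expand to __attribute__((warn_unused_result)) on compilers that support it, so annotating the struct keeps the warning when a returned PostorderDFSNode is silently dropped. A minimal sketch of that behavior, using the portable [[nodiscard]] analogue and made-up names (NodeLike, BuildNode) rather than the real value_inference.cc code:

    #include <cstdint>

    // [[nodiscard]] plays the same role here as TF_MUST_USE_RESULT /
    // ABSL_MUST_USE_RESULT on the struct above; the names are hypothetical.
    struct [[nodiscard]] NodeLike {
      int64_t handle = 0;
    };

    NodeLike BuildNode(int64_t handle) { return NodeLike{handle}; }

    void Demo() {
      BuildNode(1);                  // warning: discarded result
      NodeLike kept = BuildNode(2);  // OK: result is consumed
      (void)kept;
    }
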
diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc
index 6a80a9c..35298d5 100644
--- a/tensorflow/compiler/xla/client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_builder.cc
@@ -49,6 +49,7 @@
#include "tensorflow/compiler/xla/window_util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/errors.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/stream_executor/lib/statusor.h"
namespace xla {
@@ -350,7 +351,7 @@
// Set bound is considered constant -- the bound is used as the value.
break;
}
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case HloOpcode::kWhile:
// TODO(b/32495713): We aren't checking the condition and body
// computations themselves.
diff --git a/tensorflow/compiler/xla/client/xla_builder.h b/tensorflow/compiler/xla/client/xla_builder.h
index 80be433..dc68250 100644
--- a/tensorflow/compiler/xla/client/xla_builder.h
+++ b/tensorflow/compiler/xla/client/xla_builder.h
@@ -40,6 +40,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stacktrace.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/index_util.h b/tensorflow/compiler/xla/index_util.h
index b3a7cfb..9d75236 100644
--- a/tensorflow/compiler/xla/index_util.h
+++ b/tensorflow/compiler/xla/index_util.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/layout_util.h b/tensorflow/compiler/xla/layout_util.h
index dc2f39d..6f199e0 100644
--- a/tensorflow/compiler/xla/layout_util.h
+++ b/tensorflow/compiler/xla/layout_util.h
@@ -26,6 +26,7 @@
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/literal.h b/tensorflow/compiler/xla/literal.h
index e912eb8..6a5c891 100644
--- a/tensorflow/compiler/xla/literal.h
+++ b/tensorflow/compiler/xla/literal.h
@@ -44,6 +44,7 @@
#include "tensorflow/core/lib/core/bitmap.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/literal_test.cc b/tensorflow/compiler/xla/literal_test.cc
index cab503b..5464bef 100644
--- a/tensorflow/compiler/xla/literal_test.cc
+++ b/tensorflow/compiler/xla/literal_test.cc
@@ -30,6 +30,7 @@
#include "tensorflow/compiler/xla/test.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace {
diff --git a/tensorflow/compiler/xla/literal_util.h b/tensorflow/compiler/xla/literal_util.h
index 4a00918..3f0b918 100644
--- a/tensorflow/compiler/xla/literal_util.h
+++ b/tensorflow/compiler/xla/literal_util.h
@@ -47,6 +47,7 @@
#include "tensorflow/core/lib/core/bitmap.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/overflow_util.h b/tensorflow/compiler/xla/overflow_util.h
index f7ecceb..c44b52e 100644
--- a/tensorflow/compiler/xla/overflow_util.h
+++ b/tensorflow/compiler/xla/overflow_util.h
@@ -21,6 +21,7 @@
#include "absl/types/optional.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
@@ -34,7 +35,7 @@
const uint64_t uxy = ux * uy;
// Check if we overflow uint64_t, using a cheap check if both inputs are small
- if (ABSL_PREDICT_FALSE((ux | uy) >> 32 != 0)) {
+ if (TF_PREDICT_FALSE((ux | uy) >> 32 != 0)) {
// Ensure nonnegativity. Note that negative numbers will appear "large"
// to the unsigned comparisons above.
CHECK(x >= 0 && y >= 0);
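
The only change in this hunk is the spelling of the branch-prediction hint; both ABSL_PREDICT_FALSE and TF_PREDICT_FALSE wrap __builtin_expect on GCC/Clang and collapse to the bare condition elsewhere. Below is a self-contained sketch of the check being annotated, with the function name, the assert, and the std::optional return standing in for the real overflow_util.h code rather than quoting it:

    #include <cassert>
    #include <cstdint>
    #include <optional>

    #if defined(__GNUC__) || defined(__clang__)
    #define PREDICT_FALSE(x) (__builtin_expect(false || (x), false))
    #else
    #define PREDICT_FALSE(x) (x)
    #endif

    // Sketch only: returns x*y, or nullopt on overflow, for nonnegative inputs.
    std::optional<int64_t> MultiplyWithoutOverflowSketch(int64_t x, int64_t y) {
      // Multiply as unsigned so wrap-around is well defined.
      const uint64_t ux = x;
      const uint64_t uy = y;
      const uint64_t uxy = ux * uy;
      // Cheap filter: if both operands fit in 32 bits, the 64-bit product
      // cannot wrap, so the division check only runs on the (rare) large path.
      if (PREDICT_FALSE((ux | uy) >> 32 != 0)) {
        assert(x >= 0 && y >= 0);  // negative inputs look "large" as unsigned
        if (ux != 0 && uxy / ux != uy) return std::nullopt;
      }
      // The unsigned product may still exceed INT64_MAX; that shows up as a
      // negative value after the cast.
      const int64_t xy = static_cast<int64_t>(uxy);
      if (xy < 0) return std::nullopt;
      return xy;
    }
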
diff --git a/tensorflow/compiler/xla/packed_literal_reader.h b/tensorflow/compiler/xla/packed_literal_reader.h
index 7c91ca9..c89bbec 100644
--- a/tensorflow/compiler/xla/packed_literal_reader.h
+++ b/tensorflow/compiler/xla/packed_literal_reader.h
@@ -23,6 +23,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/parse_flags_from_env.cc b/tensorflow/compiler/xla/parse_flags_from_env.cc
index a9f06c1..f50298e 100644
--- a/tensorflow/compiler/xla/parse_flags_from_env.cc
+++ b/tensorflow/compiler/xla/parse_flags_from_env.cc
@@ -34,6 +34,7 @@
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/command_line_flags.h"
diff --git a/tensorflow/compiler/xla/pjrt/BUILD b/tensorflow/compiler/xla/pjrt/BUILD
index e4f31f7..573bbf0 100644
--- a/tensorflow/compiler/xla/pjrt/BUILD
+++ b/tensorflow/compiler/xla/pjrt/BUILD
@@ -41,6 +41,7 @@
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
+ "//tensorflow/core:lib",
"//tensorflow/core/platform:stream_executor",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/synchronization",
@@ -123,6 +124,7 @@
"//tensorflow/compiler/xla:status",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla/client:local_client",
+ "//tensorflow/core:lib",
"//tensorflow/core/platform:stream_executor",
"//tensorflow/core/profiler/lib:traceme",
"//tensorflow/core/protobuf:error_codes_proto_impl_cc",
diff --git a/tensorflow/compiler/xla/pjrt/event_pool.h b/tensorflow/compiler/xla/pjrt/event_pool.h
index 5ff2e0b..8068b79 100644
--- a/tensorflow/compiler/xla/pjrt/event_pool.h
+++ b/tensorflow/compiler/xla/pjrt/event_pool.h
@@ -87,8 +87,8 @@
const bool allow_reuse_;
absl::Mutex mu_;
- std::stack<std::unique_ptr<se::Event>> free_events_ ABSL_GUARDED_BY(mu_);
- uint64_t next_sequence_number_ ABSL_GUARDED_BY(mu_);
+ std::stack<std::unique_ptr<se::Event>> free_events_ TF_GUARDED_BY(mu_);
+ uint64_t next_sequence_number_ TF_GUARDED_BY(mu_);
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/pjrt/local_device_state.h b/tensorflow/compiler/xla/pjrt/local_device_state.h
index fb3cbcc..d4afab7 100644
--- a/tensorflow/compiler/xla/pjrt/local_device_state.h
+++ b/tensorflow/compiler/xla/pjrt/local_device_state.h
@@ -183,14 +183,13 @@
static constexpr int kNumDeviceToDeviceStreams = 4;
absl::Mutex mu_;
- int next_device_to_host_stream_ ABSL_GUARDED_BY(mu_) = 0;
- int next_device_to_device_stream_ ABSL_GUARDED_BY(mu_) = 0;
- std::stack<std::unique_ptr<se::Stream>> usage_stream_pool_
- ABSL_GUARDED_BY(mu_);
+ int next_device_to_host_stream_ TF_GUARDED_BY(mu_) = 0;
+ int next_device_to_device_stream_ TF_GUARDED_BY(mu_) = 0;
+ std::stack<std::unique_ptr<se::Stream>> usage_stream_pool_ TF_GUARDED_BY(mu_);
- std::random_device prng_seed_device_ ABSL_GUARDED_BY(mu_);
- std::mt19937 prng_seed_generator_ ABSL_GUARDED_BY(mu_);
- std::uniform_int_distribution<> prng_seed_distribution_ ABSL_GUARDED_BY(mu_);
+ std::random_device prng_seed_device_ TF_GUARDED_BY(mu_);
+ std::mt19937 prng_seed_generator_ TF_GUARDED_BY(mu_);
+ std::uniform_int_distribution<> prng_seed_distribution_ TF_GUARDED_BY(mu_);
// Callback map pairs callback stream with a device stream and is used for
// running short host-side callbacks after device side events, without
diff --git a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
index ca98656..71e7243 100644
--- a/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
+++ b/tensorflow/compiler/xla/pjrt/pjrt_stream_executor_client.h
@@ -667,9 +667,9 @@
PjRtStreamExecutorDevice* const device_;
mutable absl::Mutex mu_;
- std::shared_ptr<TrackedDeviceBuffer> device_buffer_ ABSL_GUARDED_BY(mu_);
+ std::shared_ptr<TrackedDeviceBuffer> device_buffer_ TF_GUARDED_BY(mu_);
// Count of holds on the buffer.
- std::array<int, ScopedHold::Type::kMaxValue> holds_ ABSL_GUARDED_BY(mu_);
+ std::array<int, ScopedHold::Type::kMaxValue> holds_ TF_GUARDED_BY(mu_);
};
// Wraps one or more XLA LocalExecutables (one per partition, as specified by
diff --git a/tensorflow/compiler/xla/pjrt/tracked_device_buffer.h b/tensorflow/compiler/xla/pjrt/tracked_device_buffer.h
index 1b2127d..bbfe52e 100644
--- a/tensorflow/compiler/xla/pjrt/tracked_device_buffer.h
+++ b/tensorflow/compiler/xla/pjrt/tracked_device_buffer.h
@@ -97,7 +97,7 @@
}
private:
- bool EventHasBeenRecorded() const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ bool EventHasBeenRecorded() const TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
uint64_t sequence_number() const;
// An event that is triggered when the content of one or more buffers has been
@@ -115,7 +115,7 @@
mutable absl::Mutex mu_;
// A list of all streams for which the buffer's content is known to be defined
// at the tail of the queue, i.e., for any newly enqueued command.
- absl::InlinedVector<se::Stream*, 2> streams_defined_on_ ABSL_GUARDED_BY(mu_);
+ absl::InlinedVector<se::Stream*, 2> streams_defined_on_ TF_GUARDED_BY(mu_);
};
// Class that represents a tuple of device buffers. Like a ScopedShapedBuffer it
diff --git a/tensorflow/compiler/xla/pjrt/worker_thread.h b/tensorflow/compiler/xla/pjrt/worker_thread.h
index 5b0528c..4fd2baa 100644
--- a/tensorflow/compiler/xla/pjrt/worker_thread.h
+++ b/tensorflow/compiler/xla/pjrt/worker_thread.h
@@ -40,11 +40,11 @@
void Schedule(std::function<void()> fn);
private:
- bool WorkAvailable() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ bool WorkAvailable() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void WorkLoop();
absl::Mutex mu_;
- std::queue<std::function<void()>> work_queue_ ABSL_GUARDED_BY(mu_);
+ std::queue<std::function<void()>> work_queue_ TF_GUARDED_BY(mu_);
std::unique_ptr<tensorflow::Thread> thread_;
};
diff --git a/tensorflow/compiler/xla/python/outfeed_receiver.cc b/tensorflow/compiler/xla/python/outfeed_receiver.cc
index f33bffe..ccf0379 100644
--- a/tensorflow/compiler/xla/python/outfeed_receiver.cc
+++ b/tensorflow/compiler/xla/python/outfeed_receiver.cc
@@ -170,11 +170,11 @@
std::vector<XlaOp> arrays);
private:
- bool CallbackQueueHasSpace() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ bool CallbackQueueHasSpace() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return callback_queue_size_bytes_ < max_callback_queue_size_bytes_;
}
- bool ShutdownDone() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ bool ShutdownDone() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (num_working_callback_threads_ == 0 && num_listening_threads_ == 0);
}
@@ -191,7 +191,7 @@
// Enqueues received data in the callback queue.
void EnqueueReceivedData(uint32_t device_idx,
std::unique_ptr<OutfeedData> received)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_);
// Shuts down the threads. See implementation notes at top of file.
// It is not safe to restart an OutfeedReceiver after shutting down one.
@@ -207,18 +207,18 @@
// Registered shapes by consumer id.
// The shape registry must be alive as long as the program exists.
// Right now we tell the user to never restart after Shutdown.
- absl::flat_hash_map<uint32_t, Shape> shape_registry_ ABSL_GUARDED_BY(mu_);
+ absl::flat_hash_map<uint32_t, Shape> shape_registry_ TF_GUARDED_BY(mu_);
// How many bytes of Literal are in the ensemble of callback queues.
- uint64_t callback_queue_size_bytes_ ABSL_GUARDED_BY(mu_);
+ uint64_t callback_queue_size_bytes_ TF_GUARDED_BY(mu_);
// Threads listening.
- int num_listening_threads_ ABSL_GUARDED_BY(mu_);
- bool shutdown_started_ ABSL_GUARDED_BY(mu_);
+ int num_listening_threads_ TF_GUARDED_BY(mu_);
+ bool shutdown_started_ TF_GUARDED_BY(mu_);
// How many callback threads are still working. Used for shutdown.
- int num_working_callback_threads_ ABSL_GUARDED_BY(mu_);
+ int num_working_callback_threads_ TF_GUARDED_BY(mu_);
std::vector<std::queue<std::unique_ptr<OutfeedData>>> callback_queues_
- ABSL_GUARDED_BY(mu_);
+ TF_GUARDED_BY(mu_);
// The threadpool must come last to ensure the queue exists
// when the pool destructor is called.
std::unique_ptr<tensorflow::thread::ThreadPool> threads_;
@@ -330,7 +330,7 @@
void OutfeedReceiverImpl::EnqueueReceivedData(
uint32_t device_idx, std::unique_ptr<OutfeedData> received)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::CallbackQueueHasSpace));
ssize_t literal_size_bytes = received->literal_size_bytes();
callback_queue_size_bytes_ += literal_size_bytes;
diff --git a/tensorflow/compiler/xla/python/outfeed_receiver_py.cc b/tensorflow/compiler/xla/python/outfeed_receiver_py.cc
index c62da10..a732ab8 100644
--- a/tensorflow/compiler/xla/python/outfeed_receiver_py.cc
+++ b/tensorflow/compiler/xla/python/outfeed_receiver_py.cc
@@ -113,7 +113,7 @@
private:
CallbackToPython callback_python_;
absl::Mutex mu_;
- bool outfeed_receiver_shutting_down_ ABSL_GUARDED_BY(mu_) = false;
+ bool outfeed_receiver_shutting_down_ TF_GUARDED_BY(mu_) = false;
std::vector<std::shared_ptr<PyClient>> clients_;
std::unique_ptr<OutfeedReceiver> outfeed_receiver_;
};
diff --git a/tensorflow/compiler/xla/python/outfeed_receiver_test.cc b/tensorflow/compiler/xla/python/outfeed_receiver_test.cc
index 0362105..915ff47 100644
--- a/tensorflow/compiler/xla/python/outfeed_receiver_test.cc
+++ b/tensorflow/compiler/xla/python/outfeed_receiver_test.cc
@@ -72,7 +72,7 @@
private:
absl::Mutex mutex_;
- std::vector<Data> received_ ABSL_GUARDED_BY(mutex_);
+ std::vector<Data> received_ TF_GUARDED_BY(mutex_);
};
StatusOr<std::unique_ptr<PjRtClient>> GetCpuClientWithNonLocalDevice() {
diff --git a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.h b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.h
index 45555a2..44f0d7b 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.h
+++ b/tensorflow/compiler/xla/python/tpu_driver/client/tpu_client.h
@@ -279,9 +279,9 @@
// `child_buffers_` stores the child buffers; else, `device_buffer_` stores
// the data content and `child_buffers_` is empty.
mutable absl::Mutex mu_;
- std::shared_ptr<TpuSharedBuffer> device_buffer_ ABSL_GUARDED_BY(mu_);
+ std::shared_ptr<TpuSharedBuffer> device_buffer_ TF_GUARDED_BY(mu_);
std::vector<std::shared_ptr<TpuSharedBuffer>> child_buffers_
- ABSL_GUARDED_BY(mu_);
+ TF_GUARDED_BY(mu_);
// The cached value of the buffer on the host, produced either from a call to
// CopyToHost or from a call to ToLiteral. Once a value has been fetched to
// the host, it persists until Delete() is called or the PyTpuBuffer is destroyed.
@@ -294,7 +294,7 @@
Status status;
std::shared_ptr<Literal> value;
};
- std::shared_ptr<HostValue> host_value_ ABSL_GUARDED_BY(mu_);
+ std::shared_ptr<HostValue> host_value_ TF_GUARDED_BY(mu_);
};
// Represents a compiled computation that can be executed given handles to
diff --git a/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc b/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
index a3aa173..c6c08b8 100644
--- a/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
+++ b/tensorflow/compiler/xla/python/tpu_driver/pod_tpu_driver.cc
@@ -114,7 +114,7 @@
}
void AddCallback(std::function<void(Status)> callback)
- ABSL_LOCKS_EXCLUDED(mu_) override {
+ TF_LOCKS_EXCLUDED(mu_) override {
bool all_events_completed = false;
{
absl::MutexLock l(&mu_);
@@ -129,7 +129,7 @@
}
private:
- void IncrementAndCheckComplete(Status s) ABSL_LOCKS_EXCLUDED(mu_) {
+ void IncrementAndCheckComplete(Status s) TF_LOCKS_EXCLUDED(mu_) {
std::vector<std::function<void(Status)>> callbacks;
{
absl::MutexLock l(&mu_);
@@ -356,14 +356,14 @@
ScheduleRequest(
operation_id,
- [this, core_id, region, num_bytes, operation_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- underlying_buffers_.insert(
- {operation_id,
- core_to_driver_[core_id]->Allocate(
- core_to_driver_core_[core_id], region, num_bytes, {})});
- return underlying_buffers_[operation_id]->OnReady();
- },
+ [this, core_id, region, num_bytes,
+ operation_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ underlying_buffers_.insert(
+ {operation_id,
+ core_to_driver_[core_id]->Allocate(core_to_driver_core_[core_id],
+ region, num_bytes, {})});
+ return underlying_buffers_[operation_id]->OnReady();
+ },
deps);
return absl::make_unique<PodBufferHandle>(this, operation_id, num_bytes,
@@ -378,14 +378,14 @@
ScheduleRequest(
operation_id,
- [this, core_id, region, shape, operation_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
- underlying_buffers_.insert(
- {operation_id,
- core_to_driver_[core_id]->Allocate(
- core_to_driver_core_[core_id], region, shape, {})});
- return underlying_buffers_[operation_id]->OnReady();
- },
+ [this, core_id, region, shape,
+ operation_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ underlying_buffers_.insert(
+ {operation_id,
+ core_to_driver_[core_id]->Allocate(core_to_driver_core_[core_id],
+ region, shape, {})});
+ return underlying_buffers_[operation_id]->OnReady();
+ },
deps);
return absl::make_unique<PodBufferHandle>(
@@ -411,23 +411,23 @@
ScheduleRequest(
operation_id,
- [this, core_id, region, children_ids, operation_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- std::vector<BufferHandle*> child_buffers;
- child_buffers.reserve(children_ids.size());
- for (size_t i = 0; i < children_ids.size(); ++i) {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, children_ids[i],
- operation_id);
- child_buffers.push_back(
- underlying_buffers_[children_ids[i]].get());
- }
+ [this, core_id, region, children_ids,
+ operation_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_)
+ -> std::shared_ptr<Event> {
+ std::vector<BufferHandle*> child_buffers;
+ child_buffers.reserve(children_ids.size());
+ for (size_t i = 0; i < children_ids.size(); ++i) {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, children_ids[i],
+ operation_id);
+ child_buffers.push_back(underlying_buffers_[children_ids[i]].get());
+ }
- underlying_buffers_.insert(
- {operation_id, core_to_driver_[core_id]->AllocateTuple(
- core_to_driver_core_[core_id], region,
- child_buffers, {})});
- return underlying_buffers_[operation_id]->OnReady();
- },
+ underlying_buffers_.insert(
+ {operation_id,
+ core_to_driver_[core_id]->AllocateTuple(
+ core_to_driver_core_[core_id], region, child_buffers, {})});
+ return underlying_buffers_[operation_id]->OnReady();
+ },
deps);
return absl::make_unique<PodBufferHandle>(this, operation_id, 0,
@@ -446,17 +446,17 @@
ScheduleRequest(
operation_id,
- [this, operation_id, op_id, core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
+ [this, operation_id, op_id,
+ core_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
- auto buf_iter = underlying_buffers_.find(op_id);
- auto underlying_hn = std::move(buf_iter->second);
- underlying_buffers_.erase(buf_iter);
+ auto buf_iter = underlying_buffers_.find(op_id);
+ auto underlying_hn = std::move(buf_iter->second);
+ underlying_buffers_.erase(buf_iter);
- return core_to_driver_[core_id]->Deallocate(
- std::move(underlying_hn), {});
- },
+ return core_to_driver_[core_id]->Deallocate(std::move(underlying_hn),
+ {});
+ },
deps);
return std::make_shared<PodEvent>(this, operation_id);
@@ -474,14 +474,14 @@
ScheduleRequest(
operation_id,
- [this, src, operation_id, op_id, core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
+ [this, src, operation_id, op_id,
+ core_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
- auto buf_iter = underlying_buffers_.find(op_id);
- return core_to_driver_[core_id]->TransferToDevice(
- src, buf_iter->second.get(), {});
- },
+ auto buf_iter = underlying_buffers_.find(op_id);
+ return core_to_driver_[core_id]->TransferToDevice(
+ src, buf_iter->second.get(), {});
+ },
deps);
return std::make_shared<PodEvent>(this, operation_id);
@@ -499,13 +499,13 @@
ScheduleRequest(
operation_id,
- [this, dst, operation_id, op_id, core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
- auto buf_iter = underlying_buffers_.find(op_id);
- return core_to_driver_[core_id]->TransferFromDevice(
- buf_iter->second.get(), dst, {});
- },
+ [this, dst, operation_id, op_id,
+ core_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, op_id, operation_id);
+ auto buf_iter = underlying_buffers_.find(op_id);
+ return core_to_driver_[core_id]->TransferFromDevice(
+ buf_iter->second.get(), dst, {});
+ },
deps);
return std::make_shared<PodEvent>(this, operation_id);
@@ -533,7 +533,7 @@
ScheduleRequest(
operation_id,
[this, operation_id, src_op_id, dst_op_id, dst_core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
CHECK_EXISTS_OR_RETURN(underlying_buffers_, src_op_id,
operation_id);
CHECK_EXISTS_OR_RETURN(underlying_buffers_, dst_op_id,
@@ -568,7 +568,7 @@
ScheduleRequest(
operation_id,
[this, operation_id, source,
- num_replicas]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
+ num_replicas]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
auto cph_iterator =
underlying_cph_
.insert(
@@ -603,20 +603,19 @@
ScheduleRequest(
operation_id,
- [this, operation_id, cph_op_id, core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- CHECK_EXISTS_OR_RETURN(underlying_cph_, cph_op_id, operation_id);
- auto cph_iter = underlying_cph_.find(cph_op_id);
+ [this, operation_id, cph_op_id,
+ core_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ CHECK_EXISTS_OR_RETURN(underlying_cph_, cph_op_id, operation_id);
+ auto cph_iter = underlying_cph_.find(cph_op_id);
- underlying_lph_.insert(
- {operation_id,
- core_to_driver_[core_id]->LoadProgram(
- core_to_driver_core_[core_id],
- cph_iter->second[core_to_driver_id_[core_id]].get(),
- {})});
+ underlying_lph_.insert(
+ {operation_id,
+ core_to_driver_[core_id]->LoadProgram(
+ core_to_driver_core_[core_id],
+ cph_iter->second[core_to_driver_id_[core_id]].get(), {})});
- return underlying_lph_[operation_id]->OnReady();
- },
+ return underlying_lph_[operation_id]->OnReady();
+ },
deps);
return absl::make_unique<PodLoadedProgramHandle>(this, operation_id,
@@ -637,16 +636,16 @@
ScheduleRequest(
operation_id,
- [this, operation_id, op_id, core_id]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- CHECK_EXISTS_OR_RETURN(underlying_lph_, op_id, operation_id);
- auto lph_iter = underlying_lph_.find(op_id);
- auto event = core_to_driver_[core_id]->UnloadProgram(
- std::move(lph_iter->second), {});
- underlying_lph_.erase(lph_iter);
+ [this, operation_id, op_id,
+ core_id]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
+ CHECK_EXISTS_OR_RETURN(underlying_lph_, op_id, operation_id);
+ auto lph_iter = underlying_lph_.find(op_id);
+ auto event = core_to_driver_[core_id]->UnloadProgram(
+ std::move(lph_iter->second), {});
+ underlying_lph_.erase(lph_iter);
- return event;
- },
+ return event;
+ },
deps);
return std::make_shared<PodEvent>(this, operation_id);
@@ -686,32 +685,31 @@
ScheduleRequest(
operation_id,
[this, operation_id, core_id, op_id, input_op_ids, output_op_ids,
- device_assignment]()
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) -> std::shared_ptr<Event> {
- std::vector<BufferHandle*> underlying_inputs;
- std::vector<BufferHandle*> underlying_outputs;
+ device_assignment]() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_)
+ -> std::shared_ptr<Event> {
+ std::vector<BufferHandle*> underlying_inputs;
+ std::vector<BufferHandle*> underlying_outputs;
- underlying_inputs.reserve(input_op_ids.size());
- for (auto input_op_id : input_op_ids) {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, input_op_id,
- operation_id);
- underlying_inputs.push_back(
- underlying_buffers_[input_op_id].get());
- }
- underlying_outputs.reserve(output_op_ids.size());
- for (auto output_op_id : output_op_ids) {
- CHECK_EXISTS_OR_RETURN(underlying_buffers_, output_op_id,
- operation_id);
- underlying_outputs.push_back(
- underlying_buffers_[output_op_id].get());
- }
+ underlying_inputs.reserve(input_op_ids.size());
+ for (auto input_op_id : input_op_ids) {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, input_op_id,
+ operation_id);
+ underlying_inputs.push_back(underlying_buffers_[input_op_id].get());
+ }
+ underlying_outputs.reserve(output_op_ids.size());
+ for (auto output_op_id : output_op_ids) {
+ CHECK_EXISTS_OR_RETURN(underlying_buffers_, output_op_id,
+ operation_id);
+ underlying_outputs.push_back(
+ underlying_buffers_[output_op_id].get());
+ }
- CHECK_EXISTS_OR_RETURN(underlying_lph_, op_id, operation_id);
- LoadedProgramHandle* handle = underlying_lph_[op_id].get();
- return core_to_driver_[core_id]->ExecuteProgram(
- handle, underlying_inputs, underlying_outputs,
- device_assignment, {});
- },
+ CHECK_EXISTS_OR_RETURN(underlying_lph_, op_id, operation_id);
+ LoadedProgramHandle* handle = underlying_lph_[op_id].get();
+ return core_to_driver_[core_id]->ExecuteProgram(
+ handle, underlying_inputs, underlying_outputs, device_assignment,
+ {});
+ },
deps);
return std::make_shared<PodEvent>(this, operation_id);
@@ -724,7 +722,7 @@
// Helper methods for Event scheduling
absl::optional<Status> WaitForEvent(int64_t event_id, absl::Duration duration)
- ABSL_LOCKS_EXCLUDED(mu_) {
+ TF_LOCKS_EXCLUDED(mu_) {
std::shared_ptr<Event> underlying_event;
{
@@ -777,7 +775,7 @@
}
void AddCallbackForEvent(int64_t event_id, std::function<void(Status)> fn)
- ABSL_LOCKS_EXCLUDED(mu_) {
+ TF_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
auto event = events_.find(event_id);
@@ -800,7 +798,7 @@
xla::Status GetCompiledProgramShape(int64_t op_id,
xla::ProgramShapeProto* program_shape)
- ABSL_LOCKS_EXCLUDED(mu_) {
+ TF_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
auto done = [this, op_id]() {
@@ -855,8 +853,7 @@
// EventCompleted is executed on the event_thread_ worker thread. We want
// to propagate the fact that the event is completed to any subsequent events
// that might depend on this event.
- void EventCompleted(int64_t event_id, Status status)
- ABSL_LOCKS_EXCLUDED(mu_) {
+ void EventCompleted(int64_t event_id, Status status) TF_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
absl::btree_map<int64_t, std::unique_ptr<EventInFlight>>::iterator
@@ -896,7 +893,7 @@
void ScheduleRequest(int64_t operation_id,
std::function<std::shared_ptr<Event>(void)> fn,
const absl::flat_hash_set<int64_t>& deps)
- ABSL_LOCKS_EXCLUDED(mu_) {
+ TF_LOCKS_EXCLUDED(mu_) {
absl::MutexLock l(&mu_);
absl::btree_map<int64_t, std::unique_ptr<EventInFlight>>::iterator event;
absl::flat_hash_set<int64_t> incomplete_deps;
diff --git a/tensorflow/compiler/xla/python/xla_compiler.cc b/tensorflow/compiler/xla/python/xla_compiler.cc
index 34361d5..5ab106f 100644
--- a/tensorflow/compiler/xla/python/xla_compiler.cc
+++ b/tensorflow/compiler/xla/python/xla_compiler.cc
@@ -58,7 +58,7 @@
struct Uniquer {
absl::Mutex mu;
- NameUniquer name_uniquer ABSL_GUARDED_BY(mu);
+ NameUniquer name_uniquer TF_GUARDED_BY(mu);
};
Uniquer* GetUniquer() {
diff --git a/tensorflow/compiler/xla/reference_util.h b/tensorflow/compiler/xla/reference_util.h
index 10e71fe..66cf64f 100644
--- a/tensorflow/compiler/xla/reference_util.h
+++ b/tensorflow/compiler/xla/reference_util.h
@@ -31,6 +31,7 @@
#include "tensorflow/compiler/xla/service/hlo_evaluator.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/rpc/BUILD b/tensorflow/compiler/xla/rpc/BUILD
index 09b63a4..7911a7d 100644
--- a/tensorflow/compiler/xla/rpc/BUILD
+++ b/tensorflow/compiler/xla/rpc/BUILD
@@ -42,6 +42,7 @@
":xla_service_proto_cc",
"//tensorflow/compiler/xla:service_interface",
"//tensorflow/compiler/xla:xla_data_proto_cc",
+ "//tensorflow/core:lib",
"//tensorflow/core/distributed_runtime/rpc:grpc_util",
],
)
diff --git a/tensorflow/compiler/xla/rpc/grpc_stub.h b/tensorflow/compiler/xla/rpc/grpc_stub.h
index 9362b04..8bc09b9 100644
--- a/tensorflow/compiler/xla/rpc/grpc_stub.h
+++ b/tensorflow/compiler/xla/rpc/grpc_stub.h
@@ -19,6 +19,7 @@
#include "tensorflow/compiler/xla/rpc/xla_service.grpc.pb.h"
#include "tensorflow/compiler/xla/service_interface.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/BUILD b/tensorflow/compiler/xla/service/BUILD
index 9efbe39..850b39d 100644
--- a/tensorflow/compiler/xla/service/BUILD
+++ b/tensorflow/compiler/xla/service/BUILD
@@ -3319,6 +3319,7 @@
"//tensorflow/compiler/xla:status",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
+ "//tensorflow/core/platform:macros",
],
)
@@ -3340,6 +3341,7 @@
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:window_util",
+ "//tensorflow/core/platform:macros",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
@@ -3529,6 +3531,8 @@
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/core:framework_lite",
"//tensorflow/core:lib_proto_parsing",
+ "//tensorflow/core:tflite_portable_logging",
+ "//tensorflow/core/platform:macros",
"//tensorflow/stream_executor:stream_header",
],
)
@@ -3755,6 +3759,7 @@
"//tensorflow/compiler/xla:shape_util",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto_cc",
+ "//tensorflow/core:lib",
"//tensorflow/core:lib_internal",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:span",
@@ -3776,6 +3781,8 @@
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/core:lib",
"//tensorflow/core/platform:logging",
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@@ -4591,6 +4598,7 @@
"//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
+ "//tensorflow/core:lib",
"@com_google_absl//absl/container:flat_hash_set",
],
)
@@ -5560,6 +5568,7 @@
deps = [
":executable",
"//tensorflow/compiler/xla:status",
+ "//tensorflow/core:lib",
"@com_google_absl//absl/strings:str_format",
],
)
diff --git a/tensorflow/compiler/xla/service/allocation_tracker.h b/tensorflow/compiler/xla/service/allocation_tracker.h
index bab41fa..0a220e3 100644
--- a/tensorflow/compiler/xla/service/allocation_tracker.h
+++ b/tensorflow/compiler/xla/service/allocation_tracker.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -86,7 +87,7 @@
// Internal helper which resolves the given GlobalDataHandle to a
// list of ScopedShapedBuffers.
StatusOr<std::vector<const ShapedBuffer*>> ResolveInternal(
- const GlobalDataHandle& data) const ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ const GlobalDataHandle& data) const TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Internal helper which registers a vector of shaped buffers, one per
// replica. ShapedBufferTy is either ScopedShapedBuffer or ShapedBuffer. If
@@ -95,19 +96,19 @@
template <typename ShapedBufferTy>
StatusOr<GlobalDataHandle> RegisterInternal(
std::vector<ShapedBufferTy> replicated_buffers, const std::string& tag)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Adds the given device address to the allocation tracker, or, if it already
// exists, increments its reference count.
void AddAllocationOrIncrementRefCount(se::DeviceMemoryBase device_memory,
int device_ordinal)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Decrements the reference count of the given device memory. Then, if it is
// zero, deallocates the memory.
Status DecrementRefCount(se::DeviceMemoryBase device_memory,
int device_ordinal)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// A map from device memory opaque value to allocation. One such map is
// maintained per device ordinal.
@@ -121,11 +122,11 @@
// The next handle to assign to an allocation, guarded by the same mutex as
// the mapping, as they'll be mutated at the same time.
- int64_t next_handle_ ABSL_GUARDED_BY(mutex_);
+ int64_t next_handle_ TF_GUARDED_BY(mutex_);
// A map from device ordinal to AllocationMap.
absl::flat_hash_map<int, AllocationMap> opaque_to_allocation_map_
- ABSL_GUARDED_BY(mutex_);
+ TF_GUARDED_BY(mutex_);
// A map from data handle to a vector of shaped buffers that represent the
// buffers for different replicas.
@@ -145,7 +146,7 @@
// free'd when both the view *and* the original tuple are Unregistered. This
// refcounting is managed in opaque_to_allocation_map_.
absl::flat_hash_map<int64_t, std::vector<std::unique_ptr<ShapedBuffer>>>
- handle_to_shaped_buffers_ ABSL_GUARDED_BY(mutex_);
+ handle_to_shaped_buffers_ TF_GUARDED_BY(mutex_);
AllocationTracker(const AllocationTracker&) = delete;
AllocationTracker& operator=(const AllocationTracker&) = delete;
diff --git a/tensorflow/compiler/xla/service/backend.h b/tensorflow/compiler/xla/service/backend.h
index 8d00450..6eb52c5 100644
--- a/tensorflow/compiler/xla/service/backend.h
+++ b/tensorflow/compiler/xla/service/backend.h
@@ -179,7 +179,7 @@
// Mapping from stream executor to stream pools, used by `BorrowStream` above.
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<StreamPool>>
- stream_pools_ ABSL_GUARDED_BY(mu_);
+ stream_pools_ TF_GUARDED_BY(mu_);
// The default memory allocator to use.
// This must be a shared_ptr, as this is passed all the way down to the
diff --git a/tensorflow/compiler/xla/service/buffer_assignment.h b/tensorflow/compiler/xla/service/buffer_assignment.h
index 3469245..3749807 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment.h
+++ b/tensorflow/compiler/xla/service/buffer_assignment.h
@@ -39,6 +39,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/buffer_assignment_test.cc b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
index 2d1f6ed..f484173 100644
--- a/tensorflow/compiler/xla/service/buffer_assignment_test.cc
+++ b/tensorflow/compiler/xla/service/buffer_assignment_test.cc
@@ -44,6 +44,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace {
diff --git a/tensorflow/compiler/xla/service/buffer_value.h b/tensorflow/compiler/xla/service/buffer_value.h
index 4bcd03d..bc7cd62 100644
--- a/tensorflow/compiler/xla/service/buffer_value.h
+++ b/tensorflow/compiler/xla/service/buffer_value.h
@@ -26,6 +26,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/channel_tracker.h b/tensorflow/compiler/xla/service/channel_tracker.h
index 8367254..725497c 100644
--- a/tensorflow/compiler/xla/service/channel_tracker.h
+++ b/tensorflow/compiler/xla/service/channel_tracker.h
@@ -25,6 +25,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -66,24 +67,24 @@
// Bumps the next_channel_ number and returns the allocated number
// wrapped in a ChannelHandle.
ChannelHandle AllocateHandle(ChannelHandle::ChannelType type)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
Status RegisterSendInternal(const ChannelHandle& handle)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
Status RegisterRecvInternal(const ChannelHandle& handle)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
+ TF_EXCLUSIVE_LOCKS_REQUIRED(channel_mutex_);
// Guards the channel mapping.
tensorflow::mutex channel_mutex_;
// The next sequence number to assign to a channel.
- int64_t next_channel_ ABSL_GUARDED_BY(channel_mutex_);
+ int64_t next_channel_ TF_GUARDED_BY(channel_mutex_);
// Mapping from ChannelHandle value to the corresponding registered
// Channel object.
absl::flat_hash_map<int64_t, Channel> opaque_to_channel_
- ABSL_GUARDED_BY(channel_mutex_);
+ TF_GUARDED_BY(channel_mutex_);
ChannelTracker(const ChannelTracker&) = delete;
ChannelTracker& operator=(const ChannelTracker&) = delete;
diff --git a/tensorflow/compiler/xla/service/collective_ops_utils.h b/tensorflow/compiler/xla/service/collective_ops_utils.h
index 16f9eb3..8299446 100644
--- a/tensorflow/compiler/xla/service/collective_ops_utils.h
+++ b/tensorflow/compiler/xla/service/collective_ops_utils.h
@@ -338,9 +338,9 @@
tensorflow::mutex mu_;
- bool initialized_ ABSL_GUARDED_BY(mu_) = false;
+ bool initialized_ TF_GUARDED_BY(mu_) = false;
- std::vector<I> participants_ ABSL_GUARDED_BY(mu_);
+ std::vector<I> participants_ TF_GUARDED_BY(mu_);
private:
// Runs the all-reduce on the given thread. If successful, returns
diff --git a/tensorflow/compiler/xla/service/compilation_cache.h b/tensorflow/compiler/xla/service/compilation_cache.h
index 6abc462..d8f3a50 100644
--- a/tensorflow/compiler/xla/service/compilation_cache.h
+++ b/tensorflow/compiler/xla/service/compilation_cache.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/service/hlo_module_config.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -50,7 +51,7 @@
using CacheKey = int64_t;
absl::flat_hash_map<CacheKey, std::shared_ptr<Executable>> cache_
- ABSL_GUARDED_BY(mutex_);
+ TF_GUARDED_BY(mutex_);
private:
CompilationCache(const CompilationCache&) = delete;
diff --git a/tensorflow/compiler/xla/service/compiler.cc b/tensorflow/compiler/xla/service/compiler.cc
index 2677984..4ede4e0 100644
--- a/tensorflow/compiler/xla/service/compiler.cc
+++ b/tensorflow/compiler/xla/service/compiler.cc
@@ -21,6 +21,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/computation_placer.h b/tensorflow/compiler/xla/service/computation_placer.h
index e37e07c..f599374 100644
--- a/tensorflow/compiler/xla/service/computation_placer.h
+++ b/tensorflow/compiler/xla/service/computation_placer.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/stream_executor/platform.h"
diff --git a/tensorflow/compiler/xla/service/conditional_code_motion_test.cc b/tensorflow/compiler/xla/service/conditional_code_motion_test.cc
index 21e7c4b..a45096c 100644
--- a/tensorflow/compiler/xla/service/conditional_code_motion_test.cc
+++ b/tensorflow/compiler/xla/service/conditional_code_motion_test.cc
@@ -1407,7 +1407,7 @@
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case 1:
// After flipping the corresponding decisions,
// instructions have been moved inside the conditionals.
@@ -1535,7 +1535,7 @@
HloInstruction* root = module->entry_computation()->root_instruction();
switch (flip_start) {
case 0:
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case 1:
// After flipping the corresponding decisions,
// instructions have been moved inside the conditionals.
diff --git a/tensorflow/compiler/xla/service/cpu/BUILD b/tensorflow/compiler/xla/service/cpu/BUILD
index 1f80ff4..6341661 100644
--- a/tensorflow/compiler/xla/service/cpu/BUILD
+++ b/tensorflow/compiler/xla/service/cpu/BUILD
@@ -152,6 +152,7 @@
deps = [
"//tensorflow/compiler/xla:cpu_function_runtime",
"//tensorflow/compiler/xla/service:buffer_assignment",
+ "//tensorflow/core:lib",
"@com_google_absl//absl/types:span",
],
)
@@ -169,7 +170,6 @@
":cpu_layout_assignment",
":cpu_options",
":dot_op_emitter",
- "@com_google_absl//absl/base:dynamic_annotations",
":ir_emission_utils",
":ir_emitter",
":parallel_task_assignment",
@@ -192,7 +192,6 @@
"//tensorflow/compiler/xla/service:bitcast_dtypes_expander",
"//tensorflow/compiler/xla/service:copy_insertion",
"//tensorflow/compiler/xla/service:dump",
- "@com_google_absl//absl/base:core_headers",
"//tensorflow/compiler/xla/service:result_caster",
"//tensorflow/compiler/xla/service:topk_rewriter",
"//tensorflow/compiler/xla/service:map_inliner",
@@ -259,6 +258,7 @@
"//tensorflow/compiler/xla/service:zero_sized_hlo_elimination",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_command_line_options",
"//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
+ "//tensorflow/core:lib",
"//tensorflow/core/platform:stream_executor_no_cuda",
"@llvm-project//llvm:Core",
"@llvm-project//llvm:Object",
@@ -342,7 +342,10 @@
"runtime_fp16.h",
],
copts = runtime_copts(),
- deps = ["@com_google_absl//absl/base:core_headers"],
+ deps = [
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
+ ],
)
cc_library(
@@ -354,7 +357,10 @@
"runtime_pow.h",
],
copts = runtime_copts(),
- deps = ["@com_google_absl//absl/base:core_headers"],
+ deps = [
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
+ ],
)
cc_library(
@@ -383,9 +389,11 @@
"//tensorflow/compiler/xla/service:xla_debug_info_manager",
"//tensorflow/core:lib",
"//tensorflow/core/platform:logging",
+ "//tensorflow/core/platform:macros",
"//tensorflow/core/platform:mutex",
"//tensorflow/core/platform:platform_port",
"//tensorflow/core/platform:stream_executor_no_cuda",
+ "//tensorflow/core/platform:types",
"//tensorflow/stream_executor:device_memory_allocator",
"//tensorflow/stream_executor/host:host_stream",
"@com_google_absl//absl/cleanup",
@@ -443,6 +451,8 @@
"//tensorflow/core:lib",
"//tensorflow/core/lib/math:math_util",
"//tensorflow/core/platform:logging",
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/strings",
@@ -629,6 +639,7 @@
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/compiler/xla:refcounting_hash_map",
"//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:status_macros",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto_cc",
@@ -636,14 +647,16 @@
"//tensorflow/compiler/xla/service:computation_placer",
"//tensorflow/compiler/xla/service:hlo",
"//tensorflow/compiler/xla/service:hlo_parser",
+ "//tensorflow/compiler/xla/service/llvm_ir:llvm_util",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:logging",
+ "//tensorflow/core/platform:macros",
"//tensorflow/core/platform:mutex",
"//tensorflow/core/platform:platform_port",
"//tensorflow/core/platform:status",
+ "//tensorflow/core/platform:types",
"//tensorflow/core/profiler/lib:traceme",
"//tensorflow/stream_executor",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
@@ -685,10 +698,10 @@
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/core/kernels:eigen_contraction_kernel",
"//tensorflow/core/kernels:eigen_helpers",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:mutex",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -706,10 +719,10 @@
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/core/kernels:eigen_contraction_kernel",
"//tensorflow/core/kernels:eigen_helpers",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:mutex",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -721,8 +734,7 @@
visibility = ["//visibility:public"],
deps = [
"//tensorflow/compiler/xla/service:custom_call_status_internal",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
+ "//tensorflow/core/platform:dynamic_annotations",
],
)
@@ -738,8 +750,8 @@
":runtime_conv2d",
":runtime_single_threaded_conv2d",
"//tensorflow/compiler/xla:executable_run_options",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:types",
"//tensorflow/core/kernels:eigen_helpers",
"//third_party/eigen3",
] + mkl_deps(),
@@ -757,11 +769,11 @@
deps = [
":runtime_lightweight_check",
"//tensorflow/compiler/xla:executable_run_options",
- "//tensorflow/compiler/xla:types",
+ "//tensorflow/core/framework:numeric_types",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:mutex",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -775,10 +787,10 @@
":runtime_lightweight_check",
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/core/kernels:eigen_contraction_kernel",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:mutex",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -790,6 +802,7 @@
visibility = ["//visibility:public"],
deps = [
"//tensorflow/compiler/xla:executable_run_options",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
] + mkl_deps(),
)
@@ -807,9 +820,9 @@
":runtime_lightweight_check",
"//tensorflow/core/kernels:eigen_contraction_kernel",
"//tensorflow/core/kernels:eigen_helpers",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -826,9 +839,9 @@
":runtime_lightweight_check",
"//tensorflow/core/kernels:eigen_contraction_kernel",
"//tensorflow/core/kernels:eigen_helpers",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -842,11 +855,11 @@
copts = runtime_copts(),
visibility = ["//visibility:public"],
deps = [
- "//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto_cc",
+ "//tensorflow/core/framework:numeric_types",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -859,8 +872,9 @@
visibility = ["//visibility:public"],
deps = [
"//tensorflow/core/kernels:eigen_contraction_kernel",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
],
)
@@ -871,8 +885,10 @@
copts = runtime_copts(),
visibility = ["//visibility:public"],
deps = [
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:dynamic_annotations",
],
)
@@ -883,7 +899,9 @@
copts = runtime_copts(),
visibility = ["//visibility:public"],
deps = [
- "@com_google_absl//absl/base:dynamic_annotations",
+ "//tensorflow/core/platform:dynamic_annotations",
+ "//tensorflow/core/platform:macros",
+ "//tensorflow/core/platform:types",
],
)
@@ -897,10 +915,10 @@
"//tensorflow/compiler/xla:executable_run_options",
"//tensorflow/compiler/xla/service:custom_call_status_internal",
"//tensorflow/core/platform:blocking_counter",
+ "//tensorflow/core/platform:dynamic_annotations",
"//tensorflow/core/platform:logging",
+ "//tensorflow/core/platform:types",
"//third_party/eigen3",
- "@com_google_absl//absl/base:core_headers",
- "@com_google_absl//absl/base:dynamic_annotations",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/strings:str_format",
],
@@ -940,11 +958,11 @@
],
deps = [
":runtime_single_threaded_fft",
- "//tensorflow/compiler/xla:types",
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/compiler/xla/tests:xla_internal_test_main",
"//tensorflow/core:lib",
"//tensorflow/core:test",
+ "//tensorflow/core/framework:numeric_types",
"//third_party/eigen3",
],
)
@@ -1181,7 +1199,6 @@
hdrs = ["orc_jit_memory_mapper.h"],
deps = [
"//tensorflow/core:lib",
- "@com_google_absl//absl/base:core_headers",
"@llvm-project//llvm:ExecutionEngine",
],
)
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
index f93d418..58ce23a 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.cc
@@ -29,7 +29,6 @@
// IWYU pragma: no_include "llvm/Config/Disassemblers.def.inc"
// IWYU pragma: no_include "llvm/Config/Targets.def.inc"
#include "absl/base/call_once.h"
-#include "absl/base/dynamic_annotations.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "llvm/ADT/StringRef.h"
@@ -141,6 +140,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
namespace {
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_compiler.h b/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
index bb476d6..6e23411 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_compiler.h
@@ -26,6 +26,7 @@
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/llvm_compiler.h"
#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_executable.h b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
index 75806f7..17479b2 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_executable.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_executable.h
@@ -34,6 +34,7 @@
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/device_memory_allocator.h"
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_runtime.cc b/tensorflow/compiler/xla/service/cpu/cpu_runtime.cc
index a9cc2ad..fd00d74 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_runtime.cc
+++ b/tensorflow/compiler/xla/service/cpu/cpu_runtime.cc
@@ -20,10 +20,7 @@
#include <cstring>
#include <functional>
#include <limits>
-#include <string>
-#include <utility>
-#include "absl/base/dynamic_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
@@ -38,7 +35,9 @@
#include "tensorflow/compiler/xla/service/hlo_parser.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/profiler/lib/traceme.h"
@@ -247,7 +246,7 @@
extern "C" {
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int __xla_cpu_runtime_PrintfToStderr(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY int __xla_cpu_runtime_PrintfToStderr(
const char* format, ...) {
VLOG(3) << "__xla_cpu_runtime_PrintfToStderr " << format;
va_list args;
@@ -257,14 +256,14 @@
return result;
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY int64_t __xla_cpu_runtime_TracingStart(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY int64_t __xla_cpu_runtime_TracingStart(
const void* /* xla::ExecutableRunOptions* */ run_options_ptr,
const char* name) {
VLOG(3) << "TracingStart " << name;
return tensorflow::profiler::TraceMe::ActivityStart(name);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TracingEnd(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TracingEnd(
const void* /* xla::ExecutableRunOptions* */ run_options_ptr, int64_t id) {
VLOG(3) << "TracingEnd " << id;
tensorflow::profiler::TraceMe::ActivityEnd(id);
@@ -272,7 +271,7 @@
} // extern "C"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void*
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void*
__xla_cpu_runtime_AcquireInfeedBufferForDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape, int32_t shape_length) {
@@ -295,7 +294,7 @@
return buffer->data();
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_ReleaseInfeedBufferAfterDequeue(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
@@ -313,7 +312,7 @@
std::move(shape));
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void*
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void*
__xla_cpu_runtime_AcquireOutfeedBufferForPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
const void* shape_ptr, int32_t shape_length) {
@@ -336,7 +335,7 @@
return buffer->data();
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_ReleaseOutfeedBufferAfterPopulation(
const xla::ExecutableRunOptions* run_options, int32_t buffer_length,
void* buffer_ptr, const void* shape_ptr, int32_t shape_length) {
@@ -647,7 +646,7 @@
} // namespace
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_AllToAll(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_AllToAll(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, const void* replica_groups_str,
int32_t replica_groups_str_size, int32_t num_buffers, int64_t buffer_size,
@@ -688,7 +687,7 @@
.status());
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_AllReduce(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_AllReduce(
const xla::ExecutableRunOptions* run_options,
const void* replica_groups_str, int32_t replica_groups_str_size,
int32_t channel_id_present, int64_t op_id, int32_t reduction_kind,
@@ -738,7 +737,7 @@
.status());
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_ReplicaId(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_ReplicaId(
const xla::ExecutableRunOptions* run_options, void* output_buffer) {
int device_ordinal = GetDeviceOrdinal(run_options);
int32_t replica_id =
@@ -748,7 +747,7 @@
std::memcpy(output_buffer, &replica_id, 4);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_CollectivePermute(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_CollectivePermute(
const xla::ExecutableRunOptions* run_options, int32_t channel_id_present,
int64_t op_id, int32_t byte_size, void* input_buffer, void* output_buffer,
const void* source_target_pairs, int32_t source_target_pairs_size) {
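The cpu_runtime.cc hunks above move the extern "C" runtime entry points from ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY to TF_ATTRIBUTE_NO_SANITIZE_MEMORY. Both annotations serve the same purpose: the buffers handed to these symbols come from JIT-compiled code that MSan never instrumented, so the functions opt out of memory-sanitizer checking. A minimal sketch of how such a macro is commonly defined (illustrative only, not the literal tensorflow/core/platform/macros.h contents):
// Sketch: enable the attribute only when building under MemorySanitizer.
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#define TF_ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
#endif
#endif
#ifndef TF_ATTRIBUTE_NO_SANITIZE_MEMORY
#define TF_ATTRIBUTE_NO_SANITIZE_MEMORY  // no-op when MSan is not enabled
#endif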
diff --git a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
index 92e4bde..de5d5e3 100644
--- a/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/cpu/cpu_transfer_manager.h
@@ -25,6 +25,7 @@
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/device_memory.h"
diff --git a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
index 4bd6b6d..a4b0f51 100644
--- a/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/cpu/elemental_ir_emitter.cc
@@ -43,7 +43,7 @@
cast_result_to_fp16 = true;
lhs = FPCast(lhs, b()->getFloatTy());
rhs = FPCast(rhs, b()->getFloatTy());
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case F32:
function_name = "atan2f";
break;
@@ -78,7 +78,7 @@
case F16:
cast_result_to_fp16 = true;
value = FPCast(value, b()->getFloatTy());
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case F32:
function_name = "tanhf";
break;
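The two elemental_ir_emitter.cc hunks above only rename the fall-through marker: F16 operands are widened to F32 and then handled by the F32 case, so the case label deliberately falls through. A sketch of how a fall-through macro of this kind is typically defined (an assumption, not the exact tensorflow/core/platform/macros.h definition):
// Sketch: prefer the clang attribute when available, otherwise a plain no-op.
#if defined(__clang__) && defined(__has_cpp_attribute)
#if __has_cpp_attribute(clang::fallthrough)
#define TF_FALLTHROUGH_INTENDED [[clang::fallthrough]]
#endif
#endif
#ifndef TF_FALLTHROUGH_INTENDED
#define TF_FALLTHROUGH_INTENDED do {} while (0)  // plain no-op fallback
#endif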
diff --git a/tensorflow/compiler/xla/service/cpu/ir_emitter.h b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
index 8161a40..0a5002c 100644
--- a/tensorflow/compiler/xla/service/cpu/ir_emitter.h
+++ b/tensorflow/compiler/xla/service/cpu/ir_emitter.h
@@ -52,6 +52,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace cpu {
diff --git a/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc
index a5c2937..244d7d4 100644
--- a/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc
+++ b/tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.cc
@@ -14,8 +14,6 @@
==============================================================================*/
#include "tensorflow/compiler/xla/service/cpu/orc_jit_memory_mapper.h"
-
-#include "absl/base/thread_annotations.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
@@ -25,7 +23,7 @@
static tensorflow::mutex mapper_instance_mutex(tensorflow::LINKER_INITIALIZED);
static llvm::SectionMemoryManager::MemoryMapper* mapper_instance
- ABSL_GUARDED_BY(mapper_instance_mutex) = nullptr;
+ TF_GUARDED_BY(mapper_instance_mutex) = nullptr;
llvm::SectionMemoryManager::MemoryMapper* GetInstance() {
tensorflow::mutex_lock lock(mapper_instance_mutex);
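orc_jit_memory_mapper.cc keeps the same locking discipline and only switches the annotation macro: mapper_instance may only be touched while mapper_instance_mutex is held, and TF_GUARDED_BY lets clang's -Wthread-safety analysis verify that statically. A sketch of such an annotation macro, assuming clang (the real one lives in tensorflow/core/platform/thread_annotations.h):
// Sketch: map the annotation onto clang's thread-safety attribute.
#if defined(__clang__)
#define TF_GUARDED_BY(x) __attribute__((guarded_by(x)))
#else
#define TF_GUARDED_BY(x)  // no static checking on other compilers
#endif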
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_conv2d.cc b/tensorflow/compiler/xla/service/cpu/runtime_conv2d.cc
index 108221c..1820f85 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_conv2d.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_conv2d.cc
@@ -17,12 +17,12 @@
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_conv_impl.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_lightweight_check.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv2DF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv2DF32(
const void* run_options_ptr, float* out, float* lhs, float* rhs,
int64_t input_batch, int64_t input_rows, int64_t input_cols,
int64_t input_channels, int64_t kernel_rows, int64_t kernel_cols,
@@ -44,7 +44,7 @@
feature_group_count);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv2DF16(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv2DF16(
const void* run_options_ptr, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t input_batch, int64_t input_rows,
int64_t input_cols, int64_t input_channels, int64_t kernel_rows,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_conv2d.h b/tensorflow/compiler/xla/service/cpu/runtime_conv2d.h
index 20f8877..dbca529 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_conv2d.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_conv2d.h
@@ -16,9 +16,8 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_CONV2D_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_CONV2D_H_
-#include <stdint.h>
-
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_conv2d_mkl.cc b/tensorflow/compiler/xla/service/cpu/runtime_conv2d_mkl.cc
index eb325ae..324dadb 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_conv2d_mkl.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_conv2d_mkl.cc
@@ -16,8 +16,8 @@
#include <iostream>
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
#ifdef ENABLE_MKL
#include <omp.h>
@@ -151,7 +151,7 @@
} // namespace
#endif // ENABLE_MKL
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLConv2DF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLConv2DF32(
const void* run_options_ptr, float* out, float* lhs, float* rhs,
int64_t input_batch, int64_t input_rows, int64_t input_cols,
int64_t input_channels, int64_t kernel_rows, int64_t kernel_cols,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_conv3d.cc b/tensorflow/compiler/xla/service/cpu/runtime_conv3d.cc
index f35d710..a025bf7 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_conv3d.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_conv3d.cc
@@ -17,12 +17,12 @@
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_conv_impl.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_lightweight_check.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv3DF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv3DF32(
const void* run_options_ptr, float* out, float* lhs, float* rhs,
int64_t input_batch, int64_t input_x, int64_t input_y, int64_t input_z,
int64_t input_channels, int64_t kernel_x, int64_t kernel_y,
@@ -47,7 +47,7 @@
rhs_z_dilation, feature_group_count);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv3DF16(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenConv3DF16(
const void* run_options_ptr, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t input_batch, int64_t input_x, int64_t input_y,
int64_t input_z, int64_t input_channels, int64_t kernel_x, int64_t kernel_y,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_conv3d.h b/tensorflow/compiler/xla/service/cpu/runtime_conv3d.h
index 158b08a..dfedaf4 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_conv3d.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_conv3d.h
@@ -16,9 +16,8 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_CONV3D_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_CONV3D_H_
-#include <stdint.h>
-
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_custom_call_status.cc b/tensorflow/compiler/xla/service/cpu/runtime_custom_call_status.cc
index 83759e2..773ac0b 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_custom_call_status.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_custom_call_status.cc
@@ -14,10 +14,10 @@
==============================================================================*/
#include "tensorflow/compiler/xla/service/cpu/runtime_custom_call_status.h"
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/service/custom_call_status_internal.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY bool __xla_cpu_runtime_StatusIsSuccess(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY bool __xla_cpu_runtime_StatusIsSuccess(
const void* status_ptr) {
auto status = static_cast<const XlaCustomCallStatus*>(status_ptr);
return !xla::CustomCallStatusGetMessage(status).has_value();
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fft.cc b/tensorflow/compiler/xla/service/cpu/runtime_fft.cc
index 6e39a05..cf74dfc 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fft.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fft.cc
@@ -17,20 +17,21 @@
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_lightweight_check.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenFft(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenFft(
const void* run_options_ptr, void* out, void* operand, int32_t fft_type,
int32_t double_precision, int32_t fft_rank, int64_t input_batch,
int64_t fft_length0, int64_t fft_length1, int64_t fft_length2) {
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
- xla::EigenFftImpl(*run_options->intra_op_thread_pool(), out, operand,
- static_cast<xla::internal::FftType>(fft_type),
- static_cast<bool>(double_precision), fft_rank, input_batch,
- fft_length0, fft_length1, fft_length2);
+ tensorflow::xla::EigenFftImpl(
+ *run_options->intra_op_thread_pool(), out, operand,
+ static_cast<tensorflow::xla::FftType>(fft_type),
+ static_cast<bool>(double_precision), fft_rank, input_batch, fft_length0,
+ fft_length1, fft_length2);
}
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fft.h b/tensorflow/compiler/xla/service/cpu/runtime_fft.h
index 76c1aa0..199ea7c 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fft.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fft.h
@@ -16,7 +16,7 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FFT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FFT_H_
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h b/tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h
index 45ccc7f..cda6450 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h
@@ -19,12 +19,13 @@
#include "third_party/eigen3/Eigen/Core"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
-#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/framework/numeric_types.h"
+// 'tensorflow' namespace is used so that int64_t and other types don't require
+// qualification.
+namespace tensorflow {
namespace xla {
-namespace internal {
-
enum class FftType : int32_t {
FFT = 0, // Forward FFT; complex in, complex out.
IFFT = 1, // Inverse FFT; complex in, complex out.
@@ -34,6 +35,8 @@
};
inline constexpr int FftTypeArraySize() { return 4; }
+namespace internal {
+
// Computes either a forward or reverse complex-to-complex FFT.
template <bool Forward, int FFTRank, typename EigenDevice, typename Complex>
void EigenFftC2C(const EigenDevice& device, Complex* out, Complex* operand,
@@ -238,9 +241,9 @@
template <typename EigenDevice>
void EigenFftImpl(const EigenDevice& device, void* out, void* operand,
- internal::FftType fft_type, bool double_precision,
- int32_t fft_rank, int64_t input_batch, int64_t fft_length0,
- int64_t fft_length1, int64_t fft_length2) {
+ FftType fft_type, bool double_precision, int32_t fft_rank,
+ int64_t input_batch, int64_t fft_length0, int64_t fft_length1,
+ int64_t fft_length2) {
switch (fft_rank) {
case 1:
internal::EigenFftWithRank<1, EigenDevice>(device, out, operand, fft_type,
@@ -264,5 +267,6 @@
}
} // namespace xla
+} // namespace tensorflow
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FFT_IMPL_H_
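The runtime_fft_impl.h hunks hoist FftType out of the internal namespace and wrap the header in the tensorflow namespace; its numeric values are expected to stay in lock-step with the xla.FftType proto enum, which the test below asserts at run time. A compile-time sketch of the same invariant, limited to the two values spelled out above (RFFT and IRFFT are covered by the test):
// Sketch: the enum values shown in the hunk above must match the proto enum.
#include <cstdint>
#include "tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h"
static_assert(static_cast<int32_t>(tensorflow::xla::FftType::FFT) == 0,
              "runtime FftType must match xla.FftType proto values");
static_assert(static_cast<int32_t>(tensorflow::xla::FftType::IFFT) == 1,
              "runtime FftType must match xla.FftType proto values");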
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fft_test.cc b/tensorflow/compiler/xla/service/cpu/runtime_fft_test.cc
index b9b021c..b3e8d7d 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fft_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fft_test.cc
@@ -19,13 +19,13 @@
TEST(FftTypeTest, MatchesProto) {
EXPECT_EQ(::xla::FftType_ARRAYSIZE, 4);
- EXPECT_EQ(::xla::internal::FftTypeArraySize(), 4);
+ EXPECT_EQ(::tensorflow::xla::FftTypeArraySize(), 4);
EXPECT_EQ(::xla::FftType::FFT,
- static_cast<int32_t>(::xla::internal::FftType::FFT));
+ static_cast<int32_t>(::tensorflow::xla::FftType::FFT));
EXPECT_EQ(::xla::FftType::IFFT,
- static_cast<int32_t>(::xla::internal::FftType::IFFT));
+ static_cast<int32_t>(::tensorflow::xla::FftType::IFFT));
EXPECT_EQ(::xla::FftType::RFFT,
- static_cast<int32_t>(::xla::internal::FftType::RFFT));
+ static_cast<int32_t>(::tensorflow::xla::FftType::RFFT));
EXPECT_EQ(::xla::FftType::IRFFT,
- static_cast<int32_t>(::xla::internal::FftType::IRFFT));
+ static_cast<int32_t>(::tensorflow::xla::FftType::IRFFT));
}
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fork_join.cc b/tensorflow/compiler/xla/service/cpu/runtime_fork_join.cc
index c17000f..e8bb939 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fork_join.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fork_join.cc
@@ -17,13 +17,13 @@
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/service/custom_call_status_internal.h"
#include "tensorflow/core/platform/blocking_counter.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
#include "tensorflow/core/platform/logging.h"
using ComputeFunctionType = void (*)(void*, const void*, const void**, void**,
@@ -57,7 +57,7 @@
// [partition1_dim2_start]
// [partition1_dim2_limit]
//
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_ParallelForkJoin(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_ParallelForkJoin(
void* result_ptr, const void* run_options_ptr, const void** params,
void** buffer_table, void* status, uint64_t* prof_counters,
int32_t num_partitions, int64_t* partitions, int32_t num_partitioned_dims,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fork_join.h b/tensorflow/compiler/xla/service/cpu/runtime_fork_join.h
index 06931a3..058439d 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fork_join.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fork_join.h
@@ -16,7 +16,7 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FORK_JOIN_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FORK_JOIN_H_
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
extern "C" {
@@ -24,7 +24,7 @@
// threads before returning. See comments in runtime_fork_join.cc for details.
extern void __xla_cpu_runtime_ParallelForkJoin(
void* result_ptr, const void* run_options_ptr, const void** params,
- void** buffer_table, void* status, uint64_t* prof_counters,
+ void** buffer_table, void* status, tensorflow::uint64* prof_counters,
int32_t num_partitions, int64_t* partitions, int32_t num_partitioned_dims,
void* function_ptr);
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fp16.cc b/tensorflow/compiler/xla/service/cpu/runtime_fp16.cc
index bf62d5c..803add1 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fp16.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fp16.cc
@@ -17,7 +17,7 @@
#include <cstring>
-#include "absl/base/attributes.h"
+#include "tensorflow/core/platform/macros.h"
namespace {
@@ -60,7 +60,7 @@
// that the compiler-rt definitions "win", but that isn't essential.
// Algorithm copied from Eigen.
-uint16_t ABSL_ATTRIBUTE_WEAK __gnu_f2h_ieee(float float_value) {
+uint16_t TF_ATTRIBUTE_WEAK __gnu_f2h_ieee(float float_value) {
AliasedFloatInt f = AliasedFloatInt::FromFloat(float_value);
const AliasedFloatInt f32infty = AliasedFloatInt::FromUInt(255 << 23);
@@ -110,7 +110,7 @@
}
// Algorithm copied from Eigen.
-float ABSL_ATTRIBUTE_WEAK __gnu_h2f_ieee(uint16_t h) {
+float TF_ATTRIBUTE_WEAK __gnu_h2f_ieee(uint16_t h) {
const AliasedFloatInt magic = AliasedFloatInt::FromUInt(113 << 23);
const unsigned int shifted_exp = 0x7c00 << 13; // exponent mask after shift
AliasedFloatInt o;
@@ -131,7 +131,7 @@
return o.as_float();
}
-uint16_t ABSL_ATTRIBUTE_WEAK __truncdfhf2(double d) {
+uint16_t TF_ATTRIBUTE_WEAK __truncdfhf2(double d) {
// This does a double rounding step, but it's precise enough for our use
// cases.
return __gnu_f2h_ieee(static_cast<float>(d));
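runtime_fp16.cc defines these soft-float helpers weakly on purpose: when compiler-rt provides __gnu_f2h_ieee, __gnu_h2f_ieee, or __truncdfhf2, the strong definitions win at link time and these fallbacks drop out. A sketch of the weak-symbol macro the hunks switch to (assumed shape; the actual definition is in tensorflow/core/platform/macros.h):
// Sketch: weak linkage on GCC/Clang, nothing elsewhere.
#if defined(__GNUC__) || defined(__clang__)
#define TF_ATTRIBUTE_WEAK __attribute__((weak))
#else
#define TF_ATTRIBUTE_WEAK  // weak linkage not expressed on this toolchain
#endif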
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_fp16.h b/tensorflow/compiler/xla/service/cpu/runtime_fp16.h
index ded6517..fe91c01 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_fp16.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_fp16.h
@@ -16,15 +16,15 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FP16_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FP16_H_
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
// Converts an F32 value to a F16.
-extern "C" uint16_t __gnu_f2h_ieee(float);
+extern "C" tensorflow::uint16 __gnu_f2h_ieee(float);
// Converts an F16 value to a F32.
-extern "C" float __gnu_h2f_ieee(uint16_t);
+extern "C" float __gnu_h2f_ieee(tensorflow::uint16);
// Converts an F64 value to a F16.
-extern "C" uint16_t __truncdfhf2(double);
+extern "C" tensorflow::uint16 __truncdfhf2(double);
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_FP16_H_
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.cc b/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.cc
index 7673e9e..a18db1f 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.cc
@@ -20,19 +20,20 @@
#include <numeric>
#include <string>
-#include "absl/base/dynamic_annotations.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/platform/dynamic_annotations.h"
+#include "tensorflow/core/platform/macros.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_KeyValueSort(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_KeyValueSort(
int64_t a, int64_t b, int64_t c, char** values, int32_t values_count,
int32_t* values_primitive_type_size_in_bytes, bool is_stable,
char* run_options, int64_t* prof_counters,
void (*less_than)(char*, char*, char**, char**, int64_t*)) {
// 'values' and 'values_primitive_type_size_in_bytes' are managed by the JIT
// code, so msan can't tell they are initialized.
- ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(values, values_count * sizeof(char*));
- ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(values_primitive_type_size_in_bytes,
- values_count * sizeof(int32_t));
+ TF_ANNOTATE_MEMORY_IS_INITIALIZED(values, values_count * sizeof(char*));
+ TF_ANNOTATE_MEMORY_IS_INITIALIZED(values_primitive_type_size_in_bytes,
+ values_count * sizeof(int32_t));
// High-level idea of the iteration/sorting logic:
// Conceptually we have a 3-dimensional shape [a, b, c]. b corresponds to the
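The sort's values arrive from JIT-generated code, so MSan has no shadow state for them; the annotation above marks those byte ranges as initialized before the comparator reads them. A sketch of what such an annotation typically forwards to under MSan (illustrative, not the exact tensorflow/core/platform/dynamic_annotations.h definition):
// Sketch: unpoison the range under MSan, otherwise compile to nothing.
#if defined(__has_feature)
#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#define TF_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) __msan_unpoison(ptr, bytes)
#endif
#endif
#ifndef TF_ANNOTATE_MEMORY_IS_INITIALIZED
#define TF_ANNOTATE_MEMORY_IS_INITIALIZED(ptr, bytes) \
  do { (void)(ptr); (void)(bytes); } while (0)
#endif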
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.h b/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.h
index 45c1a5f..064961f 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_key_value_sort.h
@@ -16,9 +16,8 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_KEY_VALUE_SORT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_KEY_VALUE_SORT_H_
-#include <stdint.h>
-
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/platform/types.h"
extern "C" {
@@ -38,7 +37,7 @@
// - profile counters = 'prof_counters' (int64_t*)
extern void __xla_cpu_runtime_KeyValueSort(
int64_t a, int64_t b, int64_t c, char** values, int32_t values_count,
- int32_t* values_primitive_type_size_in_bytes, bool is_stable,
+ tensorflow::int32* values_primitive_type_size_in_bytes, bool is_stable,
char* run_options, int64_t* prof_counters,
void (*less_than)(char*, char*, char**, char**, int64_t*));
}
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_matmul.cc b/tensorflow/compiler/xla/service/cpu/runtime_matmul.cc
index 807922f..adaaecd 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_matmul.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_matmul.cc
@@ -17,10 +17,10 @@
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/compiler/xla/executable_run_options.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_lightweight_check.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
#include "tensorflow/core/kernels/eigen_contraction_kernel.h"
@@ -89,7 +89,7 @@
} // namespace
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF16(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF16(
const void* run_options_ptr, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
int32_t transpose_rhs) {
@@ -97,14 +97,14 @@
transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF32(
const void* run_options_ptr, float* out, float* lhs, float* rhs, int64_t m,
int64_t n, int64_t k, int32_t transpose_lhs, int32_t transpose_rhs) {
MatMulDispatch<float>(run_options_ptr, out, lhs, rhs, m, n, k, transpose_lhs,
transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF64(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulF64(
const void* run_options_ptr, double* out, double* lhs, double* rhs,
int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
int32_t transpose_rhs) {
@@ -112,7 +112,7 @@
transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC64(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC64(
const void* run_options_ptr, std::complex<float>* out,
std::complex<float>* lhs, std::complex<float>* rhs, int64_t m, int64_t n,
int64_t k, int32_t transpose_lhs, int32_t transpose_rhs) {
@@ -120,7 +120,7 @@
transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC128(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulC128(
const void* run_options_ptr, std::complex<double>* out,
std::complex<double>* lhs, std::complex<double>* rhs, int64_t m, int64_t n,
int64_t k, int32_t transpose_lhs, int32_t transpose_rhs) {
@@ -128,7 +128,7 @@
transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulS32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenMatMulS32(
const void* run_options_ptr, int32_t* out, int32_t* lhs, int32_t* rhs,
int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
int32_t transpose_rhs) {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_matmul.h b/tensorflow/compiler/xla/service/cpu/runtime_matmul.h
index 2b13701..da714e3 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_matmul.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_matmul.h
@@ -16,11 +16,10 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_MATMUL_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_MATMUL_H_
-#include <stdint.h>
-
#include <complex>
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
@@ -55,9 +54,10 @@
int64_t k, int32_t transpose_lhs, int32_t transpose_rhs);
extern void __xla_cpu_runtime_EigenMatMulS32(
- const void* /* xla::ExecutableRunOptions* */ run_options_ptr, int32_t* out,
- int32_t* lhs, int32_t* rhs, int64_t m, int64_t n, int64_t k,
- int32_t transpose_lhs, int32_t transpose_rhs);
+ const void* /* xla::ExecutableRunOptions* */ run_options_ptr,
+ tensorflow::int32* out, tensorflow::int32* lhs, tensorflow::int32* rhs,
+ int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
+ int32_t transpose_rhs);
} // extern "C"
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc b/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
index 82a266e..f87b82c 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_matmul_mkl.cc
@@ -21,8 +21,8 @@
#include "tensorflow/compiler/xla/executable_run_options.h"
#define EIGEN_USE_THREADS
-#include "absl/base/dynamic_annotations.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/ThreadPool"
+#include "tensorflow/core/platform/dynamic_annotations.h"
namespace {
// BLAS GEMM API for 32-bit Matrix Multiplication.
@@ -71,7 +71,7 @@
} // namespace
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLMatMulF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLMatMulF32(
const void* run_options_ptr, float* out, float* lhs, float* rhs, int64_t m,
int64_t n, int64_t k, int32_t transpose_lhs, int32_t transpose_rhs) {
const xla::ExecutableRunOptions* run_options =
@@ -86,7 +86,7 @@
}
// BLAS GEMM API for 64-bit Matrix Multiplication
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLMatMulF64(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_MKLMatMulF64(
const void* run_options_ptr, double* out, double* lhs, double* rhs,
int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
int32_t transpose_rhs) {
@@ -101,7 +101,7 @@
mkl_set_num_threads_local(prev_num_threads);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_MKLSingleThreadedMatMulF32(const void* run_options_ptr,
float* out, float* lhs, float* rhs,
int64_t m, int64_t n, int64_t k,
@@ -114,7 +114,7 @@
mkl_set_num_threads_local(prev_num_threads);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_MKLSingleThreadedMatMulF64(const void* run_options_ptr,
double* out, double* lhs,
double* rhs, int64_t m, int64_t n,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_pow.cc b/tensorflow/compiler/xla/service/cpu/runtime_pow.cc
index 55e6f29..d3fc177 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_pow.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_pow.cc
@@ -15,7 +15,7 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_pow.h"
-#include "absl/base/attributes.h"
+#include "tensorflow/core/platform/macros.h"
template <typename T>
static T Powi(T a, int32_t b) {
@@ -30,6 +30,6 @@
return recip ? 1 / r : r;
}
-float ABSL_ATTRIBUTE_WEAK __powisf2(float a, int32_t b) { return Powi(a, b); }
+float TF_ATTRIBUTE_WEAK __powisf2(float a, int32_t b) { return Powi(a, b); }
-double ABSL_ATTRIBUTE_WEAK __powidf2(double a, int32_t b) { return Powi(a, b); }
+double TF_ATTRIBUTE_WEAK __powidf2(double a, int32_t b) { return Powi(a, b); }
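__powisf2 and __powidf2 are the compiler-rt entry points for a float or double raised to an integer power; defining them weakly lets a real compiler-rt override these fallbacks. Powi handles negative exponents through the reciprocal, as the `return recip ? 1 / r : r;` line above shows. A small usage sketch (all results below are exact in IEEE arithmetic):
// Sketch: exercising the declarations from runtime_pow.h.
#include <cassert>
#include <cstdint>
extern "C" float __powisf2(float a, int32_t b);
extern "C" double __powidf2(double a, int32_t b);
int main() {
  assert(__powisf2(2.0f, 10) == 1024.0f);
  assert(__powisf2(2.0f, -3) == 0.125f);  // negative exponent -> reciprocal
  assert(__powidf2(10.0, 0) == 1.0);
  return 0;
}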
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_pow.h b/tensorflow/compiler/xla/service/cpu/runtime_pow.h
index fa00046..14d7e2d 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_pow.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_pow.h
@@ -16,7 +16,7 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_POW_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_POW_H_
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
// Raises F32 value a to the power of b.
extern "C" float __powisf2(float a, int32_t b);
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.cc b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.cc
index a37c7df..ee8fa0e 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.cc
@@ -15,10 +15,10 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.h"
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_conv_impl.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedConv2DF16(
const void* /*run_options_ptr*/, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t input_batch, int64_t input_rows,
@@ -38,7 +38,7 @@
feature_group_count);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedConv2DF32(
const void* /*run_options_ptr*/, float* out, float* lhs, float* rhs,
int64_t input_batch, int64_t input_rows, int64_t input_cols,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.h b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.h
index af5904b..8834989 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv2d.h
@@ -16,9 +16,8 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_CONV2D_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_CONV2D_H_
-#include <stdint.h>
-
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.cc b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.cc
index d530d89..c521a75 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.cc
@@ -15,10 +15,10 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.h"
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_conv_impl.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedConv3DF32(
const void* /*run_options_ptr*/, float* out, float* lhs, float* rhs,
int64_t input_batch, int64_t input_x, int64_t input_y, int64_t input_z,
@@ -41,7 +41,7 @@
rhs_z_dilation, feature_group_count);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedConv3DF16(
const void* /*run_options_ptr*/, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t input_batch, int64_t input_x, int64_t input_y,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.h b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.h
index e75e5ac..c4958f8 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_conv3d.h
@@ -16,9 +16,8 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_CONV3D_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_CONV3D_H_
-#include <stdint.h>
-
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.cc b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.cc
index 53cb8ad..7bdfa26 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.cc
@@ -15,15 +15,16 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.h"
-#include "absl/base/dynamic_annotations.h"
#include "tensorflow/compiler/xla/service/cpu/runtime_fft_impl.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenSingleThreadedFft(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_EigenSingleThreadedFft(
const void* run_options_ptr, void* out, void* operand, int32_t fft_type,
int32_t double_precision, int32_t fft_rank, int64_t input_batch,
int64_t fft_length0, int64_t fft_length1, int64_t fft_length2) {
- xla::EigenFftImpl(Eigen::DefaultDevice(), out, operand,
- static_cast<xla::internal::FftType>(fft_type),
- static_cast<bool>(double_precision), fft_rank, input_batch,
- fft_length0, fft_length1, fft_length2);
+ tensorflow::xla::EigenFftImpl(Eigen::DefaultDevice(), out, operand,
+ static_cast<tensorflow::xla::FftType>(fft_type),
+ static_cast<bool>(double_precision), fft_rank,
+ input_batch, fft_length0, fft_length1,
+ fft_length2);
}
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.h b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.h
index c6927dc..a475769 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_fft.h
@@ -16,7 +16,7 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_FFT_H_
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_SINGLE_THREADED_FFT_H_
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
extern "C" {
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.cc b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.cc
index 05f5485..3419592 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.cc
@@ -15,8 +15,8 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h"
-#include "absl/base/attributes.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
+#include "tensorflow/core/platform/dynamic_annotations.h"
#if defined(TENSORFLOW_USE_CUSTOM_CONTRACTION_KERNEL)
#include "tensorflow/core/kernels/eigen_contraction_kernel.h"
@@ -81,7 +81,7 @@
} // namespace
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulF16(
const void* run_options_ptr, Eigen::half* out, Eigen::half* lhs,
Eigen::half* rhs, int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
@@ -90,7 +90,7 @@
n, k, transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulF32(const void* run_options_ptr,
float* out, float* lhs,
float* rhs, int64_t m, int64_t n,
@@ -100,7 +100,7 @@
transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulF64(const void* run_options_ptr,
double* out, double* lhs,
double* rhs, int64_t m,
@@ -111,7 +111,7 @@
transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulC64(
const void* run_options_ptr, std::complex<float>* out,
std::complex<float>* lhs, std::complex<float>* rhs, int64_t m, int64_t n,
@@ -120,7 +120,7 @@
run_options_ptr, out, lhs, rhs, m, n, k, transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulC128(
const void* run_options_ptr, std::complex<double>* out,
std::complex<double>* lhs, std::complex<double>* rhs, int64_t m, int64_t n,
@@ -129,7 +129,7 @@
run_options_ptr, out, lhs, rhs, m, n, k, transpose_lhs, transpose_rhs);
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void
__xla_cpu_runtime_EigenSingleThreadedMatMulS32(const void* run_options_ptr,
int32_t* out, int32_t* lhs,
int32_t* rhs, int64_t m,
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h
index 9473eb7..5461f48 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_single_threaded_matmul.h
@@ -19,6 +19,7 @@
#include <complex>
#include "third_party/eigen3/Eigen/Core"
+#include "tensorflow/core/platform/types.h"
extern "C" {
@@ -55,9 +56,10 @@
int32_t transpose_lhs, int32_t transpose_rhs);
extern void __xla_cpu_runtime_EigenSingleThreadedMatMulS32(
- const void* /* xla::ExecutableRunOptions* */ run_options_ptr, int32_t* out,
- int32_t* lhs, int32_t* rhs, int64_t m, int64_t n, int64_t k,
- int32_t transpose_lhs, int32_t transpose_rhs);
+ const void* /* xla::ExecutableRunOptions* */ run_options_ptr,
+ tensorflow::int32* out, tensorflow::int32* lhs, tensorflow::int32* rhs,
+ int64_t m, int64_t n, int64_t k, int32_t transpose_lhs,
+ int32_t transpose_rhs);
} // extern "C"
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_topk.cc b/tensorflow/compiler/xla/service/cpu/runtime_topk.cc
index d619491..a535c5c 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_topk.cc
+++ b/tensorflow/compiler/xla/service/cpu/runtime_topk.cc
@@ -16,20 +16,20 @@
#include "tensorflow/compiler/xla/service/cpu/runtime_topk.h"
#include <algorithm>
-#include <cstring>
#include <memory>
#include <numeric>
#include <vector>
-#include "absl/base/dynamic_annotations.h"
+#include "tensorflow/core/platform/dynamic_annotations.h"
+#include "tensorflow/core/platform/macros.h"
template <typename T>
static void TopK(int64_t batch_size, int64_t input_size, int64_t k,
const T* values, T* out_values, int32_t* out_indices) {
// 'values' is managed by the JIT code, so msan can't tell they are
// initialized.
- ABSL_ANNOTATE_MEMORY_IS_INITIALIZED(values,
- input_size * batch_size * sizeof(T));
+ TF_ANNOTATE_MEMORY_IS_INITIALIZED(values,
+ input_size * batch_size * sizeof(T));
std::vector<int32_t> temp_indices(input_size);
for (int64_t batch = 0; batch != batch_size; ++batch) {
@@ -67,7 +67,7 @@
}
}
-ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TopKF32(
+TF_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_TopKF32(
int64_t batch_size, int64_t input_size, int64_t k, const float* values,
float* out_values, int32_t* out_indices) {
TopK(batch_size, input_size, k, values, out_values, out_indices);
diff --git a/tensorflow/compiler/xla/service/cpu/runtime_topk.h b/tensorflow/compiler/xla/service/cpu/runtime_topk.h
index d634ad6..6fc0c72 100644
--- a/tensorflow/compiler/xla/service/cpu/runtime_topk.h
+++ b/tensorflow/compiler/xla/service/cpu/runtime_topk.h
@@ -16,7 +16,7 @@
#ifndef TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_TOPK_H
#define TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_TOPK_H
-#include <stdint.h>
+#include "tensorflow/core/platform/types.h"
extern "C" {
@@ -24,7 +24,8 @@
// outputs are written to `out_values` and `out_indices`.
extern void __xla_cpu_runtime_TopKF32(int64_t batch_size, int64_t input_size,
int64_t k, const float* values,
- float* out_values, int32_t* out_indices);
+ float* out_values,
+ tensorflow::int32* out_indices);
}
#endif // TENSORFLOW_COMPILER_XLA_SERVICE_CPU_RUNTIME_TOPK_H
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
index 73e5684..9b3fb3b 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
index c9aea55..8d6e69c 100644
--- a/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
+++ b/tensorflow/compiler/xla/service/dfs_hlo_visitor_with_default.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/dump.cc b/tensorflow/compiler/xla/service/dump.cc
index cfe6ded..5468932 100644
--- a/tensorflow/compiler/xla/service/dump.cc
+++ b/tensorflow/compiler/xla/service/dump.cc
@@ -404,7 +404,7 @@
// dies. But we only add an entry if dumping is enabled for this module, and
// dumping a module leaks buffer space in stdout or bytes on disk *way* faster
// than this hashtable leaks memory.
-static auto& module_id_to_step_number ABSL_GUARDED_BY(mu) =
+static auto& module_id_to_step_number TF_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, int64_t>();
// Maps a module's unique ID to a timestamp indicating when we've first dumped
@@ -415,7 +415,7 @@
// dies. But we only add an entry if dumping is enabled for this module, and
// dumping a module leaks buffer space in stdout or bytes on disk *way* faster
// than this hashtable leaks memory.
-static auto& module_id_to_timestamp ABSL_GUARDED_BY(mu) =
+static auto& module_id_to_timestamp TF_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, uint64_t>();
int64_t StepNumberForModule(const HloModule& module) {
@@ -622,7 +622,7 @@
int64_t execution_count;
uint64_t timestamp;
{
- static auto& module_id_to_execution_count ABSL_GUARDED_BY(mu) =
+ static auto& module_id_to_execution_count TF_GUARDED_BY(mu) =
*new absl::flat_hash_map<int64_t, int64_t>();
tensorflow::mutex_lock lock(mu);
execution_count = module_id_to_execution_count[module.unique_id()]++;
@@ -659,7 +659,7 @@
// have to use its name.
int64_t execution_count;
{
- static auto& module_name_to_execution_count ABSL_GUARDED_BY(mu) =
+ static auto& module_name_to_execution_count TF_GUARDED_BY(mu) =
*new absl::flat_hash_map<std::string, int64_t>();
tensorflow::mutex_lock lock(mu);
execution_count = module_name_to_execution_count[name]++;
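dump.cc keeps its per-module counters in intentionally leaked, mutex-guarded static maps; as the surrounding comments note, the leak is bounded by how many modules are ever dumped. A stripped-down sketch of that pattern with illustrative names (mu, counters, and NextCount are placeholders, not the actual dump.cc symbols):
// Sketch: a leaked static map guarded by a linker-initialized mutex.
#include <cstdint>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"
static tensorflow::mutex mu(tensorflow::LINKER_INITIALIZED);
static auto& counters TF_GUARDED_BY(mu) =
    *new absl::flat_hash_map<int64_t, int64_t>();  // leaked on purpose
int64_t NextCount(int64_t key) {
  tensorflow::mutex_lock lock(mu);
  return counters[key]++;
}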
diff --git a/tensorflow/compiler/xla/service/dynamic_dimension_inference.h b/tensorflow/compiler/xla/service/dynamic_dimension_inference.h
index 036ce78..ccf5d42 100644
--- a/tensorflow/compiler/xla/service/dynamic_dimension_inference.h
+++ b/tensorflow/compiler/xla/service/dynamic_dimension_inference.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/dynamic_window_utils.h b/tensorflow/compiler/xla/service/dynamic_window_utils.h
index 9c62ee6..6502e9c 100644
--- a/tensorflow/compiler/xla/service/dynamic_window_utils.h
+++ b/tensorflow/compiler/xla/service/dynamic_window_utils.h
@@ -25,6 +25,7 @@
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
struct DynamicWindowDims {
HloInstruction* padding_before;
diff --git a/tensorflow/compiler/xla/service/execution_tracker.h b/tensorflow/compiler/xla/service/execution_tracker.h
index 712407f..1cecaca 100644
--- a/tensorflow/compiler/xla/service/execution_tracker.h
+++ b/tensorflow/compiler/xla/service/execution_tracker.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/thread_annotations.h"
@@ -84,12 +85,12 @@
private:
// The next handle to assign to an execution.
- int64_t next_handle_ ABSL_GUARDED_BY(execution_mutex_);
+ int64_t next_handle_ TF_GUARDED_BY(execution_mutex_);
// Mapping from ExecutionHandle handle to the corresponding registered
// AsyncExecution object.
std::map<int64_t, std::unique_ptr<AsyncExecution>> handle_to_execution_
- ABSL_GUARDED_BY(execution_mutex_);
+ TF_GUARDED_BY(execution_mutex_);
tensorflow::mutex execution_mutex_; // Guards the execution mapping.
diff --git a/tensorflow/compiler/xla/service/generic_transfer_manager.h b/tensorflow/compiler/xla/service/generic_transfer_manager.h
index c350442..f99fd01 100644
--- a/tensorflow/compiler/xla/service/generic_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/generic_transfer_manager.h
@@ -20,6 +20,7 @@
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/gpu/bef_thunk.cc b/tensorflow/compiler/xla/service/gpu/bef_thunk.cc
index c88a56c..c4ecd16 100644
--- a/tensorflow/compiler/xla/service/gpu/bef_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/bef_thunk.cc
@@ -148,9 +148,9 @@
// The module data will be set in the execution context for kernel thunk to
// use during execution. The resource contexts cache the loaded modules.
tensorflow::mutex mutex_;
- absl::optional<GpuModuleData> gpu_module_data_ ABSL_GUARDED_BY(mutex_);
+ absl::optional<GpuModuleData> gpu_module_data_ TF_GUARDED_BY(mutex_);
absl::flat_hash_map<CUcontext, std::unique_ptr<tfrt::ResourceContext>>
- resource_contexts_ ABSL_GUARDED_BY(mutex_);
+ resource_contexts_ TF_GUARDED_BY(mutex_);
};
} // namespace
diff --git a/tensorflow/compiler/xla/service/gpu/cholesky_thunk.cc b/tensorflow/compiler/xla/service/gpu/cholesky_thunk.cc
index 77b82e4..f331b6b 100644
--- a/tensorflow/compiler/xla/service/gpu/cholesky_thunk.cc
+++ b/tensorflow/compiler/xla/service/gpu/cholesky_thunk.cc
@@ -32,7 +32,7 @@
static tensorflow::mutex contexts_mu(tensorflow::LINKER_INITIALIZED);
static auto contexts =
- new absl::flat_hash_map<se::Stream*, GpuSolverContext> ABSL_GUARDED_BY(
+ new absl::flat_hash_map<se::Stream*, GpuSolverContext> TF_GUARDED_BY(
contexts_mu);
CholeskyThunk::CholeskyThunk(ThunkInfo thunk_info,
@@ -56,9 +56,9 @@
VLOG(3) << "type=" << PrimitiveType_Name(type_)
<< " uplo=" << se::blas::UpperLowerString(uplo_)
<< " batch_size=" << batch_size_ << " n=" << n_
- << " a=" << a_buffer_.ToString()
- << " workspace=" << workspace_buffer_.ToString()
- << " info=" << info_buffer_.ToString();
+ << " a=" << a_buffer_.ToString()
+ << " workspace=" << workspace_buffer_.ToString()
+ << " info=" << info_buffer_.ToString();
GpuSolverContext* context;
{
diff --git a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
index ba56be8..9926753 100644
--- a/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/convolution_thunk.h
@@ -64,7 +64,7 @@
tensorflow::mutex mu_;
absl::flat_hash_map<const stream_executor::Stream*,
std::unique_ptr<MaybeFusedConvRunner>>
- runner_cache_ ABSL_GUARDED_BY(mu_);
+ runner_cache_ TF_GUARDED_BY(mu_);
};
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
index ccbc0d5..26d00b7 100644
--- a/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
+++ b/tensorflow/compiler/xla/service/gpu/elemental_ir_emitter.cc
@@ -98,7 +98,7 @@
}
}
output_type = F32;
- ABSL_FALLTHROUGH_INTENDED;
+ TF_FALLTHROUGH_INTENDED;
case F32:
break;
case F64:
diff --git a/tensorflow/compiler/xla/service/gpu/gemm_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/gemm_algorithm_picker.cc
index 7dd0779..6bda39f 100644
--- a/tensorflow/compiler/xla/service/gpu/gemm_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/gemm_algorithm_picker.cc
@@ -46,11 +46,11 @@
std::tuple<se::StreamExecutor*, Shape, Shape, Shape, std::string>;
static tensorflow::mutex autotune_cache_mu(tensorflow::LINKER_INITIALIZED);
-static auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_mu) =
+static auto& autotune_cache TF_GUARDED_BY(autotune_cache_mu) =
*new absl::flat_hash_map<GemmCacheKey,
absl::optional<se::blas::AlgorithmType>>();
-static int64_t cache_hits ABSL_GUARDED_BY(autotune_cache_mu) = 0;
-static int64_t cache_misses ABSL_GUARDED_BY(autotune_cache_mu) = 0;
+static int64_t cache_hits TF_GUARDED_BY(autotune_cache_mu) = 0;
+static int64_t cache_misses TF_GUARDED_BY(autotune_cache_mu) = 0;
// Experimentally tries to pick the best algorithm for the given gemm.
//
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_compiler.h b/tensorflow/compiler/xla/service/gpu/gpu_compiler.h
index f75b1a6..13f50a1 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_compiler.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_compiler.h
@@ -31,6 +31,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/hash/hash.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/core/platform/thread_annotations.h"
#include "tensorflow/stream_executor/stream_executor_pimpl.h"
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc b/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
index af982ab..7182d7f 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
+++ b/tensorflow/compiler/xla/service/gpu/gpu_conv_algorithm_picker.cc
@@ -323,9 +323,9 @@
}
tensorflow::mutex autotune_cache_lock(tensorflow::LINKER_INITIALIZED);
-auto& autotune_cache ABSL_GUARDED_BY(autotune_cache_lock) =
+auto& autotune_cache TF_GUARDED_BY(autotune_cache_lock) =
*new absl::flat_hash_map<ConvCacheKey, AutotuneResult>();
-auto& autotune_cache_stats ABSL_GUARDED_BY(autotune_cache_lock) =
+auto& autotune_cache_stats TF_GUARDED_BY(autotune_cache_lock) =
*new ConvCacheStats();
} // anonymous namespace
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_executable.h b/tensorflow/compiler/xla/service/gpu/gpu_executable.h
index 2f72b07..f34de33 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_executable.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_executable.h
@@ -38,6 +38,7 @@
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/statusor.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/device_memory_allocator.h"
@@ -244,9 +245,9 @@
// `ResolveConstantGlobals`.
tensorflow::mutex module_handle_mutex_;
std::map<stream_executor::StreamExecutor*, se::ScopedModuleHandle>
- module_handles_ ABSL_GUARDED_BY(module_handle_mutex_);
+ module_handles_ TF_GUARDED_BY(module_handle_mutex_);
std::map<stream_executor::StreamExecutor*, BufferAllocToDeviceMemoryMap>
- module_globals_ ABSL_GUARDED_BY(module_handle_mutex_);
+ module_globals_ TF_GUARDED_BY(module_handle_mutex_);
std::vector<ConstantInfo> constants_;
const absl::flat_hash_map<ShapeIndex, OutputInfo> output_info_;
diff --git a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
index a32c13d..f1960b0 100644
--- a/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/gpu/gpu_transfer_manager.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/gpu/horizontal_input_fusion.h b/tensorflow/compiler/xla/service/gpu/horizontal_input_fusion.h
index d8d8233..85313d0 100644
--- a/tensorflow/compiler/xla/service/gpu/horizontal_input_fusion.h
+++ b/tensorflow/compiler/xla/service/gpu/horizontal_input_fusion.h
@@ -20,6 +20,7 @@
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace gpu {
diff --git a/tensorflow/compiler/xla/service/gpu/horizontal_loop_fusion.h b/tensorflow/compiler/xla/service/gpu/horizontal_loop_fusion.h
index 8d53351..3824c5d 100644
--- a/tensorflow/compiler/xla/service/gpu/horizontal_loop_fusion.h
+++ b/tensorflow/compiler/xla/service/gpu/horizontal_loop_fusion.h
@@ -20,6 +20,7 @@
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace gpu {
diff --git a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
index 32725c0..59f3cac 100644
--- a/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
+++ b/tensorflow/compiler/xla/service/gpu/kernel_thunk.h
@@ -83,7 +83,7 @@
// Loaded kernels for each `StreamExecutor`. Requires pointer stability of
// values.
std::unordered_map<se::StreamExecutor*, std::unique_ptr<se::KernelBase>>
- kernel_cache_ ABSL_GUARDED_BY(mutex_);
+ kernel_cache_ TF_GUARDED_BY(mutex_);
};
} // namespace gpu
diff --git a/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h
index 49866c2..1389673 100644
--- a/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h
+++ b/tensorflow/compiler/xla/service/gpu/nvptx_compiler.h
@@ -69,8 +69,8 @@
// We cache the cuda_data_dir() and the result of our search, so that if the
// next module we have to compile has the same cuda_data_dir(), we can skip
// the search.
- std::string cached_cuda_data_dir_ ABSL_GUARDED_BY(mutex_);
- std::string cached_libdevice_dir_ ABSL_GUARDED_BY(mutex_);
+ std::string cached_cuda_data_dir_ TF_GUARDED_BY(mutex_);
+ std::string cached_libdevice_dir_ TF_GUARDED_BY(mutex_);
// Tries to compile the given ptx string to cubin. Returns a vector with the
// compiled cubin. If compilation was unsuccessful, returns an empty vector.
@@ -132,7 +132,7 @@
// is critical here.
absl::node_hash_map<CompilationCacheKey, CompilationCacheValue,
CompilationCacheHash, CompilationCacheEq>
- compilation_cache_ ABSL_GUARDED_BY(mutex_);
+ compilation_cache_ TF_GUARDED_BY(mutex_);
NVPTXCompiler(const NVPTXCompiler&) = delete;
NVPTXCompiler& operator=(const NVPTXCompiler&) = delete;
diff --git a/tensorflow/compiler/xla/service/graphcycles/graphcycles.h b/tensorflow/compiler/xla/service/graphcycles/graphcycles.h
index de88629..0190911 100644
--- a/tensorflow/compiler/xla/service/graphcycles/graphcycles.h
+++ b/tensorflow/compiler/xla/service/graphcycles/graphcycles.h
@@ -42,6 +42,7 @@
#include "absl/types/optional.h"
#include "absl/types/span.h"
+#include "tensorflow/core/platform/macros.h"
namespace tensorflow {
diff --git a/tensorflow/compiler/xla/service/hlo_alias_analysis.h b/tensorflow/compiler/xla/service/hlo_alias_analysis.h
index 81e70f3..8e41ee5 100644
--- a/tensorflow/compiler/xla/service/hlo_alias_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_alias_analysis.h
@@ -31,6 +31,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_buffer.h b/tensorflow/compiler/xla/service/hlo_buffer.h
index 8e72a58..fb1a423 100644
--- a/tensorflow/compiler/xla/service/hlo_buffer.h
+++ b/tensorflow/compiler/xla/service/hlo_buffer.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/shape_tree.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_computation.h b/tensorflow/compiler/xla/service/hlo_computation.h
index 984be21..f78df61 100644
--- a/tensorflow/compiler/xla/service/hlo_computation.h
+++ b/tensorflow/compiler/xla/service/hlo_computation.h
@@ -39,6 +39,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_cost_analysis.h b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
index d89d938..fee3dde 100644
--- a/tensorflow/compiler/xla/service/hlo_cost_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_cost_analysis.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
index 72f2af1..51e1bf7 100644
--- a/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
+++ b/tensorflow/compiler/xla/service/hlo_dataflow_analysis.h
@@ -39,6 +39,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_evaluator.h b/tensorflow/compiler/xla/service/hlo_evaluator.h
index 5e536bd..6dd29fd 100644
--- a/tensorflow/compiler/xla/service/hlo_evaluator.h
+++ b/tensorflow/compiler/xla/service/hlo_evaluator.h
@@ -37,6 +37,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
index a4916c4..3b98e8b 100644
--- a/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
+++ b/tensorflow/compiler/xla/service/hlo_graph_dumper.cc
@@ -1712,14 +1712,14 @@
tensorflow::mutex url_renderer_mu(tensorflow::LINKER_INITIALIZED);
std::function<StatusOr<std::string>(absl::string_view)>* url_renderer
- ABSL_GUARDED_BY(url_renderer_mu) = nullptr;
+ TF_GUARDED_BY(url_renderer_mu) = nullptr;
// Storage for fusion visualization: (module_id, computation_id) -> sequence of
// dot dumps.
tensorflow::mutex fusion_visualizer_state_mu(tensorflow::LINKER_INITIALIZED);
-static auto& fusion_visualizer_state
- ABSL_GUARDED_BY(fusion_visualizer_state_mu) = *new absl::flat_hash_map<
- std::pair<int64_t, int64_t>, std::vector<std::string>>();
+static auto& fusion_visualizer_state TF_GUARDED_BY(fusion_visualizer_state_mu) =
+ *new absl::flat_hash_map<std::pair<int64_t, int64_t>,
+ std::vector<std::string>>();
// Generates a key to the fusion visualizer state mapping.
std::pair<int, int> FusionVisualizerStateKey(
@@ -1732,7 +1732,7 @@
// fusion_visualizer_state and the URL renderer. Precondition: url_renderer !=
// nullptr.
StatusOr<std::string> WrapFusionExplorer(const HloComputation& computation)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
+ TF_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
CHECK(url_renderer != nullptr);
tensorflow::mutex_lock lock(fusion_visualizer_state_mu);
const std::vector<std::string>& dot_graphs =
@@ -1824,7 +1824,7 @@
StatusOr<std::string> WrapDotInFormat(const HloComputation& computation,
absl::string_view dot,
RenderedGraphFormat format)
- ABSL_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
+ TF_EXCLUSIVE_LOCKS_REQUIRED(url_renderer_mu) {
switch (format) {
case RenderedGraphFormat::kUrl:
CHECK(url_renderer != nullptr)
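TF_EXCLUSIVE_LOCKS_REQUIRED, as applied to WrapFusionExplorer and WrapDotInFormat above, documents that the annotated function may only be called while the named mutex is already held, letting the analysis flag callers that do not hold it. A minimal sketch of the pattern, assuming a TensorFlow build; renderer_mu, renderer_name, and the helpers below are hypothetical:

#include <string>

#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"

namespace {

tensorflow::mutex renderer_mu(tensorflow::LINKER_INITIALIZED);
std::string* renderer_name TF_GUARDED_BY(renderer_mu) = nullptr;

// Precondition: renderer_mu is held by the caller.
std::string DescribeRenderer() TF_EXCLUSIVE_LOCKS_REQUIRED(renderer_mu) {
  return renderer_name == nullptr ? "unset" : *renderer_name;
}

std::string DescribeRendererSafely() {
  tensorflow::mutex_lock lock(renderer_mu);  // take the lock, then call
  return DescribeRenderer();
}

}  // namespace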
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.cc b/tensorflow/compiler/xla/service/hlo_instruction.cc
index ab89918..08d9f86 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.cc
+++ b/tensorflow/compiler/xla/service/hlo_instruction.cc
@@ -3642,7 +3642,7 @@
const size_t old_dfs_stack_size = dfs_stack.size();
for (HloInstruction* child : current_node->operands()) {
- if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
+ if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
PrintCycle(child, &dfs_stack);
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
@@ -3652,7 +3652,7 @@
if (!ignore_control_predecessors) {
for (HloInstruction* child : current_node->control_predecessors()) {
- if (!ABSL_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
+ if (!TF_PREDICT_TRUE(PushDFSChild(visitor, &dfs_stack, child))) {
PrintCycle(child, &dfs_stack);
return FailedPrecondition(
"A cycle is detected while visiting instruction %s",
diff --git a/tensorflow/compiler/xla/service/hlo_instruction.h b/tensorflow/compiler/xla/service/hlo_instruction.h
index 4ec68a0..af16c4f 100644
--- a/tensorflow/compiler/xla/service/hlo_instruction.h
+++ b/tensorflow/compiler/xla/service/hlo_instruction.h
@@ -54,6 +54,7 @@
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/iterator_range.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_parser.cc b/tensorflow/compiler/xla/service/hlo_parser.cc
index 530e895..7a1db86 100644
--- a/tensorflow/compiler/xla/service/hlo_parser.cc
+++ b/tensorflow/compiler/xla/service/hlo_parser.cc
@@ -54,6 +54,7 @@
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/map_util.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/protobuf.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_pass_fix.h b/tensorflow/compiler/xla/service/hlo_pass_fix.h
index aabb8c6..6b14a18 100644
--- a/tensorflow/compiler/xla/service/hlo_pass_fix.h
+++ b/tensorflow/compiler/xla/service/hlo_pass_fix.h
@@ -25,6 +25,7 @@
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_pass_interface.h b/tensorflow/compiler/xla/service/hlo_pass_interface.h
index 7f34f01..4b0f341 100644
--- a/tensorflow/compiler/xla/service/hlo_pass_interface.h
+++ b/tensorflow/compiler/xla/service/hlo_pass_interface.h
@@ -22,6 +22,7 @@
#include "tensorflow/compiler/xla/status_macros.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_pass_pipeline.h b/tensorflow/compiler/xla/service/hlo_pass_pipeline.h
index 73b89c5..3721f1a 100644
--- a/tensorflow/compiler/xla/service/hlo_pass_pipeline.h
+++ b/tensorflow/compiler/xla/service/hlo_pass_pipeline.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_sharding.h b/tensorflow/compiler/xla/service/hlo_sharding.h
index 1dcfb7c..9c1e798 100644
--- a/tensorflow/compiler/xla/service/hlo_sharding.h
+++ b/tensorflow/compiler/xla/service/hlo_sharding.h
@@ -32,6 +32,7 @@
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/hlo_value.h b/tensorflow/compiler/xla/service/hlo_value.h
index b4dd6e8..676c46e 100644
--- a/tensorflow/compiler/xla/service/hlo_value.h
+++ b/tensorflow/compiler/xla/service/hlo_value.h
@@ -29,6 +29,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/instruction_fusion.h b/tensorflow/compiler/xla/service/instruction_fusion.h
index 8f5d3ea..8112f86 100644
--- a/tensorflow/compiler/xla/service/instruction_fusion.h
+++ b/tensorflow/compiler/xla/service/instruction_fusion.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/service/hlo_pass_interface.h"
#include "tensorflow/compiler/xla/service/hlo_reachability.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/interpreter/BUILD b/tensorflow/compiler/xla/service/interpreter/BUILD
index 4360398..3d96404 100644
--- a/tensorflow/compiler/xla/service/interpreter/BUILD
+++ b/tensorflow/compiler/xla/service/interpreter/BUILD
@@ -17,6 +17,7 @@
"//tensorflow/compiler/xla/service:generic_transfer_manager",
"//tensorflow/compiler/xla/service:transfer_manager",
"//tensorflow/compiler/xla/service/interpreter:platform_id",
+ "//tensorflow/core:lib",
"@com_google_absl//absl/memory",
],
alwayslink = True, # Contains per-platform transfer manager registration
@@ -121,8 +122,10 @@
"//tensorflow/compiler/xla/service:shaped_buffer",
"//tensorflow/compiler/xla/service:transfer_manager",
"//tensorflow/core:lib",
+ "//tensorflow/core/platform:macros",
"//tensorflow/core/platform:mutex",
"//tensorflow/core/platform:stream_executor_no_cuda",
+ "//tensorflow/core/platform:types",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/types:span",
],
diff --git a/tensorflow/compiler/xla/service/interpreter/compiler.h b/tensorflow/compiler/xla/service/interpreter/compiler.h
index 757399a..293fd90 100644
--- a/tensorflow/compiler/xla/service/interpreter/compiler.h
+++ b/tensorflow/compiler/xla/service/interpreter/compiler.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/stream_executor/stream_executor.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/interpreter/executable.h b/tensorflow/compiler/xla/service/interpreter/executable.h
index ef8419b..ea17ef2 100644
--- a/tensorflow/compiler/xla/service/interpreter/executable.h
+++ b/tensorflow/compiler/xla/service/interpreter/executable.h
@@ -31,6 +31,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
@@ -52,7 +53,7 @@
StatusOr<Literal> Evaluate(const ServiceExecutableRunOptions* run_options,
const HloComputation& computation,
absl::Span<const Literal> arg_literals) override
- ABSL_LOCKS_EXCLUDED(evaluator_lock_);
+ TF_LOCKS_EXCLUDED(evaluator_lock_);
// The interpreter interprets executables with an HloEvaluator.
std::unique_ptr<HloEvaluator> evaluator_ TF_PT_GUARDED_BY(evaluator_lock_);
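The two annotations above complement each other: TF_LOCKS_EXCLUDED marks a method that acquires the lock itself, so callers must not already hold it, while TF_PT_GUARDED_BY protects what a (smart) pointer member points to rather than the pointer itself. A hypothetical sketch assuming a TensorFlow build; ExampleRunner and its members are invented for illustration:

#include <memory>
#include <string>

#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"

class ExampleRunner {
 public:
  // Takes lock_ internally; callers must not hold it when calling Run().
  std::string Run(const std::string& input) TF_LOCKS_EXCLUDED(lock_) {
    tensorflow::mutex_lock guard(lock_);
    return *state_ + input;
  }

 private:
  tensorflow::mutex lock_;
  // The pointee (not the unique_ptr itself) is guarded by lock_.
  std::unique_ptr<std::string> state_ TF_PT_GUARDED_BY(lock_) =
      std::make_unique<std::string>("state:");
};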
diff --git a/tensorflow/compiler/xla/service/interpreter/interpreter_transfer_manager.h b/tensorflow/compiler/xla/service/interpreter/interpreter_transfer_manager.h
index d8cb210..398ef59 100644
--- a/tensorflow/compiler/xla/service/interpreter/interpreter_transfer_manager.h
+++ b/tensorflow/compiler/xla/service/interpreter/interpreter_transfer_manager.h
@@ -17,6 +17,7 @@
#define TENSORFLOW_COMPILER_XLA_SERVICE_INTERPRETER_INTERPRETER_TRANSFER_MANAGER_H_
#include "tensorflow/compiler/xla/service/generic_transfer_manager.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
index c60a944..ecd97a5 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_loop.h
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/service/llvm_ir/ir_array.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace llvm_ir {
diff --git a/tensorflow/compiler/xla/service/logical_buffer.h b/tensorflow/compiler/xla/service/logical_buffer.h
index 17c5b84..3e4ee2f 100644
--- a/tensorflow/compiler/xla/service/logical_buffer.h
+++ b/tensorflow/compiler/xla/service/logical_buffer.h
@@ -26,6 +26,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/gtl/int_type.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/name_uniquer.h b/tensorflow/compiler/xla/service/name_uniquer.h
index 4ae1e68..e681f4c 100644
--- a/tensorflow/compiler/xla/service/name_uniquer.h
+++ b/tensorflow/compiler/xla/service/name_uniquer.h
@@ -22,6 +22,7 @@
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/platform_util.h b/tensorflow/compiler/xla/service/platform_util.h
index 1919428..e3ee00a 100644
--- a/tensorflow/compiler/xla/service/platform_util.h
+++ b/tensorflow/compiler/xla/service/platform_util.h
@@ -22,6 +22,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/service.h b/tensorflow/compiler/xla/service/service.h
index dedce56..0b232fa 100644
--- a/tensorflow/compiler/xla/service/service.h
+++ b/tensorflow/compiler/xla/service/service.h
@@ -40,6 +40,7 @@
#include "tensorflow/compiler/xla/xla.pb.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/stream_executor_no_cuda.h"
#include "tensorflow/stream_executor/device_memory_allocator.h"
diff --git a/tensorflow/compiler/xla/service/shape_inference.h b/tensorflow/compiler/xla/service/shape_inference.h
index 07731f2..954b1cb 100644
--- a/tensorflow/compiler/xla/service/shape_inference.h
+++ b/tensorflow/compiler/xla/service/shape_inference.h
@@ -27,6 +27,7 @@
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/shaped_buffer.h b/tensorflow/compiler/xla/service/shaped_buffer.h
index e708216..ec28994 100644
--- a/tensorflow/compiler/xla/service/shaped_buffer.h
+++ b/tensorflow/compiler/xla/service/shaped_buffer.h
@@ -192,7 +192,7 @@
// this ScopedShapedBuffer, without freeing any of the associated memory.
//
// It's the caller's job to ensure that the memory contained therein is freed.
- ABSL_MUST_USE_RESULT ShapedBuffer release();
+ TF_MUST_USE_RESULT ShapedBuffer release();
// Extracts the sub-tree rooted at 'index' and returns a ScopedShapedBuffer
// that holds ownership of the subtree. Sets the buffers corresponding to the
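TF_MUST_USE_RESULT, applied to ScopedShapedBuffer::release() above, makes the compiler warn when a caller discards the return value, which is exactly what an ownership-transferring call needs. A hypothetical sketch (ExampleOwner is an invented type):

#include "tensorflow/core/platform/macros.h"

class ExampleOwner {
 public:
  ~ExampleOwner() { delete ptr_; }

  // The caller becomes responsible for the returned pointer; silently
  // dropping the result would leak it, hence TF_MUST_USE_RESULT.
  TF_MUST_USE_RESULT int* release() {
    int* released = ptr_;
    ptr_ = nullptr;
    return released;
  }

 private:
  int* ptr_ = new int(42);
};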
diff --git a/tensorflow/compiler/xla/service/source_map_util.h b/tensorflow/compiler/xla/service/source_map_util.h
index d2ba268..ec2b3ce 100644
--- a/tensorflow/compiler/xla/service/source_map_util.h
+++ b/tensorflow/compiler/xla/service/source_map_util.h
@@ -19,6 +19,7 @@
#include "absl/strings/str_format.h"
#include "tensorflow/compiler/xla/service/executable.h"
#include "tensorflow/compiler/xla/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace source_map_util {
diff --git a/tensorflow/compiler/xla/service/stream_pool.h b/tensorflow/compiler/xla/service/stream_pool.h
index 6bb3a3f..9cc5b7c 100644
--- a/tensorflow/compiler/xla/service/stream_pool.h
+++ b/tensorflow/compiler/xla/service/stream_pool.h
@@ -56,7 +56,7 @@
void ReturnStream(se::Stream* stream);
tensorflow::mutex mu_;
- std::vector<std::unique_ptr<se::Stream>> streams_ ABSL_GUARDED_BY(mu_);
+ std::vector<std::unique_ptr<se::Stream>> streams_ TF_GUARDED_BY(mu_);
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/service/transfer_manager.cc b/tensorflow/compiler/xla/service/transfer_manager.cc
index 04820ac..b11bb3e 100644
--- a/tensorflow/compiler/xla/service/transfer_manager.cc
+++ b/tensorflow/compiler/xla/service/transfer_manager.cc
@@ -28,6 +28,7 @@
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/notification.h"
using absl::StrCat;
diff --git a/tensorflow/compiler/xla/service/tuple_points_to_analysis.h b/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
index 92e2454..e59313b 100644
--- a/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
+++ b/tensorflow/compiler/xla/service/tuple_points_to_analysis.h
@@ -38,6 +38,7 @@
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/gtl/compactptrset.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc b/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
index 01c4d57..8553cc2 100644
--- a/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
+++ b/tensorflow/compiler/xla/service/while_loop_simplifier_test.cc
@@ -45,13 +45,13 @@
class WhileLoopSimplifierTest : public HloTestBase {
protected:
// Makes an HloModule that contains a loop with `num_iters` iterations.

- ABSL_MUST_USE_RESULT std::unique_ptr<VerifiedHloModule>
+ TF_MUST_USE_RESULT std::unique_ptr<VerifiedHloModule>
MakeModuleWithSimpleLoop(int num_iters);
// Similar to MakeModuleWithSimpleLoop except that the loop bound is passed to
// the loop-condition through an element of a tuple which is the
// loop-condition parameter.
- ABSL_MUST_USE_RESULT std::unique_ptr<VerifiedHloModule>
+ TF_MUST_USE_RESULT std::unique_ptr<VerifiedHloModule>
MakeModuleWithSimpleLoopTupleElementLoopBound(int num_iters);
};
diff --git a/tensorflow/compiler/xla/service/xla_debug_info_manager.h b/tensorflow/compiler/xla/service/xla_debug_info_manager.h
index d62cd99..a5e8844 100644
--- a/tensorflow/compiler/xla/service/xla_debug_info_manager.h
+++ b/tensorflow/compiler/xla/service/xla_debug_info_manager.h
@@ -120,12 +120,12 @@
};
tensorflow::mutex mutex_;
- bool tracing_active_ ABSL_GUARDED_BY(mutex_) = false;
+ bool tracing_active_ TF_GUARDED_BY(mutex_) = false;
// Active modules are those still tracked by us. There can be many more
// active modules than running modules; we try to reduce the trace size by
// transferring only those modules that were running during the tracing period.
absl::flat_hash_map<ModuleIdentifier, XlaModuleEntry> active_modules_
- ABSL_GUARDED_BY(mutex_);
+ TF_GUARDED_BY(mutex_);
};
} // namespace xla
diff --git a/tensorflow/compiler/xla/shape_util.h b/tensorflow/compiler/xla/shape_util.h
index 6899593..8104982 100644
--- a/tensorflow/compiler/xla/shape_util.h
+++ b/tensorflow/compiler/xla/shape_util.h
@@ -38,6 +38,7 @@
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/status.h b/tensorflow/compiler/xla/status.h
index 152d375..69abb51 100644
--- a/tensorflow/compiler/xla/status.h
+++ b/tensorflow/compiler/xla/status.h
@@ -17,6 +17,7 @@
#define TENSORFLOW_COMPILER_XLA_STATUS_H_
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/status_macros.cc b/tensorflow/compiler/xla/status_macros.cc
index 53ad3d4..c89b717 100644
--- a/tensorflow/compiler/xla/status_macros.cc
+++ b/tensorflow/compiler/xla/status_macros.cc
@@ -41,7 +41,7 @@
// If log_severity is NUM_SEVERITIES, nothing is logged.
static void LogError(const Status& status, const char* filename, int line,
int log_severity, bool should_log_stack_trace) {
- if (ABSL_PREDICT_TRUE(log_severity != tensorflow::NUM_SEVERITIES)) {
+ if (TF_PREDICT_TRUE(log_severity != tensorflow::NUM_SEVERITIES)) {
std::string stack_trace;
if (should_log_stack_trace) {
stack_trace = absl::StrCat("\n", tensorflow::CurrentStackTrace());
@@ -77,12 +77,12 @@
tensorflow::error::Code code,
const std::string& message, bool should_log,
int log_severity, bool should_log_stack_trace) {
- if (ABSL_PREDICT_FALSE(code == tensorflow::error::OK)) {
+ if (TF_PREDICT_FALSE(code == tensorflow::error::OK)) {
LOG(ERROR) << "Cannot create error with status OK";
code = tensorflow::error::UNKNOWN;
}
const Status status = MakeStatus(code, message);
- if (ABSL_PREDICT_TRUE(should_log)) {
+ if (TF_PREDICT_TRUE(should_log)) {
LogError(status, filename, line, log_severity, should_log_stack_trace);
}
return status;
@@ -151,7 +151,7 @@
const std::string str = prior_message_handling_ == kAppendToPriorMessage
? absl::StrCat(prior_message_, stream_str)
: absl::StrCat(stream_str, prior_message_);
- if (ABSL_PREDICT_FALSE(str.empty())) {
+ if (TF_PREDICT_FALSE(str.empty())) {
return MakeError(
file_, line_, code_,
absl::StrCat(str, "Error without message at ", file_, ":", line_),
diff --git a/tensorflow/compiler/xla/status_macros.h b/tensorflow/compiler/xla/status_macros.h
index bfe47b4..3831c52 100644
--- a/tensorflow/compiler/xla/status_macros.h
+++ b/tensorflow/compiler/xla/status_macros.h
@@ -25,6 +25,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
namespace status_macros {
@@ -172,7 +173,7 @@
StatusAdaptorForMacros(const StatusAdaptorForMacros&) = delete;
StatusAdaptorForMacros& operator=(const StatusAdaptorForMacros&) = delete;
- explicit operator bool() const { return ABSL_PREDICT_TRUE(status_.ok()); }
+ explicit operator bool() const { return TF_PREDICT_TRUE(status_.ok()); }
Status&& Consume() { return std::move(status_); }
@@ -184,7 +185,7 @@
} // namespace xla
#define TF_RET_CHECK(condition) \
- while (ABSL_PREDICT_FALSE(!(condition))) \
+ while (TF_PREDICT_FALSE(!(condition))) \
return xla::status_macros::MakeErrorStream(__FILE__, __LINE__, \
tensorflow::error::INTERNAL) \
.with_log_stack_trace() \
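In use, TF_RET_CHECK behaves like an assert that returns an INTERNAL Status instead of aborting, and additional context can be streamed onto the failure message. A minimal usage sketch, assuming a TensorFlow/XLA build; CheckEvenExample is a hypothetical function:

#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/status_macros.h"

// On failure, returns an INTERNAL status recording file, line, and the failed
// condition, with the streamed text appended.
xla::Status CheckEvenExample(int value) {
  TF_RET_CHECK(value % 2 == 0) << "expected an even value, got " << value;
  return xla::Status::OK();
}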
diff --git a/tensorflow/compiler/xla/tests/BUILD b/tensorflow/compiler/xla/tests/BUILD
index 5ae3741..5117df8 100644
--- a/tensorflow/compiler/xla/tests/BUILD
+++ b/tensorflow/compiler/xla/tests/BUILD
@@ -118,7 +118,6 @@
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/core:lib",
"//tensorflow/core:test",
- "@com_google_absl//absl/base:core_headers",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:optional",
"@com_google_absl//absl/types:span",
@@ -1726,8 +1725,10 @@
":literal_test_util",
":test_macros_header",
":xla_internal_test_main", # fixdeps: keep
+ "//tensorflow/compiler/xla:literal",
"//tensorflow/compiler/xla:literal_util",
"//tensorflow/compiler/xla:shape_util",
+ "//tensorflow/compiler/xla:util",
"//tensorflow/compiler/xla:xla_data_proto_cc",
"//tensorflow/compiler/xla/client:xla_builder",
"//tensorflow/compiler/xla/service:custom_call_status",
diff --git a/tensorflow/compiler/xla/tests/custom_call_test.cc b/tensorflow/compiler/xla/tests/custom_call_test.cc
index af1b5a3..10d7148 100644
--- a/tensorflow/compiler/xla/tests/custom_call_test.cc
+++ b/tensorflow/compiler/xla/tests/custom_call_test.cc
@@ -32,6 +32,7 @@
#include "tensorflow/compiler/xla/tests/test_macros.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/dynamic_annotations.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace {
diff --git a/tensorflow/compiler/xla/tests/hlo_test_base.h b/tensorflow/compiler/xla/tests/hlo_test_base.h
index 9d62b02..b5678ff 100644
--- a/tensorflow/compiler/xla/tests/hlo_test_base.h
+++ b/tensorflow/compiler/xla/tests/hlo_test_base.h
@@ -195,7 +195,7 @@
const absl::Span<Literal* const> arguments,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Same as above, except that the module will be executed without Hlo
// optimization.
@@ -204,25 +204,25 @@
const absl::Span<Literal* const> arguments,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Executes an hlo module with fake inputs and compares the results.
::testing::AssertionResult RunAndCompare(
std::unique_ptr<HloModule> module, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Same as above, except that the module will be executed without Hlo
// optimization.
::testing::AssertionResult RunAndCompareNoHloPasses(
std::unique_ptr<HloModule> module, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Executes an hlo module with fake inputs and checks that the execution is
// successful.
::testing::AssertionResult Run(std::unique_ptr<HloModule> module,
- bool run_hlo_passes) ABSL_MUST_USE_RESULT;
+ bool run_hlo_passes) TF_MUST_USE_RESULT;
// Convenient wrappers for executing and comparing an hlo module with fake
// input. Module can be passed in directly, or parsed from an hlo_string,
@@ -231,11 +231,11 @@
const absl::string_view hlo_string,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
::testing::AssertionResult Run(
const absl::string_view hlo_string, bool run_hlo_passes = true,
ExecutionProfile* profile = nullptr,
- std::string backend_config = "") ABSL_MUST_USE_RESULT;
+ std::string backend_config = "") TF_MUST_USE_RESULT;
// Same as below, except requires passing fake arguments.
::testing::AssertionResult RunAndCompareTwoModules(
@@ -259,27 +259,27 @@
::testing::AssertionResult RunReplicated(
const absl::string_view hlo_string, bool run_hlo_passes = true,
int64_t num_replicas = 1,
- std::string backend_config = "") ABSL_MUST_USE_RESULT;
+ std::string backend_config = "") TF_MUST_USE_RESULT;
// If assert_determinism is true, the assertion will fail unless all runs
// produce exactly the same output.
::testing::AssertionResult RunMultipleTimes(
const absl::string_view hlo_string, bool run_hlo_passes,
std::vector<ExecutionProfile>* profiles, std::string backend_config = "",
- bool assert_determinism = false) ABSL_MUST_USE_RESULT;
+ bool assert_determinism = false) TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareFromFile(
const std::string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareNoHloPasses(
const absl::string_view hlo_string,
const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
::testing::AssertionResult RunAndCompareNoHloPassesFromFile(
const std::string& filename, const absl::optional<ErrorSpec>& error,
const std::function<void(HloModule*)>& reference_preprocessor = nullptr)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Convenience method to force the layout of a given parameter in a module.
// The layout of parameter number 'param_no' in the 'module' is set to
diff --git a/tensorflow/compiler/xla/tests/literal_test_util.h b/tensorflow/compiler/xla/tests/literal_test_util.h
index 3f50da3..07f7149 100644
--- a/tensorflow/compiler/xla/tests/literal_test_util.h
+++ b/tensorflow/compiler/xla/tests/literal_test_util.h
@@ -21,7 +21,6 @@
#include <random>
#include <string>
-#include "absl/base/attributes.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/array2d.h"
@@ -35,6 +34,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace xla {
@@ -45,16 +45,16 @@
// Asserts that the given shapes have the same rank, dimension sizes, and
// primitive types.
static ::testing::AssertionResult EqualShapes(
- const Shape& expected, const Shape& actual) ABSL_MUST_USE_RESULT;
+ const Shape& expected, const Shape& actual) TF_MUST_USE_RESULT;
// Asserts that the provided shapes are equal as defined in AssertEqualShapes
// and that they have the same layout.
static ::testing::AssertionResult EqualShapesAndLayouts(
- const Shape& expected, const Shape& actual) ABSL_MUST_USE_RESULT;
+ const Shape& expected, const Shape& actual) TF_MUST_USE_RESULT;
static ::testing::AssertionResult Equal(const LiteralSlice& expected,
const LiteralSlice& actual)
- ABSL_MUST_USE_RESULT;
+ TF_MUST_USE_RESULT;
// Asserts the given literal are (bitwise) equal to given expected values.
template <typename NativeT>
@@ -92,8 +92,7 @@
static ::testing::AssertionResult Near(
const LiteralSlice& expected, const LiteralSlice& actual,
const ErrorSpec& error_spec,
- absl::optional<bool> detailed_message = absl::nullopt)
- ABSL_MUST_USE_RESULT;
+ absl::optional<bool> detailed_message = absl::nullopt) TF_MUST_USE_RESULT;
// Asserts the given literal are within the given error bound of the given
// expected values. Only supported for floating point values.
@@ -146,7 +145,7 @@
// will be compared recursively.
static ::testing::AssertionResult NearOrEqual(
const LiteralSlice& expected, const LiteralSlice& actual,
- const absl::optional<ErrorSpec>& error) ABSL_MUST_USE_RESULT;
+ const absl::optional<ErrorSpec>& error) TF_MUST_USE_RESULT;
private:
LiteralTestUtil(const LiteralTestUtil&) = delete;
diff --git a/tensorflow/compiler/xla/tests/local_client_test_base.h b/tensorflow/compiler/xla/tests/local_client_test_base.h
index e1981b9..66dd884 100644
--- a/tensorflow/compiler/xla/tests/local_client_test_base.h
+++ b/tensorflow/compiler/xla/tests/local_client_test_base.h
@@ -66,13 +66,12 @@
mutable tensorflow::mutex count_mutex_;
// Global counts of allocations and deallocations.
- int64_t allocation_count_ ABSL_GUARDED_BY(count_mutex_) = 0;
- int64_t deallocation_count_ ABSL_GUARDED_BY(count_mutex_) = 0;
+ int64_t allocation_count_ TF_GUARDED_BY(count_mutex_) = 0;
+ int64_t deallocation_count_ TF_GUARDED_BY(count_mutex_) = 0;
// Per-device counts of allocations and deallocations.
- std::map<int, int64_t> device_allocation_count_ ABSL_GUARDED_BY(count_mutex_);
- std::map<int, int64_t> device_deallocation_count_
- ABSL_GUARDED_BY(count_mutex_);
+ std::map<int, int64_t> device_allocation_count_ TF_GUARDED_BY(count_mutex_);
+ std::map<int, int64_t> device_deallocation_count_ TF_GUARDED_BY(count_mutex_);
};
// A base class for tests which exercise the LocalClient interface.
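The counters above follow the usual member-level locking pattern: a mutable tensorflow::mutex next to TF_GUARDED_BY members that are only read or written under a mutex_lock. A hypothetical sketch (ExampleAllocationStats is invented for illustration):

#include <cstdint>
#include <map>

#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/thread_annotations.h"

class ExampleAllocationStats {
 public:
  void RecordAllocation(int device) {
    tensorflow::mutex_lock lock(mu_);
    ++allocation_count_;
    ++per_device_count_[device];
  }

  int64_t allocation_count() const {
    tensorflow::mutex_lock lock(mu_);
    return allocation_count_;
  }

 private:
  // mutable so const accessors can still take the lock.
  mutable tensorflow::mutex mu_;
  int64_t allocation_count_ TF_GUARDED_BY(mu_) = 0;
  std::map<int, int64_t> per_device_count_ TF_GUARDED_BY(mu_);
};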
diff --git a/tensorflow/compiler/xla/text_literal_reader.h b/tensorflow/compiler/xla/text_literal_reader.h
index 7de899d..9e50c56 100644
--- a/tensorflow/compiler/xla/text_literal_reader.h
+++ b/tensorflow/compiler/xla/text_literal_reader.h
@@ -24,6 +24,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/text_literal_writer.h b/tensorflow/compiler/xla/text_literal_writer.h
index adc1bf7..6bf8be1 100644
--- a/tensorflow/compiler/xla/text_literal_writer.h
+++ b/tensorflow/compiler/xla/text_literal_writer.h
@@ -21,6 +21,7 @@
#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/xla_data.pb.h"
#include "tensorflow/core/lib/core/status.h"
+#include "tensorflow/core/platform/macros.h"
namespace xla {
diff --git a/tensorflow/compiler/xla/util.h b/tensorflow/compiler/xla/util.h
index 75c75db..f02a7bc 100644
--- a/tensorflow/compiler/xla/util.h
+++ b/tensorflow/compiler/xla/util.h
@@ -42,6 +42,7 @@
#include "tensorflow/core/lib/math/math_util.h"
#include "tensorflow/core/lib/strings/numbers.h"
#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/protobuf.h"