Replace `tensorflow::Status::OK()` with `tensorflow::OkStatus()`.
PiperOrigin-RevId: 452665495
diff --git a/tensorflow/compiler/xla/client/client.cc b/tensorflow/compiler/xla/client/client.cc
index 796ba04..f573685 100644
--- a/tensorflow/compiler/xla/client/client.cc
+++ b/tensorflow/compiler/xla/client/client.cc
@@ -111,7 +111,7 @@
return s;
}
VLOG(3) << "TransferToInfeedResponse: {" << response.DebugString() << "}";
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<Literal> Client::TransferFromOutfeed(
@@ -159,7 +159,7 @@
return s;
}
VLOG(3) << "ResetDeviceResponse: {" << response.DebugString() << "}";
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<Literal> Client::ExecuteAndTransfer(
diff --git a/tensorflow/compiler/xla/client/lib/math.cc b/tensorflow/compiler/xla/client/lib/math.cc
index 6990791..02a45a7 100644
--- a/tensorflow/compiler/xla/client/lib/math.cc
+++ b/tensorflow/compiler/xla/client/lib/math.cc
@@ -93,7 +93,7 @@
"Operands to %s must be real-valued floating-point, but got %s",
op_name, PrimitiveType_Name(elem_ty));
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
XlaOp IsPosInf(XlaOp operand) {
diff --git a/tensorflow/compiler/xla/client/lib/matrix.cc b/tensorflow/compiler/xla/client/lib/matrix.cc
index 6386089..7c58697 100644
--- a/tensorflow/compiler/xla/client/lib/matrix.cc
+++ b/tensorflow/compiler/xla/client/lib/matrix.cc
@@ -602,7 +602,7 @@
auto maybe_invalid_character = [](char d) {
if (absl::ascii_isalpha(d)) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
if (d == '.') {
return InvalidArgument("Unsupported \".\" in einsum config.");
diff --git a/tensorflow/compiler/xla/client/lib/tridiagonal.cc b/tensorflow/compiler/xla/client/lib/tridiagonal.cc
index 8f8ea72..24b2f15 100644
--- a/tensorflow/compiler/xla/client/lib/tridiagonal.cc
+++ b/tensorflow/compiler/xla/client/lib/tridiagonal.cc
@@ -45,7 +45,7 @@
expected, actual_num_dims);
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<int64_t> CheckSystemAndReturnNumEquations(XlaOp lower_diagonal,
diff --git a/tensorflow/compiler/xla/client/local_client.cc b/tensorflow/compiler/xla/client/local_client.cc
index 93fc9e7..c44b15f 100644
--- a/tensorflow/compiler/xla/client/local_client.cc
+++ b/tensorflow/compiler/xla/client/local_client.cc
@@ -113,7 +113,7 @@
backend.platform()->Name());
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<std::pair<ServiceExecutableRunOptions, StreamPool::Ptr>>
diff --git a/tensorflow/compiler/xla/client/padding.cc b/tensorflow/compiler/xla/client/padding.cc
index 8e9a7e6..6cf876e 100644
--- a/tensorflow/compiler/xla/client/padding.cc
+++ b/tensorflow/compiler/xla/client/padding.cc
@@ -35,7 +35,7 @@
input_dimensions.size(), window_dimensions.size(),
window_strides.size());
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
std::vector<std::pair<int64_t, int64_t>> MakePadding(
diff --git a/tensorflow/compiler/xla/client/xla_builder.cc b/tensorflow/compiler/xla/client/xla_builder.cc
index a2a7bac..989ea77 100644
--- a/tensorflow/compiler/xla/client/xla_builder.cc
+++ b/tensorflow/compiler/xla/client/xla_builder.cc
@@ -456,7 +456,7 @@
dynamic_size_param_index},
DynamicParameterBinding::DynamicDimension{
target_param_num, target_param_index, target_dim_num}));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status XlaBuilder::SetInstructionFrontendAttribute(const XlaOp op,
@@ -465,7 +465,7 @@
TF_ASSIGN_OR_RETURN(auto instr_proto, LookUpMutableInstruction(op));
auto* frontend_attributes = instr_proto->mutable_frontend_attributes();
(*frontend_attributes->mutable_map())[attribute] = std::move(value);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
XlaComputation XlaBuilder::BuildAndNoteError() {
@@ -485,7 +485,7 @@
first_error_backtrace_.Dump(tensorflow::DebugWriteToString, &backtrace);
return AppendStatus(first_error_, backtrace);
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<XlaComputation> XlaBuilder::Build(bool remove_dynamic_dimensions) {
@@ -592,7 +592,7 @@
alias.param_index, alias.kind));
}
*module->mutable_input_output_alias() = config.ToProto();
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<XlaOp> XlaBuilder::InDimBroadcast(
@@ -1415,7 +1415,7 @@
field_name, i, numbers[i]);
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
};
TF_RETURN_IF_ERROR(
check_spatial_dimensions("input_spatial_dimensions",
@@ -2556,7 +2556,7 @@
"it in builder '%s'",
op.handle(), op.builder()->name(), name());
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
XlaOp XlaBuilder::Reduce(XlaOp operand, XlaOp init_value,
@@ -3877,7 +3877,7 @@
dnum.output_batch_dimension(), dnum.output_feature_dimension(),
dnum.output_spatial_dimensions(0), dnum.output_spatial_dimensions(1));
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<XlaOp> XlaBuilder::AddInstruction(HloInstructionProto&& instr,
diff --git a/tensorflow/compiler/xla/pjrt/distributed/client.cc b/tensorflow/compiler/xla/pjrt/distributed/client.cc
index 039a788..5dc57dd 100644
--- a/tensorflow/compiler/xla/pjrt/distributed/client.cc
+++ b/tensorflow/compiler/xla/pjrt/distributed/client.cc
@@ -229,7 +229,7 @@
tensorflow::ThreadOptions(), "pjrt_distributed_heartbeat",
[this]() { HeartbeatLoop(); }));
LOG(INFO) << "Connected to distributed JAX controller";
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::Status DistributedRuntimeClientImpl::EnumerateDevices(
@@ -258,7 +258,7 @@
}
VLOG(10) << "EnumerateDevices() response: " << response.DebugString();
response.mutable_global_topology()->Swap(global_topology);
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::Status DistributedRuntimeClientImpl::Shutdown() {
@@ -290,7 +290,7 @@
VLOG(10) << "Shutdown() response: " << response.DebugString();
absl::MutexLock lock(&mu_);
state_ = State::kClosed;
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::StatusOr<std::string> DistributedRuntimeClientImpl::BlockingKeyValueGet(
@@ -504,7 +504,7 @@
Status s = coord_agent_->WaitForAllTasks(devices);
if (!s.ok()) return s;
*global_topology = coord_agent_->GetClusterDeviceInfo().xla().devices();
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::StatusOr<std::string>
diff --git a/tensorflow/compiler/xla/pjrt/distributed/client_server_test.cc b/tensorflow/compiler/xla/pjrt/distributed/client_server_test.cc
index 1f2a247..9795c52 100644
--- a/tensorflow/compiler/xla/pjrt/distributed/client_server_test.cc
+++ b/tensorflow/compiler/xla/pjrt/distributed/client_server_test.cc
@@ -145,7 +145,7 @@
TF_RET_CHECK(shutdown_count == num_nodes);
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<xla::Status> statuses(num_nodes);
@@ -213,7 +213,7 @@
std::string value,
client->BlockingKeyValueGet("key2", absl::InfiniteDuration()));
TF_RET_CHECK(value == "value2");
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
auto thread1_fn = [&]() -> xla::Status {
// Wait for thread0 client to be ready for connection, to ensure global ids
@@ -236,7 +236,7 @@
client->BlockingKeyValueGet("key1", absl::InfiniteDuration()));
TF_RET_CHECK(value == "value1");
TF_RETURN_IF_ERROR(client->KeyValueSet("key2", "value2"));
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<std::function<xla::Status()>> functions = {thread0_fn,
@@ -276,13 +276,13 @@
TF_RETURN_IF_ERROR(client->Connect());
if (node_id == 0) {
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
// The call to Shutdown() should be interrupted if a worker stops issuing
// heartbeats.
TF_RETURN_IF_ERROR(client->Shutdown());
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<xla::Status> statuses(num_nodes);
@@ -337,10 +337,10 @@
TF_RETURN_IF_ERROR(client->Connect());
if (node_id == 0) {
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
shutdown.WaitForNotification();
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<xla::Status> statuses(num_nodes);
@@ -396,7 +396,7 @@
shutdown.WaitForNotification();
TF_RETURN_IF_ERROR(client->Shutdown());
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<xla::Status> statuses(num_nodes);
@@ -441,7 +441,7 @@
absl::SleepFor(absl::Milliseconds(200) * node_id);
TF_RETURN_IF_ERROR(client->Connect());
TF_RETURN_IF_ERROR(client->Shutdown());
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
std::vector<xla::Status> statuses(num_nodes);
@@ -478,7 +478,7 @@
TF_RETURN_IF_ERROR(client->Connect());
TF_RETURN_IF_ERROR(client->Shutdown());
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
};
// Note: one fewer thread than 'num_nodes'.
diff --git a/tensorflow/compiler/xla/pjrt/distributed/service.cc b/tensorflow/compiler/xla/pjrt/distributed/service.cc
index 0b73e0c..46d35f6 100644
--- a/tensorflow/compiler/xla/pjrt/distributed/service.cc
+++ b/tensorflow/compiler/xla/pjrt/distributed/service.cc
@@ -117,7 +117,7 @@
"Invalid node ID %d, must be in the range [0, %d)", node_id,
options_.num_nodes);
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::Status DistributedRuntimeServiceImpl::ValidateSessionId(
@@ -127,7 +127,7 @@
"Session ID of request %llu does not match active session ID %llu",
session_id, session_id_);
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
::grpc::Status DistributedRuntimeServiceImpl::Connect(
diff --git a/tensorflow/compiler/xla/pjrt/distributed/util.h b/tensorflow/compiler/xla/pjrt/distributed/util.h
index abb2b60..ccc1f19 100644
--- a/tensorflow/compiler/xla/pjrt/distributed/util.h
+++ b/tensorflow/compiler/xla/pjrt/distributed/util.h
@@ -23,7 +23,7 @@
inline Status FromGrpcStatus(const ::grpc::Status& s) {
if (s.ok()) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
} else {
return Status(static_cast<tensorflow::error::Code>(s.error_code()),
s.error_message());
diff --git a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
index 6d2fb19..1491f03 100644
--- a/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
+++ b/tensorflow/compiler/xla/service/cpu/tests/cpu_noalias_test.cc
@@ -67,7 +67,7 @@
absl::make_unique<DependencyHloOrdering>(hlo_module.get()),
backend().compiler()->BufferSizeBytesFunction(),
[](LogicalBuffer::Color) { return /*alignment=*/1; });
- ASSERT_EQ(status_or_buffer_assn.status(), Status::OK());
+ ASSERT_EQ(status_or_buffer_assn.status(), ::tensorflow::OkStatus());
llvm::LLVMContext context;
llvm_ir::AliasAnalysis aa(*hlo_module, *status_or_buffer_assn.ValueOrDie(),
diff --git a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
index 64c326b..d8977a1 100644
--- a/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
+++ b/tensorflow/compiler/xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.cc
@@ -310,14 +310,14 @@
bitcode_path);
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
// Links libdevice into the given module if the module needs libdevice.
Status LinkLibdeviceIfNecessary(llvm::Module* module,
const std::string& libdevice_dir_path) {
if (!CouldNeedDeviceBitcode(*module)) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
// CUDA 9+ uses a single libdevice file for all devices, and we don't support
@@ -354,7 +354,7 @@
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
std::unique_ptr<llvm::TargetMachine> NVPTXGetTargetMachine(
@@ -455,7 +455,7 @@
function_passes.doFinalization();
module_passes.run(*module);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
// One-time module initializer.
@@ -779,7 +779,7 @@
Status LinkROCDLIfNecessary(llvm::Module* module, std::string gcn_arch_name,
const std::string& rocdl_dir_path) {
if (!CouldNeedDeviceBitcode(*module)) {
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
return LinkWithBitcodeVector(module,
@@ -808,7 +808,7 @@
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
// The following routine maps a feature token extracted from the
diff --git a/tensorflow/compiler/xla/service/gpu/tests/hlo_to_llvm_ir.cc b/tensorflow/compiler/xla/service/gpu/tests/hlo_to_llvm_ir.cc
index 413caab..ab0137a 100644
--- a/tensorflow/compiler/xla/service/gpu/tests/hlo_to_llvm_ir.cc
+++ b/tensorflow/compiler/xla/service/gpu/tests/hlo_to_llvm_ir.cc
@@ -100,7 +100,7 @@
"Feature not yet implemented in ROCm"};
#endif
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
xla::Status CompileAndPrintLlvmIrFromFile(const std::string& file_name,
@@ -115,7 +115,7 @@
TF_RETURN_IF_ERROR(CompileAndPrintLlvmIr(hlo_module_text, ptx, sm));
}
- return xla::Status::OK();
+ return ::tensorflow::OkStatus();
}
} // namespace
diff --git a/tensorflow/compiler/xla/service/interpreter/executable_base.cc b/tensorflow/compiler/xla/service/interpreter/executable_base.cc
index bb34360..6fee432e 100644
--- a/tensorflow/compiler/xla/service/interpreter/executable_base.cc
+++ b/tensorflow/compiler/xla/service/interpreter/executable_base.cc
@@ -170,7 +170,7 @@
alias->ToString());
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}));
se::StreamExecutor* executor = stream->parent();
diff --git a/tensorflow/compiler/xla/service/interpreter/executor.cc b/tensorflow/compiler/xla/service/interpreter/executor.cc
index 32ada0a..8f5c87f 100644
--- a/tensorflow/compiler/xla/service/interpreter/executor.cc
+++ b/tensorflow/compiler/xla/service/interpreter/executor.cc
@@ -86,13 +86,13 @@
port::Status XlaInterpreterExecutor::SynchronousMemcpy(
DeviceMemoryBase *dev_dst, const void *host_src, uint64_t size) {
memcpy(dev_dst->opaque(), host_src, size);
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
port::Status XlaInterpreterExecutor::SynchronousMemcpy(
void *host_dst, const DeviceMemoryBase &dev_src, uint64_t size) {
memcpy(host_dst, dev_src.opaque(), size);
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
bool XlaInterpreterExecutor::HostCallback(
diff --git a/tensorflow/compiler/xla/service/interpreter/executor.h b/tensorflow/compiler/xla/service/interpreter/executor.h
index 2b2c6bf..32bc8ab 100644
--- a/tensorflow/compiler/xla/service/interpreter/executor.h
+++ b/tensorflow/compiler/xla/service/interpreter/executor.h
@@ -53,7 +53,7 @@
~XlaInterpreterExecutor() override;
port::Status Init(int device_ordinal, DeviceOptions device_options) override {
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
port::Status GetKernel(const MultiKernelLoaderSpec &spec,
@@ -128,11 +128,11 @@
std::function<port::Status()> callback) override;
port::Status AllocateEvent(Event *event) override {
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
port::Status DeallocateEvent(Event *event) override {
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
port::Status RecordEvent(Stream *stream, Event *event) override {
@@ -173,7 +173,7 @@
CreateDeviceDescription(int device_ordinal);
port::Status EnablePeerAccessTo(StreamExecutorInterface *other) override {
- return port::Status::OK();
+ return ::tensorflow::OkStatus();
}
bool CanEnablePeerAccessTo(StreamExecutorInterface *other) override {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
index 45aed44..4cc8276 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/dynamic_update_slice_util.cc
@@ -146,7 +146,7 @@
TF_ASSIGN_OR_RETURN(llvm::Value * update_data,
update_array_generator(update_index));
output_array.EmitWriteArrayElement(output_index, update_data, b);
- return Status::OK();
+ return ::tensorflow::OkStatus();
};
if (launch_dimensions != nullptr) {
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
index a450692..c459e95 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.cc
@@ -53,7 +53,7 @@
/*is_first_iteration=*/b_->CreateICmpEQ(
loop->GetIndVarValue(), start)));
llvm_ir::SetToLastInsertPoint(loop->GetExitBasicBlock(), b_);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
}
@@ -71,7 +71,7 @@
TF_RETURN_IF_ERROR(false_block_generator());
}
llvm_ir::SetToLastInsertPoint(if_data.after_block, b_);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
void KernelSupportLibrary::EmitAndCallOutlinedKernel(
diff --git a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
index d708f8f..a09a9bb 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
+++ b/tensorflow/compiler/xla/service/llvm_ir/kernel_support_library.h
@@ -59,12 +59,12 @@
llvm::Value* step,
const std::function<void(llvm::Value* ind_var, bool is_first_iteration)>&
for_body_generator) {
- CHECK_EQ(Status::OK(),
+ CHECK_EQ(::tensorflow::OkStatus(),
ForWithStatus(
name, start, end, step,
[&](llvm::Value* ind_var, bool is_first_iteration) -> Status {
for_body_generator(ind_var, is_first_iteration);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}));
}
@@ -116,7 +116,7 @@
name, start, end, step, peel_first_iteration,
[&](llvm::Value* ind_var, llvm::Value* is_first_iteration) -> Status {
for_body_generator(ind_var, is_first_iteration);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}));
}
@@ -215,7 +215,7 @@
llvm::Value* condition,
const std::function<Status()>& true_block_generator,
const std::function<Status()>& false_block_generator = []() -> Status {
- return Status::OK();
+ return ::tensorflow::OkStatus();
}) {
return IfWithStatus("", condition, true_block_generator,
false_block_generator);
@@ -235,16 +235,16 @@
name, condition,
[&]() {
true_block_generator();
- return Status::OK();
+ return ::tensorflow::OkStatus();
},
[&]() {
false_block_generator();
- return Status::OK();
+ return ::tensorflow::OkStatus();
}));
} else {
TF_CHECK_OK(IfWithStatus(name, condition, [&]() {
true_block_generator();
- return Status::OK();
+ return ::tensorflow::OkStatus();
}));
}
}
diff --git a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
index 301cd1c..73fb7b1 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/llvm_util.cc
@@ -578,7 +578,7 @@
tensorflow::Env::Default()->NewWritableFile(file_name, &f));
TF_RETURN_IF_ERROR(f->Append(text));
TF_RETURN_IF_ERROR(f->Close());
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
void DumpIrIfEnabled(const HloModule& hlo_module,
diff --git a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
index d4b30ce..136d72e 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/loop_emitter.cc
@@ -78,7 +78,7 @@
target_element_generator(array_index));
target_arrays_vec[0].EmitWriteArrayElement(array_index, target_element,
b);
- return Status::OK();
+ return ::tensorflow::OkStatus();
};
}
@@ -95,7 +95,7 @@
target_arrays_vec[i].EmitWriteArrayElement(
array_index, b->CreateExtractValue(target_element, i), b);
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
};
}
@@ -187,7 +187,7 @@
if (exit_bb_ != nullptr) {
b_->SetInsertPoint(exit_bb_);
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
} // namespace llvm_ir
diff --git a/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc b/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc
index bd0bf46..962470c 100644
--- a/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc
+++ b/tensorflow/compiler/xla/service/llvm_ir/sort_util.cc
@@ -147,7 +147,7 @@
write_element(i, compare_keys_index, value2);
}
});
- return Status::OK();
+ return ::tensorflow::OkStatus();
});
}
@@ -311,7 +311,7 @@
// same location in shared memory because we have exactly tile_size / 2 many
// threads, and the linear index calculated by ParallelLoopEmitter uses
// linear_index = blockIdx.x * blockDim.x + threadIdx.x;
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
} // namespace
@@ -415,7 +415,7 @@
element_address_pointee_type, write_element, emit_compare_callback,
b));
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
};
return gpu::ParallelLoopEmitter(compare_loop_body_emitter, iteration_shape,
launch_dimensions, b)
diff --git a/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc b/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
index 4a688b9..6c419ed 100644
--- a/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
+++ b/tensorflow/compiler/xla/tools/dumped_computation_to_operation_list.cc
@@ -52,7 +52,7 @@
std::cout << absl::StrFormat("%s :: (%s) -> %s :: %s\n",
HloOpcodeString(hlo->opcode()), params,
ShapeUtil::HumanString(hlo->shape()), path_);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
private:
diff --git a/tensorflow/compiler/xla/tools/hlo_control_flow_flattening.cc b/tensorflow/compiler/xla/tools/hlo_control_flow_flattening.cc
index 6bb3015..928a313 100644
--- a/tensorflow/compiler/xla/tools/hlo_control_flow_flattening.cc
+++ b/tensorflow/compiler/xla/tools/hlo_control_flow_flattening.cc
@@ -266,7 +266,7 @@
/*accept_different_shape=*/true);
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemoveInfeed(
@@ -286,7 +286,7 @@
TF_RETURN_IF_ERROR(
computation->ReplaceWithNewInstruction(infeed_hlo, std::move(new_tuple)));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemoveRecvDone(
@@ -313,7 +313,7 @@
additional_removed->insert(recv);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(recv));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemoveOutfeed(
@@ -329,7 +329,7 @@
->set_custom_call_has_side_effect(true);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(outfeed_hlo, custom_call));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemoveSendDone(
@@ -352,7 +352,7 @@
additional_removed->insert(send);
TF_RETURN_IF_ERROR(computation->RemoveInstruction(send));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemoveCollective(HloInstruction* hlo) const {
@@ -367,7 +367,7 @@
hlo->ToString(HloPrintOptions().Canonical());
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, custom_call));
custom_call->set_metadata_replaced_op(replaced_collective_op_str);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status HloControlFlowFlattening::RemovePartitionOrReplicaId(
@@ -375,7 +375,7 @@
HloComputation* computation = hlo->parent();
HloInstruction* zero = CreateConstant(hlo->shape(), computation);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, zero));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
StatusOr<bool> HloControlFlowFlattening::Run(HloModule* module) {
diff --git a/tensorflow/compiler/xla/tools/hlo_extractor.cc b/tensorflow/compiler/xla/tools/hlo_extractor.cc
index 469b5bf..90bece3 100644
--- a/tensorflow/compiler/xla/tools/hlo_extractor.cc
+++ b/tensorflow/compiler/xla/tools/hlo_extractor.cc
@@ -54,7 +54,7 @@
parameter_number_++, parameter->shape(), parameter->name());
clone_context_.MapInstruction(parameter, new_parameter.get());
builder_.AddInstruction(std::move(new_parameter));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status DefaultAction(const HloInstruction* hlo) override {
@@ -66,7 +66,7 @@
parameter_number_++;
clone_context_.MapInstruction(hlo, new_parameter.get());
builder_.AddInstruction(std::move(new_parameter));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
std::vector<HloInstruction*> new_operands;
for (auto operand : hlo->operands()) {
@@ -75,7 +75,7 @@
auto instruction =
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &clone_context_);
builder_.AddInstruction(std::move(instruction));
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Status FinishVisit(const HloInstruction* /*root*/) override {
@@ -91,7 +91,7 @@
}
}
}
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
HloModule* module() { return module_.get(); }
diff --git a/tensorflow/compiler/xla/tools/hlo_module_loader.cc b/tensorflow/compiler/xla/tools/hlo_module_loader.cc
index 3cadb4a..8bd8758 100644
--- a/tensorflow/compiler/xla/tools/hlo_module_loader.cc
+++ b/tensorflow/compiler/xla/tools/hlo_module_loader.cc
@@ -42,7 +42,7 @@
HloModuleConfig* config) {
config->set_replica_count(ovr_config.num_replicas);
config->set_num_partitions(ovr_config.num_partitions);
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
} // namespace
diff --git a/tensorflow/compiler/xla/tools/run_hlo_module.cc b/tensorflow/compiler/xla/tools/run_hlo_module.cc
index 24725db..92fa8e2 100644
--- a/tensorflow/compiler/xla/tools/run_hlo_module.cc
+++ b/tensorflow/compiler/xla/tools/run_hlo_module.cc
@@ -199,7 +199,7 @@
if (reference_module == nullptr) {
std::cerr << "Skipping reference runner\n";
- return Status::OK();
+ return ::tensorflow::OkStatus();
}
Literal reference_result =