Remove unused tfrt_fallback_sync.knfb_exec_thin kernel
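
This removes the KNFBExecThinOp op definition (TableGen def, verifier, parser
and printer), the KernelFallbackSyncExecuteCompat helper it called into, and
the tfrt_fallback_sync.knfb_exec_thin sync kernel registration. For reference,
the deleted op's custom assembly form, quoted from its removed description,
was:

    %res = tfrt_fallback_sync.knfb_exec_thin "some.op"(%arg) : 1

where the trailing number indicates the number of results.
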
PiperOrigin-RevId: 398291343
Change-Id: Iaec61a52aaaa7e7e753b2c41e00c822537503217
diff --git a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc
index 653303d..fa32456 100644
--- a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc
+++ b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.cc
@@ -464,83 +464,6 @@
return op_chain;
}
-Status KernelFallbackSyncExecuteCompat(const tfrt::ExecutionContext& exec_ctx,
- absl::string_view op_name,
- absl::string_view device_name,
- tfrt::SyncKernelFrame* frame,
- const tfrt::OpAttrsRef& attrs) {
- auto* fallback_request_state =
- exec_ctx.request_ctx()
- ->GetDataIfExists<KernelFallbackCompatRequestState>();
- if (!fallback_request_state) {
- return tensorflow::errors::Internal(
- "KernelFallbackCompatRequestState not found in RequestContext.");
- }
-
- DCHECK(exec_ctx.request_ctx()->resource_context());
- auto* runner_cache = exec_ctx.request_ctx()
- ->resource_context()
- ->GetOrCreateResource<OpKernelRunnerCache>(
- kOpKernelRunnerCacheResourceName);
-
- TF_ASSIGN_OR_RETURN(
- auto kernel_runner,
- runner_cache->GetOrCreate(
- exec_ctx.location(), op_name, device_name, frame->GetNumArgs(),
- [&attrs, host = exec_ctx.host()](
- tensorflow::AttrValueMap* attr_value_map) -> llvm::Error {
- VLOG(1) << "KernelFallbackExecuteCompat creating op from OpAttrs: "
- << PrintTfrtOpAttrsToString(attrs);
- return FillAttrValueMap(attrs, host, attr_value_map);
- },
- *fallback_request_state));
-
- gtl::InlinedVector<tensorflow::Tensor, 4> input_tf_tensors;
- input_tf_tensors.reserve(frame->GetNumArgs());
- for (int i = 0; i < frame->GetNumArgs(); ++i) {
- auto& tensor = frame->GetArgAt<tensorflow::Tensor>(i);
- input_tf_tensors.push_back(tensor);
- }
-
- // Check if input tensor dtypes are valid.
- TF_RETURN_IF_ERROR(ValidateInputTypes(
- tfrt::string_view(op_name.data(), op_name.size()), input_tf_tensors,
- kernel_runner->op_kernel()->input_types()));
-
- AsyncOpKernel* async = kernel_runner->op_kernel()->AsAsync();
- if (async) {
- LOG_EVERY_N_SEC(WARNING, 60)
- << "Async kernels are being executed in sync mode, which could affect "
- "performance. Consider async execution instead.";
- }
-
- // TODO(b/166705169): Figure out how to properly fallback GPU kernels.
- auto& run_state = GetThreadLocalOpKernelRunState();
- auto clean_up_inputs =
- gtl::MakeCleanup([&]() { run_state.input_tf_tensors.clear(); });
-
- run_state.input_tf_tensors = std::move(input_tf_tensors);
-
- auto& input_tf_tensor_values = run_state.input_tf_tensor_values;
- input_tf_tensor_values.resize(run_state.input_tf_tensors.size());
- for (int i = 0; i < run_state.input_tf_tensors.size(); ++i) {
- input_tf_tensor_values[i].tensor = &run_state.input_tf_tensors[i];
- }
-
- run_state.SetUpParams(*kernel_runner, *fallback_request_state);
-
- OpKernelContext context(&run_state.params);
- kernel_runner->Run(&context);
-
- if (!context.status().ok()) return context.status();
-
- DCHECK_EQ(context.num_outputs(), frame->GetNumResults());
- for (int i = 0; i < context.num_outputs(); ++i) {
- *frame->GetResultAt(i) = tfrt::Value(std::move(*context.mutable_output(i)));
- }
- return Status::OK();
-}
-
llvm::Expected<Device*> GetTfDevice(const tfrt::ExecutionContext& exec_ctx,
const tfrt::Device& device) {
auto* fallback_request_state =
diff --git a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h
index 73e023a..0e9855a 100644
--- a/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h
+++ b/tensorflow/core/runtime_fallback/kernel/kernel_fallback_execute_compat.h
@@ -63,17 +63,6 @@
llvm::MutableArrayRef<tfrt::RCReference<tfrt::AsyncValue>> results,
const tfrt::OpAttrsRef& attrs);
-// `frame` is used to consume the inputs and hold the outputs from kernel
-// execution.
-//
-// TODO(tfrt-devs): switch `attrs` to using tfrt::AggregateAttr after
-// cl/343983780.
-Status KernelFallbackSyncExecuteCompat(const tfrt::ExecutionContext& exec_ctx,
- absl::string_view op_name,
- absl::string_view device_name,
- tfrt::SyncKernelFrame* frame,
- const tfrt::OpAttrsRef& attrs);
-
// TODO(tfrt-devs): Consider moving following method to a separate file.
llvm::Expected<Device*> GetTfDevice(const tfrt::ExecutionContext& exec_ctx,
const tfrt::Device& device);
diff --git a/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.cc b/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.cc
index b1719b9..b1a256f 100644
--- a/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.cc
+++ b/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.cc
@@ -57,10 +57,6 @@
return fallback_common::VerifyExecuteOpCommon(op);
}
-static LogicalResult verify(KNFBExecThinOp op) {
- return fallback_common::VerifyExecuteOpCommon(op);
-}
-
static ParseResult parseExecuteOp(OpAsmParser &parser, OperationState &result) {
fallback_common::ParseExecuteOpOptions parse_options;
parse_options.has_chain = false;
@@ -74,21 +70,6 @@
parser, builder, result, GetTensorType(&builder), parse_options);
}
-static ParseResult parseKNFBExecThinOp(OpAsmParser &parser,
- OperationState &result) {
- fallback_common::ParseExecuteOpOptions parse_options;
- parse_options.has_chain = false;
- parse_options.has_key = false;
- parse_options.has_device = false;
- parse_options.has_func_attr = false;
- parse_options.has_cost = false;
-
- auto &builder = parser.getBuilder();
- return fallback_common::ParseExecuteOpCommon(
- parser, builder, result,
- tfrt::fallback::TFTensorType::get(builder.getContext()), parse_options);
-}
-
static void print(OpAsmPrinter &p, ExecuteOp op) {
p << " " << op->getAttr("op_name") << '(' << op.operands() << ')';
@@ -96,13 +77,6 @@
if (!op.results().empty()) p << " : " << op.results().size();
}
-static void print(OpAsmPrinter &p, KNFBExecThinOp op) {
- p << " " << op->getAttr("op_name") << '(' << op.operands() << ')';
-
- fallback_common::PrintExecuteOpCommon(p, op);
- if (!op.results().empty()) p << " : " << op.results().size();
-}
-
void ExecuteOp::getOpAttrs(
SmallVectorImpl<std::pair<StringRef, Attribute>> *op_attrs) {
fallback_common::GetExecuteOpAttrsCommon(
diff --git a/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.td b/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.td
index 073348b..77a37cf 100644
--- a/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.td
+++ b/tensorflow/core/runtime_fallback/opdefs/tfrt_fallback_sync.td
@@ -74,36 +74,4 @@
"StringRef":$op_name)>];
}
-def KNFBExecThinOp : FallbackSync_Op<"knfb_exec_thin",
- [NoSideEffect, CoreRT_TypedAttributeTrait]> {
- let summary = "The Fallback Sync Executeop2";
- let description = [{
- The KNFBExecThinOp executes an operation on the specified device.
-
- Example:
- %res = tfrt_fallback_sync.knfb_exec_thin "some.op"(%arg) : 1
-
- Note that the trailing number indicates the number of results.
- }];
-
- let arguments = (ins
- Variadic<TFTensorType>:$operands,
- ArrayAttr:$op_attrs,
- StrAttr:$op_name
- );
-
- let results = (outs
- Variadic<TFTensorType>:$results
- );
-
- let extraClassDeclaration = [{
- void getOpAttrs(SmallVectorImpl<std::pair<StringRef, Attribute>>* op_attrs);
- }];
-
- let builders = [
- OpBuilder<(ins "ArrayRef<Type>":$results, "ValueRange":$operands,
- "ArrayRef<std::pair<StringRef, Attribute>>":$op_attrs,
- "StringRef":$op_name)>];
-}
-
#endif
diff --git a/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc b/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc
index 1b8313d..b70168d 100644
--- a/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc
+++ b/tensorflow/core/runtime_fallback/runtime/runtime_fallback_kernels.cc
@@ -1288,34 +1288,6 @@
}
}
-// Sync execution via kernel fallback compat mode, by taking and returning
-// tensorflow::Tensor (as such this is "thin").
-// Graph compiler needs to make sure this kernel is used in proper context.
-// TODO(tfrt-devs): Add function attribute support.
-// TODO(tfrt-devs): Add device support.
-static void KernelFallbackSyncExecuteThinOp(tfrt::SyncKernelFrame* frame) {
- const auto& exec_ctx = frame->GetExecutionContext();
- assert(frame->GetNumAttributes() == 2);
- auto op_attr_array = AggregateAttr(frame->GetAttributeAt(0));
- auto op_name = StringAttr(frame->GetAttributeAt(1));
- auto op_name_sv = op_name.GetValue();
- op_name_sv.consume_front("tf.");
-
- tfrt::OpAttrs op_attrs;
- tfrt::SetUpOpAttrs(op_attr_array, &op_attrs);
-
- // The TF kernel we call takes and returns vectors of tensorflow::Tensor.
- auto status = KernelFallbackSyncExecuteCompat(
- exec_ctx, ToAbslStringView(op_name_sv),
- /*device_name=*/ToAbslStringView(exec_ctx.host()->GetHostDevice().name()),
- frame, tfrt::OpAttrsRef(op_attrs));
-
- if (!status.ok()) {
- frame->SetError(tfrt::MakeStringError(status.error_message()));
- return;
- }
-}
-
void RegisterTfdDelegateKernels(tfrt::KernelRegistry* registry) {
registry->AddKernel("tfd.init_eager_context",
TFRT_KERNEL(TfdInitEagerContext));
@@ -1350,8 +1322,6 @@
registry->AddSyncKernel("tfrt_fallback_sync.executeop",
RuntimeFallbackSyncExecuteOp);
- registry->AddSyncKernel("tfrt_fallback_sync.knfb_exec_thin",
- KernelFallbackSyncExecuteThinOp);
}
} // namespace tfd