Handle the case where api_implements exists but api_preferred_device does not exist.
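
For context, a minimal sketch (hypothetical, not part of this change) of a
FunctionDef that carries api_implements but no api_preferred_device, as the
TF Lite/MLIR path produces:

  #include "tensorflow/core/framework/function.pb.h"

  tensorflow::FunctionDef fdef;
  fdef.mutable_signature()->set_name("DoThings");
  // Declare the interface this function implements.
  (*fdef.mutable_attr())["api_implements"].set_s("DoThings");
  // No "api_preferred_device" attribute is set. FunctionApiInfo::Init() now
  // clears the interface name for such a function instead of registering it,
  // so ImplementationSelector leaves calls to it unchanged.
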
PiperOrigin-RevId: 315726284
Change-Id: Ifb212525c58f3500e62a5090294c6a244972fa44
diff --git a/tensorflow/core/grappler/optimizers/function_api_info.cc b/tensorflow/core/grappler/optimizers/function_api_info.cc
index 9f6352f..201df2d 100644
--- a/tensorflow/core/grappler/optimizers/function_api_info.cc
+++ b/tensorflow/core/grappler/optimizers/function_api_info.cc
@@ -59,6 +59,13 @@
"Function '", function_def.signature().name(),
"' has a preferred device, but does not implement an interface");
}
+ // Handle the case where api_implements exists but api_preferred_device does
+ // not exist. Currently this is for TF Lite/MLIR, which depends on api_implements.
+ if (!interface_name_.empty() && preferred_device_.empty()) {
+ VLOG(1) << "A function has api_implements: " << interface_name_ << ", but "
+ << "no api_preferred_device; skipping it.";
+ interface_name_.clear();
+ }
return Status::OK();
}
diff --git a/tensorflow/core/grappler/optimizers/function_api_info_test.cc b/tensorflow/core/grappler/optimizers/function_api_info_test.cc
index 9bb517f..8f1412f 100644
--- a/tensorflow/core/grappler/optimizers/function_api_info_test.cc
+++ b/tensorflow/core/grappler/optimizers/function_api_info_test.cc
@@ -138,11 +138,9 @@
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("DoStuff", GetInterfaceName(lib_api_info, "DoStuffGpu"));
- EXPECT_EQ("DoThings", GetInterfaceName(lib_api_info, "DoThings"));
EXPECT_EQ("CPU", GetPreferredDevice(lib_api_info, "DoStuffCpu"));
EXPECT_EQ("GPU", GetPreferredDevice(lib_api_info, "DoStuffGpu"));
- EXPECT_EQ("", GetPreferredDevice(lib_api_info, "DoThings"));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffCpu", {"DoStuffGpu"}));
EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoStuffGpu", {"DoStuffCpu"}));
@@ -185,6 +183,19 @@
EXPECT_FALSE(ret.ok());
}
+TEST(FunctionApiInfoTest, ImplementsWithoutDevice) {
+ FunctionDefLibrary func_lib;
+ const std::vector<ArgSpec> func_args{{"in1", "float32"}, {"in2", "int32"}};
+ const std::vector<ArgSpec> output_args{{"out", "float32"}};
+ PopulateFunction("DoThings", "DoThings", "", func_args, output_args, "", "",
+ func_lib.add_function());
+ FunctionLibraryApiInfo lib_api_info;
+ const Status ret = lib_api_info.Init(func_lib);
+ EXPECT_TRUE(ret.ok());
+ EXPECT_TRUE(lib_api_info.empty());
+ EXPECT_TRUE(CheckEquivImpl(lib_api_info, "DoThings", {}));
+}
+
} // namespace
} // namespace grappler
} // namespace tensorflow
diff --git a/tensorflow/core/grappler/optimizers/implementation_selector_test.cc b/tensorflow/core/grappler/optimizers/implementation_selector_test.cc
index 2ef8bb8..13a6e60 100644
--- a/tensorflow/core/grappler/optimizers/implementation_selector_test.cc
+++ b/tensorflow/core/grappler/optimizers/implementation_selector_test.cc
@@ -220,6 +220,62 @@
}
}
+TEST_F(ImplementationSelectorTest, NoSwapWithImplementsOnly) {
+ using test::function::NDef;
+ ImplementationSelector optimizer;
+ GraphDef output;
+ GrapplerItem item;
+ // DeviceIndex op based implementation selector.
+ AttrValue device_names;
+ device_names.mutable_list()->add_s("CPU");
+ device_names.mutable_list()->add_s("TPU_REPLICATED_CORE");
+ device_names.mutable_list()->add_s("GPU");
+
+ // api_implements is set but api_preferred_device is not, so no swap is expected.
+ auto cpu_def = test::function::XTimesTwo();
+ auto* func_attr = cpu_def.mutable_attr();
+ (*func_attr)["api_implements"].set_s("times_two");
+
+ auto gpu_def = test::function::XAddX();
+ auto* func2_attr = gpu_def.mutable_attr();
+ (*func2_attr)["api_implements"].set_s("times_two");
+
+ item.graph = test::function::GDef(
+ {NDef("x", "DeviceIndex", {}, {{"device_names", device_names}},
+ CpuDevice),
+ NDef("case", "Case", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
+ NDef("y", "DeviceIndex", {}, {{"device_names", device_names}},
+ GpuDevice),
+ NDef("case_y", "Case", {"y"}, {{"T", DT_FLOAT}}, TpuDevice),
+ NDef("y1", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, GpuDevice),
+ NDef("z1", "Identity", {"y1"}, {{"T", DT_FLOAT}}, GpuDevice),
+ NDef("y2", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}}, CpuDevice),
+ NDef("z2", "Identity", {"y2"}, {{"T", DT_FLOAT}}, CpuDevice)},
+ // FunctionLib
+ {cpu_def, gpu_def});
+
+ TF_EXPECT_OK(optimizer.Optimize(nullptr, item, &output));
+ for (const NodeDef& node : output.node()) {
+ if (node.name() == "x") {
+ // Rewrite DeviceIndex op to a Const op with value of GPU index 2.
+ EXPECT_EQ("Const", node.op());
+ EXPECT_EQ(2, node.attr().at("value").tensor().int_val(0));
+ }
+ if (node.name() == "y") {
+ // Rewrite DeviceIndex op to a Const op with value of TPU_REPLICATED_CORE index 1.
+ EXPECT_EQ("Const", node.op());
+ EXPECT_EQ(1, node.attr().at("value").tensor().int_val(0));
+ }
+ if (node.name() == "y1") {
+ // api_implements only, no preferred device, no swap.
+ EXPECT_EQ("XTimesTwo", node.op());
+ } else if (node.name() == "y2") {
+ // Make sure the implementation is not changed.
+ EXPECT_EQ("XTimesTwo", node.op());
+ }
+ }
+}
+
TEST_F(ImplementationSelectorTest, SwapImplementation) {
using test::function::NDef;
auto cpu_def = test::function::XTimesTwo();