Pluggable device/op_handler support in c_api_tfrt. This change also begins reusing TensorFlow-style device names (e.g. /device:CPU:0) and allows creating a different op handler for each GPU device.
PiperOrigin-RevId: 320713554
Change-Id: Id554249713fe7571e29e8f2f36fc0986ee44e9ec
diff --git a/tensorflow/c/eager/c_api.cc b/tensorflow/c/eager/c_api.cc
index 70acd71..6804247 100644
--- a/tensorflow/c/eager/c_api.cc
+++ b/tensorflow/c/eager/c_api.cc
@@ -725,13 +725,7 @@
TFE_Context* TFE_NewContext(const TFE_ContextOptions* opts, TF_Status* status) {
if (opts->use_tfrt) {
#ifdef PLATFORM_GOOGLE
- tfrt::SmallVector<std::string, 4> op_handler_chains;
- tfrt::SmallVector<tensorflow::DeviceAttributes, 4> device_attributes;
- status->status = tfrt::ListOpHandlerChains(
- opts->session_options.options, &op_handler_chains, &device_attributes);
- if (!status->status.ok()) return nullptr;
- return tensorflow::wrap(new tfrt::ContextInterface(
- op_handler_chains, device_attributes, opts->async));
+ return tensorflow::wrap(new tfrt::ContextInterface(opts->async));
#else
status->status = tensorflow::errors::Unimplemented("TFRT is not supported");
return nullptr;