Converter for the Range operator with mixed-type (weight/tensor) parameters.
diff --git a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
index 8e6b634..6b315ea 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/convert_nodes_test.cc
@@ -3786,13 +3786,26 @@
return dtype == DT_INT32 ? static_cast<int32>(value) : value;
};
+ // A function that advances the current configuration to the next
+ // lexicographically greater one. The configuration is a (0,1)-vector
+ // config, where config[i] is 0 or 1 when the i-th parameter is passed as
+ // a weight or tensor, respectively. The function returns true if such
+ // a configuration exists, or false otherwise.
+ auto nextTensorWeigtConfiguration = [this](std::vector<int>& config) {
+ for (int i = config.size(); i-- > 0;) {
+ if (config[i] = 1 - config[i]) return true;
+ }
+ return false;
+ };
+
auto set_parameters = [this](const std::array<const char*, 3>& name,
const std::array<std::vector<float>, 3>& value,
const std::array<DataType, 3>& type,
- bool all_tensors = false, int shape_idx = -1) {
+ const std::vector<int>& config,
+ int shape_idx = -1) {
Reset();
for (int i = 0; i < 3; i++) {
- if (all_tensors) {
+ if (config[i]) {
std::vector<int32> partial_shape_dims = {};
// The correct partial shape will be provided
// (a) for all parameters, when shape_idx > 3
@@ -3827,109 +3840,135 @@
ops::Placeholder(s.WithOpName(param_name[1]), param_type[1]),
ops::Placeholder(s.WithOpName(param_name[2]), param_type[2]));
- const NodeDef& node_def = range.operation.node()->def();
+ const NodeDef& ndef = range.operation.node()->def();
const std::vector<DataType> param_types{DT_FLOAT, DT_HALF, DT_INT32};
// ConverterRange is not implemented for Implicite batch mode.
+ std::vector<int> config(3, 0);
if (trt_mode_ == TrtTestMode::kImplicitBatch) {
- for (bool all_tensors : {false, true}) {
- set_parameters(param_name, param_value, param_type, all_tensors);
- RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
+ do {
+ set_parameters(param_name, param_value, param_type, config);
+ RunValidationAndConversion(ndef, error::UNIMPLEMENTED,
"Conversion for Range is not implemented in "
"implicit batch mode");
- }
+ } while (nextTensorWeigtConfiguration(config));
+
return;
}
- const std::string expected_msg = convert_range_expected_msg(node_def);
- {
- // We expect that all three (start, limit and delta) are passed as weights
- // OR tensors and we reject parameters, if it's not true.
- Reset();
- // Passing (start, limit) as weights
- for (int i = 0; i < 2; i++) {
- AddTestWeights(param_name[i], {1}, param_value[i], param_type[i]);
- }
- // ... and delta as a tensor
- AddTestTensor(param_name[2], {1}, param_type[2], param_value[2]);
+ const std::string expect_msg = convert_range_expected_msg(ndef);
+ bool all_weights = true;
+ do {
+ for (auto limit_type : param_types) {
+ param_type[1] = limit_type;
+ for (auto delta_type : param_types) {
+ param_type[2] = delta_type;
- RunValidationAndConversion(node_def, error::INVALID_ARGUMENT,
- expected_msg + "passed as weights OR tensors");
- }
+ const auto all_integers = start_type == DT_INT32 &&
+ limit_type == DT_INT32 &&
+ delta_type == DT_INT32;
- nvinfer1::DataType trt_type;
- TF_ASSERT_OK(TfTypeToTrtType(tf_type_, &trt_type));
- const std::string expected = DebugString(trt_type);
+ if (all_weights || all_integers && !config[2]) {
+ // Reject invalid parameters if delta = 0 and it's passed as a weight.
+ param_value[2] = {0};
+ set_parameters(param_name, param_value, param_type, config);
+ RunValidationAndConversion(
+ ndef, error::INVALID_ARGUMENT,
+ "The delta parameter of Range operation cannot be equal to 0");
- // Reject invalid parameters if delta = 0 (for weights only).
- for (auto limit_type : param_types) {
- param_type[1] = limit_type;
- for (auto delta_type : param_types) {
- param_type[2] = delta_type;
- param_value[2] = {0};
-
- set_parameters(param_name, param_value, param_type);
- RunValidationAndConversion(
- node_def, error::INVALID_ARGUMENT,
- "The delta parameter of Range operation cannot be equal to 0");
-
- // Reject invalid parameters preventing the limit from
- // being reached for fixed values of start and delta.
- for (int j = 0; j <= 1; j++) {
- param_value[j] = {get_casted_value(start, tf_type_)};
- param_value[1 - j] = {get_casted_value(limit, limit_type)};
- param_value[2] = {(2 * j - 1) * get_casted_value(delta, delta_type)};
- set_parameters(param_name, param_value, param_type);
- const auto error = convert_range_error_msg(
- param_value[0][0], param_value[1][0], param_value[2][0]);
- RunValidationAndConversion(node_def, error::INVALID_ARGUMENT, error);
- }
-
- param_value[0] = {start};
- // When passed as tensors, all parameters should be of DT_INT32 type.
- if (start_type == DT_INT32 && limit_type == DT_INT32 &&
- delta_type == DT_INT32) {
- if (trt_mode_ == TrtTestMode::kDynamicShape) {
- // Wrong dimension for one of parameters.
- for (int j = 0; j < 3; j++) {
- const string err =
- StrCat("Dimension for '", param_name[j],
- "' of Range operator should be equal to 1");
- set_parameters(param_name, param_value, param_type, true, j);
- RunValidationAndConversion(node_def, error::INVALID_ARGUMENT, err);
+ if (!all_weights && !config[2]) {
+ param_value[2] = {-1};
+ set_parameters(param_name, param_value, param_type, config);
+ const string err = StrCat(
+ "The delta parameter of Range operation "
+ "cannot be negative, when one of (start, limit) is passed as "
+ "a tensor, but got ",
+ param_value[2][0]);
+ RunValidationAndConversion(ndef, error::INVALID_ARGUMENT, err);
}
}
- } else {
- // When at least one parameter is set as non-integer tensors,
- // the following test should fail.
- set_parameters(param_name, param_value, param_type, true);
- RunValidationAndConversion(node_def, error::UNIMPLEMENTED,
- expected_msg + "tensors");
+
+ if (all_weights) {
+ // Reject invalid parameters preventing the limit from
+ // being reached for fixed values of start and delta.
+ for (int j = 0; j <= 1; j++) {
+ param_value[j] = {get_casted_value(start, tf_type_)};
+ param_value[1 - j] = {get_casted_value(limit, limit_type)};
+ param_value[2] = {(2 * j - 1) *
+ get_casted_value(delta, delta_type)};
+ set_parameters(param_name, param_value, param_type, config);
+ const auto error = convert_range_error_msg(
+ param_value[0][0], param_value[1][0], param_value[2][0]);
+ RunValidationAndConversion(ndef, error::INVALID_ARGUMENT, error);
+ }
+ }
+
+ param_value[0] = {start};
+ param_value[2] = {delta};
+ if (all_integers) {
+ if (trt_mode_ == TrtTestMode::kDynamicShape) {
+ // Wrong dimension for the parameter passed as a tensor.
+ for (int j = 0; j < 3; j++) {
+ if (!config[j]) continue;
+
+ const string err =
+ StrCat("Dimension for '", param_name[j],
+ "' of Range operator should be equal to 1");
+ set_parameters(param_name, param_value, param_type, config, j);
+ RunValidationAndConversion(ndef, error::INVALID_ARGUMENT, err);
+ }
+ }
+ } else {
+ if (!all_weights) {
+ // The following test should fail when:
+ // (a) at least one parameter is passed as a tensor, and
+ // (b) at least one parameter is not of type DT_INT32.
+ set_parameters(param_name, param_value, param_type, config);
+ RunValidationAndConversion(ndef, error::UNIMPLEMENTED, expect_msg);
+ }
+ }
}
}
- }
+ // All remaining configurations are generated so that at least one
+ // parameter is passed as a tensor.
+ all_weights = false;
+ } while (nextTensorWeigtConfiguration(config));
+
+ nvinfer1::DataType trt_type;
+ TF_ASSERT_OK(TfTypeToTrtType(DT_BOOL, &trt_type));
+ const std::string error_msg =
+ "Unsupported data type " + DebugString(trt_type) + " used for '";
+ do {
+ for (auto limit_type : param_types) {
+ param_type[1] = limit_type;
+ for (auto delta_type : param_types) {
+ param_type[2] = delta_type;
+
+ for (int i = 0; i < 3; i++) {
+ if (!config[i]) {
+ const auto saved_type = param_type[i];
+ param_type[i] = DT_BOOL;
+ set_parameters(param_name, param_value, param_type, config);
+ param_type[i] = saved_type;
+ RunValidationAndConversion(ndef, error::INVALID_ARGUMENT,
+ error_msg + param_name[i] + "'");
+ }
+ }
+ }
+ }
+ } while (nextTensorWeigtConfiguration(config));
// The tests that pass all checks in ConvertRange::Validate().
const Status status = Status::OK();
const std::vector<DataType> int_type{DT_INT32};
- for (bool all_tensors : {false, true}) {
- // For now when (start, limit, delta) are passed as tensors
- // these tensors should be of DT_INT32 type.
- int partial_shape_idx = -1;
- if (all_tensors) {
- if (start_type != DT_INT32) {
- continue;
- }
- if (trt_mode_ == TrtTestMode::kDynamicShape) {
- // The correct partial shape will be provided for all parameters
- partial_shape_idx = 3;
- }
- }
-
- // For now only parameters of DT_INT32 type could be used when
- // they are pased as tensors.
- const auto& types = all_tensors ? int_type : param_types;
- const auto jEnd = all_tensors ? 0 : 1;
+ int partial_shape_idx = -1;
+ all_weights = true;
+ do {
+ // For now, when at least one of (start, limit, delta) is passed as a
+ // tensor: (a) all of these parameters must be of type DT_INT32;
+ // (b) only a positive delta can be used.
+ const auto& types = all_weights ? param_types : int_type;
+ const auto jEnd = all_weights ? 1 : 0;
for (auto limit_type : types) {
param_type[1] = limit_type;
for (auto delta_type : types) {
@@ -3955,15 +3994,24 @@
value += delta_curr;
}
- set_parameters(param_name, param_value, param_type, all_tensors,
+ set_parameters(param_name, param_value, param_type, config,
partial_shape_idx);
const std::vector<int> output_dims = {num_values};
- TestOpConverter("my_range", node_def, output_dims, status, status,
+ TestOpConverter("my_range", ndef, output_dims, status, status,
ElementsAreArray(expected_output));
}
}
}
- }
+
+ if (all_weights) {
+ if (start_type != DT_INT32) break;
+ if (trt_mode_ == TrtTestMode::kDynamicShape) partial_shape_idx = 3;
+
+ // All remaining configurations are generated so that at least one
+ // parameter is passed as a tensor.
+ all_weights = false;
+ }
+ } while (nextTensorWeigtConfiguration(config));
}
TEST_P(OpConverter_FP32_FP16_INT32_Test, ConvertLikeOps) {
diff --git a/tensorflow/compiler/tf2tensorrt/convert/ops/fill_ops.cc b/tensorflow/compiler/tf2tensorrt/convert/ops/fill_ops.cc
index 8851c93..4c848e8 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/ops/fill_ops.cc
+++ b/tensorflow/compiler/tf2tensorrt/convert/ops/fill_ops.cc
@@ -132,58 +132,77 @@
const auto& inputs = params.inputs;
const auto& node_def = params.node_def;
- if (!all_same_types(inputs)) {
- return errors::InvalidArgument(convert_range_expected_msg(node_def),
- "passed as weights OR tensors");
- }
-
- if (!all_weights_) {
- if (!all_integers(inputs)) {
- return errors::Unimplemented(convert_range_expected_msg(node_def),
- "tensors");
- }
-
- for (int i = 0; i < 3; i++) {
- const auto& dims = inputs.at(i).GetTrtDims();
- if (dims.nbDims != 1 || dims.d[0] != 1) {
- return errors::InvalidArgument("Dimension for '", InputSpec()[i].name,
- "' of ", node_def.op(), " operator ",
- "should be equal to 1");
- }
- }
- return Status::OK();
- }
-
float param[3];
+ all_weights_ = all_integers_ = true;
for (int i = 0; i < 3; i++) {
const auto& input = inputs.at(i);
- switch (input.TrtDType()) {
- case nvinfer1::DataType::kFLOAT:
- param[i] = get_input_param<float>(input);
- break;
- case nvinfer1::DataType::kHALF:
- param[i] = get_input_param<Eigen::half>(input);
- break;
- default: // nvinfer1::DataType::kINT32:
- param[i] = get_input_param<int>(input);
+ all_integers_ &= input.TrtDType() == nvinfer1::DataType::kINT32;
+ if (input.is_weights()) {
+ switch (input.TrtDType()) {
+ case nvinfer1::DataType::kFLOAT:
+ param[i] = get_input_param<float>(input);
+ break;
+ case nvinfer1::DataType::kHALF:
+ param[i] = get_input_param<Eigen::half>(input);
+ break;
+ case nvinfer1::DataType::kINT32:
+ param[i] = get_input_param<int>(input);
+ break;
+ default:
+ return errors::InvalidArgument(
+ "Unsupported data type ", DebugString(input.TrtDType()),
+ " used for '", InputSpec()[i].name, "'");
+ }
+ } else {
+ all_weights_ = false;
}
}
- if ((delta_ = param[2]) == 0) {
- return errors::InvalidArgument("The delta parameter of ", node_def.op(),
- " operation cannot be equal to 0");
+ if (!(all_weights_ || all_integers_)) {
+ // As of 06/03/2022, when at least one of (start, limit, delta) is
+ // passed as a tensor, they must all be of type kINT32.
+ return errors::Unimplemented(convert_range_expected_msg(node_def));
}
- const auto num_intervals_float = (param[1] - (start_ = param[0])) / delta_;
- if (num_intervals_float < 0) {
- const auto error = convert_range_error_msg(start_, param[1], delta_);
- return errors::InvalidArgument(error);
+ if (inputs.at(2).is_weights()) {
+ if ((delta_ = param[2]) == 0) {
+ return errors::InvalidArgument("The delta parameter of ", node_def.op(),
+ " operation cannot be equal to 0");
+ }
+
+ if (!all_weights_ && delta_ < 0) {
+ return errors::InvalidArgument(
+ "The delta parameter of Range operation "
+ "cannot be negative, when one of (start, limit) is passed as "
+ "a tensor, but got ",
+ delta_);
+ }
}
- num_values_ = static_cast<int>(num_intervals_float);
- if (start_ + delta_ * num_values_ != param[1]) {
- num_values_++;
+ for (int i = 0; i < 3; i++) {
+ const auto& input = inputs.at(i);
+ const auto& dims = input.GetTrtDims();
+ if (dims.nbDims != 1 || dims.d[0] != 1) {
+ return errors::InvalidArgument("Dimension for '", InputSpec()[i].name,
+ "' of ", node_def.op(), " operator ",
+ "should be equal to 1");
+ }
}
+
+ if (all_weights_) {
+ const auto num_intervals_float =
+ (param[1] - (start_ = param[0])) / delta_;
+ if (num_intervals_float < 0) {
+ const auto error = convert_range_error_msg(start_, param[1], delta_);
+ return errors::InvalidArgument(error);
+ }
+
+ num_values_ = static_cast<int>(num_intervals_float);
+ if (start_ + delta_ * num_values_ != param[1]) {
+ num_values_++;
+ }
+ }
+
return Status::OK();
}
@@ -192,7 +211,6 @@
const auto& inputs = params.inputs;
const TRT_TensorOrWeights& input = inputs.at(0);
TRT_TensorOrWeights value_input;
-
nvinfer1::Dims trt_dims{1};
auto builder = TRTNetworkBuilder::Create(params.converter->network(),
params.weight_store);
@@ -201,14 +219,19 @@
ITensorProxyPtr beta_tensor = nullptr;
ITensorProxyPtr scalar_tensor = nullptr;
if (!all_weights_) {
+ ITensorProxyPtr tensors[3];
+ for (int i = 0; i < 3; i++) {
+ TF_RETURN_IF_ERROR(
+ builder->get_tensor4TensorOrWeights(inputs.at(i), tensors + i));
+ }
+
StatusOr<nvinfer1::IElementWiseLayer*> num =
- builder->Sub(/*limit*/ inputs.at(1).tensor()->trt_tensor(),
- /*start*/ inputs.at(0).tensor()->trt_tensor());
+ builder->Sub(/*limit*/ tensors[1]->trt_tensor(),
+ /*start*/ tensors[0]->trt_tensor());
TRT_ENSURE_PTR_OK(num);
- beta_tensor = params.inputs.at(2).tensor();
StatusOr<nvinfer1::IElementWiseLayer*> ceil_div = builder->FloorDiv(
- (*num)->getOutput(0), beta_tensor->trt_tensor() /*delta*/);
+ (*num)->getOutput(0), (beta_tensor = tensors[2])->trt_tensor());
TRT_ENSURE_PTR_OK(ceil_div);
dims_input_tensor = (*ceil_div)->getOutput(0);
dims_input_tensor->setType(nvinfer1::DataType::kINT32);
@@ -241,7 +264,7 @@
trt_dims, scalar_tensor, beta_tensor, delta_);
ITensorProxyPtr output_tensor = (*layer)->getOutput(0);
- if (all_integers(inputs)) {
+ if (all_integers_) {
output_tensor->setType(nvinfer1::DataType::kINT32);
}
@@ -255,31 +278,11 @@
return static_cast<float>(*input.weights().GetPointer<T>());
}
- bool all_integers(const std::vector<TRT_TensorOrWeights>& inputs) const {
- for (int i = 0; i < 3; i++) {
- if (inputs.at(i).TrtDType() != nvinfer1::DataType::kINT32) {
- return false;
- }
- }
- return true;
- }
-
- bool all_same_types(const std::vector<TRT_TensorOrWeights>& inputs) {
- auto i = inputs.size();
- const bool is_weight = inputs.at(--i).is_weights();
- while (i--) {
- if (inputs.at(i).is_weights() != is_weight) {
- return all_weights_ = false;
- }
- }
- all_weights_ = is_weight;
- return true;
- }
-
float start_;
float delta_;
int num_values_;
bool all_weights_;
+ bool all_integers_;
};
std::string convert_range_error_msg(float start, float limit, float delta) {
@@ -291,8 +294,9 @@
}
std::string convert_range_expected_msg(const NodeDef& node_def) {
- return "All parameters (start, limit, delta) of " + node_def.op() +
- " operation in " + node_def.name() + " are expected to be ";
+ return "When at least one of parameters (start, limit, delta) of " +
+ node_def.op() + " operation in " + node_def.name() +
+ " is passed as a tensor, they must all be of type kINT32";
}
REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertFill>(), "Fill");
diff --git a/tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h b/tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h
index 3701559..04fb184 100644
--- a/tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h
+++ b/tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h
@@ -341,6 +341,18 @@
return const_layer;
}
+ Status get_tensor4TensorOrWeights(const TRT_TensorOrWeights& input,
+ ITensorProxyPtr* pTensor) {
+ if (input.is_weights()) {
+ StatusOr<nvinfer1::IConstantLayer*> const_layer = WeightsToConstant(
+ input.weights().GetTrtWeights(), input.GetTrtDims());
+ if (!const_layer.status().ok()) return const_layer.status();
+ *pTensor = (*const_layer)->getOutput(0);
+ } else {
+ *pTensor = input.tensor();
+ }
+ return Status::OK();
+ }
// Creates a nvinfer1::Weights object containing a single scalar.
template <typename T,
typename std::enable_if<std::is_pod<T>::value>::type* = nullptr>