Fix tensor initialization in unit tests: value-initialize TfLiteTensor (`new TfLiteTensor()`, `TfLiteTensor t = {}`) so all fields are zeroed, instead of default-initializing and manually resetting individual members.
PiperOrigin-RevId: 281649411
Change-Id: I5e909fc1b19ab3e098dfc81ac74c476e0b728f6e
diff --git a/tensorflow/lite/delegates/flex/buffer_map_test.cc b/tensorflow/lite/delegates/flex/buffer_map_test.cc
index 6b09b69..72c49b1 100644
--- a/tensorflow/lite/delegates/flex/buffer_map_test.cc
+++ b/tensorflow/lite/delegates/flex/buffer_map_test.cc
@@ -34,7 +34,7 @@
template <typename T>
UniqueTfLiteTensor MakeLiteTensor(const std::vector<int>& shape,
const std::vector<T>& data) {
- auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
+ auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
@@ -42,9 +42,6 @@
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<T>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
- tensor->data.raw = nullptr;
- tensor->is_variable = false;
- memset(&tensor->quantization, 0, sizeof(TfLiteQuantization));
TfLiteTensorRealloc(data.size() * sizeof(T), tensor.get());
memcpy(tensor->data.raw, data.data(), data.size() * sizeof(T));
return tensor;
@@ -53,7 +50,7 @@
template <>
UniqueTfLiteTensor MakeLiteTensor<string>(const std::vector<int>& shape,
const std::vector<string>& data) {
- auto tensor = UniqueTfLiteTensor(new TfLiteTensor, [](TfLiteTensor* t) {
+ auto tensor = UniqueTfLiteTensor(new TfLiteTensor(), [](TfLiteTensor* t) {
TfLiteTensorDataFree(t);
TfLiteIntArrayFree(t->dims);
delete t;
@@ -61,9 +58,6 @@
tensor->allocation_type = kTfLiteDynamic;
tensor->type = typeToTfLiteType<string>();
tensor->dims = ConvertVectorToTfLiteIntArray(shape);
- tensor->data.raw = nullptr;
- tensor->is_variable = false;
- memset(&tensor->quantization, 0, sizeof(TfLiteQuantization));
TfLiteTensorRealloc(data.size() * sizeof(string), tensor.get());
DynamicBuffer b;
diff --git a/tensorflow/lite/kernels/kernel_util_test.cc b/tensorflow/lite/kernels/kernel_util_test.cc
index d410d2b..79e19eb 100644
--- a/tensorflow/lite/kernels/kernel_util_test.cc
+++ b/tensorflow/lite/kernels/kernel_util_test.cc
@@ -146,7 +146,7 @@
TEST_F(KernelUtilTest, CheckAndPopulate) {
// Create input.
- TfLiteTensor input;
+ TfLiteTensor input = {};
input.type = kTfLiteInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -163,7 +163,7 @@
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
- TfLiteTensor filter;
+ TfLiteTensor filter = {};
filter.type = kTfLiteInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -188,7 +188,7 @@
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
- TfLiteTensor bias;
+ TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -208,7 +208,7 @@
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
- TfLiteTensor output;
+ TfLiteTensor output = {};
output.type = kTfLiteInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -252,7 +252,7 @@
TEST_F(KernelUtilTest, CheckAndPopulateShift) {
// Create input of type kTfLiteUInt8.
- TfLiteTensor input;
+ TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -269,7 +269,7 @@
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter of type kTfLiteUInt8.
- TfLiteTensor filter;
+ TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -291,7 +291,7 @@
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias for kTfLiteUInt8.
- TfLiteTensor bias;
+ TfLiteTensor bias = {};
bias.type = kTfLiteUInt8;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -311,7 +311,7 @@
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output for kTfLiteUInt8.
- TfLiteTensor output;
+ TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -359,7 +359,7 @@
#ifndef __APPLE__ // Some Apple toolchains don't support std::ldexp
TEST_F(KernelUtilTest, CheckAndPopulateZeroValue) {
// Create input.
- TfLiteTensor input;
+ TfLiteTensor input = {};
input.type = kTfLiteInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -376,7 +376,7 @@
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
- TfLiteTensor filter;
+ TfLiteTensor filter = {};
filter.type = kTfLiteInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -401,7 +401,7 @@
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
- TfLiteTensor bias;
+ TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -421,7 +421,7 @@
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
- TfLiteTensor output;
+ TfLiteTensor output = {};
output.type = kTfLiteInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -466,7 +466,7 @@
TEST_F(KernelUtilTest, CheckAndPopulateUint8) {
// Create input.
- TfLiteTensor input;
+ TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -483,7 +483,7 @@
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
- TfLiteTensor filter;
+ TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -505,7 +505,7 @@
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create bias.
- TfLiteTensor bias;
+ TfLiteTensor bias = {};
bias.type = kTfLiteInt32;
bias.allocation_type = kTfLiteArenaRw;
bias.dims = TfLiteIntArrayCreate(4);
@@ -521,7 +521,7 @@
bias.quantization.params = reinterpret_cast<void*>(bias_params);
// Create output.
- TfLiteTensor output;
+ TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;
@@ -564,7 +564,7 @@
TEST_F(KernelUtilTest, CheckAndPopulateWithoutBias) {
// Create input.
- TfLiteTensor input;
+ TfLiteTensor input = {};
input.type = kTfLiteUInt8;
input.allocation_type = kTfLiteArenaRw;
input.dims = TfLiteIntArrayCreate(1);
@@ -581,7 +581,7 @@
input.quantization.params = reinterpret_cast<void*>(input_params);
// Create filter.
- TfLiteTensor filter;
+ TfLiteTensor filter = {};
filter.type = kTfLiteUInt8;
filter.allocation_type = kTfLiteArenaRw;
filter.dims = TfLiteIntArrayCreate(4);
@@ -603,7 +603,7 @@
filter.quantization.params = reinterpret_cast<void*>(filter_params);
// Create output.
- TfLiteTensor output;
+ TfLiteTensor output = {};
output.type = kTfLiteUInt8;
output.allocation_type = kTfLiteArenaRw;
output.dims = nullptr;