CreateTensor updated to receive only a TensorDescriptor (which now carries the shape) instead of a separate TensorDescriptor and shape.
PiperOrigin-RevId: 462339218
diff --git a/tensorflow/lite/delegates/gpu/cl/inference_context.cc b/tensorflow/lite/delegates/gpu/cl/inference_context.cc
index ebe1622..2a4812d 100644
--- a/tensorflow/lite/delegates/gpu/cl/inference_context.cc
+++ b/tensorflow/lite/delegates/gpu/cl/inference_context.cc
@@ -271,7 +271,6 @@
for (const auto& external_tensor : create_info.external_mutable_tensors) {
RETURN_IF_ERROR(
CreateTensor(env->context(),
- gpu_model->tensors[external_tensor.first].GetBHWDCShape(),
gpu_model->tensors[external_tensor.first],
&temp_external_tensors[external_tensor.first]));
external_mutable_tensors_[external_tensor.first] =
@@ -369,7 +368,6 @@
for (const auto& external_tensor : create_info->external_mutable_tensors) {
RETURN_IF_ERROR(
CreateTensor(env->context(),
- gpu_model.tensors[external_tensor.first].GetBHWDCShape(),
gpu_model.tensors[external_tensor.first],
&temp_external_tensors[external_tensor.first]));
external_mutable_tensors_[external_tensor.first] =
@@ -470,12 +468,8 @@
if (it == gpu_model.tensors.end()) {
return absl::InternalError("No variable tensor with this id.");
}
- const auto& t = it->second;
- const auto& shape = t.GetBHWDCShape();
- const auto& descriptor = t;
-
RETURN_IF_ERROR(
- CreateTensor(*context, shape, descriptor,
+ CreateTensor(*context, it->second,
&variable_tensors_[value_and_ref_value.second]));
}
}
@@ -658,8 +652,8 @@
graph_ids_to_strong_shape_tensors_[tensor_id] = id;
const auto& it = strong_shape_tensors_.find(id);
if (it == strong_shape_tensors_.end()) {
- RETURN_IF_ERROR(CreateTensor(*context, tensor_desc.GetBHWCShape(),
- tensor_desc, &strong_shape_tensors_[id]));
+ RETURN_IF_ERROR(
+ CreateTensor(*context, tensor_desc, &strong_shape_tensors_[id]));
}
}
}
diff --git a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
index d6b2d60..fff57ea 100644
--- a/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
+++ b/tensorflow/lite/delegates/gpu/cl/kernels/cl_test.cc
@@ -16,6 +16,7 @@
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include <utility>
+#include <vector>
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
@@ -65,8 +66,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, src_shape,
- op_def.src_tensors[i], &src[i]));
+ TensorDescriptor descriptor_with_shape = op_def.src_tensors[i];
+ descriptor_with_shape.SetBHWCShape(src_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &src[i]));
RETURN_IF_ERROR(src[i].WriteData(creation_context.queue, src_cpu[i]));
operation->SetSrc(&src[i], i);
}
@@ -78,8 +81,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, dst_shape,
- op_def.dst_tensors[i], &dst[i]));
+ TensorDescriptor descriptor_with_shape = op_def.dst_tensors[i];
+ descriptor_with_shape.SetBHWCShape(dst_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &dst[i]));
operation->SetDst(&dst[i], i);
}
@@ -119,8 +124,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, src_shape,
- op_def.src_tensors[i], &src[i]));
+ TensorDescriptor descriptor_with_shape = op_def.src_tensors[i];
+ descriptor_with_shape.SetBHWDCShape(src_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &src[i]));
RETURN_IF_ERROR(src[i].WriteData(creation_context.queue, src_cpu[i]));
operation->SetSrc(&src[i], i);
}
@@ -132,8 +139,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, dst_shape,
- op_def.dst_tensors[i], &dst[i]));
+ TensorDescriptor descriptor_with_shape = op_def.dst_tensors[i];
+ descriptor_with_shape.SetBHWDCShape(dst_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &dst[i]));
operation->SetDst(&dst[i], i);
}
@@ -184,8 +193,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, dst_shape,
- op_def.dst_tensors[i], &dst[i]));
+ TensorDescriptor descriptor_with_shape = op_def.dst_tensors[i];
+ descriptor_with_shape.SetBHWDCShape(dst_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &dst[i]));
operation->SetDst(&dst[i], i);
}
@@ -217,8 +228,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, src_shape,
- op_def.src_tensors[i], &src[i]));
+ TensorDescriptor descriptor_with_shape = op_def.src_tensors[i];
+ descriptor_with_shape.SetBHWCShape(src_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &src[i]));
RETURN_IF_ERROR(src[i].WriteData(creation_context.queue, src_cpu[i]));
operation->SetSrc(&src[i], i);
}
@@ -230,8 +243,10 @@
return absl::InvalidArgumentError(
"Layout doesn't have Batch dimension, but shape.b != 1");
}
- RETURN_IF_ERROR(CreateTensor(*creation_context.context, dst_shape,
- op_def.dst_tensors[i], &dst[i]));
+ TensorDescriptor descriptor_with_shape = op_def.dst_tensors[i];
+ descriptor_with_shape.SetBHWCShape(dst_shape);
+ RETURN_IF_ERROR(CreateTensor(*creation_context.context,
+ descriptor_with_shape, &dst[i]));
operation->SetDst(&dst[i], i);
}
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor.cc b/tensorflow/lite/delegates/gpu/cl/tensor.cc
index a7ea5b8..ef2bc03 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor.cc
+++ b/tensorflow/lite/delegates/gpu/cl/tensor.cc
@@ -256,29 +256,6 @@
return absl::OkStatus();
}
-absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
- const TensorDescriptor& descriptor, cl_mem memory,
- Tensor* result) {
- const bool memory_owner = memory == nullptr;
- if (memory_owner) {
- CLMemory mem;
- RETURN_IF_ERROR(
- AllocateTensorMemory(context, shape, descriptor, nullptr, &mem));
- memory = mem.Release();
- }
- if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
- cl_mem image_memory;
- RETURN_IF_ERROR(CreateImageBufferFromBuffer(
- context, memory, descriptor.GetDataType(),
- shape.b * shape.w * shape.h * shape.d * DivideRoundUp(shape.c, 4),
- &image_memory));
- *result = Tensor(memory, memory_owner, image_memory, shape, descriptor);
- } else {
- *result = Tensor(memory, memory_owner, shape, descriptor);
- }
- return absl::OkStatus();
-}
-
absl::Status CreateTensorShared(const CLContext& context, const BHWDC& shape,
const TensorDescriptor& descriptor,
cl_mem memory, Tensor* result) {
@@ -627,15 +604,25 @@
return absl::OkStatus();
}
-absl::Status CreateTensor(const CLContext& context, const BHWC& shape,
+absl::Status CreateTensor(const CLContext& context,
const TensorDescriptor& descriptor, Tensor* result) {
- const BHWDC shape5D(shape.b, shape.h, shape.w, 1, shape.c);
- return CreateTensor(context, shape5D, descriptor, nullptr, result);
-}
-
-absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
- const TensorDescriptor& descriptor, Tensor* result) {
- return CreateTensor(context, shape, descriptor, nullptr, result);
+ const BHWDC& shape = descriptor.GetBHWDCShape();
+ CLMemory mem;
+ RETURN_IF_ERROR(
+ AllocateTensorMemory(context, shape, descriptor, nullptr, &mem));
+ cl_mem memory = mem.Release();
+ if (descriptor.GetStorageType() == TensorStorageType::IMAGE_BUFFER) {
+ cl_mem image_memory;
+ RETURN_IF_ERROR(CreateImageBufferFromBuffer(
+ context, memory, descriptor.GetDataType(),
+ shape.b * shape.w * shape.h * shape.d * DivideRoundUp(shape.c, 4),
+ &image_memory));
+ *result =
+ Tensor(memory, /*memory_owner*/ true, image_memory, shape, descriptor);
+ } else {
+ *result = Tensor(memory, /*memory_owner*/ true, shape, descriptor);
+ }
+ return absl::OkStatus();
}
absl::Status CreateSharedTensor(const CLContext& context, cl_mem memory,
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor.h b/tensorflow/lite/delegates/gpu/cl/tensor.h
index 551e64e..d23907e 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor.h
+++ b/tensorflow/lite/delegates/gpu/cl/tensor.h
@@ -143,10 +143,7 @@
const TensorDescriptor& descriptor,
CLMemory* result);
-absl::Status CreateTensor(const CLContext& context, const BHWC& shape,
- const TensorDescriptor& descriptor, Tensor* result);
-
-absl::Status CreateTensor(const CLContext& context, const BHWDC& shape,
+absl::Status CreateTensor(const CLContext& context,
const TensorDescriptor& descriptor, Tensor* result);
absl::Status CreateSharedTensor(const CLContext& context, cl_mem memory,
diff --git a/tensorflow/lite/delegates/gpu/cl/tensor_test.cc b/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
index 2474c09..06b6b79 100644
--- a/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
+++ b/tensorflow/lite/delegates/gpu/cl/tensor_test.cc
@@ -63,7 +63,9 @@
}
Tensor tensor;
- RETURN_IF_ERROR(CreateTensor(env->context(), shape, descriptor, &tensor));
+ TensorDescriptor descriptor_with_shape = descriptor;
+ descriptor_with_shape.SetBHWCShape(shape);
+ RETURN_IF_ERROR(CreateTensor(env->context(), descriptor_with_shape, &tensor));
RETURN_IF_ERROR(tensor.WriteData(env->queue(), tensor_cpu));
RETURN_IF_ERROR(tensor.ReadData(env->queue(), &tensor_gpu));
@@ -127,7 +129,9 @@
}
Tensor tensor;
- RETURN_IF_ERROR(CreateTensor(env->context(), shape, descriptor, &tensor));
+ TensorDescriptor descriptor_with_shape = descriptor;
+ descriptor_with_shape.SetBHWDCShape(shape);
+ RETURN_IF_ERROR(CreateTensor(env->context(), descriptor_with_shape, &tensor));
RETURN_IF_ERROR(tensor.WriteData(env->queue(), tensor_cpu));
RETURN_IF_ERROR(tensor.ReadData(env->queue(), &tensor_gpu));
diff --git a/tensorflow/lite/delegates/gpu/cl/testing/memory_sharing_sample.cc b/tensorflow/lite/delegates/gpu/cl/testing/memory_sharing_sample.cc
index 00167cb..ee7cfb9 100644
--- a/tensorflow/lite/delegates/gpu/cl/testing/memory_sharing_sample.cc
+++ b/tensorflow/lite/delegates/gpu/cl/testing/memory_sharing_sample.cc
@@ -100,17 +100,20 @@
Tensor input_224_224, output_mv1, output_mv2;
auto data_type = DeduceDataTypeFromPrecision(create_info_mv1.precision);
RETURN_IF_ERROR(CreateTensor(
- env.context(), BHWC(1, 224, 224, 3),
- TensorDescriptor{data_type, TensorStorageType::TEXTURE_2D, Layout::HWC},
+ env.context(),
+ CreateHwcTensorDescriptor(data_type, TensorStorageType::TEXTURE_2D,
+ HWC(224, 224, 3)),
&input_224_224));
- RETURN_IF_ERROR(CreateTensor(
- env.context(), BHWC(1, 1, 1, 1001),
- TensorDescriptor{data_type, TensorStorageType::BUFFER, Layout::HWC},
- &output_mv1));
- RETURN_IF_ERROR(CreateTensor(
- env.context(), BHWC(1, 1, 1, 1001),
- TensorDescriptor{data_type, TensorStorageType::BUFFER, Layout::HWC},
- &output_mv2));
+ RETURN_IF_ERROR(
+ CreateTensor(env.context(),
+ CreateHwcTensorDescriptor(
+ data_type, TensorStorageType::BUFFER, HWC(1, 1, 1001)),
+ &output_mv1));
+ RETURN_IF_ERROR(
+ CreateTensor(env.context(),
+ CreateHwcTensorDescriptor(
+ data_type, TensorStorageType::BUFFER, HWC(1, 1, 1001)),
+ &output_mv2));
create_info_mv1.external_immutable_tensors = {
{graph_mv1.inputs()[0]->id, &input_224_224},
diff --git a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
index 13d7b9e..7fdf504 100644
--- a/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
+++ b/tensorflow/lite/delegates/gpu/cl/testing/performance_profiling.cc
@@ -17,6 +17,7 @@
#include <chrono> // NOLINT(build/c++11)
#include <iostream>
#include <string>
+#include <vector>
#include "absl/time/time.h"
#include "tensorflow/lite/delegates/gpu/cl/environment.h"
@@ -95,11 +96,11 @@
for (int i = 0; i < graph_cl.outputs().size(); ++i) {
// Assumed that graph outputs have batch size = 1.
auto data_type = DeduceDataTypeFromPrecision(create_info.precision);
- RETURN_IF_ERROR(CreateTensor(
- env.context(), graph_cl.outputs()[i]->tensor.shape,
- TensorDescriptor{data_type, TensorStorageType::TEXTURE_ARRAY,
- Layout::HWC},
- &outputs[i]));
+ TensorDescriptor required_tensor_desc = TensorDescriptor{
+ data_type, TensorStorageType::TEXTURE_ARRAY, Layout::HWC};
+ required_tensor_desc.SetBHWCShape(graph_cl.outputs()[i]->tensor.shape);
+ RETURN_IF_ERROR(
+ CreateTensor(env.context(), required_tensor_desc, &outputs[i]));
create_info.external_immutable_tensors[graph_cl.outputs()[i]->id] =
&outputs[i];
}