Refactor qconv to reduce allocations. (#42007)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/42007
Zero buffer and indirection pointers are allocated on every iteration.
With this refactor we create the op once for the qnnpack conv struct and keep
repopulating the indirection pointer as necessary.
For deconv, much of the op creation was moved outside so that we can avoid
creating and destroying ops every time.
Test Plan:
CI quantization tests.
deconvolution-test
Imported from OSS
Reviewed By: AshkanAliabadi
Differential Revision: D22726972
fbshipit-source-id: 07c03a4e90b397c36aae537ef7c0b7d81d4adc1a
diff --git a/aten/src/ATen/native/quantized/cpu/qconv.cpp b/aten/src/ATen/native/quantized/cpu/qconv.cpp
index 157f10f..2633bb2 100644
--- a/aten/src/ATen/native/quantized/cpu/qconv.cpp
+++ b/aten/src/ATen/native/quantized/cpu/qconv.cpp
@@ -564,6 +564,13 @@
// Calling unpack after this will throw an assertion.
orig_weight.reset();
}
+
+ // Fill the zero-padding buffer with the activation's zero point. This is
+ // done here because it only needs to happen once, after weight packing.
+ if (zero_buffer_size) {
+ memset(
+ convolution_op->zero_buffer, act_nhwc.q_zero_point(), zero_buffer_size);
+ }
}
TORCH_INTERNAL_ASSERT(pack_w != nullptr, "Packed Weights are NULL");
@@ -591,6 +598,7 @@
const pytorch_qnnp_status run_status = qnnpack::qnnpackConv(
conv_p,
+ convolution_op.get(),
pack_w->getPackedWeights(),
N,
H,
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/include/conv_utils.h b/aten/src/ATen/native/quantized/cpu/qnnpack/include/conv_utils.h
index be7f7a3..2c2dd00 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/include/conv_utils.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/include/conv_utils.h
@@ -7,6 +7,7 @@
#include <pytorch_qnnpack.h>
#include <qnnpack/log.h>
#include <qnnpack/operator.h>
+#include <qnnpack/params.h>
namespace qnnpack {
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/include/qnnpack_func.h b/aten/src/ATen/native/quantized/cpu/qnnpack/include/qnnpack_func.h
index c600d84..53e0a80 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/include/qnnpack_func.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/include/qnnpack_func.h
@@ -110,6 +110,7 @@
enum pytorch_qnnp_status qnnpackConv(
const conv_param_t& conv_p,
+ const pytorch_qnnp_operator_t convolution,
void* packed_weights,
const size_t batch_size,
const size_t input_height,
@@ -126,6 +127,7 @@
enum pytorch_qnnp_status qnnpackDeConv(
const conv_param_t& deconv_p,
+ const pytorch_qnnp_operator_t deconvolution,
void* packed_weights,
const size_t batch_size,
const size_t input_height,
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/src/conv-run.cc b/aten/src/ATen/native/quantized/cpu/qnnpack/src/conv-run.cc
index 29d8359..c1a7304 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/src/conv-run.cc
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/src/conv-run.cc
@@ -277,6 +277,7 @@
enum pytorch_qnnp_status qnnpackConv(
const conv_param_t& conv_p,
+ const pytorch_qnnp_operator_t convolution,
void* packed_weights,
const size_t batch_size,
const size_t input_height,
@@ -296,7 +297,6 @@
const size_t kernel_height = conv_p.kernel_dims[1];
const size_t kernel_size = kernel_height * kernel_width;
const size_t dilation_width = conv_p.dilation[0];
- const size_t dilation_height = conv_p.dilation[1];
const size_t groups = conv_p.groups;
if (batch_size == 0) {
@@ -323,48 +323,24 @@
output_max);
}
uint32_t stride_width = conv_p.stride_dims[0];
- uint32_t stride_height = conv_p.stride_dims[1];
- const std::array<size_t, 2> output_dims =
- conv_p.compute_output_dims({input_width, input_height});
- const size_t output_width = output_dims[0];
- const size_t output_height = output_dims[1];
- const size_t output_size = output_height * output_width;
-
- // FIXME temporary solution to create a qnnp_op struct for indirection buffer.
- const bool any_padding = (conv_p.padding[0] | conv_p.padding[1] |
- conv_p.padding[2] | conv_p.padding[3]) != 0;
- size_t zero_size = 0, zero_offset = 0;
-
- pytorch_qnnp_operator_t convolution{nullptr};
- convolution =
- static_cast<pytorch_qnnp_operator_t>(calloc(1, sizeof(struct pytorch_qnnp_operator)));
- if (convolution == nullptr) {
+ pytorch_qnnp_status status = pytorch_qnnp_setup_convolution2d_nhwc_q8(
+ convolution,
+ batch_size,
+ input_height,
+ input_width,
+ input,
+ input_pixel_stride,
+ output,
+ output_pixel_stride,
+ threadpool);
+ if (status != pytorch_qnnp_status_success) {
pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
- sizeof(struct pytorch_qnnp_operator));
- return pytorch_qnnp_status_out_of_memory;
+ "failed to run convolution op setup to set up indirection buffer.");
+ return status;
}
- std::unique_ptr<pytorch_qnnp_operator, QnnpackDeleter> qnnpack_uniq_ptr(convolution);
-
- convolution->input = input;
- convolution->input_pixel_stride = input_pixel_stride;
- convolution->groups = groups;
- convolution->group_input_channels = conv_p.group_input_channels;
- convolution->batch_size = batch_size;
- convolution->input_height = input_height;
- convolution->input_width = input_width;
- convolution->output_height = output_height;
- convolution->output_width = output_width;
- convolution->kernel_height = kernel_height;
- convolution->kernel_width = kernel_width;
- convolution->stride_height = stride_height;
- convolution->stride_width = stride_width;
- convolution->dilation_height = dilation_height;
- convolution->dilation_width = dilation_width;
- convolution->input_padding_top = conv_p.padding[0];
- convolution->input_padding_left = conv_p.padding[1];
+ const size_t output_size = convolution->output_height * convolution->output_width;
switch (conv_p.ukernel_type) {
case pytorch_qnnp_ukernel_type_dwconv: {
@@ -373,60 +349,23 @@
const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
const size_t group_stride = (groups + (cr - 1)) & -cr;
- if (any_padding) {
- if (groups >= 8) {
- zero_size = sizeof(uint8_t) * group_stride;
- zero_offset = 0;
- } else {
- zero_size = sizeof(uint8_t) * group_stride + 8;
- zero_offset = sizeof(uint8_t) * 8;
- }
- void* zero_buffer = malloc(zero_size);
- if (zero_buffer == nullptr) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for zero padding", zero_size);
- return pytorch_qnnp_status_out_of_memory;
- }
- memset(zero_buffer, input_zero_point, zero_size);
- convolution->zero_buffer = zero_buffer;
- convolution->zero_pointer =
- (void*)((uintptr_t)zero_buffer + zero_offset);
- }
- const size_t step_width = convolution->dilation_width == 1
- ? convolution->stride_width
- : kernel_width;
- const size_t step_height =
- kernel_size + (output_width * step_width - 1) * kernel_height;
- const size_t indirection_buffer_size =
- sizeof(void*) * batch_size * output_height * step_height;
-
- const void** indirection_buffer = (const void**)realloc(
- convolution->indirection_buffer, indirection_buffer_size);
- if (indirection_buffer == nullptr) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for indirection buffer",
- indirection_buffer_size);
- return pytorch_qnnp_status_out_of_memory;
- }
- convolution->indirection_buffer = indirection_buffer;
-
- pytorch_qnnp_indirection_init_dwconv2d(convolution, 0, step_height, step_width);
-
switch (kernel_size) {
case 9: {
struct q8dwconv_context context = {
.groups = groups,
.group_stride = group_stride,
- .indirection_buffer = (const uint8_t**)indirection_buffer,
+ .indirection_buffer =
+ (const uint8_t**)convolution->indirection_buffer,
.indirection_buffer_row_stride =
- kernel_size + (output_width * width_step - 1) * kernel_height,
+ kernel_size +
+ (convolution->output_width * width_step - 1) * kernel_height,
.indirection_buffer_col_stride =
kernel_height * width_step * sizeof(void*),
.packed_weights = packed_weights,
.output = output,
- .output_height = output_height,
- .output_width = output_width,
- .output_row_stride = output_width * output_pixel_stride,
+ .output_height = convolution->output_height,
+ .output_width = convolution->output_width,
+ .output_row_stride = convolution->output_width * output_pixel_stride,
.output_col_increment =
(output_pixel_stride - groups) * sizeof(uint8_t),
.quantization_params = conv_quantization_params,
@@ -444,7 +383,7 @@
(pthreadpool_function_2d_t)compute_dwconv_unipass,
&context,
batch_size,
- output_height);
+ convolution->output_height);
break;
}
case 25: {
@@ -454,14 +393,15 @@
.indirection_buffer =
(const uint8_t**)convolution->indirection_buffer,
.indirection_buffer_row_stride =
- kernel_size + (output_width * width_step - 1) * kernel_height,
+ kernel_size +
+ (convolution->output_width * width_step - 1) * kernel_height,
.indirection_buffer_col_stride =
kernel_height * width_step * sizeof(void*),
.packed_weights = packed_weights,
.output = output,
- .output_height = output_height,
- .output_width = output_width,
- .output_row_stride = output_width * output_pixel_stride,
+ .output_height = convolution->output_height,
+ .output_width = convolution->output_width,
+ .output_row_stride = convolution->output_width * output_pixel_stride,
.output_col_increment =
(output_pixel_stride - groups) * sizeof(uint8_t),
.quantization_params = conv_quantization_params,
@@ -479,7 +419,7 @@
(pthreadpool_function_2d_t)compute_dwconv_multiipass,
&context,
batch_size,
- output_height);
+ convolution->output_height);
break;
}
default:
@@ -611,43 +551,6 @@
const size_t n_stride = (group_output_channels + (nr - 1)) & -nr;
const size_t m_stride = round_up(output_size, mr);
- if (any_padding) {
- if (group_input_channels >= 8) {
- zero_size = sizeof(uint8_t) * k_stride;
- zero_offset = 0;
- } else {
- zero_size = sizeof(uint8_t) * k_stride + 8;
- zero_offset = 8;
- }
- void* zero_buffer = malloc(zero_size);
- if (zero_buffer == nullptr) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for zero padding", zero_size);
- return pytorch_qnnp_status_out_of_memory;
- }
- memset(zero_buffer, input_zero_point, zero_size);
- convolution->zero_buffer = zero_buffer;
- convolution->zero_pointer =
- (void*)((uintptr_t)zero_buffer + zero_offset);
- }
-
- const size_t output_tile_size = pytorch_qnnp_params.q8conv.mr;
- const size_t tiled_output_size = round_up(output_size, output_tile_size);
- const size_t indirection_buffer_size =
- sizeof(void*) * batch_size * groups * tiled_output_size * kernel_size;
- const void** indirection_buffer = (const void**)realloc(
- convolution->indirection_buffer, indirection_buffer_size);
- if (indirection_buffer == nullptr) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for indirection buffer",
- indirection_buffer_size);
- return pytorch_qnnp_status_out_of_memory;
- }
- convolution->indirection_buffer = indirection_buffer;
-
- pytorch_qnnp_indirection_init_conv2d(
- convolution, output_tile_size, tiled_output_size);
-
struct q8conv_context q8conv_context = {
.bs = batch_size,
.ks = kernel_size,
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/src/deconv-run.cc b/aten/src/ATen/native/quantized/cpu/qnnpack/src/deconv-run.cc
index d53a56a..af91013 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/src/deconv-run.cc
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/src/deconv-run.cc
@@ -74,6 +74,7 @@
enum pytorch_qnnp_status qnnpackDeConv(
const conv_param_t& deconv_p,
+ const pytorch_qnnp_operator_t deconvolution,
void* packed_weights,
const size_t batch_size,
const size_t input_height,
@@ -96,12 +97,6 @@
const size_t kernel_width = deconv_p.kernel_dims[0];
const size_t kernel_height = deconv_p.kernel_dims[1];
- const size_t stride_width = deconv_p.stride_dims[0];
- const size_t stride_height = deconv_p.stride_dims[1];
-
- const size_t dilation_width = deconv_p.dilation[0];
- const size_t dilation_height = deconv_p.dilation[1];
-
// Support vars
const size_t group_input_channels = deconv_p.group_input_channels;
const size_t group_output_channels = deconv_p.group_output_channels;
@@ -111,56 +106,6 @@
const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
const size_t n_stride = (group_output_channels + (nr - 1)) & -nr;
- // Create the kernel
- pytorch_qnnp_operator_t deconvolution =
- static_cast<pytorch_qnnp_operator_t>(
- calloc(1, sizeof(struct pytorch_qnnp_operator)));
- if (deconvolution == nullptr) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for qnnp_operator structure",
- sizeof(struct pytorch_qnnp_operator));
- pytorch_qnnp_delete_operator(deconvolution);
- return pytorch_qnnp_status_out_of_memory;
- }
- std::unique_ptr<pytorch_qnnp_operator, QnnpackDeleter> qnnpack_uniq_ptr(
- deconvolution);
-
- // Populate the kernel
- size_t zero_size = sizeof(uint8_t) * k_stride;
- size_t zero_offset = 0;
- if (group_input_channels < 8) {
- zero_size += 8;
- zero_offset = 8;
- }
- void* zero_buffer = malloc(zero_size);
- if (zero_buffer == NULL) {
- pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for zero padding", zero_size);
- pytorch_qnnp_delete_operator(deconvolution);
- return pytorch_qnnp_status_out_of_memory;
- }
- memset(zero_buffer, input_zero_point, zero_size);
-
- deconvolution->zero_buffer = zero_buffer;
- deconvolution->zero_pointer = (void*) ((uintptr_t) zero_buffer + zero_offset);
-
- deconvolution->input_padding_top = deconv_p.padding[0];
- deconvolution->input_padding_left = deconv_p.padding[1];
- deconvolution->input_padding_bottom = deconv_p.padding[2];
- deconvolution->input_padding_right = deconv_p.padding[3];
- deconvolution->adjustment_width = deconv_p.adjustment_dims[0];
- deconvolution->adjustment_height = deconv_p.adjustment_dims[1];
-
- deconvolution->kernel_width = kernel_width;
- deconvolution->kernel_height = kernel_height;
- deconvolution->stride_width = stride_width;
- deconvolution->stride_height = stride_height;
- deconvolution->dilation_width = dilation_width;
- deconvolution->dilation_height = dilation_height;
- deconvolution->groups = deconv_p.groups;
- deconvolution->group_input_channels = group_input_channels;
- deconvolution->group_output_channels = group_output_channels;
-
// deconvolution->kernel_zero_point = deconv_p.kernel_zero_points;
// const float kernel_scale = deconv_p.kernel_scale;
// const float deconvolution_scale = input_scale * kernel_scale / output_scale;
@@ -173,9 +118,6 @@
output_min,
output_max);
- deconvolution->ukernel_type = pytorch_qnnp_ukernel_type_conv;
- deconvolution->format = pytorch_qnnp_format_quint8;
-
// Setup the kernel
const std::array<size_t, 2> output_dims =
deconv_p.compute_output_dims({input_width, input_height});
@@ -183,35 +125,25 @@
const size_t output_height = output_dims[1];
const size_t kernel_size = kernel_height * kernel_width;
const size_t output_size = output_height * output_width;
- const size_t groups = deconvolution->groups;
- const size_t output_tile_size = pytorch_qnnp_params.q8conv.mr;
- const size_t tiled_output_size = round_up(output_size, output_tile_size);
- const size_t indirection_buffer_size =
- sizeof(void*) * batch_size * groups * tiled_output_size * kernel_size;
- deconvolution->batch_size = batch_size;
- deconvolution->input_height = input_height;
- deconvolution->input_width = input_width;
- deconvolution->input = input;
- deconvolution->input_pixel_stride = deconv_p.input_channels;
- deconvolution->output_height = output_height;
- deconvolution->output_width = output_width;
- deconvolution->output = output;
- deconvolution->output_pixel_stride = deconv_p.output_channels;
+ const size_t input_pixel_stride = deconv_p.input_channels;
+ const size_t output_pixel_stride = deconv_p.output_channels;
- const void** indirection_buffer = (const void**)realloc(
- deconvolution->indirection_buffer, indirection_buffer_size);
- if (indirection_buffer == NULL) {
+ pytorch_qnnp_status status = pytorch_qnnp_setup_deconvolution2d_nhwc_q8(
+ deconvolution,
+ batch_size,
+ input_height,
+ input_width,
+ input,
+ input_pixel_stride,
+ output,
+ output_pixel_stride,
+ threadpool);
+ if (status != pytorch_qnnp_status_success) {
pytorch_qnnp_log_error(
- "failed to allocate %zu bytes for indirection buffer",
- indirection_buffer_size);
- pytorch_qnnp_delete_operator(deconvolution);
- return pytorch_qnnp_status_out_of_memory;
+ "failed to run deconvolution op setup to set up indirection buffer.");
+ return status;
}
- deconvolution->indirection_buffer = indirection_buffer;
-
- pytorch_qnnp_indirection_init_deconv2d(
- deconvolution, output_tile_size, tiled_output_size);
// Run the kernel
const size_t m_stride = round_up(output_size, mr);
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/params.h b/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/params.h
index 67decd4..1784742 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/params.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/src/qnnpack/params.h
@@ -577,7 +577,7 @@
extern "C" {
#endif
-extern PYTORCH_QNNP_INTERNAL struct pytorch_qnnp_parameters pytorch_qnnp_params;
+extern struct pytorch_qnnp_parameters pytorch_qnnp_params;
#ifdef __cplusplus
}
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/test/convolution-operator-tester.h b/aten/src/ATen/native/quantized/cpu/qnnpack/test/convolution-operator-tester.h
index 52fbe00..c6771f1 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/test/convolution-operator-tester.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/test/convolution-operator-tester.h
@@ -24,6 +24,79 @@
#include "test_utils.h"
using namespace qnnpack::testing;
+pytorch_qnnp_operator_t create_convolution_op(const qnnpack::conv_param_t& conv_p,
+ const uint8_t input_zero_point) {
+ pytorch_qnnp_operator_t convolution = nullptr;
+ convolution =
+ static_cast<pytorch_qnnp_operator_t>(calloc(1, sizeof(struct pytorch_qnnp_operator)));
+ if (convolution == nullptr) {
+ pytorch_qnnp_log_error(
+ "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
+ sizeof(struct pytorch_qnnp_operator));
+ }
+
+ convolution->ukernel_type = conv_p.ukernel_type;
+ convolution->groups = conv_p.groups;
+ convolution->group_input_channels = conv_p.group_input_channels;
+ convolution->kernel_height = conv_p.kernel_dims[1];
+ convolution->kernel_width = conv_p.kernel_dims[0];
+ convolution->stride_height = conv_p.stride_dims[1];
+ convolution->stride_width = conv_p.stride_dims[0];
+ convolution->dilation_height = conv_p.dilation[1];
+ convolution->dilation_width = conv_p.dilation[0];
+ convolution->input_padding_top = conv_p.padding[0];
+ convolution->input_padding_left = conv_p.padding[1];
+ convolution->input_padding_bottom = conv_p.padding[2];
+ convolution->input_padding_right = conv_p.padding[3];
+
+ const bool any_padding = (conv_p.padding[0]| conv_p.padding[1]
+ |conv_p.padding[2] | conv_p.padding[3]) != 0;
+
+ if (any_padding) {
+ size_t zero_size = 0, zero_offset = 0;
+ if (conv_p.ukernel_type == pytorch_qnnp_ukernel_type_dwconv) {
+ const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
+ const size_t group_stride = (conv_p.groups + (cr - 1)) & -cr;
+ if (conv_p.groups >= 8) {
+ zero_size = sizeof(uint8_t) * group_stride;
+ zero_offset = 0;
+ } else {
+ zero_size = sizeof(uint8_t) * group_stride + 8;
+ zero_offset = sizeof(uint8_t) * 8;
+ }
+ } else if (conv_p.ukernel_type == pytorch_qnnp_ukernel_type_conv ||
+ conv_p.ukernel_type == pytorch_qnnp_ukernel_type_gemm) {
+ const size_t group_input_channels = conv_p.group_input_channels;
+ const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
+ const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
+ if (conv_p.group_input_channels >= 8) {
+ zero_size = sizeof(uint8_t) * k_stride;
+ zero_offset = 0;
+ } else {
+ zero_size = sizeof(uint8_t) * k_stride + 8;
+ zero_offset = 8;
+ }
+ }
+
+ void* zero_buffer = malloc(zero_size);
+ if (zero_buffer == nullptr) {
+ pytorch_qnnp_log_error(
+ "failed to allocate bytes for zero padding:");
+ }
+ // Need to set to input zero point
+ memset(zero_buffer, input_zero_point, zero_size);
+ convolution->zero_buffer = zero_buffer;
+ convolution->zero_pointer =
+ (void*)((uintptr_t)zero_buffer + zero_offset);
+ }
+
+ if (conv_p.per_channel && conv_p.ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) {
+ pytorch_qnnp_log_error(
+ "Per channel quantized weights are not supported for XZP kernels");
+ }
+ return convolution;
+}
+
class ConvolutionOperatorTester {
public:
inline ConvolutionOperatorTester& padding(uint32_t padding) {
@@ -580,6 +653,7 @@
groupOutputChannels() * groups(),
/*transpose=*/false,
per_channel());
+ auto conv_op = create_convolution_op(conv_p, inputZeroPoint);
auto packW = std::unique_ptr<qnnpack::PrePackConvWeights>(
new qnnpack::PrePackConvWeights(
conv_p,
@@ -588,6 +662,7 @@
bias.data()));
const pytorch_qnnp_status runStatus = qnnpack::qnnpackConv(
conv_p,
+ conv_op,
packW->getPackedWeights(),
batchSize(),
inputHeight(),
@@ -602,6 +677,9 @@
output.data(),
nullptr);
ASSERT_EQ(pytorch_qnnp_status_success, runStatus);
+ ASSERT_EQ(
+ pytorch_qnnp_status_success,
+ pytorch_qnnp_delete_operator(conv_op));
}
break;
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack/test/deconvolution-operator-tester.h b/aten/src/ATen/native/quantized/cpu/qnnpack/test/deconvolution-operator-tester.h
index d59d027..22fa5b0 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack/test/deconvolution-operator-tester.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack/test/deconvolution-operator-tester.h
@@ -24,6 +24,56 @@
#include "test_utils.h"
using namespace qnnpack::testing;
+pytorch_qnnp_operator_t create_deconvolution_op(const qnnpack::conv_param_t& conv_p,
+ const uint8_t input_zero_point) {
+ pytorch_qnnp_operator_t deconvolution = nullptr;
+ deconvolution =
+ static_cast<pytorch_qnnp_operator_t>(calloc(1, sizeof(struct pytorch_qnnp_operator)));
+ if (deconvolution == nullptr) {
+ pytorch_qnnp_log_error(
+ "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
+ sizeof(struct pytorch_qnnp_operator));
+ }
+
+ deconvolution->ukernel_type = conv_p.ukernel_type;
+ deconvolution->groups = conv_p.groups;
+ deconvolution->group_input_channels = conv_p.group_input_channels;
+ deconvolution->kernel_height = conv_p.kernel_dims[1];
+ deconvolution->kernel_width = conv_p.kernel_dims[0];
+ deconvolution->stride_height = conv_p.stride_dims[1];
+ deconvolution->stride_width = conv_p.stride_dims[0];
+ deconvolution->dilation_height = conv_p.dilation[1];
+ deconvolution->dilation_width = conv_p.dilation[0];
+ deconvolution->input_padding_top = conv_p.padding[0];
+ deconvolution->input_padding_left = conv_p.padding[1];
+ deconvolution->input_padding_bottom = conv_p.padding[2];
+ deconvolution->input_padding_right = conv_p.padding[3];
+
+ deconvolution->adjustment_width = conv_p.adjustment_dims[0];
+ deconvolution->adjustment_height = conv_p.adjustment_dims[1];
+
+ const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
+ const size_t k_stride = (conv_p.group_input_channels + (kr - 1)) & -kr;
+ size_t zero_size = sizeof(uint8_t) * k_stride;
+ size_t zero_offset = 0;
+ if (conv_p.group_input_channels < 8) {
+ zero_size += 8;
+ zero_offset = 8;
+ }
+ void* zero_buffer = malloc(zero_size);
+ if (zero_buffer == NULL) {
+ pytorch_qnnp_delete_operator(deconvolution);
+ pytorch_qnnp_log_error(
+ "failed to allocate %zu bytes for zero padding", zero_size);
+ }
+ memset(zero_buffer, input_zero_point, zero_size);
+
+ deconvolution->zero_buffer = zero_buffer;
+ deconvolution->zero_pointer = (void*) ((uintptr_t) zero_buffer + zero_offset);
+
+ return deconvolution;
+}
+
class DeconvolutionOperatorTester {
public:
inline DeconvolutionOperatorTester& padding(uint32_t padding) {
@@ -597,6 +647,7 @@
groupOutputChannels() * groups(),
/*transpose=*/true,
per_channel());
+ auto deconv_op = create_deconvolution_op(deconv_p, inputZeroPoint);
auto packW = std::unique_ptr<qnnpack::PrePackConvWeights>(
new qnnpack::PrePackConvWeights(
deconv_p,
@@ -605,6 +656,7 @@
bias.data()));
const pytorch_qnnp_status runStatus = qnnpack::qnnpackDeConv(
deconv_p,
+ deconv_op,
packW->getPackedWeights(),
batchSize(),
inputHeight(),
diff --git a/aten/src/ATen/native/quantized/cpu/qnnpack_utils.h b/aten/src/ATen/native/quantized/cpu/qnnpack_utils.h
index 6243cc1..165b8f9 100644
--- a/aten/src/ATen/native/quantized/cpu/qnnpack_utils.h
+++ b/aten/src/ATen/native/quantized/cpu/qnnpack_utils.h
@@ -121,9 +121,84 @@
groups_ * this->orig_weight.size(1),
this->orig_weight.size(0),
/*transpose=*/false,
- is_per_channel)
- {}
+ is_per_channel) {
+ pytorch_qnnp_operator_t convolution{nullptr};
+ convolution =
+ static_cast<pytorch_qnnp_operator_t>(calloc(1, sizeof(struct pytorch_qnnp_operator)));
+ if (convolution == nullptr) {
+ TORCH_INTERNAL_ASSERT(
+ "failed to allocate %zu bytes for pytorch_qnnp_operator structure",
+ sizeof(struct pytorch_qnnp_operator));
+ }
+
+ convolution_op =
+ std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter>(convolution);
+
+ convolution->ukernel_type = conv_p.ukernel_type;
+ convolution->groups = groups;
+ convolution->group_input_channels = conv_p.group_input_channels;
+ convolution->kernel_height = conv_p.kernel_dims[1];
+ convolution->kernel_width = conv_p.kernel_dims[0];
+ convolution->stride_height = conv_p.stride_dims[1];
+ convolution->stride_width = conv_p.stride_dims[0];
+ convolution->dilation_height = conv_p.dilation[1];
+ convolution->dilation_width = conv_p.dilation[0];
+ convolution->input_padding_top = conv_p.padding[0];
+ convolution->input_padding_left = conv_p.padding[1];
+ convolution->input_padding_bottom = conv_p.padding[2];
+ convolution->input_padding_right = conv_p.padding[3];
+
+ const bool any_padding = (conv_p.padding[0]| conv_p.padding[1]
+ |conv_p.padding[2] | conv_p.padding[3]) != 0;
+
+ zero_buffer_size = 0;
+ if (any_padding) {
+ size_t zero_size = 0, zero_offset = 0;
+ if (conv_p.ukernel_type == pytorch_qnnp_ukernel_type_dwconv) {
+ const uint32_t cr = pytorch_qnnp_params.q8dw9.cr;
+ const size_t group_stride = (groups + (cr - 1)) & -cr;
+ if (groups >= 8) {
+ zero_size = sizeof(uint8_t) * group_stride;
+ zero_offset = 0;
+ } else {
+ zero_size = sizeof(uint8_t) * group_stride + 8;
+ zero_offset = sizeof(uint8_t) * 8;
+ }
+ } else if (conv_p.ukernel_type == pytorch_qnnp_ukernel_type_conv ||
+ conv_p.ukernel_type == pytorch_qnnp_ukernel_type_gemm) {
+ const size_t group_input_channels = conv_p.group_input_channels;
+ const uint32_t kr = pytorch_qnnp_params.q8conv.kr;
+ const size_t k_stride = (group_input_channels + (kr - 1)) & -kr;
+ if (conv_p.group_input_channels >= 8) {
+ zero_size = sizeof(uint8_t) * k_stride;
+ zero_offset = 0;
+ } else {
+ zero_size = sizeof(uint8_t) * k_stride + 8;
+ zero_offset = 8;
+ }
+ }
+
+ void* zero_buffer = malloc(zero_size);
+ if (zero_buffer == nullptr) {
+ TORCH_INTERNAL_ASSERT(
+ "failed to allocate bytes for zero padding:", zero_size);
+ }
+ // Need to set to input zero point
+ //memset(zero_buffer, input_zero_point, zero_size);
+ zero_buffer_size = zero_size;
+ convolution->zero_buffer = zero_buffer;
+ convolution->zero_pointer =
+ (void*)((uintptr_t)zero_buffer + zero_offset);
+ }
+
+ if (conv_p.per_channel && conv_p.ukernel_type == pytorch_qnnp_ukernel_type_xzp_gemm) {
+ TORCH_INTERNAL_ASSERT(
+ "Per channel quantized weights are not supported for XZP kernels");
+ }
+ }
+
+ std::unique_ptr<pytorch_qnnp_operator, QnnpackOperatorDeleter> convolution_op;
std::unique_ptr<qnnpack::PrePackConvWeights> w;
at::Tensor orig_weight;
at::Tensor bias;
@@ -137,6 +212,7 @@
std::vector<uint8_t> w_zero_points;
std::vector<float> requantization_scales;
qnnpack::conv_param_t conv_p;
+ size_t zero_buffer_size;
at::Tensor apply(
const at::Tensor& input,