Fix qconv benchmark (#24019)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/24019

Permutes are now performed inside the module, so the external permute calls are no longer needed and have been removed.

The API for setting scale/zero_point has changed: they must now be assigned as tensors (double and int, respectively) rather than Python scalars.

Reviewed By: jianyuh

Differential Revision: D16712437

fbshipit-source-id: e3cedf9d63347fbf8070d1a65a196e6d4b2833fc
diff --git a/benchmarks/operator_benchmark/pt/qconv_test.py b/benchmarks/operator_benchmark/pt/qconv_test.py
index 2668b19..d89ea70 100644
--- a/benchmarks/operator_benchmark/pt/qconv_test.py
+++ b/benchmarks/operator_benchmark/pt/qconv_test.py
@@ -34,17 +34,15 @@
         scale = 1.0 / 255
         zero_point = 0
         X = torch.randn(N, IC, H, W, dtype=torch.float32)
-        X = X.permute([0, 2, 3, 1]).contiguous()
         qX = torch.quantize_linear(X, scale=scale, zero_point=zero_point, dtype=torch.quint8)
         W = torch.randn(OC, IC // G, kernel, kernel, dtype=torch.float32)
-        W = W.permute([0, 2, 3, 1]).contiguous()
         qW = torch.quantize_linear(W, scale=scale, zero_point=0, dtype=torch.qint8)
 
         self.input = qX
         self.qconv2d = nnq.Conv2d(IC, OC, kernel, stride=stride, padding=pad, groups=G)
         self.qconv2d.weight = qW
-        self.qconv2d.scale = scale
-        self.qconv2d.zero_point = zero_point
+        self.qconv2d.scale = torch.tensor([scale], dtype=torch.double)
+        self.qconv2d.zero_point = torch.tensor([zero_point], dtype=torch.int)
         self.set_module_name("QConv2d")
 
     def forward(self):