make ATen/native/cuda/DilatedMaxPool2d.cu data_ptr-correct (#99321)
Test Plan: Rely on CI.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/99321
Approved by: https://github.com/ezyang
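
For context, the pattern this change enforces: plain data_ptr<T>() hands back a non-const pointer regardless of whether the tensor is read or written, so read-only tensors now go through const_data_ptr<T>() and written tensors through mutable_data_ptr<T>(). A minimal sketch of the pattern follows; the scale_into helper is hypothetical and not part of this diff, while const_data_ptr / mutable_data_ptr are the ATen accessors the diff switches to.

#include <ATen/ATen.h>

// Hypothetical helper illustrating data_ptr-correct access: the input is
// only read, the output is written, and the pointer types say so.
void scale_into(const at::Tensor& input, at::Tensor& output, float alpha) {
  TORCH_CHECK(input.is_contiguous() && output.is_contiguous());
  TORCH_CHECK(input.numel() == output.numel());
  const float* in = input.const_data_ptr<float>();  // read-only: const pointer
  float* out = output.mutable_data_ptr<float>();    // written: mutable pointer
  for (int64_t i = 0; i < input.numel(); ++i) {
    out[i] = alpha * in[i];
  }
}

The diff below applies exactly this split: in the forward kernel launch, output and indices are written while input is only read; in the backward, gradInput is written while gradOutput and indices are only read.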
diff --git a/aten/src/ATen/native/cuda/DilatedMaxPool2d.cu b/aten/src/ATen/native/cuda/DilatedMaxPool2d.cu
index 8b38758..ab73f68 100644
--- a/aten/src/ATen/native/cuda/DilatedMaxPool2d.cu
+++ b/aten/src/ATen/native/cuda/DilatedMaxPool2d.cu
@@ -348,9 +348,9 @@
[&] {
using accscalar_t = acc_type<scalar_t, true>;
- scalar_t *output_data = output.data_ptr<scalar_t>();
- scalar_t *input_data = input.data_ptr<scalar_t>();
- int64_t *indices_data = indices.data_ptr<int64_t>();
+ scalar_t *output_data = output.mutable_data_ptr<scalar_t>();
+ const scalar_t *input_data = input.const_data_ptr<scalar_t>();
+ int64_t *indices_data = indices.mutable_data_ptr<int64_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {
@@ -484,9 +484,9 @@
[&] {
using accscalar_t = acc_type<scalar_t, true>;
- scalar_t *gradOutput_data = gradOutput.data_ptr<scalar_t>();
- scalar_t *gradInput_data = gradInput.data_ptr<scalar_t>();
- int64_t *indices_data = indices.data_ptr<int64_t>();
+ const scalar_t *gradOutput_data = gradOutput.const_data_ptr<scalar_t>();
+ scalar_t *gradInput_data = gradInput.mutable_data_ptr<scalar_t>();
+ const int64_t *indices_data = indices.const_data_ptr<int64_t>();
switch (memory_format) {
case MemoryFormat::ChannelsLast: {