[caffe2] Wrap constexpr with preprocessor statements (#132582)

Summary: When the preprocessor check fails, we leave an unused constexpr around, so when `-Wunused-const-variable` is enabled we get an error. Let's wrap these values in the same preprocessor check, since they're not used anywhere else, in order to avoid this.
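
For illustration, a minimal sketch of the pattern being fixed. USE_JITERATOR_PATH, kernel_name, and launch are hypothetical names standing in for the real `AT_USE_JITERATOR() && CUDA_VERSION >= 11050` check and the kernel name constants. With the `#if` guard around the constant removed and the macro set to 0, building with `-Wunused-const-variable -Werror` reproduces the failure; with the guard in place, both configurations build cleanly:

    #include <cstdio>

    // Hypothetical feature toggle; set to 0 to simulate the configuration
    // where the constant would otherwise be defined but never referenced.
    #define USE_JITERATOR_PATH 1

    #if USE_JITERATOR_PATH
    // Guarded exactly like its only use site below, so
    // -Wunused-const-variable never fires in the disabled configuration.
    constexpr char kernel_name[] = "addcmul";
    #endif

    void launch() {
    #if USE_JITERATOR_PATH
      // Only this path references kernel_name.
      std::printf("jiterator path: %s\n", kernel_name);
    #else
      std::printf("fallback path\n");
    #endif
    }

    int main() {
      launch();
      return 0;
    }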

Test Plan: CI

Differential Revision: D60723823

Pull Request resolved: https://github.com/pytorch/pytorch/pull/132582
Approved by: https://github.com/houseroad
diff --git a/aten/src/ATen/native/cuda/PointwiseOpsKernel.cu b/aten/src/ATen/native/cuda/PointwiseOpsKernel.cu
index 8403729..4f174bf 100644
--- a/aten/src/ATen/native/cuda/PointwiseOpsKernel.cu
+++ b/aten/src/ATen/native/cuda/PointwiseOpsKernel.cu
@@ -11,7 +11,9 @@
 
 namespace at::native {
 
+#if AT_USE_JITERATOR() && CUDA_VERSION >= 11050
 CONSTEXPR_EXCEPT_WIN_CUDA char addcmul_name[] = "addcmul";
+#endif
 void addcmul_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {
@@ -55,8 +57,10 @@
   }
 }
 
+#if AT_USE_JITERATOR() && CUDA_VERSION >= 11050
 // return a + alpha * (b / static_cast<accscalar_t>(c));
 CONSTEXPR_EXCEPT_WIN_CUDA char addcdiv_name[] = "addcdiv";
+#endif
 void addcdiv_cuda_kernel(TensorIteratorBase& iter, const Scalar& value) {
   auto dtype = iter.common_dtype();
   if (at::isComplexType(dtype)) {