Correct use of torch.backends.cudnn.flags() (#93182)

Fixes #77467.

The `torch.backends.cudnn.flags()` context manager defaults `enabled` to `False`, so calls that passed only `allow_tf32`, `benchmark`, or `deterministic` were silently disabling cuDNN for the duration of the block. Pass `enabled=True` explicitly at each call site so cuDNN stays on while the other flags are overridden.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/93182
Approved by: https://github.com/ngimel
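
A minimal sketch of the pitfall and the fix (assuming `flags()` defaults to `enabled=False`, as in the version this PR targets; the flags are plain global settings, so this runs on any build):

    import torch

    # Pitfall: `enabled` defaults to False, so passing only `allow_tf32`
    # silently turns cuDNN off for the whole block, not just TF32.
    with torch.backends.cudnn.flags(allow_tf32=False):
        assert not torch.backends.cudnn.enabled  # cuDNN disabled unintentionally

    # Correct: keep cuDNN enabled while overriding the other flags.
    with torch.backends.cudnn.flags(enabled=True, allow_tf32=False):
        assert torch.backends.cudnn.enabled
        assert not torch.backends.cudnn.allow_tf32

The test changes below apply the same `enabled=True` fix at each affected call site.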
diff --git a/test/distributed/algorithms/quantization/test_quantization.py b/test/distributed/algorithms/quantization/test_quantization.py
index aebf3cc..a3b505d 100644
--- a/test/distributed/algorithms/quantization/test_quantization.py
+++ b/test/distributed/algorithms/quantization/test_quantization.py
@@ -45,7 +45,7 @@
         def setUp(self):
             super(DistQuantizationTests, self).setUp()
             self._spawn_processes()
-            torch.backends.cudnn.flags(allow_tf32=False).__enter__()
+            torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
 
         def tearDown(self):
             super(DistQuantizationTests, self).tearDown()
diff --git a/test/distributed/test_distributed_spawn.py b/test/distributed/test_distributed_spawn.py
index b2a23ff..8499f16 100644
--- a/test/distributed/test_distributed_spawn.py
+++ b/test/distributed/test_distributed_spawn.py
@@ -33,7 +33,7 @@
         def setUp(self):
             super().setUp()
             self._spawn_processes()
-            torch.backends.cudnn.flags(allow_tf32=False).__enter__()
+            torch.backends.cudnn.flags(enabled=True, allow_tf32=False).__enter__()
 
 
 if __name__ == "__main__":
diff --git a/test/inductor/test_torchinductor.py b/test/inductor/test_torchinductor.py
index 87b03ab..ce1e25e 100644
--- a/test/inductor/test_torchinductor.py
+++ b/test/inductor/test_torchinductor.py
@@ -4837,7 +4837,7 @@
         rank3_inps = [shrink_rank(x, 4) for x in [grad_out, inp, weight]]
         rank5_inps = [shrink_rank(x, 5) for x in [grad_out, inp, weight]]
 
-        with torch.backends.cudnn.flags(allow_tf32=False):
+        with torch.backends.cudnn.flags(enabled=True, allow_tf32=False):
             self.common(
                 fn,
                 [rank4_inps, rank3_inps, rank5_inps],
diff --git a/test/nn/test_convolution.py b/test/nn/test_convolution.py
index 0f2bb0c..25dbb56 100644
--- a/test/nn/test_convolution.py
+++ b/test/nn/test_convolution.py
@@ -920,7 +920,7 @@
         ]
 
         def run_test(benchmark):
-            with torch.backends.cudnn.flags(benchmark=benchmark):
+            with torch.backends.cudnn.flags(enabled=True, benchmark=benchmark):
                 conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
                 for size in sizes:
                     x = torch.randn(size, device=device, dtype=dtype)
@@ -1058,7 +1058,7 @@
     @onlyCUDA
     @dtypes(torch.double)
     def test_conv_double_backward(self, device, dtype):
-        with torch.backends.cudnn.flags(deterministic=True):
+        with torch.backends.cudnn.flags(enabled=True, deterministic=True):
             # Double backward only runs with DoubleTensor due to precision reason
             batch_size = 1
             for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]: