[ONNX] Fix cuda test case (#63597) (#64378)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/64378
* Skip the script test for unsupported autocast.
* Fix the test case by adding the missing `autocast` decorator and `model.cuda()` call (see the sketch below).
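For context, a minimal sketch of the pattern this fix applies: the forward pass runs under `torch.cuda.amp.autocast` and the module parameters are moved to CUDA so they match the fp16 CUDA input. The export call, file name, and opset below are illustrative only and not part of this PR.

    import torch
    from torch.cuda.amp import autocast

    class LayerNormModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.layer_norm = torch.nn.LayerNorm([10, 10])

        @autocast()  # run forward under autocast so the fp16 input is handled
        def forward(self, x):
            return self.layer_norm(x)

    model = LayerNormModel().cuda()  # parameters must live on CUDA to match the input
    x = torch.randn(20, 5, 10, 10, dtype=torch.float16, device="cuda")
    torch.onnx.export(model, x, "layer_norm_fp16.onnx", opset_version=12)  # illustrative export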
Test Plan: Imported from OSS
Reviewed By: jansel
Differential Revision: D30919600
Pulled By: malfet
fbshipit-source-id: 3231fc672d97de487d6e4460626df0ba25f212ce
Co-authored-by: BowenBao <bowbao@microsoft.com>
diff --git a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py
index 1352f61..ff5aab1 100644
--- a/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py
+++ b/test/onnx/test_pytorch_onnx_onnxruntime_cuda.py
@@ -4,7 +4,7 @@
from torch.cuda.amp import autocast
-from test_pytorch_common import skipIfUnsupportedMinOpsetVersion
+from test_pytorch_common import disableScriptTest, skipIfUnsupportedMinOpsetVersion
from test_pytorch_common import skipIfNoCuda
from test_pytorch_onnx_onnxruntime import TestONNXRuntime
@@ -27,21 +27,24 @@
@skipIfUnsupportedMinOpsetVersion(9)
@skipIfNoCuda
+ @disableScriptTest()
def test_layer_norm_fp16(self):
class LayerNormModel(torch.nn.Module):
def __init__(self):
super(LayerNormModel, self).__init__()
self.layer_norm = torch.nn.LayerNorm([10, 10])
+ @autocast()
def forward(self, x):
return self.layer_norm(x)
x = torch.randn(20, 5, 10, 10, requires_grad=True, dtype=torch.float16, device=torch.device("cuda"))
- self.run_test(LayerNormModel(), x, rtol=1e-3, atol=1e-5)
+ self.run_test(LayerNormModel().cuda(), x, rtol=1e-3, atol=1e-5)
@skipIfUnsupportedMinOpsetVersion(12)
@skipIfNoCuda
+ @disableScriptTest()
def test_softmaxCrossEntropy_fusion_fp16(self):
class FusionModel(torch.nn.Module):
def __init__(self):