Fixed log_normal_ and geometric_ for CPU (#19938)
Summary:
log_normal_ and geometric_ were mistakenly disabled for CPU in [this commit](https://github.com/pytorch/pytorch/commit/bc53805f2efff483ee71a934b985768ffbd96cb5); this PR re-enables them.
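For reference, a minimal usage sketch (public `torch` API, not part of this patch) of the two in-place samplers this change re-enables on CPU tensors; before this fix, these calls only dispatched to the CUDA backend:

```python
import torch

# Both samplers now dispatch on CPU as well as CUDA.
t = torch.empty(4)                # CPU float tensor
t.log_normal_(mean=0.0, std=1.0)  # fill in place with LogNormal(0, 1) samples
t.geometric_(0.5)                 # fill in place with Geometric(p=0.5) samples
print(t.dtype, t.size())          # torch.float32 torch.Size([4])
```

The new tests below exercise exactly these calls on every available device type.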
Pull Request resolved: https://github.com/pytorch/pytorch/pull/19938
Differential Revision: D15143404
Pulled By: izdeby
fbshipit-source-id: 41c7bd29f046b5a3ac6d601de8c64ab553771d19
diff --git a/aten/src/ATen/Declarations.cwrap b/aten/src/ATen/Declarations.cwrap
index b3bff4f..0eea8e3 100644
--- a/aten/src/ATen/Declarations.cwrap
+++ b/aten/src/ATen/Declarations.cwrap
@@ -2726,6 +2726,7 @@
types:
- floating_point
backends:
+ - CPU
- CUDA
return: self
arguments:
@@ -2759,6 +2760,7 @@
[[
name: _th_geometric_
backends:
+ - CPU
- CUDA
cname: geometric
variants: function
diff --git a/aten/src/TH/generic/THTensorRandom.cpp b/aten/src/TH/generic/THTensorRandom.cpp
index ea0561c..5839b4f 100644
--- a/aten/src/TH/generic/THTensorRandom.cpp
+++ b/aten/src/TH/generic/THTensorRandom.cpp
@@ -55,6 +55,12 @@
THTensor_(clampedRandom)(self, _generator, 0, max);
}
+void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p)
+{
+ std::lock_guard<std::mutex> lock(_generator->mutex);
+ TH_TENSOR_APPLY(scalar_t, self, *self_data = (scalar_t)THRandom_geometric(_generator, p););
+}
+
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
#if defined(TH_REAL_IS_FLOAT)
diff --git a/aten/src/TH/generic/THTensorRandom.h b/aten/src/TH/generic/THTensorRandom.h
index 947a4d8..22b2dd6 100644
--- a/aten/src/TH/generic/THTensorRandom.h
+++ b/aten/src/TH/generic/THTensorRandom.h
@@ -5,6 +5,7 @@
TH_API void THTensor_(random)(THTensor *self, THGenerator *_generator);
TH_API void THTensor_(clampedRandom)(THTensor *self, THGenerator *_generator, int64_t min, int64_t max);
TH_API void THTensor_(cappedRandom)(THTensor *self, THGenerator *_generator, int64_t max);
+TH_API void THTensor_(geometric)(THTensor *self, THGenerator *_generator, double p);
#if defined(TH_REAL_IS_FLOAT) || defined(TH_REAL_IS_DOUBLE)
TH_API void THTensor_(bernoulli_Tensor)(THTensor *self, THGenerator *_generator, THTensor *p);
diff --git a/test/test_torch.py b/test/test_torch.py
index c0d9d10..447ba0a 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -901,6 +901,18 @@
def test_max(self):
self._testSelection(torch.max, max)
+ def test_log_normal(self):
+ for device in torch.testing.get_all_device_types():
+ a = torch.tensor([10], dtype=torch.float, device=device).log_normal_()
+ self.assertEqual(a.dtype, torch.float)
+ self.assertEqual(a.size(), torch.Size([1]))
+
+ def test_geometric(self):
+ for device in torch.testing.get_all_device_types():
+ a = torch.tensor([10], dtype=torch.float, device=device).geometric_(0.5)
+ self.assertEqual(a.dtype, torch.float)
+ self.assertEqual(a.size(), torch.Size([1]))
+
@staticmethod
def _test_max_with_inf(self, dtypes=(torch.float, torch.double), device='cpu'):
for dtype in dtypes: