Add tests for logical operations in BinaryOpsKernel.cpp (#41515)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/41515

Add tests in atest.cpp to cover logical_and_kernel, logical_or_kernel, and logical_xor_kernel in ATen/native/cpu/BinaryOpsKernel.cpp.

https://pxl.cl/1drmV
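
For context, a minimal sketch of how the out-variant logical ops exercised by these tests are invoked. This snippet is illustrative only and not part of the diff; it assumes the usual ATen headers and reuses the inputs and expected outputs from run_logical_op_test below.

    #include <ATen/ATen.h>

    int main() {
      // Inputs mirror run_logical_op_test in the diff below.
      at::Tensor x = at::tensor({1, 1, 0, 1, 0});
      at::Tensor y = at::tensor({0, 1, 0, 1, 1});
      at::Tensor out = at::empty({5}, at::kInt);

      at::logical_and_out(out, x, y);  // expected {0, 1, 0, 1, 0}
      at::logical_or_out(out, x, y);   // expected {1, 1, 0, 1, 1}
      at::logical_xor_out(out, x, y);  // expected {1, 0, 0, 0, 1}
      return 0;
    }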

Test Plan: CI

Reviewed By: malfet

Differential Revision: D22565235

fbshipit-source-id: 7ad9fd8420d7fdd23fd9a703c75da212f72bde2c
diff --git a/aten/src/ATen/test/atest.cpp b/aten/src/ATen/test/atest.cpp
index 50f7380..2ef7cbe 100644
--- a/aten/src/ATen/test/atest.cpp
+++ b/aten/src/ATen/test/atest.cpp
@@ -2,7 +2,7 @@
 
 #include <ATen/ATen.h>
 
-#include<iostream>
+#include <iostream>
 using namespace std;
 using namespace at;
 
@@ -33,6 +33,32 @@
   ASSERT_TRUE(tensor({a ^ b}).equal(a_tensor ^ b_tensor));
 }
 
+template <class T>
+void run_logical_op_test(const Tensor& exp, T func) {
+  auto x_tensor = tensor({1, 1, 0, 1, 0});
+  auto y_tensor = tensor({0, 1, 0, 1, 1});
+  // Test op over integer tensors
+  auto out_tensor = empty({5}, kInt);
+  func(out_tensor, x_tensor, y_tensor);
+  ASSERT_EQ(out_tensor.dtype(), kInt);
+  ASSERT_TRUE(exp.equal(out_tensor));
+  // Test op over boolean tensors
+  out_tensor = empty({5}, kBool);
+  func(out_tensor, x_tensor.to(kBool), y_tensor.to(kBool));
+  ASSERT_EQ(out_tensor.dtype(), kBool);
+  ASSERT_TRUE(out_tensor.equal(exp.to(kBool)));
+}
+
+TEST(atest, logical_and_operators) {
+  run_logical_op_test(tensor({0, 1, 0, 1, 0}), logical_and_out);
+}
+TEST(atest, logical_or_operators) {
+  run_logical_op_test(tensor({1, 1, 0, 1, 1}), logical_or_out);
+}
+TEST(atest, logical_xor_operators) {
+  run_logical_op_test(tensor({1, 0, 0, 0, 1}), logical_xor_out);
+}
+
 // TEST_CASE( "atest", "[]" ) {
 TEST(atest, atest) {
   manual_seed(123);
@@ -84,8 +110,7 @@
   {
     int isgone = 0;
     {
-      auto f2 =
-          from_blob(data, {1, 2, 3}, [&](void*) { isgone++; });
+      auto f2 = from_blob(data, {1, 2, 3}, [&](void*) { isgone++; });
     }
     ASSERT_EQ(isgone, 1);
   }
@@ -93,8 +118,7 @@
     int isgone = 0;
     Tensor a_view;
     {
-      auto f2 =
-          from_blob(data, {1, 2, 3}, [&](void*) { isgone++; });
+      auto f2 = from_blob(data, {1, 2, 3}, [&](void*) { isgone++; });
       a_view = f2.view({3, 2, 1});
     }
     ASSERT_EQ(isgone, 0);
@@ -105,17 +129,17 @@
   if (at::hasCUDA()) {
     int isgone = 0;
     {
-      auto base = at::empty({1,2,3}, TensorOptions(kCUDA));
+      auto base = at::empty({1, 2, 3}, TensorOptions(kCUDA));
       auto f2 = from_blob(base.data_ptr(), {1, 2, 3}, [&](void*) { isgone++; });
     }
     ASSERT_EQ(isgone, 1);
 
     // Attempt to specify the wrong device in from_blob
-    auto t = at::empty({1,2,3}, TensorOptions(kCUDA, 0));
-    EXPECT_ANY_THROW(from_blob(t.data_ptr(), {1,2,3}, at::Device(kCUDA, 1)));
+    auto t = at::empty({1, 2, 3}, TensorOptions(kCUDA, 0));
+    EXPECT_ANY_THROW(from_blob(t.data_ptr(), {1, 2, 3}, at::Device(kCUDA, 1)));
 
     // Infers the correct device
-    auto t_ = from_blob(t.data_ptr(), {1,2,3}, kCUDA);
+    auto t_ = from_blob(t.data_ptr(), {1, 2, 3}, kCUDA);
     ASSERT_EQ(t_.device(), at::Device(kCUDA, 0));
   }
 }