Utilise the vector version for sinh and cosh (UnaryOpsKernel) (#36396)

Summary:
Utilise the existing `sinh()` and `cosh()` methods of the `Vec256` class by switching the kernels from `cpu_kernel` to `cpu_kernel_vec`.
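
For reference, `cpu_kernel_vec` takes both a scalar lambda and a `Vec256` lambda; the latter handles a full SIMD register per call while the scalar one covers the tail. A minimal sketch of the two paths, assuming the `ATen/cpu/vec256/vec256.h` header and `at::vec256` namespace layout of this PyTorch version:

```cpp
#include <ATen/cpu/vec256/vec256.h>  // assumed header location for Vec256
#include <cmath>

// Scalar fallback: one element per call (what cpu_kernel used before).
static float sinh_scalar(float a) {
  return std::sinh(a);
}

// Vectorized path: one Vec256<float> (8 floats with AVX2) per call,
// which is what the extra lambda passed to cpu_kernel_vec provides.
static at::vec256::Vec256<float> sinh_vec(at::vec256::Vec256<float> a) {
  return a.sinh();
}
```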

I'm not sure whether tests should be added and, if so, where they should go; a quick local sanity check is sketched below.
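
Not part of this change, but one way to sanity-check the vectorized kernel locally is to compare `at::sinh` against an element-wise `std::sinh` reference (the tensor size and program below are arbitrary assumptions, not an existing test):

```cpp
#include <ATen/ATen.h>
#include <cmath>
#include <iostream>

int main() {
  // Large enough to exercise the vectorized main loop, with a
  // non-multiple-of-8 tail that falls back to the scalar lambda.
  at::Tensor x = at::randn({1003});
  at::Tensor vec_out = at::sinh(x);  // dispatches to sinh_kernel on CPU

  // Element-wise std::sinh reference.
  at::Tensor ref = at::empty_like(x);
  auto xa = x.accessor<float, 1>();
  auto ra = ref.accessor<float, 1>();
  for (int64_t i = 0; i < x.size(0); ++i) {
    ra[i] = std::sinh(xa[i]);
  }

  std::cout << (at::allclose(vec_out, ref) ? "sinh OK" : "sinh mismatch") << "\n";
  return 0;
}
```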
Pull Request resolved: https://github.com/pytorch/pytorch/pull/36396

Differential Revision: D22155803

Pulled By: VitalyFedyunin

fbshipit-source-id: 500dcb5c79650bc5daa0c9683d65eeab6f9dd1d3
diff --git a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
index d7104b6..cf7e92d 100644
--- a/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
+++ b/aten/src/ATen/native/cpu/UnaryOpsKernel.cpp
@@ -184,17 +184,19 @@
 
 static void sinh_kernel(TensorIterator& iter) {
   AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "sinh_cpu", [&]() {
-    cpu_kernel(
+    cpu_kernel_vec(
         iter,
-        [=](scalar_t a) -> scalar_t { return std::sinh(a); });
+        [=](scalar_t a) -> scalar_t { return std::sinh(a); },
+        [=](Vec256<scalar_t> self_vec){return self_vec.sinh();});
   });
 }
 
 static void cosh_kernel(TensorIterator& iter) {
   AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES(iter.dtype(), "cosh_cpu", [&]() {
-    cpu_kernel(
+    cpu_kernel_vec(
         iter,
-        [=](scalar_t a) -> scalar_t { return std::cosh(a); });
+        [=](scalar_t a) -> scalar_t { return std::cosh(a); },
+        [=](Vec256<scalar_t> self_vec){return self_vec.cosh();});
   });
 }