No need to filter XLA for int32/int64 matmuls

PiperOrigin-RevId: 382171555
Change-Id: I131a297cf9b48928d04789afab7ba90b13d751ee
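
For context, a minimal sketch (not part of this change, assuming TF 2.5+ where
tf.function(jit_compile=True) is available) of what the removed skip was
guarding against: an int32 matmul now compiles and runs under XLA, since DOT
handles integer types.

    import numpy as np
    import tensorflow as tf

    @tf.function(jit_compile=True)  # force XLA compilation of the matmul
    def int_matmul(a, b):
      # XLA DOT now supports integer dtypes, so no skip is needed here.
      return tf.linalg.matmul(a, b)

    a = tf.constant(np.arange(6, dtype=np.int32).reshape(2, 3))
    b = tf.constant(np.arange(12, dtype=np.int32).reshape(3, 4))
    print(int_matmul(a, b))  # 2x4 int32 result
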
diff --git a/tensorflow/python/kernel_tests/matmul_op_test.py b/tensorflow/python/kernel_tests/matmul_op_test.py
index 1c36088..c59aae2 100644
--- a/tensorflow/python/kernel_tests/matmul_op_test.py
+++ b/tensorflow/python/kernel_tests/matmul_op_test.py
@@ -259,11 +259,6 @@
   # TF2 does not support placeholders under eager so we skip it
   for use_static_shape in set([True, tf2.enabled()]):
     for dtype in dtypes_to_test:
-      if test_util.is_xla_enabled() and (dtype == np.int32 or
-                                         dtype == np.int64):
-        # TODO(b/171924639): Enable this test when XLA DOT supports
-        # integer types.
-        continue
       for m in sizes:
         for n in sizes:
           for k in sizes: