Remove some usages of ops.EagerTensor from Keras. Specifically, remove the no-op usages: v1 Keras code never runs when EagerTensors are present, so v1-only code never needs to check for them.
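
A minimal sketch of why these removals are safe (assuming a TF 2.x runtime, where
ops.EagerTensor subclasses ops.Tensor; the snippet below is illustrative only and
not part of the patch): an isinstance(x, ops.Tensor) check already covers eager
tensors, and the v1-only code paths touched below never see eager tensors at all.

    # Illustrative sketch (not part of the patch): under eager execution,
    # tf.constant returns an EagerTensor, which already satisfies the
    # isinstance(x, ops.Tensor) check that this change keeps.
    import tensorflow as tf
    from tensorflow.python.framework import ops

    x = tf.constant([1.0, 2.0])
    assert isinstance(x, ops.EagerTensor)  # eager tensor in TF 2.x
    assert isinstance(x, ops.Tensor)       # EagerTensor subclasses Tensor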

PiperOrigin-RevId: 334210496
Change-Id: I0b2e070dcf38e3ba349241a5fb8941c47e7ee6ff
diff --git a/tensorflow/python/keras/engine/base_layer_test.py b/tensorflow/python/keras/engine/base_layer_test.py
index 321eefc..98c852e 100644
--- a/tensorflow/python/keras/engine/base_layer_test.py
+++ b/tensorflow/python/keras/engine/base_layer_test.py
@@ -789,7 +789,6 @@
     with ops.Graph().as_default():
       x1 = array_ops.ones((3, 3))
     x2 = array_ops.ones((3, 3))
-    self.assertIsInstance(x2, ops.EagerTensor)
     with self.assertRaisesRegex(TypeError, 'Graph tensors'):
       math_ops.matmul(x1, x2)
 
diff --git a/tensorflow/python/keras/engine/base_preprocessing_layer.py b/tensorflow/python/keras/engine/base_preprocessing_layer.py
index 0818825..318323b 100644
--- a/tensorflow/python/keras/engine/base_preprocessing_layer.py
+++ b/tensorflow/python/keras/engine/base_preprocessing_layer.py
@@ -271,7 +271,7 @@
         values, default_value=sparse_default_value)
     values = K.get_value(dense_tensor)
 
-  if isinstance(values, (ops.EagerTensor, ops.Tensor)):
+  if isinstance(values, ops.Tensor):
     values = K.get_value(values)
 
   # We may get passed a ndarray or the code above may give us a ndarray.
diff --git a/tensorflow/python/keras/engine/training_arrays_v1.py b/tensorflow/python/keras/engine/training_arrays_v1.py
index df4b8cd..ad9b37f 100644
--- a/tensorflow/python/keras/engine/training_arrays_v1.py
+++ b/tensorflow/python/keras/engine/training_arrays_v1.py
@@ -153,10 +153,6 @@
   use_steps = is_dataset or steps_per_epoch is not None
   do_validation = val_inputs is not None
 
-  # Convert Eager Tensors to NumPy arrays to support batching/shuffling.
-  inputs, targets, sample_weights = training_utils_v1. \
-      convert_eager_tensors_to_numpy((inputs, targets, sample_weights))
-
   # Prepare input data.
   inputs = input_iterator or inputs
   if validation_in_fit and prepared_feed_values_from_dataset:
diff --git a/tensorflow/python/keras/engine/training_utils_v1.py b/tensorflow/python/keras/engine/training_utils_v1.py
index aa0de60..c198bad 100644
--- a/tensorflow/python/keras/engine/training_utils_v1.py
+++ b/tensorflow/python/keras/engine/training_utils_v1.py
@@ -186,9 +186,6 @@
     else:
       self.results = np.concatenate(self.results, axis=0)
 
-    if isinstance(self.results, ops.EagerTensor):
-      self.results = self.results._numpy()  # pylint: disable=protected-access
-
 
 _COPY_THREADS = 4
 _COPY_POOL = None
@@ -250,8 +247,6 @@
     # initialization is effectively instantaneous.
     shape = (self.num_samples,) + batch_element.shape[1:]
     dtype = batch_element.dtype
-    if isinstance(batch_element, ops.EagerTensor):
-      dtype = dtype.as_numpy_dtype
 
     self.results = np.empty(shape=shape, dtype=dtype)
 
@@ -328,7 +323,7 @@
         # or a composite tensor's Value object. In either case, we can't
         # allocate an array to hold the object - we'll handle it later.
         self.results.append(ConcatAggregator(self.batch_size))
-      elif isinstance(batch_element, (np.ndarray, ops.EagerTensor)):
+      elif isinstance(batch_element, np.ndarray):
         self.results.append(
             (ConcatAggregator(self.batch_size) if self.use_steps else
              SliceAggregator(self.num_samples, self.batch_size)))
@@ -866,12 +861,13 @@
                        'you should pass a 2D sample_weight array.')
   else:
     if sample_weight is not None and len(sample_weight.shape) != 1:
-      raise ValueError('Found a sample_weight array with shape {}. In order to '
-                       'use timestep-wise sample weights, you should specify '
-                       'sample_weight_mode="temporal" in compile(); found "{}" '
-                       'instead. If you just mean to use sample-wise weights, '
-                       'make sure your sample_weight array is 1D.'
-                       .format(sample_weight.shape, sample_weight_mode))
+      raise ValueError(
+          'Found a sample_weight array with shape {}. In order to '
+          'use timestep-wise sample weights, you should specify '
+          'sample_weight_mode="temporal" in compile(); found "{}" '
+          'instead. If you just mean to use sample-wise weights, '
+          'make sure your sample_weight array is 1D.'.format(
+              sample_weight.shape, sample_weight_mode))
 
   if sample_weight is not None:
     if len(sample_weight.shape) > len(y.shape):
@@ -1628,7 +1624,7 @@
         if v.ndim == 1:
           v = np.expand_dims(v, 1)
 
-      if isinstance(v, (np.ndarray, ops.EagerTensor)):
+      if isinstance(v, np.ndarray):
         # We fix the placeholder shape except the batch size.
         # This is suboptimal, but it is the best we can do with the info
         # we have. The user should call `model._set_inputs(placeholders)`
@@ -1675,25 +1671,6 @@
   return ['output_%d' % (i + 1) for i in range(len(outputs_list))]
 
 
-def convert_eager_tensors_to_numpy(structure):
-  """Convert every EagerTensor in `structure` to NumPy.
-
-  Arguments:
-    structure: An arbitrary structure of elements to be converted to NumPy
-      arrays.
-
-  Returns:
-    An identical structure with EagerTensors converted to NumPy arrays.
-  """
-
-  def _convert(element):
-    if isinstance(element, ops.EagerTensor):
-      return element.numpy()
-    return element
-
-  return nest.map_structure(_convert, structure)
-
-
 def should_run_validation(validation_freq, epoch):
   """Checks if validation should be run this epoch.
 
diff --git a/tensorflow/python/keras/engine/training_v1.py b/tensorflow/python/keras/engine/training_v1.py
index 69a60e0..54969bb 100644
--- a/tensorflow/python/keras/engine/training_v1.py
+++ b/tensorflow/python/keras/engine/training_v1.py
@@ -3141,7 +3141,7 @@
 
 
 def _is_symbolic_tensor(x):
-  return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
+  return tensor_util.is_tensor(x)
 
 
 def _convert_scipy_sparse_tensor(value, expected_input):