Rollback of 268229dfedd5ee95ddb8028625a3b27ae5361a50

PiperOrigin-RevId: 276513946
Change-Id: Idb7f04f12f462bc5f17e5928de686ed251138553
diff --git a/tensorflow/python/keras/distribute/keras_utils_test.py b/tensorflow/python/keras/distribute/keras_utils_test.py
index 63ad04b..c476912 100644
--- a/tensorflow/python/keras/distribute/keras_utils_test.py
+++ b/tensorflow/python/keras/distribute/keras_utils_test.py
@@ -259,7 +259,8 @@
 
       dataset = keras_test_lib.get_dataset(distribution)
       exception_error_message = (
-          '`validation_split` argument is not supported when ')
+          '`validation_split` argument is not supported when input `x`'
+          ' is a dataset or a dataset iterator.+')
 
       # Test with validation split
       with self.assertRaisesRegexp(ValueError, exception_error_message):
diff --git a/tensorflow/python/keras/engine/data_adapter.py b/tensorflow/python/keras/engine/data_adapter.py
index bf1be80..46d8944 100644
--- a/tensorflow/python/keras/engine/data_adapter.py
+++ b/tensorflow/python/keras/engine/data_adapter.py
@@ -415,9 +415,6 @@
   as Numpy, but it ignores any case where all the inputs are Tensors or Numpy
   arrays (because that case is handled by the base TensorLikeDataAdapter).
 
-  It ignores scipy sparse matrices and Composite Tensors because those are
-  handled by the CompositeTensorDataAdapter.
-
   It also does not handle lists/tuples of scalars, because those are handled
   by the ListsOfScalarsDataAdapter.
   """
@@ -437,8 +434,7 @@
           hasattr(v, "__len__")
       )
 
-    if (not TensorLikeDataAdapter.can_handle(x, y) and
-        not CompositeTensorDataAdapter.can_handle(x, y)):
+    if not TensorLikeDataAdapter.can_handle(x, y):
       return all(_is_array_like(v) for v in flat_inputs)
     else:
       return False
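
Note on the hunk above: the rollback trims this adapter's can_handle so it only defers to TensorLikeDataAdapter (the CompositeTensorDataAdapter check is removed along with its docstring mention), and otherwise accepts inputs whose elements all pass the local _is_array_like duck-type test. A rough, hypothetical sketch of such a test (helper name _looks_array_like is made up for illustration and simplified from the real check):

    import numpy as np

    def _looks_array_like(v):
        # Array-like here means: sliceable, reports a shape, and has a length.
        return (hasattr(v, "__getitem__") and
                hasattr(v, "shape") and
                hasattr(v, "__len__"))

    print(_looks_array_like(np.zeros((4, 3))))  # True: NumPy arrays qualify
    print(_looks_array_like([1, 2, 3]))         # False: lists of scalars are left
                                                #        to ListsOfScalarsDataAdapter
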
diff --git a/tensorflow/python/keras/engine/training.py b/tensorflow/python/keras/engine/training.py
index 82549db..307a04d 100644
--- a/tensorflow/python/keras/engine/training.py
+++ b/tensorflow/python/keras/engine/training.py
@@ -155,8 +155,7 @@
     self._compile_distribution = False
 
     self._run_eagerly = None
-    self._experimental_run_tf_function = (
-        ops.executing_eagerly_outside_functions())
+    self._experimental_run_tf_function = False
 
   def get_weights(self):
     """Retrieves the weights of the model.
@@ -309,18 +308,11 @@
             'Session arguments: %s' % (self._function_kwargs,))
 
     self._set_optimizer(optimizer)
-    is_any_keras_optimizer_v1 = any(
-        (isinstance(opt, optimizers.Optimizer)
-         and not isinstance(opt, optimizers.TFOptimizer)
-        ) for opt in nest.flatten(self.optimizer))
-
-    if is_any_keras_optimizer_v1 and ops.executing_eagerly_outside_functions():
-      raise ValueError('`tf.compat.v1.keras` Optimizer (', optimizer, ') is '
-                       'not supported when eager execution is enabled. Use a '
-                       '`tf.keras` Optimizer instead, or disable eager '
-                       'execution.')
+    is_any_optimizer_v1 = any(isinstance(opt, optimizers.Optimizer)
+                              for opt in nest.flatten(self.optimizer))
 
     if ((target_tensors is not None)
+        or is_any_optimizer_v1
         or not ops.executing_eagerly_outside_functions()):
       # Fallback out of things that aren't supported with v2 loops
       self._experimental_run_tf_function = False
@@ -3315,11 +3307,6 @@
   """
   if issparse is not None and issparse(value):
     if ops.is_dense_tensor_like(expected_input):
-      if ops.executing_eagerly_outside_functions():
-        # In TF2 we do not silently densify sparse matrices.
-        raise ValueError('A SciPy sparse matrix was passed to a model '
-                         'that expects dense inputs. Please densify your '
-                         'inputs first, such as by calling `x.toarray().')
       return value.toarray()
     else:
       sparse_coo = value.tocoo()
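
Note on the hunk above: with the rollback, a SciPy sparse matrix fed to a dense-tensor input is silently densified via value.toarray() instead of raising, and the sparse-input path continues through value.tocoo(). A minimal sketch of that COO-to-SparseTensor conversion, assuming the public tf.SparseTensor API (the exact helper in training.py may differ in details):

    import numpy as np
    import scipy.sparse
    import tensorflow as tf

    def scipy_sparse_to_sparse_tensor(value):
        # Illustrative only: build a tf.SparseTensor from a SciPy matrix's COO form.
        sparse_coo = value.tocoo()
        indices = np.stack([sparse_coo.row, sparse_coo.col], axis=1).astype(np.int64)
        return tf.SparseTensor(indices, sparse_coo.data, sparse_coo.shape)

    x = scipy.sparse.random(6, 3, density=0.25, format='csr')
    print(scipy_sparse_to_sparse_tensor(x))
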
diff --git a/tensorflow/python/keras/engine/training_eager.py b/tensorflow/python/keras/engine/training_eager.py
index 27acdf2..be1b2e8 100644
--- a/tensorflow/python/keras/engine/training_eager.py
+++ b/tensorflow/python/keras/engine/training_eager.py
@@ -157,7 +157,6 @@
             weights = mask
           else:
             # Update dimensions of weights to match with mask if possible.
-            weights = math_ops.cast(weights, outs[i].dtype)
             mask, _, weights = (
                 tf_losses_utils.squeeze_or_expand_dimensions(
                     mask, sample_weight=weights))
diff --git a/tensorflow/python/keras/engine/training_test.py b/tensorflow/python/keras/engine/training_test.py
index 5a9a857..05a033a 100644
--- a/tensorflow/python/keras/engine/training_test.py
+++ b/tensorflow/python/keras/engine/training_test.py
@@ -644,31 +644,10 @@
     input_a_np = np.random.random((10, 3))
     input_b_np = np.random.random((10, 4))
 
-    # Test execution on inputs that are lists of scalars.
-    # TF2 and TF1 have slightly different semantics:
-    if (testing_utils.should_run_tf_function()
-        or testing_utils.should_run_eagerly()):
-      # In TF2 to avoid any ambiguity when there are nested lists
-      # the entire input gets converted to a
-      # single numpy array (& it only works in the case of a single io model)
-      model.fit(np.ndarray.tolist(input_a_np),
-                np.ndarray.tolist(input_b_np),
-                epochs=2,
-                batch_size=5,
-                verbose=2)
-    else:
-      # In TF1 there was logic to try disambiguating between the individual
-      # inputs when lists are nested. This allowed multi-io functional models
-      # to support lists of scalars as input, but it caused ambiguity issues
-      # for subclass models & made it trickier to pass multi-dimensional inputs
-      # as lists of scalars to single io models. This was an excessive amount
-      # of complexity for what boiled down to a convenience method we were
-      # mainly just using for writing tests.
-      model.fit([np.ndarray.tolist(input_a_np)],
-                [np.ndarray.tolist(input_b_np)],
-                epochs=2,
-                batch_size=5,
-                verbose=2)
+    model.fit([np.ndarray.tolist(input_a_np)], [np.ndarray.tolist(input_b_np)],
+              epochs=2,
+              batch_size=5,
+              verbose=2)
 
   @keras_parameterized.run_all_keras_modes
   def test_evaluate_predict_on_arrays(self):
@@ -858,43 +837,12 @@
     model = MyModel()
     self.assertIn('{"a": {}}', model.to_json())
 
-  @keras_parameterized.run_all_keras_modes(always_skip_v1=True)
+  @keras_parameterized.run_all_keras_modes
   def test_training_on_sparse_data_with_dense_placeholders(self):
-    if scipy_sparse is None:
-      return
+    # TODO(kaftan) Test seems to not work, file ticket
+    if testing_utils.should_run_eagerly() and context.executing_eagerly():
+      self.skipTest('Skipping running model eagerly.')
 
-    test_inputs = [
-        scipy_sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)
-    ]
-    test_outputs = [
-        scipy_sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)
-    ]
-    in1 = keras.layers.Input(shape=(3,))
-    in2 = keras.layers.Input(shape=(3,))
-    out1 = keras.layers.Dropout(0.5, name='dropout')(in1)
-    out2 = keras.layers.Dense(4, name='dense_1')(in2)
-    model = keras.Model([in1, in2], [out1, out2])
-    model.experimental_run_tf_function = testing_utils.should_run_tf_function()
-
-    with self.assertRaisesRegexp(ValueError, 'Please densify'):
-      model.predict(test_inputs, batch_size=2)
-    optimizer = 'rmsprop'
-    model.compile(
-        optimizer,
-        'mse',
-        metrics=['mae', metrics_module.CategoricalAccuracy()],
-        run_eagerly=testing_utils.should_run_eagerly(),
-        experimental_run_tf_function=testing_utils.should_run_tf_function())
-
-    with self.assertRaisesRegexp(ValueError, 'Please densify'):
-      model.fit(test_inputs, test_outputs,
-                epochs=1, batch_size=2)
-
-    with self.assertRaisesRegexp(ValueError, 'Please densify'):
-      model.evaluate(test_inputs, test_outputs, batch_size=2)
-
-  @tf_test_util.run_deprecated_v1
-  def test_training_on_sparse_data_with_dense_placeholders_v1(self):
     if scipy_sparse is None:
       return
 
@@ -910,17 +858,23 @@
     out2 = keras.layers.Dense(4, name='dense_1')(in2)
     model = keras.Model([in1, in2], [out1, out2])
     model.predict(test_inputs, batch_size=2)
-    optimizer = 'rmsprop'
+    optimizer = RMSPropOptimizer(learning_rate=0.001)
     model.compile(
         optimizer,
         'mse',
-        metrics=['mae', metrics_module.CategoricalAccuracy()])
+        metrics=['mae', metrics_module.CategoricalAccuracy()],
+        run_eagerly=testing_utils.should_run_eagerly(),
+        experimental_run_tf_function=testing_utils.should_run_tf_function())
     model.fit(test_inputs, test_outputs,
               epochs=1, batch_size=2, validation_split=0.5)
     model.evaluate(test_inputs, test_outputs, batch_size=2)
 
   @keras_parameterized.run_all_keras_modes
   def test_compile_with_sparse_placeholders(self):
+    # TODO(kaftan) Test seems to not work, file ticket
+    if testing_utils.should_run_eagerly() and context.executing_eagerly():
+      self.skipTest('Skipping running model eagerly.')
+
     input_layer = keras.layers.Input(shape=(10,), sparse=True)
     weights = variables_lib.Variable(
         np.ones((10, 1)).astype(np.float32), name='weights')
@@ -929,7 +883,7 @@
     model = keras.Model([input_layer], output_layer)
     model.compile(
         loss='binary_crossentropy',
-        optimizer='adam',
+        optimizer=keras.optimizers.Adam(lr=0.0001),
         metrics=['accuracy'],
         run_eagerly=testing_utils.should_run_eagerly(),
         experimental_run_tf_function=testing_utils.should_run_tf_function())
@@ -2478,8 +2432,12 @@
 
 class TestTrainingWithDataTensors(keras_parameterized.TestCase):
 
-  @tf_test_util.run_deprecated_v1
+  @keras_parameterized.run_all_keras_modes
   def test_training_and_eval_methods_on_symbolic_tensors_single_io(self):
+    # TODO(kaftan) Test seems to not work, file ticket
+    if context.executing_eagerly():
+      self.skipTest('Skipping eager execution.')
+
     x = keras.layers.Input(shape=(3,), name='input')
     y = keras.layers.Dense(4, name='dense')(x)
     model = keras.Model(x, y)
@@ -2489,7 +2447,9 @@
     model.compile(
         optimizer,
         loss,
-        metrics=['mae', metrics_module.CategoricalAccuracy()])
+        metrics=['mae', metrics_module.CategoricalAccuracy()],
+        run_eagerly=testing_utils.should_run_eagerly(),
+        experimental_run_tf_function=testing_utils.should_run_tf_function())
 
     inputs = keras.backend.zeros(shape=(10, 3))
     targets = keras.backend.zeros(shape=(10, 4))
@@ -2518,8 +2478,12 @@
               epochs=1, steps_per_epoch=2, verbose=0,
               validation_data=(inputs, targets), validation_steps=2)
 
-  @tf_test_util.run_deprecated_v1
+  @keras_parameterized.run_all_keras_modes
   def test_training_and_eval_methods_on_symbolic_tensors_multi_io(self):
+    # TODO(kaftan) Test seems to not work, file ticket
+    if context.executing_eagerly():
+      self.skipTest('Skipping eager execution.')
+
     a = keras.layers.Input(shape=(3,), name='input_a')
     b = keras.layers.Input(shape=(3,), name='input_b')
 
@@ -2537,7 +2501,9 @@
         optimizer,
         loss,
         metrics=['mae', metrics_module.CategoricalAccuracy()],
-        loss_weights=loss_weights)
+        loss_weights=loss_weights,
+        run_eagerly=testing_utils.should_run_eagerly(),
+        experimental_run_tf_function=testing_utils.should_run_tf_function())
 
     input_a_tf = keras.backend.zeros(shape=(10, 3))
     input_b_tf = keras.backend.zeros(shape=(10, 3))
@@ -2751,7 +2717,6 @@
       out = model.predict(None, steps=3)
       self.assertEqual(out.shape, (10 * 3, 4))
 
-  @keras_parameterized.run_all_keras_modes
   def test_model_with_partial_loss(self):
     with self.cached_session():
       a = keras.Input(shape=(3,), name='input_a')
@@ -2919,7 +2884,6 @@
       self.assertEqual(out[0].shape, (10 * 3, 4))
       self.assertEqual(out[1].shape, (10 * 3, 4))
 
-  @keras_parameterized.run_all_keras_modes
   def test_target_tensors(self):
     with self.cached_session():
       # single-output, as list
@@ -3239,29 +3203,32 @@
 
   @keras_parameterized.run_all_keras_modes
   def test_metrics_masking(self):
-    np.random.seed(1337)
-    model = keras.models.Sequential()
-    model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
-    model.add(
-        keras.layers.TimeDistributed(
-            keras.layers.Dense(1, kernel_initializer='ones')))
-    model.compile(
-        RMSPropOptimizer(learning_rate=0.001),
-        loss='mse',
-        weighted_metrics=['accuracy'],
-        run_eagerly=testing_utils.should_run_eagerly(),
-        experimental_run_tf_function=testing_utils.should_run_tf_function())
+    if testing_utils.should_run_eagerly():
+      self.skipTest('b/120495761')
+    with self.cached_session():
+      np.random.seed(1337)
+      model = keras.models.Sequential()
+      model.add(keras.layers.Masking(mask_value=0, input_shape=(2, 1)))
+      model.add(
+          keras.layers.TimeDistributed(
+              keras.layers.Dense(1, kernel_initializer='ones')))
+      model.compile(
+          RMSPropOptimizer(learning_rate=0.001),
+          loss='mse',
+          weighted_metrics=['accuracy'],
+          run_eagerly=testing_utils.should_run_eagerly(),
+          experimental_run_tf_function=testing_utils.should_run_tf_function())
 
-    # verify that masking is applied.
-    x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
-    y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
-    scores = model.train_on_batch(x, y)
-    self.assertArrayNear(scores, [0.25, 0.75], 0.1)
+      # verify that masking is applied.
+      x = np.array([[[1], [1]], [[1], [1]], [[0], [0]]])
+      y = np.array([[[1], [1]], [[0], [1]], [[1], [1]]])
+      scores = model.train_on_batch(x, y)
+      self.assertArrayNear(scores, [0.25, 0.75], 0.1)
 
-    # verify that masking is combined with sample weights.
-    w = np.array([3, 2, 4])
-    scores = model.train_on_batch(x, y, sample_weight=w)
-    self.assertArrayNear(scores, [0.3328, 0.8], 0.001)
+      # verify that masking is combined with sample weights.
+      w = np.array([3, 2, 4])
+      scores = model.train_on_batch(x, y, sample_weight=w)
+      self.assertArrayNear(scores, [0.3328, 0.8], 0.001)
 
   @keras_parameterized.run_all_keras_modes
   def test_add_metric_with_tensor_on_model(self):
diff --git a/tensorflow/python/keras/engine/training_utils.py b/tensorflow/python/keras/engine/training_utils.py
index 3f660d8..4c5f9d4 100644
--- a/tensorflow/python/keras/engine/training_utils.py
+++ b/tensorflow/python/keras/engine/training_utils.py
@@ -1063,7 +1063,6 @@
       weights = mask
     else:
       # Update dimensions of weights to match with mask.
-      weights = math_ops.cast(weights, dtype=y_pred.dtype)
       mask, _, weights = tf_losses_utils.squeeze_or_expand_dimensions(
           mask, sample_weight=weights)
       weights *= mask
diff --git a/tensorflow/python/keras/engine/training_v2_utils.py b/tensorflow/python/keras/engine/training_v2_utils.py
index bab2774..9454402 100644
--- a/tensorflow/python/keras/engine/training_v2_utils.py
+++ b/tensorflow/python/keras/engine/training_v2_utils.py
@@ -360,7 +360,7 @@
 
   # If `model._distribution_strategy` is True, then we are in a replica context
   # at this point.
-  inputs = training_utils.cast_to_model_input_dtypes(inputs, model)
+  inputs = training_utils.cast_if_floating_dtype(inputs)
   if isinstance(inputs, collections.Sequence):
     # Unwrap lists with only one input, as we do when training on batch
     if len(inputs) == 1:
diff --git a/tensorflow/python/keras/layers/gru_test.py b/tensorflow/python/keras/layers/gru_test.py
index 76890c4..be486bc 100644
--- a/tensorflow/python/keras/layers/gru_test.py
+++ b/tensorflow/python/keras/layers/gru_test.py
@@ -44,7 +44,6 @@
                 'return_sequences': True},
         input_shape=(num_samples, timesteps, embedding_dim))
 
-  @tf_test_util.run_v2_only
   def test_float64_GRU(self):
     num_samples = 2
     timesteps = 3
diff --git a/tensorflow/python/keras/layers/gru_v2_test.py b/tensorflow/python/keras/layers/gru_v2_test.py
index 1bccffa..0135091 100644
--- a/tensorflow/python/keras/layers/gru_v2_test.py
+++ b/tensorflow/python/keras/layers/gru_v2_test.py
@@ -343,7 +343,6 @@
                 'return_sequences': True},
         input_shape=(num_samples, timesteps, embedding_dim))
 
-  @test_util.run_v2_only
   def test_float64_GRU(self):
     num_samples = 2
     timesteps = 3
diff --git a/tensorflow/python/keras/layers/lstm_test.py b/tensorflow/python/keras/layers/lstm_test.py
index a4dcf39..78e515d 100644
--- a/tensorflow/python/keras/layers/lstm_test.py
+++ b/tensorflow/python/keras/layers/lstm_test.py
@@ -23,7 +23,6 @@
 
 from tensorflow.python import keras
 from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util as tf_test_util
 from tensorflow.python.keras import keras_parameterized
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.platform import test
@@ -45,7 +44,6 @@
                 'return_sequences': True},
         input_shape=(num_samples, timesteps, embedding_dim))
 
-  @tf_test_util.run_v2_only
   def test_float64_LSTM(self):
     num_samples = 2
     timesteps = 3
diff --git a/tensorflow/python/keras/layers/lstm_v2_test.py b/tensorflow/python/keras/layers/lstm_v2_test.py
index d0759f9..20f21c8 100644
--- a/tensorflow/python/keras/layers/lstm_v2_test.py
+++ b/tensorflow/python/keras/layers/lstm_v2_test.py
@@ -570,7 +570,6 @@
         },
         input_shape=(num_samples, timesteps, embedding_dim))
 
-  @test_util.run_v2_only
   def test_float64_LSTM(self):
     num_samples = 2
     timesteps = 3
diff --git a/tensorflow/python/keras/layers/recurrent_test.py b/tensorflow/python/keras/layers/recurrent_test.py
index e48c3a8..29ad229 100644
--- a/tensorflow/python/keras/layers/recurrent_test.py
+++ b/tensorflow/python/keras/layers/recurrent_test.py
@@ -1477,7 +1477,7 @@
               input_layer, initial_state=initial_states)
       model = keras.Model(input_layer, rnn_output)
       model.compile(
-          optimizer='rmsprop', loss='mse',
+          optimizer=keras.optimizers.RMSprop(), loss='mse',
           run_eagerly=testing_utils.should_run_eagerly(),
           experimental_run_tf_function=testing_utils.should_run_tf_function())
       return model
diff --git a/tensorflow/python/keras/layers/simplernn_test.py b/tensorflow/python/keras/layers/simplernn_test.py
index bbd8c8d..731e312 100644
--- a/tensorflow/python/keras/layers/simplernn_test.py
+++ b/tensorflow/python/keras/layers/simplernn_test.py
@@ -22,7 +22,6 @@
 
 from tensorflow.python import keras
 from tensorflow.python.eager import context
-from tensorflow.python.framework import test_util as tf_test_util
 from tensorflow.python.keras import keras_parameterized
 from tensorflow.python.keras import testing_utils
 from tensorflow.python.platform import test
@@ -43,7 +42,6 @@
                 'return_sequences': True},
         input_shape=(num_samples, timesteps, embedding_dim))
 
-  @tf_test_util.run_v2_only
   def test_float64_SimpleRNN(self):
     num_samples = 2
     timesteps = 3
diff --git a/tensorflow/python/keras/layers/wrappers_test.py b/tensorflow/python/keras/layers/wrappers_test.py
index c96ce16..cedccd6 100644
--- a/tensorflow/python/keras/layers/wrappers_test.py
+++ b/tensorflow/python/keras/layers/wrappers_test.py
@@ -376,8 +376,6 @@
           layer=[keras.layers.LSTM,
                  keras.layers.Dense]))
   def test_TimeDistributed_with_ragged_input(self, layer):
-    if testing_utils.should_run_tf_function():
-      self.skipTest('b/143103634')
     np.random.seed(100)
     layer = layer(4)
     ragged_data = ragged_factory_ops.constant(
@@ -389,9 +387,6 @@
     x_ragged = keras.Input(shape=(None, 2, 1), dtype='float32', ragged=True)
     y_ragged = keras.layers.TimeDistributed(layer)(x_ragged)
     model_1 = keras.models.Model(x_ragged, y_ragged)
-    model_1._experimental_run_tf_function = (
-        testing_utils.should_run_tf_function())
-    model_1._run_eagerly = testing_utils.should_run_eagerly()
     output_ragged = model_1.predict(ragged_data, steps=1)
 
     x_dense = keras.Input(shape=(None, 2, 1), dtype='float32')
@@ -399,9 +394,6 @@
     y_dense = keras.layers.TimeDistributed(layer)(masking)
     model_2 = keras.models.Model(x_dense, y_dense)
     dense_data = ragged_data.to_tensor()
-    model_2._experimental_run_tf_function = (
-        testing_utils.should_run_tf_function())
-    model_2._run_eagerly = testing_utils.should_run_eagerly()
     output_dense = model_2.predict(dense_data, steps=1)
 
     output_ragged = ragged_tensor.convert_to_tensor_or_ragged_tensor(
diff --git a/tensorflow/python/keras/saving/hdf5_format_test.py b/tensorflow/python/keras/saving/hdf5_format_test.py
index 9655741..e8da051a 100644
--- a/tensorflow/python/keras/saving/hdf5_format_test.py
+++ b/tensorflow/python/keras/saving/hdf5_format_test.py
@@ -315,7 +315,7 @@
                                        name='d1'))
       ref_model.add(keras.layers.Dense(num_classes, name='d2'))
       ref_model.compile(loss=keras.losses.MSE,
-                        optimizer='rmsprop',
+                        optimizer=keras.optimizers.RMSprop(lr=0.0001),
                         metrics=[keras.metrics.categorical_accuracy])
 
       f_ref_model = h5py.File(h5_path, 'w')
@@ -327,7 +327,7 @@
                                    input_dim=input_dim, name='d1'))
       model.add(keras.layers.Dense(num_classes, name='d2'))
       model.compile(loss=keras.losses.MSE,
-                    optimizer='rmsprop',
+                    optimizer=keras.optimizers.RMSprop(lr=0.0001),
                     metrics=[keras.metrics.categorical_accuracy])
     with self.assertRaisesRegexp(ValueError,
                                  r'Layer #0 \(named \"d1\"\) expects 1 '
diff --git a/tensorflow/python/keras/saving/save_test.py b/tensorflow/python/keras/saving/save_test.py
index 9f9edf5..58dd58b 100644
--- a/tensorflow/python/keras/saving/save_test.py
+++ b/tensorflow/python/keras/saving/save_test.py
@@ -103,7 +103,7 @@
 
     model.compile(
         loss=keras.losses.MSE,
-        optimizer='rmsprop',
+        optimizer=keras.optimizers.RMSprop(lr=0.0001),
         metrics=[keras.metrics.categorical_accuracy])
 
     config = model.to_json()
@@ -145,7 +145,7 @@
 
     model.compile(
         loss=keras.losses.MSE,
-        optimizer='rmsprop',
+        optimizer=keras.optimizers.RMSprop(lr=0.0001),
         metrics=[keras.metrics.categorical_accuracy])
 
     config = model.to_json()