Micro-optimizations to speed up pure eager calls

Specifically:
- Avoid function-call overhead in the hot path (see the first sketch below)
- Add Python __slots__ to frequently used classes (see the second sketch below)

benchmark_tf_identity improved from 2.67 to 2.47
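
A rough sketch of the first point, using hypothetical toy classes rather than
the real eager Context and thread-local objects: the generated op code now
reads _ctx._thread_local_data into a local variable (tld) once, so later uses
are cheap local-variable loads, and executing_eagerly() reuses the
context_safe() result instead of calling context() a second time.

    import timeit

    class _ThreadLocalData(object):
      """Toy stand-in, not the real eager thread-local state."""

      def __init__(self):
        self.is_eager = True
        self.device_name = ""
        self.op_callbacks = ()

    class _Ctx(object):
      """Toy stand-in, not the real eager Context."""

      def __init__(self):
        self._thread_local_data = _ThreadLocalData()

    _ctx = _Ctx()

    def chained_lookups():
      # Old shape of the hot path: each use re-resolves the attribute chain.
      if _ctx._thread_local_data.is_eager:
        return (_ctx._thread_local_data.device_name,
                _ctx._thread_local_data.op_callbacks)

    def cached_local():
      # New shape: resolve the chain once, then reuse the local variable.
      tld = _ctx._thread_local_data
      if tld.is_eager:
        return (tld.device_name, tld.op_callbacks)

    print(timeit.timeit(chained_lookups))  # slower: repeated attribute chains
    print(timeit.timeit(cached_local))     # faster: fewer lookups per call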

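The second point covers the __slots__ additions to DType and TensorShape. A
minimal sketch of the effect, on a hypothetical stand-in class rather than the
real tf.DType: declaring __slots__ removes the per-instance __dict__, which
trims memory and slightly speeds attribute access for objects that are created
and compared on every op call. A subclass that does not declare __slots__ of
its own brings the instance dict back.

    class ToyDType(object):
      """Hypothetical stand-in for DType, not the real class."""
      __slots__ = ["_type_enum"]

      def __init__(self, type_enum):
        self._type_enum = type_enum

    d = ToyDType(7)
    print(hasattr(d, "__dict__"))  # False: no per-instance dict is allocated.
    d._type_enum = 9               # Slot attributes still read/write normally.
    # d.extra = 3 would raise AttributeError: 'extra' is not a declared slot.
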
PiperOrigin-RevId: 272668606
diff --git a/tensorflow/python/eager/context.py b/tensorflow/python/eager/context.py
index 09095d6..60188a6 100644
--- a/tensorflow/python/eager/context.py
+++ b/tensorflow/python/eager/context.py
@@ -1653,10 +1653,11 @@
   Returns:
     `True` if the current thread has eager execution enabled.
   """
-  if context_safe() is None:
+  ctx = context_safe()
+  if ctx is None:
     return default_execution_mode == EAGER_MODE
 
-  return context().executing_eagerly()
+  return ctx.executing_eagerly()
 
 
 @tf_export(v1=["executing_eagerly"])
diff --git a/tensorflow/python/framework/dtypes.py b/tensorflow/python/framework/dtypes.py
index 1a8b5c3..423cd14 100644
--- a/tensorflow/python/framework/dtypes.py
+++ b/tensorflow/python/framework/dtypes.py
@@ -60,6 +60,7 @@
   The `tf.as_dtype()` function converts numpy types and string type
   names to a `DType` object.
   """
+  __slots__ = ["_type_enum"]
 
   def __init__(self, type_enum):
     """Creates a new `DataType`.
@@ -261,11 +262,14 @@
     """Returns True iff this DType refers to the same type as `other`."""
     if other is None:
       return False
-    try:
-      dtype = as_dtype(other).as_datatype_enum
-      return self._type_enum == dtype  # pylint: disable=protected-access
-    except TypeError:
-      return False
+
+    if type(other) != DType:  # pylint: disable=unidiomatic-typecheck
+      try:
+        other = as_dtype(other)
+      except TypeError:
+        return False
+
+    return self._type_enum == other._type_enum  # pylint: disable=protected-access
 
   def __ne__(self, other):
     """Returns True iff self != other."""
diff --git a/tensorflow/python/framework/python_op_gen.cc b/tensorflow/python/framework/python_op_gen.cc
index 6eec2eb..04a36ac 100644
--- a/tensorflow/python/framework/python_op_gen.cc
+++ b/tensorflow/python/framework/python_op_gen.cc
@@ -708,8 +708,8 @@
 
   strings::StrAppend(&result_,
                      "  _ctx = _context._context or _context.context()\n"
-                     "  if _ctx._thread_local_data.is_eager:",
-                     "\n");
+                     "  tld = _ctx._thread_local_data\n",
+                     "  if tld.is_eager:", "\n");
   if (eager_not_allowed_error.empty()) {
     AddEagerFastPathExecute();
   } else {
@@ -762,9 +762,9 @@
 }
 
 void GenEagerPythonOp::AddEagerFastPathExecute() {
-  string fastpath_execute_params = strings::StrCat(
-      "_ctx._context_handle, _ctx._thread_local_data.device_name, \"",
-      op_def_.name(), "\", ", "name, _ctx.op_callbacks");
+  string fastpath_execute_params =
+      strings::StrCat("_ctx._context_handle, tld.device_name, \"",
+                      op_def_.name(), "\", ", "name, tld.op_callbacks");
   string fallback_params;
 
   for (int i = 0; i < api_def_.in_arg_size(); i++) {
diff --git a/tensorflow/python/framework/tensor_shape.py b/tensorflow/python/framework/tensor_shape.py
index 4a26b72..4c2db59 100644
--- a/tensorflow/python/framework/tensor_shape.py
+++ b/tensorflow/python/framework/tensor_shape.py
@@ -739,6 +739,7 @@
   for details of shape functions and how to register them. Alternatively,
   the shape may be set explicitly using `tf.Tensor.set_shape`.
   """
+  __slots__ = ["_dims"]
 
   def __init__(self, dims):
     """Creates a new TensorShape with the given dimensions.