Now that the migration model mapping guides have been released, update a number of variable_scope reuse-based api docstrings' migration docstring blocks.

PiperOrigin-RevId: 401623464
Change-Id: I5925c0917c128149fce3754387731014aaf1d63f
diff --git a/tensorflow/python/ops/template.py b/tensorflow/python/ops/template.py
index 4f948cd..4c35494 100644
--- a/tensorflow/python/ops/template.py
+++ b/tensorflow/python/ops/template.py
@@ -41,6 +41,25 @@
                   **kwargs):
   """Given an arbitrary function, wrap it so that it does variable sharing.
 
+  @compatibility(TF2)
+`tf.compat.v1.make_template` is a legacy API that is only compatible
+with eager execution and `tf.function` if you combine it with
+  `tf.compat.v1.keras.utils.track_tf1_style_variables`. See the model mapping
+  migration guide section on `make_template` for more info:
+
+  https://www.tensorflow.org/guide/migrate/model_mapping#using_tfcompatv1make_template_in_the_decorated_method
+
+  Even if you use legacy apis for `variable_scope`-based variable reuse,
+  we recommend using
+  `tf.compat.v1.keras.utils.track_tf1_style_variables` directly and not using
+  `tf.compat.v1.make_template`, as it interoperates with eager execution in a
+  simpler and more predictable fashion than `make_template`.
+
+  The TF2 API approach would be tracking your variables using
+  `tf.Module`s or Keras layers and models rather than relying on
+  `make_template`.
+  @end_compatibility
+
   This wraps `func_` in a Template and partially evaluates it. Templates are
   functions that create variables the first time they are called and reuse them
   thereafter. In order for `func_` to be compatible with a `Template` it must
diff --git a/tensorflow/python/ops/variable_scope.py b/tensorflow/python/ops/variable_scope.py
index b35df4b..021e9e3 100644
--- a/tensorflow/python/ops/variable_scope.py
+++ b/tensorflow/python/ops/variable_scope.py
@@ -203,7 +203,22 @@
 AUTO_REUSE = _ReuseMode.AUTO_REUSE
 tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
 AUTO_REUSE.__doc__ = """
-When passed in as the value for the `reuse` flag, AUTO_REUSE indicates that
+@compatibility(TF2)
+`tf.compat.v1.AUTO_REUSE` is a legacy API that is a no-op when TF2 behaviors
+are enabled.
+
+If you rely on `get_variable` and auto-reuse, see the
+[model mapping guide](https://www.tensorflow.org/guide/migrate/model_mapping)
+for more info on how to migrate your code.
+
+Note: when you use the `tf.compat.v1.keras.utils.track_tf1_style_variables`
+API as described in the above guide, `get_variable` will always behave as if
+`v1.AUTO_REUSE` is set. Without the decorator, reuse will be ignored and new
+variables will always be created, regardless of whether they have already
+been created.
+@end_compatibility
+
+When passed in as the value for the `reuse` flag, `AUTO_REUSE` indicates that
 get_variable() should create the requested variable if it doesn't exist or, if
 it does exist, simply return it.
 """
@@ -1451,7 +1466,34 @@
 
 @tf_export(v1=["get_variable_scope"])
 def get_variable_scope():
-  """Returns the current variable scope."""
+  """Returns the current variable scope.
+
+  @compatibility(TF2)
+  Although it is a legacy `compat.v1` api,
+  `tf.compat.v1.get_variable_scope` is compatible with eager
+  execution and `tf.function`.
+
+  However, to maintain variable-scope based variable reuse
+  you will need to combine it with
+  `tf.compat.v1.keras.utils.track_tf1_style_variables`. (Though
+  it will behave as if reuse is always set to `tf.compat.v1.AUTO_REUSE`.)
+
+  See the
+  [migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
+  for more info.
+
+  The TF2 equivalent, if you are just trying to track
+  variable name prefixes and not control `get_variable`-based variable reuse,
+  would be to use `tf.name_scope` and capture the output of opening the
+  scope (which represents the current name prefix).
+
+  For example:
+  ```python
+  with tf.name_scope('foo') as current_scope:
+    ...
+  ```
+  @end_compatibility
+  """
   return get_variable_scope_store().current_scope
 
 
@@ -1593,6 +1635,39 @@
 
 get_variable_or_local_docstring = ("""%s
 
+@compatibility(TF2)
+Although it is a legacy `compat.v1` api,
+`tf.compat.v1.get_variable` is mostly compatible with eager
+execution and `tf.function` but only if you combine it with the
+`tf.compat.v1.keras.utils.track_tf1_style_variables` decorator. (Though
+it will behave as if reuse is always set to `AUTO_REUSE`.)
+
+See the
+[model migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
+for more info.
+
+If you do not combine it with
+`tf.compat.v1.keras.utils.track_tf1_style_variables`, `get_variable` will create
+a brand new variable every single time it is called and will never reuse
+variables, regardless of variable names or `reuse` arguments.
+
+The TF2 equivalent of this symbol would be `tf.Variable`, but note
+that when using `tf.Variable` you must make sure you track your variables
+(and regularizer arguments) either manually or via `tf.Module` or
+`tf.keras.layers.Layer` mechanisms.
+
+A section of the 
+[migration guide](https://www.tensorflow.org/guide/migrate/model_mapping#incremental_migration_to_native_tf2)
+provides more details on incrementally migrating these usages to `tf.Variable`
+as well.
+
+Note: The `partitioner` arg is not compatible with TF2 behaviors even when
+using `tf.compat.v1.keras.utils.track_tf1_style_variables`. It can be replaced
+by using `ParameterServerStrategy` and its partitioners. See the
+[multi-gpu migration guide](https://www.tensorflow.org/guide/migrate/multi_worker_cpu_gpu_training)
+and the ParameterServerStrategy guides it references for more info.
+@end_compatibility
+
 %sThis function prefixes the name with the current variable scope
 and performs reuse checks. See the
 [Variable Scope How To](https://tensorflow.org/guide/variables)
@@ -2069,6 +2144,30 @@
 class variable_scope(object):
   """A context manager for defining ops that creates variables (layers).
 
+  @compatibility(TF2)
+  Although it is a legacy `compat.v1` api,
+  `tf.compat.v1.variable_scope` is mostly compatible with eager
+  execution and `tf.function` as long as you combine it with the
+  `tf.compat.v1.keras.utils.track_tf1_style_variables` decorator (though
+  it will behave as if reuse is always set to `AUTO_REUSE`.)
+
+  See the
+  [model migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
+  for more info on
+  migrating code that relies on `variable_scope`-based variable reuse.
+
+  When you use it with eager execution enabled but without
+  `tf.compat.v1.keras.utils.track_tf1_style_variables`,
+  `tf.compat.v1.variable_scope` will still be able to prefix the names
+  of variables created within the scope but it will not enable variable reuse
+  or error-raising checks around variable reuse (`get_variable` calls within
+  it would always create new variables).
+
+  Once you have switched away from `get_variable`-based variable reuse
+  mechanisms, to switch to TF2 APIs you can just use
+  `tf.name_scope` to prefix variable names.
+  @end_compatibility
+
   This context manager validates that the (optional) `values` are from the same
   graph, ensures that graph is the default graph, and pushes a name scope and a
   variable scope.