Make config.autotune_remote_cache be a three-way option (#132285)

Similar to the fx_graph_cache config, make the autotune remote cache config a three-way option (True / False / None) so we can hard-enable or hard-disable it via config options.

Differential Revision: [D60537105](https://our.internmc.facebook.com/intern/diff/D60537105)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/132285
Approved by: https://github.com/aorenste
diff --git a/torch/_inductor/config.py b/torch/_inductor/config.py
index 75f5c18..20c47c6 100644
--- a/torch/_inductor/config.py
+++ b/torch/_inductor/config.py
@@ -17,6 +17,14 @@
     return None
 
 
+def autotune_remote_cache_default() -> Optional[bool]:
+    # Read the env var once; "1"/"0" force the cache on/off explicitly.
+    env = os.environ.get("TORCHINDUCTOR_AUTOTUNE_REMOTE_CACHE")
+    if env in ("1", "0"):
+        return env == "1"
+    return None
+
+
 # add some debug printouts
 debug = False
 
@@ -41,7 +49,10 @@
 autotune_local_cache = True
 
 # enable autotune remote cache
-autotune_remote_cache = os.environ.get("TORCHINDUCTOR_AUTOTUNE_REMOTE_CACHE") == "1"
+# False: Disables the cache
+# True: Enables the cache
+# None: Not set -- Off for OSS, JustKnobs based for internal
+autotune_remote_cache: Optional[bool] = autotune_remote_cache_default()
 
 # Force disabled all inductor level caching -- This will override any other caching flag
 force_disable_caches = os.environ.get("TORCHINDUCTOR_FORCE_DISABLE_CACHES") == "1"
diff --git a/torch/_inductor/runtime/triton_heuristics.py b/torch/_inductor/runtime/triton_heuristics.py
index 1209d13..d29c6b3 100644
--- a/torch/_inductor/runtime/triton_heuristics.py
+++ b/torch/_inductor/runtime/triton_heuristics.py
@@ -1022,8 +1022,8 @@
 
 
 def should_use_remote_autotune_cache(inductor_meta):
-    if inductor_meta.get("autotune_remote_cache"):
-        return True
+    if inductor_meta.get("autotune_remote_cache") is not None:
+        return inductor_meta.get("autotune_remote_cache")
     if not inductor_meta.get("is_fbcode"):
         return False
     if inductor_meta.get("is_hip"):