[DCP] Add API logging for the checkpoint high-level API (#102278)
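
Adds torch._C._log_api_usage_once calls at the top of load_state_dict and save_state_dict in torch.distributed.checkpoint, so each use of the high-level checkpoint entry points is recorded once per process under the keys "torch.distributed.checkpoint.load_state_dict" and "torch.distributed.checkpoint.save_state_dict". The hook is placed before any planner or storage setup, so the event is recorded even if the checkpoint operation later fails; in open-source builds the call is effectively a no-op unless an API-usage logger has been registered.

A minimal sketch of how the new logging is exercised (the Linear module, the /tmp/ckpt path, and the single-process no_dist=True usage are illustrative only, not part of this change):

    import torch
    import torch.distributed.checkpoint as dcp

    model = torch.nn.Linear(4, 4)

    # save_state_dict() now logs "torch.distributed.checkpoint.save_state_dict"
    # once per process before doing any planning or I/O.
    dcp.save_state_dict(
        state_dict=model.state_dict(),
        storage_writer=dcp.FileSystemWriter("/tmp/ckpt"),
        no_dist=True,  # single-process example; normally run under torchrun
    )

    # load_state_dict() likewise logs "torch.distributed.checkpoint.load_state_dict"
    # and then loads in-place into the provided state_dict.
    state_dict = model.state_dict()
    dcp.load_state_dict(
        state_dict=state_dict,
        storage_reader=dcp.FileSystemReader("/tmp/ckpt"),
        no_dist=True,
    )
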
Pull Request resolved: https://github.com/pytorch/pytorch/pull/102278
Approved by: https://github.com/fduwjj
diff --git a/torch/distributed/checkpoint/state_dict_loader.py b/torch/distributed/checkpoint/state_dict_loader.py
index 11b8e36..f9416ae 100644
--- a/torch/distributed/checkpoint/state_dict_loader.py
+++ b/torch/distributed/checkpoint/state_dict_loader.py
@@ -1,5 +1,6 @@
from typing import Any, Dict, Optional

+import torch
import torch.distributed as dist

from .storage import (
@@ -84,6 +85,9 @@
and it is the user's responsibility to ensure that this is set so that each
rank has an individual GPU, via ``torch.cuda.set_device()``.
"""
+
+ torch._C._log_api_usage_once("torch.distributed.checkpoint.load_state_dict")
+
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
if planner is None:
planner = DefaultLoadPlanner()
diff --git a/torch/distributed/checkpoint/state_dict_saver.py b/torch/distributed/checkpoint/state_dict_saver.py
index 13c29f6..0312289 100644
--- a/torch/distributed/checkpoint/state_dict_saver.py
+++ b/torch/distributed/checkpoint/state_dict_saver.py
@@ -1,6 +1,7 @@
from typing import Optional

-import torch.distributed as dist
+import torch
+import torch.distributed as dist

from .planner import SavePlanner
from .default_planner import DefaultSavePlanner
@@ -76,6 +77,9 @@
and it is the user's responsibility to ensure that this is set so that
each rank has an individual GPU, via ``torch.cuda.set_device()``.
"""
+
+ torch._C._log_api_usage_once("torch.distributed.checkpoint.save_state_dict")
+
distW = _DistWrapper(process_group, not no_dist, coordinator_rank)
if planner is None:
planner = DefaultSavePlanner()