[Static Runtime] Fix broken test_static_runtime build (#62098)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/62098
The build was broken by D29821533 (https://github.com/pytorch/pytorch/commit/1d2ea76afb4f9ac40c43555da2f3d94dd3549136). The `clamp` overloads used in `deep_wide.h`
are no longer available in the `at::native` namespace.
Use `at::cpu::clamp` and `at::native::clip_out` (which should be an alias for
clamp_out) instead.
Reviewed By: hlu1
Differential Revision: D29880187
fbshipit-source-id: 210b6d2be8a8142e7af1a0ba07e55a95b1a77d25
diff --git a/benchmarks/static_runtime/deep_wide_pt.h b/benchmarks/static_runtime/deep_wide_pt.h
index 2c9cf08..44d13d1 100644
--- a/benchmarks/static_runtime/deep_wide_pt.h
+++ b/benchmarks/static_runtime/deep_wide_pt.h
@@ -54,7 +54,7 @@
auto wide_offset = at::add(wide, mu_);
auto wide_normalized = at::mul(wide_offset, sigma_);
// Placeholder for ReplaceNaN
- auto wide_preproc = at::native::clamp(wide_normalized, -10.0, 10.0);
+ auto wide_preproc = at::cpu::clamp(wide_normalized, -10.0, 10.0);
auto user_emb_t = at::native::transpose(user_emb, 1, 2);
auto dp_unflatten = at::native::bmm_cpu(ad_emb_packed, user_emb_t);
@@ -87,7 +87,7 @@
at::add_out(prealloc_tensors[0], wide, mu_);
at::mul_out(prealloc_tensors[1], prealloc_tensors[0], sigma_);
- at::native::clamp_out(
+ at::native::clip_out(
prealloc_tensors[1], -10.0, 10.0, prealloc_tensors[2]);
// Potential optimization: original tensor could be pre-transposed.