Change op benchmark forward_only flag (#28967)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/28967
Change the forward_only flag to accept an explicit True or False value so it can be integrated with AI-PEP.
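For context on why `type=bool` and `action='store_true'` are replaced: argparse calls `bool("False")`, which is truthy, so an explicit `--forward_only False` could not disable the flag, and `store_true` flags cannot take a value at all. Below is a minimal standalone sketch of the new argparse pattern (the bare parser here is illustrative; the real flags live in benchmark_runner.py):
```
import argparse

def str2bool(v):
    # Accept both real booleans and explicit true/false strings.
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')

parser = argparse.ArgumentParser()
parser.add_argument(
    "--forward_only",
    type=str2bool,   # parse "True"/"False" strings into real booleans
    nargs='?',       # the value is optional: a bare --forward_only also works
    const=True,      # bare --forward_only means True
    default=False,   # omitting the flag means False
)

print(parser.parse_args([]).forward_only)                            # False
print(parser.parse_args(['--forward_only']).forward_only)            # True
print(parser.parse_args(['--forward_only', 'True']).forward_only)    # True
print(parser.parse_args(['--forward_only', 'False']).forward_only)   # False
```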
Test Plan:
```
[mingzhe0908@devgpu203.prn2 ~/fbsource/fbcode] ~/fbsource/fbcode/buck-out/opt/gen/caffe2/benchmarks/operator_benchmark/pt/add_test.par --forward_only True --iterations 1
# ----------------------------------------
# PyTorch/Caffe2 Operator Micro-benchmarks
# ----------------------------------------
# Tag : short
# Benchmarking PyTorch: add
# Mode: Eager
# Name: add_M64_N64_K64_cpu
# Input: M: 64, N: 64, K: 64, device: cpu
Forward Execution Time (us) : 152.489
# Benchmarking PyTorch: add
# Mode: Eager
# Name: add_M64_N64_K128_cpu
# Input: M: 64, N: 64, K: 128, device: cpu
Forward Execution Time (us) : 236.608
[mingzhe0908@devgpu203.prn2 ~/fbsource/fbcode] ~/fbsource/fbcode/buck-out/opt/gen/caffe2/benchmarks/operator_benchmark/pt/add_test.par --forward_only False --iterations 1
# ----------------------------------------
# PyTorch/Caffe2 Operator Micro-benchmarks
# ----------------------------------------
# Tag : short
# Benchmarking PyTorch: add
# Mode: Eager
# Name: add_M64_N64_K64_cpu
# Input: M: 64, N: 64, K: 64, device: cpu
Forward Execution Time (us) : 147.174
# Benchmarking PyTorch: add
# Mode: Eager
# Name: add_M64_N64_K128_cpu
# Input: M: 64, N: 64, K: 128, device: cpu
Forward Execution Time (us) : 253.437
# Benchmarking PyTorch: add
# Mode: Eager
# Name: add_M64_N64_K64_cpu_bwdall
# Input: M: 64, N: 64, K: 64, device: cpu
Backward Execution Time (us) : 1044.082
```
Reviewed By: hl475
Differential Revision: D18247416
fbshipit-source-id: 1c6cff1ac98233d4f0ca298e0cb4a0d3466e5834
diff --git a/benchmarks/operator_benchmark/benchmark_runner.py b/benchmarks/operator_benchmark/benchmark_runner.py
index 6560f36..ae8374d 100644
--- a/benchmarks/operator_benchmark/benchmark_runner.py
+++ b/benchmarks/operator_benchmark/benchmark_runner.py
@@ -92,21 +92,29 @@
parser.add_argument(
"--ai_pep_format",
- help="Print result when running on AI-PEP",
+ type=benchmark_utils.str2bool,
+ nargs='?',
+ const=True,
default=False,
- type=bool
+ help="Print result when running on AI-PEP"
)
parser.add_argument(
"--use_jit",
- help="Run operators with PyTorch JIT mode",
- action='store_true'
+ type=benchmark_utils.str2bool,
+ nargs='?',
+ const=True,
+ default=False,
+ help="Run operators with PyTorch JIT mode"
)
parser.add_argument(
"--forward_only",
- help="Only run the forward path of operators",
- action='store_true'
+ type=benchmark_utils.str2bool,
+ nargs='?',
+ const=True,
+ default=False,
+ help="Only run the forward path of operators"
)
parser.add_argument(
diff --git a/benchmarks/operator_benchmark/benchmark_utils.py b/benchmarks/operator_benchmark/benchmark_utils.py
index ff75eb2..2a3fb5f 100644
--- a/benchmarks/operator_benchmark/benchmark_utils.py
+++ b/benchmarks/operator_benchmark/benchmark_utils.py
@@ -21,6 +21,15 @@
def shape_to_string(shape):
return ', '.join([str(x) for x in shape])
+def str2bool(v):
+ if isinstance(v, bool):
+ return v
+ if v.lower() in ('yes', 'true', 't', 'y', '1'):
+ return True
+ elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+ return False
+ else:
+ raise argparse.ArgumentTypeError('Boolean value expected.')
def numpy_random(dtype, *shapes):
""" Return a random numpy tensor of the provided dtype.