Add quick utility to transform scripted/traced models for mobile. (#35904)
Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/35904
Currently this optimization transforms conv2d and linear ops into their
prepacked (XNNPACK) equivalents.
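
For reference, the same flow can be driven programmatically; a minimal
sketch (the model paths are placeholders, headers and the in-place call
match the new binary below):

  #include "torch/csrc/jit/api/module.h"
  #include "torch/csrc/jit/passes/xnnpack_rewrite.h"
  #include "torch/csrc/jit/serialization/import.h"

  int main() {
    auto module = torch::jit::load("/tmp/inpainting_fbnet.pt");
    // Rewrites conv2d/linear calls to prepacked XNNPACK ops in place.
    torch::jit::optimizeForMobile(module);
    module.save("/tmp/inpainting_fbnet_mobile_optimized.pt");
    return 0;
  }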
Test Plan: buck run fbsource//xplat/caffe2:optimize_for_mobile -- --model="/tmp/inpainting_fbnet.pt"
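
Once built, the binary can also be invoked directly; for example (the
binary path depends on the build setup, and --output is optional):

  ./optimize_for_mobile --model=/tmp/inpainting_fbnet.pt --output=/tmp/inpainting_fbnet_opt.pt

When --output is omitted, the result is saved next to the input as
<model>_mobile_optimized.pt.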
Reviewed By: AshkanAliabadi
Differential Revision: D20824433
fbshipit-source-id: 88d5c0d21b77911f95f018b03398b0df758ab0d7
diff --git a/binaries/optimize_for_mobile.cc b/binaries/optimize_for_mobile.cc
new file mode 100644
index 0000000..82e3c4f
--- /dev/null
+++ b/binaries/optimize_for_mobile.cc
@@ -0,0 +1,58 @@
+/**
+ * Copyright (c) 2016-present, Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <string>
+
+#include "torch/csrc/jit/api/module.h"
+#include "torch/csrc/jit/passes/xnnpack_rewrite.h"
+#include "torch/csrc/jit/serialization/import.h"
+
C10_DEFINE_string(model, "", "The TorchScript model to transform.");
+C10_DEFINE_string(
+ output,
+ "",
+ "Name of the output model to be saved.");
+
+int main(int argc, char** argv) {
+  c10::SetUsageMessage(
+      "Optimize a scripted/traced PyTorch model for mobile.\n"
+ "Example usage:\n"
+ "./optimize_for_mobile"
+ " --model=<model_file>"
+ " --output=<output_file_name>");
+ if (!c10::ParseCommandLineFlags(&argc, &argv)) {
+ std::cerr << "Failed to parse command line flags!" << std::endl;
+ return 1;
+ }
+
+  CAFFE_ENFORCE(FLAGS_model != "", "A model file must be provided via --model.");
+
+  // Default output: input name with its extension replaced by "_mobile_optimized.pt".
+  std::string output_model_name =
+      FLAGS_model.substr(0, FLAGS_model.rfind(".")) + "_mobile_optimized.pt";
+
+ if (FLAGS_output != "") {
+ output_model_name = FLAGS_output;
+ }
+
+  // Load the module, run the XNNPACK rewrite pass, and save the result.
+  auto module = torch::jit::load(FLAGS_model);
+  torch::jit::optimizeForMobile(module);
+  module.save(output_model_name);
+
+ return 0;
+}