analyzer: Add support for MLIR conversion

Usage:
mlir = analyzer.ModelAnalyzer.analyze(tflite_input, 'mlir')
print(mlir)
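
A fuller sketch of the same flow (the import path matches
tensorflow/lite/python/analyzer.py below; the model path is hypothetical):

  from tensorflow.lite.python import analyzer

  model_path = '/path/to/model.tflite'  # hypothetical path
  txt = analyzer.ModelAnalyzer.analyze(model_path, 'txt')    # existing text report
  mlir = analyzer.ModelAnalyzer.analyze(model_path, 'mlir')  # added by this change
  print(mlir)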
PiperOrigin-RevId: 375897630
Change-Id: Ia1aa6c5e49200045df3a3f3d3888439c5c1a8d50
diff --git a/tensorflow/compiler/mlir/lite/BUILD b/tensorflow/compiler/mlir/lite/BUILD
index 906623d..af0de27 100644
--- a/tensorflow/compiler/mlir/lite/BUILD
+++ b/tensorflow/compiler/mlir/lite/BUILD
@@ -27,6 +27,7 @@
"//learning/brain/mlir/...",
"//third_party/iree/...",
"//tensorflow/compiler/mlir/...",
+ "//tensorflow/lite/python/...",
# Allow visibility from the mlir language server.
"//learning/brain/mlir/mlir_lsp_server/...",
],
@@ -1094,3 +1095,23 @@
"@llvm-project//llvm:Support",
],
)
+
+# Smaller version of flatbuffer_translate that only converts a FlatBuffer to MLIR.
+cc_library(
+ name = "flatbuffer_to_mlir",
+ srcs = [
+ "flatbuffer_to_mlir.cc",
+ ],
+ hdrs = [
+ "flatbuffer_to_mlir.h",
+ ],
+ deps = [
+ ":flatbuffer_import",
+ "@llvm-project//llvm:Support",
+ "@llvm-project//mlir:IR",
+ "@llvm-project//mlir:QuantOps",
+ "@llvm-project//mlir:StandardOps",
+ "@llvm-project//mlir:Support",
+ "@llvm-project//mlir:Translation",
+ ],
+)
diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.cc b/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.cc
new file mode 100644
index 0000000..3faf12e
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.cc
@@ -0,0 +1,91 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/raw_ostream.h"
+#include "mlir/IR/Attributes.h" // from @llvm-project
+#include "mlir/IR/Builders.h" // from @llvm-project
+#include "mlir/IR/BuiltinOps.h" // from @llvm-project
+#include "mlir/IR/BuiltinTypes.h" // from @llvm-project
+#include "mlir/IR/Location.h" // from @llvm-project
+#include "mlir/IR/MLIRContext.h" // from @llvm-project
+#include "mlir/IR/Operation.h" // from @llvm-project
+#include "mlir/IR/Types.h" // from @llvm-project
+#include "mlir/IR/Value.h" // from @llvm-project
+#include "mlir/IR/Verifier.h" // from @llvm-project
+#include "mlir/Support/FileUtilities.h" // from @llvm-project
+#include "mlir/Translation.h" // from @llvm-project
+#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
+
+namespace mlir {
+namespace TFL {
+namespace {
+static OwningModuleRef FlatBufferFileToMlirTranslation(
+ llvm::SourceMgr* source_mgr, MLIRContext* context) {
+ const llvm::MemoryBuffer* input =
+ source_mgr->getMemoryBuffer(source_mgr->getMainFileID());
+ std::string error;
+ auto loc =
+ mlir::FileLineColLoc::get(context, input->getBufferIdentifier(), 0, 0);
+ std::vector<std::string> inputs;
+ std::vector<std::string> outputs;
+ return tflite::FlatBufferToMlir(
+ absl::string_view(input->getBufferStart(), input->getBufferSize()),
+ context, loc, false, inputs, outputs, false);
+}
+
+} // namespace
+
+std::string FlatBufferFileToMlir(const std::string& inputFilename) {
+ // Logic adapted from mlir::mlirTranslateMain().
+ int argc = 2;
+ const char* argv_array[2];
+ const char** argv = argv_array;
+ argv[0] = "flatbuffer_to_mlir";
+ argv[1] = inputFilename.c_str();
+ llvm::InitLLVM y(argc, argv);
+
+ std::string errorMessage;
+ auto input = mlir::openInputFile(inputFilename, &errorMessage);
+ if (!input) {
+ llvm::errs() << errorMessage << "\n";
+ return "";
+ }
+
+ mlir::MLIRContext context;
+ context.printOpOnDiagnostic(true);
+ llvm::SourceMgr sourceMgr;
+ sourceMgr.AddNewSourceBuffer(std::move(input), llvm::SMLoc());
+
+ OwningModuleRef module =
+ FlatBufferFileToMlirTranslation(&sourceMgr, &context);
+ if (!module || failed(verify(*module))) return "";
+
+ std::string mlir_output;
+ llvm::raw_string_ostream output_stream(mlir_output);
+ // Dump the MLIR, eliding large element attributes.
+ module->print(
+ output_stream,
+ mlir::OpPrintingFlags().useLocalScope().elideLargeElementsAttrs());
+ return mlir_output;
+}
+
+} // namespace TFL
+} // namespace mlir
diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.h b/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.h
new file mode 100644
index 0000000..db66a9f
--- /dev/null
+++ b/tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.h
@@ -0,0 +1,31 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TO_MLIR_H_
+#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TO_MLIR_H_
+
+#include <string>
+
+namespace mlir {
+namespace TFL {
+
+// Translates the given FlatBuffer into MLIR and returns the translated MLIR
+// as a string.
+std::string FlatBufferFileToMlir(const std::string& inputFilename);
+
+} // namespace TFL
+} // namespace mlir
+
+#endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TO_MLIR_H_
diff --git a/tensorflow/lite/python/BUILD b/tensorflow/lite/python/BUILD
index ddd18a6..4979cc3 100644
--- a/tensorflow/lite/python/BUILD
+++ b/tensorflow/lite/python/BUILD
@@ -566,6 +566,7 @@
name = "analyzer_test",
srcs = ["analyzer_test.py"],
data = [
+ "//tensorflow/lite:testdata/conv_huge_im2col.bin",
"//tensorflow/lite/python/testdata:interpreter_test_data",
],
python_version = "PY3",
diff --git a/tensorflow/lite/python/analyzer.py b/tensorflow/lite/python/analyzer.py
index e35166c..f42149c 100644
--- a/tensorflow/lite/python/analyzer.py
+++ b/tensorflow/lite/python/analyzer.py
@@ -58,17 +58,19 @@
Args:
tflite_model: TFLite flatbuffer model.
- result_format: txt|html|webserver.
+ result_format: txt|mlir|html|webserver.
Returns:
Analyzed report with the given result_format.
"""
- if result_format == "html":
+ if result_format == "txt":
+ return _analyzer_wrapper.ModelAnalyzer(tflite_model)
+ elif result_format == "mlir":
+ return _analyzer_wrapper.FlatBufferToMlir(tflite_model)
+ elif result_format == "html":
return visualize.create_html(tflite_model)
elif result_format == "webserver":
html_body = visualize.create_html(tflite_model)
_handle_webserver("localhost", 8080, html_body)
- elif result_format == "txt":
- return _analyzer_wrapper.ModelAnalyzer(tflite_model)
else:
raise ValueError(f"result_format '{result_format}' is not supported")
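
Unsupported formats still fall through to the ValueError above; a minimal
sanity check (model path hypothetical):

  from tensorflow.lite.python import analyzer

  try:
    analyzer.ModelAnalyzer.analyze('/path/to/model.tflite', 'json')
  except ValueError as e:
    print(e)  # result_format 'json' is not supported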
diff --git a/tensorflow/lite/python/analyzer_test.py b/tensorflow/lite/python/analyzer_test.py
index 94c2d0a..5ac0910 100644
--- a/tensorflow/lite/python/analyzer_test.py
+++ b/tensorflow/lite/python/analyzer_test.py
@@ -36,6 +36,26 @@
self.assertIn("<html>\n<head>", html)
self.assertIn("FULLY_CONNECTED (0)", html)
+ def testMlir(self):
+ model_path = resource_loader.get_path_to_datafile(
+ "testdata/permute_float.tflite")
+ mlir = analyzer.ModelAnalyzer.analyze(model_path, "mlir")
+ self.assertIn(
+ "func @main(%arg0: tensor<1x4xf32>) -> tensor<1x4xf32> attributes "
+ '{tf.entry_function = {inputs = "input", outputs = "output"}}', mlir)
+ self.assertIn(
+ '%1 = "tfl.fully_connected"(%arg0, %0, %cst) {fused_activation_function'
+ ' = "NONE", keep_num_dims = false, weights_format = "DEFAULT"} : '
+ "(tensor<1x4xf32>, tensor<4x4xf32>, none) -> tensor<1x4xf32>", mlir)
+
+ def testMlirHugeConst(self):
+ model_path = resource_loader.get_path_to_datafile(
+ "../testdata/conv_huge_im2col.bin")
+ mlir = analyzer.ModelAnalyzer.analyze(model_path, "mlir")
+ self.assertIn(
+ '%1 = "tfl.pseudo_const"() {value = opaque<"_", "0xDEADBEEF"> : '
+ "tensor<3x3x3x8xf32>} : () -> tensor<3x3x3x8xf32>", mlir)
+
if __name__ == "__main__":
test.main()
diff --git a/tensorflow/lite/python/analyzer_wrapper/BUILD b/tensorflow/lite/python/analyzer_wrapper/BUILD
index 6298232..8e94240 100644
--- a/tensorflow/lite/python/analyzer_wrapper/BUILD
+++ b/tensorflow/lite/python/analyzer_wrapper/BUILD
@@ -10,9 +10,11 @@
srcs = [
"analyzer_wrapper.cc",
],
+ link_in_framework = True,
module_name = "_pywrap_analyzer_wrapper",
deps = [
":model_analyzer",
+ "//tensorflow/compiler/mlir/lite:flatbuffer_to_mlir",
"@pybind11",
],
)
diff --git a/tensorflow/lite/python/analyzer_wrapper/analyzer_wrapper.cc b/tensorflow/lite/python/analyzer_wrapper/analyzer_wrapper.cc
index bb7e5a1..1686eac 100644
--- a/tensorflow/lite/python/analyzer_wrapper/analyzer_wrapper.cc
+++ b/tensorflow/lite/python/analyzer_wrapper/analyzer_wrapper.cc
@@ -14,6 +14,7 @@
==============================================================================*/
#include "pybind11/pybind11.h"
+#include "tensorflow/compiler/mlir/lite/flatbuffer_to_mlir.h"
#include "tensorflow/lite/python/analyzer_wrapper/model_analyzer.h"
PYBIND11_MODULE(_pywrap_analyzer_wrapper, m) {
@@ -25,4 +26,12 @@
R"pbdoc(
Returns txt dump of the given TFLite file.
)pbdoc");
+ m.def(
+ "FlatBufferToMlir",
+ [](const std::string& model_path) {
+ return ::mlir::TFL::FlatBufferFileToMlir(model_path);
+ },
+ R"pbdoc(
+ Returns MLIR dump of the given TFLite file.
+ )pbdoc");
}
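
For completeness, the new binding can also be exercised directly through the
pybind module; a minimal sketch, assuming the wrapper import path used by
analyzer.py (model path hypothetical):

  from tensorflow.lite.python.analyzer_wrapper import _pywrap_analyzer_wrapper

  print(_pywrap_analyzer_wrapper.FlatBufferToMlir('/path/to/model.tflite'))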