Update for removal of FuncOp forwarding
PiperOrigin-RevId: 442669961
diff --git a/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/gml_st/transforms/test_passes.h b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/gml_st/transforms/test_passes.h
index 4e807eb..26629fc 100644
--- a/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/gml_st/transforms/test_passes.h
+++ b/tensorflow/compiler/mlir/hlo/include/mlir-hlo/Dialect/gml_st/transforms/test_passes.h
@@ -22,9 +22,9 @@
namespace mlir {
namespace gml_st {
-std::unique_ptr<OperationPass<FuncOp>> createTestGmlStLoopPeelingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTestGmlStLoopPeelingPass();
-std::unique_ptr<OperationPass<FuncOp>> createTestGmlStLoopTilingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createTestGmlStLoopTilingPass();
#define GEN_PASS_REGISTRATION
#include "mlir-hlo/Dialect/gml_st/transforms/test_passes.h.inc"
diff --git a/tensorflow/compiler/mlir/hlo/lib/Analysis/shape_component_analysis.cc b/tensorflow/compiler/mlir/hlo/lib/Analysis/shape_component_analysis.cc
index b408e56..ae68d30 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Analysis/shape_component_analysis.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Analysis/shape_component_analysis.cc
@@ -404,8 +404,8 @@
//
// TODO(ezhulenev): Add symbolic shape attribute verifier to the jitrt
// dialect.
- if (auto func =
- dyn_cast_or_null<FuncOp>(argument.getOwner()->getParentOp())) {
+ if (auto func = dyn_cast_or_null<func::FuncOp>(
+ argument.getOwner()->getParentOp())) {
if (auto shape = func.getArgAttrOfType<DenseIntElementsAttr>(
argument.getArgNumber(), "jitrt.symbolic_shape")) {
auto &dims = insert(ShapeOrValueInfo::getShapeInfoOf(argument));
diff --git a/tensorflow/compiler/mlir/hlo/lib/Analysis/test_shape_component_analysis.cc b/tensorflow/compiler/mlir/hlo/lib/Analysis/test_shape_component_analysis.cc
index 2844716..d4ace69 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Analysis/test_shape_component_analysis.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Analysis/test_shape_component_analysis.cc
@@ -63,7 +63,8 @@
} // end anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> createTestShapeComponentAnalysisPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createTestShapeComponentAnalysisPass() {
return std::make_unique<TestShapeComponentAnalysisPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Analysis/test_userange_analysis.cc b/tensorflow/compiler/mlir/hlo/lib/Analysis/test_userange_analysis.cc
index 6fd6336..94eb373 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Analysis/test_userange_analysis.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Analysis/test_userange_analysis.cc
@@ -41,7 +41,7 @@
} // end anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> createTestUserangePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createTestUserangePass() {
return std::make_unique<TestUserangePass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/gml_st_to_scf.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/gml_st_to_scf.cc
index a829c52..2f391f6 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/gml_st_to_scf.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/gml_st_to_scf.cc
@@ -107,7 +107,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createGmlStToScfPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createGmlStToScfPass() {
return std::make_unique<GmlStToScfPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/greedy_tiling.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/greedy_tiling.cc
index ff50ca4..518561e 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/greedy_tiling.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/greedy_tiling.cc
@@ -47,7 +47,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createGreedyTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createGreedyTilingPass() {
return std::make_unique<GreedyTilingPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/test_passes.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/test_passes.cc
index f8ae04f..c9a9fad 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/test_passes.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/gml_st/transforms/test_passes.cc
@@ -143,7 +143,7 @@
}
void runOnOperation() override {
- FuncOp funcOp = getOperation();
+ func::FuncOp funcOp = getOperation();
auto distTypes = llvm::to_vector<2>(llvm::map_range(
distribution_types, [](std::string &str) { return StringRef(str); }));
@@ -166,11 +166,11 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createTestGmlStLoopPeelingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createTestGmlStLoopPeelingPass() {
return std::make_unique<TestGmlStLoopPeelingPass>();
}
-std::unique_ptr<OperationPass<FuncOp>> createTestGmlStLoopTilingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createTestGmlStLoopTilingPass() {
return std::make_unique<TestGmlStLoopTilingPass>();
}
} // namespace gml_st
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_fuse_linalg.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_fuse_linalg.cc
index d325100..c039586 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_fuse_linalg.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_fuse_linalg.cc
@@ -212,7 +212,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createLhloFuseLinalgPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLhloFuseLinalgPass(
bool use_parallel_loops, ArrayRef<unsigned> tile_sizes) {
return std::make_unique<LhloFuseLinalgPass>(use_parallel_loops, tile_sizes);
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_affine.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_affine.cc
index 9ab58ab..ef237b4 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_affine.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_affine.cc
@@ -669,7 +669,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createLhloLegalizeToAffinePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createLhloLegalizeToAffinePass() {
return std::make_unique<LhloLegalizeToAffinePass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_gpu.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_gpu.cc
index bc14252..18a4b2e 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_gpu.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_gpu.cc
@@ -196,7 +196,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeToGpuPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeToGpuPass() {
return std::make_unique<LhloLegalizeToGpuPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_parallel_loops.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_parallel_loops.cc
index c28a74d..0b66ff3 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_parallel_loops.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/lhlo/transforms/lhlo_legalize_to_parallel_loops.cc
@@ -739,7 +739,8 @@
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeLhloToParallelLoopsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createLegalizeLhloToParallelLoopsPass() {
return std::make_unique<LhloLegalizeToParallelLoopsPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/broadcast_propagation.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/broadcast_propagation.cc
index 1557525..90bc20d 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/broadcast_propagation.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/broadcast_propagation.cc
@@ -446,7 +446,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createBroadcastPropagationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createBroadcastPropagationPass() {
return std::make_unique<BroadcastPropagationPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/chlo_legalize_to_hlo_pass.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/chlo_legalize_to_hlo_pass.cc
index c7c8c71..4e2bf52 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/chlo_legalize_to_hlo_pass.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/chlo_legalize_to_hlo_pass.cc
@@ -78,7 +78,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createChloLegalizeToHloPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createChloLegalizeToHloPass(
bool legalize_broadcasts, bool expand_compositions) {
return std::make_unique<ChloLegalizeToHloPass>(legalize_broadcasts,
expand_compositions);
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/collapse_elementwise_map.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/collapse_elementwise_map.cc
index 5751280..a6d98ed 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/collapse_elementwise_map.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/collapse_elementwise_map.cc
@@ -92,7 +92,8 @@
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createCollapseElementwiseMapPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createCollapseElementwiseMapPass() {
return std::make_unique<CollapseElementwiseMapPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/expand_hlo_tuples.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/expand_hlo_tuples.cc
index ce6ae6a..abe2ef2 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/expand_hlo_tuples.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/expand_hlo_tuples.cc
@@ -49,7 +49,7 @@
// Expands the mhlo.tuple used in return op. Also updates function
// signature accordingly.
- void ExpandTupledTensorInReturnOp(FuncOp func) {
+ void ExpandTupledTensorInReturnOp(func::FuncOp func) {
FunctionType old_func_type = func.getFunctionType();
// Update input signatures.
// We will flatten the tuples for the function inputs as well.
@@ -131,7 +131,8 @@
void runOnOperation() override {
auto module = getOperation();
// Find `main` function.
- auto entry_function = module.lookupSymbol<FuncOp>(entry_function_name_);
+ auto entry_function =
+ module.lookupSymbol<func::FuncOp>(entry_function_name_);
if (!entry_function) {
return;
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/group_reduction_dimensions.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/group_reduction_dimensions.cc
index 5010e48..12c564c 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/group_reduction_dimensions.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/group_reduction_dimensions.cc
@@ -324,7 +324,7 @@
prefer_columns_reductions);
}
-std::unique_ptr<OperationPass<FuncOp>> createGroupReductionDimensionsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createGroupReductionDimensionsPass(
bool prefer_columns_reductions) {
return std::make_unique<GroupReductionDimensionsPass>(
prefer_columns_reductions);
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_shape_ops_to_standard.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_shape_ops_to_standard.cc
index ef3d78b..9cde04a 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_shape_ops_to_standard.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_shape_ops_to_standard.cc
@@ -233,7 +233,7 @@
// clang-format on
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
createLegalizeHloShapeOpsToStandardPass() {
return std::make_unique<HloLegalizeShapeOpsToStandardPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_to_lhlo.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_to_lhlo.cc
index cef74c7..2a1ad18 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_to_lhlo.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/hlo_legalize_to_lhlo.cc
@@ -457,7 +457,7 @@
bufferization::BufferizeTypeConverter converter;
auto isMemRefType = [](Type type) { return type.isa<BaseMemRefType>(); };
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType()) &&
converter.isLegal(&op.getBody());
});
@@ -474,8 +474,8 @@
});
populateHLOToLHLOConversionPattern(&context, &converter, &patterns);
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
populateCallOpTypeConversionPattern(patterns, converter);
populateBranchOpInterfaceTypeConversionPattern(patterns, converter);
populateReturnOpTypeConversionPattern(patterns, converter);
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_control_flow.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_control_flow.cc
index 439e77e..f1329ae 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_control_flow.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_control_flow.cc
@@ -348,7 +348,7 @@
: public LegalizeControlFlowPassBase<LegalizeControlFlowPass> {
// Perform the lowering to MLIR control flow.
void runOnOperation() override {
- FuncOp f = getOperation();
+ func::FuncOp f = getOperation();
MLIRContext* ctx = f.getContext();
RewritePatternSet patterns(&getContext());
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_einsum_to_dot_general.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_einsum_to_dot_general.cc
index 0700b61..a5d5a13 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_einsum_to_dot_general.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_einsum_to_dot_general.cc
@@ -193,7 +193,8 @@
patterns->add<EinsumToDotGeneralPattern>(context);
}
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeEinsumToDotGeneralPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createLegalizeEinsumToDotGeneralPass() {
return std::make_unique<LegalizeEinsumToDotGeneralPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_gather_to_torch_index_select.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_gather_to_torch_index_select.cc
index a1dbfec..16a73b1 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_gather_to_torch_index_select.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_gather_to_torch_index_select.cc
@@ -147,7 +147,7 @@
patterns->add<GatherIsTorchIndexSelect>(context);
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
createLegalizeGatherToTorchIndexSelectPass() {
return std::make_unique<LegalizeGatherToTorchIndexSelectPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_shape_computations.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_shape_computations.cc
index 14aabfa..8e09000 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_shape_computations.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_shape_computations.cc
@@ -230,7 +230,8 @@
GetDimSizeConverter, ReshapeConverter>(context);
}
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeShapeComputationsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createLegalizeShapeComputationsPass() {
return std::make_unique<HloLegalizeShapeComputationsPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_to_linalg.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_to_linalg.cc
index 7c6c97c..82aa67f 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_to_linalg.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/legalize_to_linalg.cc
@@ -3130,7 +3130,7 @@
ReduceRegionReturnOpConversion>(context, PatternBenefit(1000));
}
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeHloToLinalgPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeHloToLinalgPass() {
return std::make_unique<HloLegalizeToLinalgPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/merge_assuming_ops.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/merge_assuming_ops.cc
index 5f52552..d982936 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/merge_assuming_ops.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/merge_assuming_ops.cc
@@ -474,7 +474,7 @@
tensor::CastOp::getCanonicalizationPatterns(*patterns, context);
}
-std::unique_ptr<OperationPass<FuncOp>> createMergeAssumingOpsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createMergeAssumingOpsPass() {
return std::make_unique<MergeAssumingOpsPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_canonicalize_reduction.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_canonicalize_reduction.cc
index 646796a..9612f90 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_canonicalize_reduction.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_canonicalize_reduction.cc
@@ -248,7 +248,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createHloCanonicalizeReductionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createHloCanonicalizeReductionPass() {
return std::make_unique<HloCanonicalizeReductionPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_flatten_tuple.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_flatten_tuple.cc
index 0505f3d..7034e50 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_flatten_tuple.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/mhlo_flatten_tuple.cc
@@ -226,7 +226,7 @@
static PassRegistration<FlattenTuplePass> pass;
-std::unique_ptr<OperationPass<FuncOp>> createFlattenTuplePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createFlattenTuplePass() {
return std::make_unique<FlattenTuplePass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/rank_specialization.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/rank_specialization.cc
index c29cdbc..e3cfd3c 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/rank_specialization.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/rank_specialization.cc
@@ -953,11 +953,12 @@
shape::AnyOp::getCanonicalizationPatterns(*patterns, context);
}
-std::unique_ptr<OperationPass<FuncOp>> createRankSpecializationClusterPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createRankSpecializationClusterPass() {
return std::make_unique<RankSpecializationClusterPass>();
}
-std::unique_ptr<OperationPass<FuncOp>> createRankSpecializationToSCFPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createRankSpecializationToSCFPass(
int64_t max_target_rank) {
return std::make_unique<RankSpecializationToSCFPass>(max_target_rank);
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/sink_constants_to_control_flow.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/sink_constants_to_control_flow.cc
index ff96d9b..b9d129f 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/sink_constants_to_control_flow.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/sink_constants_to_control_flow.cc
@@ -79,7 +79,8 @@
// TODO(hinsu): Rename this pass and move to a different file along with the
// generalization to make all ops isolated from above.
-std::unique_ptr<OperationPass<FuncOp>> createSinkConstantsToControlFlowPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createSinkConstantsToControlFlowPass() {
return std::make_unique<SinkConstantsToControlFlowPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/test_infer_shaped_type_pass.cc b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/test_infer_shaped_type_pass.cc
index cb45a1f..507532d 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/test_infer_shaped_type_pass.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Dialect/mhlo/transforms/test_infer_shaped_type_pass.cc
@@ -104,7 +104,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createTestInferShapedTypeMethodsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createTestInferShapedTypeMethodsPass() {
return std::make_unique<TestInferShapedTypeMethodsPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_packing.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_packing.cc
index 8fe3fba..7e2195f 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_packing.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_packing.cc
@@ -473,12 +473,12 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createBufferPackingPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createBufferPackingPass(
unsigned window_size) {
return std::make_unique<BufferPackingPass>(window_size);
}
-std::unique_ptr<OperationPass<FuncOp>> createMemoryCountPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createMemoryCountPass() {
return std::make_unique<MemoryCountPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_reuse.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_reuse.cc
index a380ecb..b0d187b 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_reuse.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/buffer_reuse.cc
@@ -294,7 +294,7 @@
} // end namespace
-std::unique_ptr<OperationPass<FuncOp>> createBufferReusePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createBufferReusePass() {
return std::make_unique<BufferReusePass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/copy_removal.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/copy_removal.cc
index d57d444..d814697 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/copy_removal.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/copy_removal.cc
@@ -239,7 +239,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> createCopyRemovalPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createCopyRemovalPass() {
return std::make_unique<CopyRemovalPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/lower_index_cast_pass.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/lower_index_cast_pass.cc
index 449c27a..545d305 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/lower_index_cast_pass.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/lower_index_cast_pass.cc
@@ -70,7 +70,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateLowerIndexCastPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLowerIndexCastPass() {
return std::make_unique<LowerIndexCastPass>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/shape_simplification.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/shape_simplification.cc
index f5a82b8..20a6d49 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/shape_simplification.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/shape_simplification.cc
@@ -249,7 +249,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateShapeSimplification() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateShapeSimplification() {
return std::make_unique<ShapeSimplification>();
}
diff --git a/tensorflow/compiler/mlir/hlo/lib/Transforms/symbolic_shape_optimization.cc b/tensorflow/compiler/mlir/hlo/lib/Transforms/symbolic_shape_optimization.cc
index 008dc2a..c11f870 100644
--- a/tensorflow/compiler/mlir/hlo/lib/Transforms/symbolic_shape_optimization.cc
+++ b/tensorflow/compiler/mlir/hlo/lib/Transforms/symbolic_shape_optimization.cc
@@ -817,7 +817,8 @@
} // end namespace
-std::unique_ptr<OperationPass<FuncOp>> createSymbolicShapeOptimizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+createSymbolicShapeOptimizationPass() {
return std::make_unique<SymbolicShapeOptimizationPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.cc b/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.cc
index c23a5f9..397c99b 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.cc
@@ -99,7 +99,7 @@
// A deny list for op cost computation since those ops are not arithemtic.
inline bool IsNonArithmeticOp(mlir::Operation* op) {
- if (llvm::isa<func::ReturnOp, FuncOp>(op)) return true;
+ if (llvm::isa<func::ReturnOp, func::FuncOp>(op)) return true;
if (op->hasTrait<OpTrait::ConstantLike>()) return true;
if (llvm::isa<QConstOp, SparseQConstOp>(op)) return true;
if (!NotTFLQuantDequantizeOp(op)) return true;
@@ -143,7 +143,7 @@
return hardware_op->second->IsOpSupported(op);
}
-double TargetHardware::GetFuncCost(FuncOp* func) const {
+double TargetHardware::GetFuncCost(func::FuncOp* func) const {
double total_cost = 0.0;
func->walk([&](Operation* op) {
if (IsNonArithmeticOp(op)) return;
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h b/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h
index cbbf495..56412ee 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/hardwares/target_hardware.h
@@ -74,7 +74,7 @@
// Returns the cost of running the whole function on this hardware.
// By default this is the sum of the cost of individual cost for each op.
- virtual double GetFuncCost(FuncOp* func) const;
+ virtual double GetFuncCost(func::FuncOp* func) const;
// Returns true if 'op' can run on this Hardware.
virtual bool IsOpSupported(mlir::Operation* op) const;
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.cc b/tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.cc
index bdfc7df..b227a8e 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/tflite_import_export.cc
@@ -43,7 +43,7 @@
module.walk([&](mlir::Operation* op) {
if (!mlir::TFL::tac::IsNonConstOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallOpInterface>(op))
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallOpInterface>(op))
return;
// Attach cost per target.
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/compute_cost.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/compute_cost.cc
index 031cd13..e121b15 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/compute_cost.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/compute_cost.cc
@@ -85,7 +85,7 @@
void ComputeCostPass::runOnOperation() {
auto module = getOperation();
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
// We only care about those functions annotated with "tac.interface_name".
auto interface_name = GetInterFaceName(func);
if (!interface_name.hasValue()) continue;
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc
index dd81caf..23e8760 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.cc
@@ -86,7 +86,8 @@
return total_element_count;
}
-struct GetOpCostPass : mlir::PassWrapper<GetOpCostPass, OperationPass<FuncOp>> {
+struct GetOpCostPass
+ : mlir::PassWrapper<GetOpCostPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(GetOpCostPass)
llvm::StringRef getArgument() const final { return "tfl-get-op-cost"; }
@@ -101,7 +102,7 @@
OpBuilder builder(func);
func.walk([&](Operation* op) {
if (IsNonConstOp(op) && !IsTerminatorOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallOpInterface>(op)) {
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallOpInterface>(op)) {
auto hardware = GetTargetAnnotation(op);
if (!hardware) return;
float cost = GetCostForOp(op, hardware.getValue());
@@ -121,7 +122,7 @@
return device_hardware->GetOpCost(op);
}
-float GetCostForFunc(FuncOp* func, const std::string& hardware) {
+float GetCostForFunc(func::FuncOp* func, const std::string& hardware) {
auto* device_hardware = GetTargetHardware(hardware);
if (device_hardware == nullptr) {
return kDefaultFixedValuedCost;
@@ -187,7 +188,7 @@
return kDefaultFixedValuedCost;
}
-std::unique_ptr<OperationPass<FuncOp>> CreateGetOpCostPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateGetOpCostPass() {
return std::make_unique<GetOpCostPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.h b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.h
index 24ab52f..8644523 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.h
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/cost_model.h
@@ -37,7 +37,7 @@
float GetCostForOp(Operation* op, const std::string& hardware);
// Get the estimated cost for the whole function under the given hardware.
-float GetCostForFunc(FuncOp* func, const std::string& hardware);
+float GetCostForFunc(func::FuncOp* func, const std::string& hardware);
// Get the transfer cost given from & to hardware info.
// We will only calculate for the "necessary" tensor transferred.
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
index 5a1e3cf..fdf5bf9 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.cc
@@ -205,7 +205,7 @@
}
};
-void OptimizeQuantizedOpToFloat(FuncOp func, MLIRContext* context) {
+void OptimizeQuantizedOpToFloat(func::FuncOp func, MLIRContext* context) {
RewritePatternSet patterns(func.getContext());
patterns
.add<FoldQuantizedI32ToFloat, FoldQuantizeDequantize, RemoveUnusedQuant>(
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h
index 1adf2b7..7bb3981 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform.h
@@ -35,10 +35,10 @@
// Convert quantized ops to float, this will essentially insert dequantize &
// quantize pair around the op.
-void ConvertQuantizedOpToFloat(FuncOp func, OpBuilder* builder);
+void ConvertQuantizedOpToFloat(func::FuncOp func, OpBuilder* builder);
// This will optimize the quantized ops -> float graph.
-void OptimizeQuantizedOpToFloat(FuncOp func, MLIRContext* context);
+void OptimizeQuantizedOpToFloat(func::FuncOp func, MLIRContext* context);
} // namespace tac
} // namespace TFL
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_gpu.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_gpu.cc
index e26fd0b..76cb7a0 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_gpu.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_gpu.cc
@@ -43,7 +43,8 @@
namespace {
struct DeviceTransformGPUPass
- : public mlir::PassWrapper<DeviceTransformGPUPass, OperationPass<FuncOp>> {
+ : public mlir::PassWrapper<DeviceTransformGPUPass,
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DeviceTransformGPUPass)
llvm::StringRef getArgument() const final {
@@ -72,7 +73,7 @@
return gpu_hardware.GetTransformations(context);
}
-std::unique_ptr<OperationPass<FuncOp>> CreateDeviceTransformGPUPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDeviceTransformGPUPass() {
return std::make_unique<DeviceTransformGPUPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_nnapi.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_nnapi.cc
index 39a1170..9daf905 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_nnapi.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/device_transform_nnapi.cc
@@ -42,7 +42,7 @@
struct DeviceTransformNNAPIPass
: public mlir::PassWrapper<DeviceTransformNNAPIPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DeviceTransformNNAPIPass)
llvm::StringRef getArgument() const final {
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc
index cb02a52..c15bf2d 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/fold_constants_to_subgraph.cc
@@ -40,7 +40,7 @@
namespace tac {
namespace {
-// This pass is used to fold tfl.const ops to each subgraph (FuncOp):
+// This pass is used to fold tfl.const ops to each subgraph (func::FuncOp):
// See the example below:
//
// In main:
@@ -86,7 +86,7 @@
};
void CopyConstantIntoFunc(int argument_index, Operation* const_op,
- FuncOp func) {
+ func::FuncOp func) {
assert((llvm::isa<TFL::ConstOp, TFL::QConstOp>(const_op)) &&
"Expect QConst or Const op.");
OpBuilder builder(func.getBody());
@@ -122,7 +122,7 @@
void FoldConstantsToSubgraphPass::runOnOperation() {
auto module = getOperation();
- for (auto fn : module.getOps<FuncOp>()) {
+ for (auto fn : module.getOps<func::FuncOp>()) {
fn.walk([&](Operation* op) {
if (!llvm::isa<TFL::ConstOp, TFL::QConstOp>(op)) return;
@@ -151,7 +151,7 @@
}
// Copy the const into the consumer func and replace their usages.
- FuncOp func = module.lookupSymbol<FuncOp>(function_name);
+ func::FuncOp func = module.lookupSymbol<func::FuncOp>(function_name);
CopyConstantIntoFunc(argument_index, op, func);
}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
index 33b7476..f887ca9 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/get_alternative_subgraph.cc
@@ -115,25 +115,27 @@
// transform/optimize for those devices.
// This will only happen if the whole subgraph can be supported by the target
// or can be supported after some transformations.
- void GetAlternativeGraphForFunc(ArrayRef<std::string> devices, FuncOp func,
- ModuleOp module, OpBuilder* builder);
+ void GetAlternativeGraphForFunc(ArrayRef<std::string> devices,
+ func::FuncOp func, ModuleOp module,
+ OpBuilder* builder);
// If all ops in the func op is able to be represented in the hardware, we
// will return true, else will be false.
// This is basically all or nothing.
- bool IsAllSupportedbySpec(FuncOp func,
+ bool IsAllSupportedbySpec(func::FuncOp func,
const InferenceDeviceType& inference_type);
// Given a func and a targeted device, we will try to clonse the func &
// transform/optimize for that device.
// It's simply clone the FuncOp and hardware specific transformations.
- FuncOp GetAlternativeViewForSpec(
- FuncOp func, const InferenceDeviceType& current_device_inference_type,
+ func::FuncOp GetAlternativeViewForSpec(
+ func::FuncOp func,
+ const InferenceDeviceType& current_device_inference_type,
const InferenceDeviceType& target_device_inference_type, ModuleOp module,
OpBuilder* builder);
// Apply any device-specific optimizations.
- void Optimize(FuncOp func, const std::string& hardware);
+ void Optimize(func::FuncOp func, const std::string& hardware);
ListOption<std::string> device_specs_flag_{
*this, "device-specs",
@@ -143,7 +145,7 @@
};
void AlternativeSubgraphPass::GetAlternativeGraphForFunc(
- ArrayRef<std::string> devices, FuncOp func, ModuleOp module,
+ ArrayRef<std::string> devices, func::FuncOp func, ModuleOp module,
OpBuilder* builder) {
auto current_device = GetTargetAnnotation(func);
if (current_device->empty()) {
@@ -171,7 +173,7 @@
for (const auto& device_inference_type : all_inference_device_type) {
if (device_inference_type != current_device_type) {
- FuncOp cloned_func = GetAlternativeViewForSpec(
+ func::FuncOp cloned_func = GetAlternativeViewForSpec(
func, current_device_type, device_inference_type, module, builder);
// If we found unsupported ops, we will just go ahead and remove this
// function.
@@ -193,12 +195,12 @@
}
bool AlternativeSubgraphPass::IsAllSupportedbySpec(
- FuncOp func, const InferenceDeviceType& device_inference_type) {
+ func::FuncOp func, const InferenceDeviceType& device_inference_type) {
bool found_unsupported = false;
func.walk([&](Operation* op) {
if (IsNonConstOp(op) && !IsTerminatorOp(op) &&
NotTFLQuantDequantizeOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallOpInterface>(op) &&
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallOpInterface>(op) &&
!IsSupported(op, device_inference_type.hardware)) {
found_unsupported = true;
}
@@ -206,7 +208,7 @@
return !found_unsupported;
}
-void AlternativeSubgraphPass::Optimize(FuncOp func,
+void AlternativeSubgraphPass::Optimize(func::FuncOp func,
const std::string& hardware) {
auto* ctx = &getContext();
RewritePatternSet patterns = GetHardwareRewritePatterns(ctx, hardware);
@@ -216,11 +218,11 @@
// Get the alternative view of the func for the given device_inference_type.
// It's possible the transformed func can still contain unsupported ops for the
// given device_inference_type.
-FuncOp AlternativeSubgraphPass::GetAlternativeViewForSpec(
- FuncOp func, const InferenceDeviceType& current_device_inference_type,
+func::FuncOp AlternativeSubgraphPass::GetAlternativeViewForSpec(
+ func::FuncOp func, const InferenceDeviceType& current_device_inference_type,
const InferenceDeviceType& target_device_inference_type, ModuleOp module,
OpBuilder* builder) {
- FuncOp cloned_func = func.clone();
+ func::FuncOp cloned_func = func.clone();
cloned_func.setPrivate();
auto interface_name = GetInterFaceName(func);
if (!interface_name.hasValue()) {
@@ -252,7 +254,7 @@
// Set device for each op.
cloned_func.walk([&](Operation* op) {
if (IsNonConstOp(op) && !IsTerminatorOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallableOpInterface>(op)) {
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallableOpInterface>(op)) {
op->setAttr(kDevice, builder->getStringAttr(
target_device_inference_type.hardware));
op->setAttr(kInferenceType,
@@ -280,9 +282,9 @@
signalPassFailure();
}
- SmallVector<FuncOp, 25> funcs_to_be_processed;
+ SmallVector<func::FuncOp, 25> funcs_to_be_processed;
// We only process if func has device annotations.
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
auto device_attr = func->getAttrOfType<StringAttr>(kDevice);
if (device_attr != nullptr) funcs_to_be_processed.push_back(func);
}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/passes.h b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/passes.h
index ebf3d62..201ce16 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/passes.h
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/passes.h
@@ -30,11 +30,11 @@
// Create an instance of the TargetAnnotationPass.
// TODO(b/177376459): Remove in favor of the one below.
-std::unique_ptr<OperationPass<FuncOp>> CreateTargetAnnotationPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTargetAnnotationPass(
llvm::ArrayRef<std::string> device_specs);
// Create and instance of TargetAnnotationPass.
-std::unique_ptr<OperationPass<FuncOp>> CreateTargetAnnotationPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTargetAnnotationPass(
const TacModule* module);
// Create an instance of the RaiseTargetSubgraphsPass.
@@ -51,10 +51,10 @@
std::unique_ptr<OperationPass<ModuleOp>> CreatePickSubgraphsPass();
// Create an instance of DeviceTransformGPUPass.
-std::unique_ptr<OperationPass<FuncOp>> CreateDeviceTransformGPUPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDeviceTransformGPUPass();
// Create an instance of GetOpCostPass.
-std::unique_ptr<OperationPass<FuncOp>> CreateGetOpCostPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateGetOpCostPass();
// Create an instance of FoldConstantsToSubgraphPass.
std::unique_ptr<OperationPass<ModuleOp>> CreateFoldConstantsToSubgraphPass(
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc
index eb93000..15b1322 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/pick_subgraphs.cc
@@ -99,7 +99,7 @@
// available_choces can be viewed as "real implementation" assosicated with
// the hardware.
- std::unordered_map<InferenceDeviceType, FuncOp,
+ std::unordered_map<InferenceDeviceType, func::FuncOp,
InferenceDeviceType::inference_device_type_hash>
available_choices;
@@ -135,23 +135,25 @@
}
void runOnOperation() override;
- std::unordered_map<std::string, std::vector<FuncOp>> CollectSubgraphFuncs(
- ModuleOp module);
+ std::unordered_map<std::string, std::vector<func::FuncOp>>
+ CollectSubgraphFuncs(ModuleOp module);
void BuildSubgraphs(
- FuncOp main_fn,
- const std::unordered_map<std::string, std::vector<FuncOp>>& func_impls,
+ func::FuncOp main_fn,
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
+ func_impls,
llvm::SetVector<Operation*>* unprocessed_subgraphs,
SmallVector<func::CallOp, 4>* output_subgraphs);
void ProcessSubgraph(func::CallOp current_graph,
llvm::SetVector<Operation*>* unprocessed_subgraphs);
- bool PickSubgraphs(llvm::SetVector<Operation*>* all_subgraphs,
- ArrayRef<func::CallOp> output_subgraphs,
- const std::unordered_map<std::string, std::vector<FuncOp>>&
- collected_impl_funcs,
- OpBuilder* builder);
+ bool PickSubgraphs(
+ llvm::SetVector<Operation*>* all_subgraphs,
+ ArrayRef<func::CallOp> output_subgraphs,
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
+ collected_impl_funcs,
+ OpBuilder* builder);
// Make the decisions based on the subgraphs.
// It may be the case we cannot decide the best scenarios for the user,
@@ -163,18 +165,18 @@
// TODO(renjieliu): we may change the vector to a map of hardware with
// corresponding ipml.
void RewireSubgraphs(
- const std::unordered_map<std::string, std::vector<FuncOp>>&
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
collected_impl_funcs,
OpBuilder* builder);
- float GetCostOrFail(FuncOp func);
+ float GetCostOrFail(func::FuncOp func);
llvm::DenseMap<Operation*, Subgraph> subgraphs_;
llvm::DenseMap<Operation*, InferenceDeviceType> decisions_;
};
-float PickSubgraphsPass::GetCostOrFail(FuncOp func) {
+float PickSubgraphsPass::GetCostOrFail(func::FuncOp func) {
float self_cost;
if (!GetCostOnOp(func, &self_cost)) {
func.emitError("we cannot find cost for this func");
@@ -238,7 +240,7 @@
// Find the best plan for the current subgraph.
for (const auto& kv : current_subgraph.available_choices) {
const auto& current_inference_device_type = kv.first;
- FuncOp impl_target = kv.second;
+ func::FuncOp impl_target = kv.second;
float self_compute_cost = GetCostOrFail(impl_target);
GraphView current_graph_view;
@@ -249,7 +251,7 @@
float input_total_cost = std::numeric_limits<float>::max();
for (const auto& input_kv : input_subgraph->available_choices) {
const auto& input_inference_device_type = input_kv.first;
- FuncOp input_impl_target = input_kv.second;
+ func::FuncOp input_impl_target = input_kv.second;
float input_compute_cost = GetCostOrFail(input_impl_target);
float transfer_cost =
@@ -281,8 +283,9 @@
}
void PickSubgraphsPass::BuildSubgraphs(
- FuncOp fn,
- const std::unordered_map<std::string, std::vector<FuncOp>>& func_impls,
+ func::FuncOp fn,
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
+ func_impls,
llvm::SetVector<Operation*>* unprocessed_subgraphs,
SmallVector<func::CallOp, 4>* output_subgraphs) {
llvm::DenseSet<Operation*> returned_call_op_set;
@@ -343,16 +346,17 @@
}
// Collect all the subgraphs (and their alternatives) in the module.
-std::unordered_map<std::string, std::vector<FuncOp>>
+std::unordered_map<std::string, std::vector<func::FuncOp>>
PickSubgraphsPass::CollectSubgraphFuncs(ModuleOp module) {
- std::unordered_map<std::string, std::vector<FuncOp>> func_impls;
- for (auto func : module.getOps<FuncOp>()) {
+ std::unordered_map<std::string, std::vector<func::FuncOp>> func_impls;
+ for (auto func : module.getOps<func::FuncOp>()) {
auto interface_name = GetInterFaceName(func);
if (interface_name.hasValue()) {
auto impls_iter = func_impls.find(interface_name.getValue());
if (impls_iter == func_impls.end())
impls_iter =
- func_impls.emplace(interface_name.getValue(), std::vector<FuncOp>())
+ func_impls
+ .emplace(interface_name.getValue(), std::vector<func::FuncOp>())
.first;
impls_iter->second.push_back(func);
}
@@ -411,7 +415,7 @@
// This rewire subgraph is essentially "hook" the call op with the "best" choice
// (subgraph).
void PickSubgraphsPass::RewireSubgraphs(
- const std::unordered_map<std::string, std::vector<FuncOp>>&
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
collected_impl_funcs,
OpBuilder* builder) {
for (auto& kv : decisions_) {
@@ -454,7 +458,7 @@
bool PickSubgraphsPass::PickSubgraphs(
llvm::SetVector<Operation*>* all_subgraphs,
ArrayRef<func::CallOp> output_subgraphs,
- const std::unordered_map<std::string, std::vector<FuncOp>>&
+ const std::unordered_map<std::string, std::vector<func::FuncOp>>&
collected_impl_funcs,
OpBuilder* builder) {
// Process those collected unprocessed subgraphs.
@@ -495,12 +499,12 @@
// Also collect the output subgraphs.
// Output subgraphs are essentially those subgraphs pointed by the return
// op.
- const std::unordered_map<std::string, std::vector<FuncOp>> func_impls =
+ const std::unordered_map<std::string, std::vector<func::FuncOp>> func_impls =
CollectSubgraphFuncs(module);
llvm::SetVector<Operation*> unprocessed_subgraphs;
SmallVector<func::CallOp, 4> output_subgraphs;
- for (auto fn : module.getOps<FuncOp>()) {
+ for (auto fn : module.getOps<func::FuncOp>()) {
BuildSubgraphs(fn, func_impls, &unprocessed_subgraphs, &output_subgraphs);
}
OpBuilder builder(module);
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc
index 335a10b..b0568ed 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/raise_target_subgraphs.cc
@@ -112,10 +112,10 @@
void ExtractSubgraphToFunc(Subgraph* subgraph, OpBuilder* builder,
ModuleOp module);
- FuncOp BuildFuncOp(Subgraph* subgraph, OpBuilder* builder, ModuleOp module_op,
- SmallVector<Value, 4>* inputs,
- SmallVector<Value, 4>* outputs,
- InferenceDeviceType* inference_device_type);
+ func::FuncOp BuildFuncOp(Subgraph* subgraph, OpBuilder* builder,
+ ModuleOp module_op, SmallVector<Value, 4>* inputs,
+ SmallVector<Value, 4>* outputs,
+ InferenceDeviceType* inference_device_type);
int subgraph_count_ = 0;
};
@@ -192,7 +192,7 @@
GetInferenceString(subgrpah.inference_device_type.inference_type));
}
-FuncOp RaiseTargetSubgraphsPass::BuildFuncOp(
+func::FuncOp RaiseTargetSubgraphsPass::BuildFuncOp(
Subgraph* subgraph, OpBuilder* builder, ModuleOp module_op,
SmallVector<Value, 4>* inputs, SmallVector<Value, 4>* outputs,
InferenceDeviceType* inference_device_type) {
@@ -225,8 +225,9 @@
subgraph->inference_device_type.inference_type))));
*inference_device_type = subgraph->inference_device_type;
- FuncOp new_func = FuncOp::create(builder->getUnknownLoc(), function_name,
- function_type, llvm::makeArrayRef(attrs));
+ func::FuncOp new_func =
+ func::FuncOp::create(builder->getUnknownLoc(), function_name,
+ function_type, llvm::makeArrayRef(attrs));
new_func.setPrivate();
new_func.addEntryBlock();
@@ -287,8 +288,8 @@
SmallVector<Value, 4> func_outputs;
InferenceDeviceType inference_device_type;
- FuncOp func = BuildFuncOp(subgraph, builder, module, &func_inputs,
- &func_outputs, &inference_device_type);
+ func::FuncOp func = BuildFuncOp(subgraph, builder, module, &func_inputs,
+ &func_outputs, &inference_device_type);
// We just use the location of the last ops in the subgraph as the location
// for the call_op.
@@ -342,7 +343,7 @@
int current_subgraph_id = -1;
for (auto& op : *block) {
if (IsNonConstQuantizeOp(&op) && !IsTerminatorOp(&op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallOpInterface>(op)) {
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallOpInterface>(op)) {
auto current_device_type = GetInferenceDeviceTypeForOp(&op);
if (!(current_device_type.hasValue() &&
current_device_type == previous_device_type)) {
@@ -366,7 +367,7 @@
void RaiseTargetSubgraphsPass::runOnOperation() {
auto module = getOperation();
- SmallVector<FuncOp, 16> funcs(module.getOps<FuncOp>());
+ SmallVector<func::FuncOp, 16> funcs(module.getOps<func::FuncOp>());
for (auto func : funcs) {
for (auto& block : func) {
auto builder = OpBuilder::atBlockBegin(&block);
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/tac_pass.h b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/tac_pass.h
index 2ce4cf2..de6017e 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/tac_pass.h
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/tac_pass.h
@@ -55,9 +55,9 @@
// When adding new Pass to TAC, users should use this class as the base class
// as it provides access to the TAC module.
template <typename T>
-class TacFunctionPass : public TacPass<FuncOp> {
+class TacFunctionPass : public TacPass<func::FuncOp> {
public:
- using TacPass<FuncOp>::TacPass;
+ using TacPass<func::FuncOp>::TacPass;
~TacFunctionPass() override {}
diff --git a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/target_annotation.cc b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/target_annotation.cc
index fe657b8..009f27d 100644
--- a/tensorflow/compiler/mlir/lite/experimental/tac/transforms/target_annotation.cc
+++ b/tensorflow/compiler/mlir/lite/experimental/tac/transforms/target_annotation.cc
@@ -111,7 +111,7 @@
// default to CPU
if (!device_is_set) {
if (IsNonConstOp(op) && !IsTerminatorOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallableOpInterface>(op)) {
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallableOpInterface>(op)) {
SetAnnotation(op, kDevice, "CPU", builder);
device_is_set = true;
}
@@ -129,7 +129,7 @@
// We only care about TFL dialect.
if (IsNonConstOp(op) && NotTFLQuantDequantizeOp(op) &&
!IsTerminatorOp(op) &&
- !llvm::isa<func::ReturnOp, FuncOp, CallOpInterface>(op)) {
+ !llvm::isa<func::ReturnOp, func::FuncOp, CallOpInterface>(op)) {
SetTargetAnnotation(op, device_specs_flag_, &builder);
}
});
@@ -137,12 +137,12 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTargetAnnotationPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTargetAnnotationPass(
llvm::ArrayRef<std::string> device_specs) {
return std::make_unique<TargetAnnotationPass>(device_specs);
}
-std::unique_ptr<OperationPass<FuncOp>> CreateTargetAnnotationPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTargetAnnotationPass(
const TacModule* module) {
return std::make_unique<TargetAnnotationPass>(module);
}
diff --git a/tensorflow/compiler/mlir/lite/flatbuffer_import.cc b/tensorflow/compiler/mlir/lite/flatbuffer_import.cc
index 6e6cd86..179f202 100644
--- a/tensorflow/compiler/mlir/lite/flatbuffer_import.cc
+++ b/tensorflow/compiler/mlir/lite/flatbuffer_import.cc
@@ -1418,7 +1418,7 @@
// Adds a CallOp in `region` to call the `func` and returns the results of
// CallOp.
-void AddCallOpInWhileOpRegion(mlir::Region& region, mlir::FuncOp func) {
+void AddCallOpInWhileOpRegion(mlir::Region& region, mlir::func::FuncOp func) {
OpBuilder op_builder{region};
region.push_back(new mlir::Block());
Location loc = region.getLoc();
@@ -1436,11 +1436,11 @@
void AddRegionsForTflWhileOp(mlir::ModuleOp module) {
mlir::SymbolTable symbol_table(module);
module.walk([&](mlir::TFL::WhileOp while_op) {
-    auto cond = symbol_table.lookup<mlir::FuncOp>(
+    auto cond = symbol_table.lookup<mlir::func::FuncOp>(
while_op->getAttr("cond").cast<mlir::FlatSymbolRefAttr>().getValue());
AddCallOpInWhileOpRegion(while_op.cond(), cond);
while_op->removeAttr("cond");
-    auto body = symbol_table.lookup<mlir::FuncOp>(
+    auto body = symbol_table.lookup<mlir::func::FuncOp>(
while_op->getAttr("body").cast<mlir::FlatSymbolRefAttr>().getValue());
AddCallOpInWhileOpRegion(while_op.body(), body);
while_op->removeAttr("body");
diff --git a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc
index 74c566f..9fa1295 100644
--- a/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc
@@ -56,7 +56,7 @@
namespace {
class ImportQuantStatsPass
- : public PassWrapper<ImportQuantStatsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<ImportQuantStatsPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ImportQuantStatsPass)
@@ -186,7 +186,7 @@
}
void ImportQuantStatsPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
OpBuilder builder(func);
func.walk([&](Operation *op) {
@@ -211,7 +211,7 @@
}
// Creates an instance of the default quant parameters pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateImportQuantStatsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateImportQuantStatsPass(
OperationToName op_to_name, const std::string &stats_str) {
auto pass = absl::make_unique<ImportQuantStatsPass>(op_to_name);
if (pass->ParseQuantStats(stats_str)) return nullptr;
@@ -221,7 +221,7 @@
// Creates an instance pass to import quantization stats to the operations in
// the function. A custom method to get the name from the op is used because
// different dialect ops might have different ways to assign the name.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateImportQuantStatsPassForTFControlDialect(const std::string &stats_str) {
auto get_name_func = [](Operation *op) {
Location loc = tensorflow::GetLocationWithoutOpType(op->getLoc());
diff --git a/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc b/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc
index 75f7edc..7280c51 100644
--- a/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.cc
@@ -24,7 +24,7 @@
namespace mlir {
namespace TFL {
-void ConvertTFLQuantOpsToMlirQuantOps(FuncOp func) {
+void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func) {
OpBuilder b(func);
func.walk([&](Operation* op) {
b.setInsertionPoint(op);
@@ -54,7 +54,7 @@
});
}
-void ConvertMlirQuantOpsToTFLQuantOps(FuncOp func) {
+void ConvertMlirQuantOpsToTFLQuantOps(func::FuncOp func) {
OpBuilder b(func);
func.walk([&](Operation* op) {
b.setInsertionPoint(op);
diff --git a/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h b/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
index be9da08..94742d1 100644
--- a/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
+++ b/tensorflow/compiler/mlir/lite/quantization/lite/tfl_to_std.h
@@ -23,20 +23,21 @@
// Converts all the tfl.quantize/tfl.dequantize ops to the ops in the mlir.quant
// dialect ones in the function.
-void ConvertTFLQuantOpsToMlirQuantOps(FuncOp func);
+void ConvertTFLQuantOpsToMlirQuantOps(func::FuncOp func);
// Converts all the mlir.quant dialect ops to the tfl.quantize/tfl.dequantize
// ops in the function.
-void ConvertMlirQuantOpsToTFLQuantOps(FuncOp func);
+void ConvertMlirQuantOpsToTFLQuantOps(func::FuncOp func);
// A helper class to convert target function to another representation using
// `ConvertForward` function during construction and convert target function
// back to the original representation using `ConvertBackward` function during
// deconstruction.
-template <void (*ConvertForward)(FuncOp), void (*ConvertBackward)(FuncOp)>
+template <void (*ConvertForward)(func::FuncOp),
+ void (*ConvertBackward)(func::FuncOp)>
class ScopedOpsConverter {
public:
- explicit ScopedOpsConverter(FuncOp func) : func_(func) {
+ explicit ScopedOpsConverter(func::FuncOp func) : func_(func) {
ConvertForward(func_);
}
@@ -48,7 +49,7 @@
~ScopedOpsConverter() { ConvertBackward(func_); }
private:
- FuncOp func_;
+ func::FuncOp func_;
};
using ScopedTFLQuantOpsToMlirQuantOpsConverter =
diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_context.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
index 980743b..60cb9fb 100644
--- a/tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/quantization_context.cc
@@ -43,7 +43,7 @@
namespace mlir {
namespace quant {
-QuantizeContext::QuantizeContext(FuncOp func, const DeviceTarget &spec)
+QuantizeContext::QuantizeContext(func::FuncOp func, const DeviceTarget &spec)
: func_(func), target_spec_(spec) {
llvm::DenseMap<Value, int> value_to_state;
func.walk([&](quant::QuantizeRegionOp op) {
diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_context.h b/tensorflow/compiler/mlir/lite/quantization/quantization_context.h
index 5e7c93c..8d7cf26 100644
--- a/tensorflow/compiler/mlir/lite/quantization/quantization_context.h
+++ b/tensorflow/compiler/mlir/lite/quantization/quantization_context.h
@@ -64,7 +64,7 @@
// This class manages all the intermediate quantization states.
class QuantizeContext {
public:
- QuantizeContext(FuncOp func, const DeviceTarget &spec);
+ QuantizeContext(func::FuncOp func, const DeviceTarget &spec);
// Returns all the quant region ops.
std::vector<quant::QuantizeRegionOp> GetAllOps();
@@ -226,7 +226,7 @@
llvm::DenseMap<OpValue, int> result_states_;
};
- FuncOp func_;
+ func::FuncOp func_;
DeviceTarget target_spec_;
diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc
index 8c863d2..fc53469 100644
--- a/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/quantization_driver.cc
@@ -102,7 +102,7 @@
//
class QuantizationDriver {
public:
- explicit QuantizationDriver(FuncOp fn, bool is_signed,
+ explicit QuantizationDriver(func::FuncOp fn, bool is_signed,
bool disable_per_channel,
OpQuantSpecGetter op_quant_spec_getter,
OpQuantScaleSpecGetter op_quant_scale_spec_getter,
@@ -341,7 +341,7 @@
}
if (current_op == op) llvm::dbgs() << "===>>>";
llvm::dbgs() << op->getName() << " : (";
- if (llvm::isa<FuncOp>(op)) {
+ if (llvm::isa<func::FuncOp>(op)) {
for (auto &arg : fn_.getArguments()) {
if (auto params = GetArgQuantState(arg).params) {
params.print(llvm::dbgs());
@@ -375,7 +375,7 @@
});
}
- FuncOp fn_;
+ func::FuncOp fn_;
OpBuilder builder_;
bool is_signed_;
bool disable_per_channel_;
diff --git a/tensorflow/compiler/mlir/lite/quantization/quantization_passes.h b/tensorflow/compiler/mlir/lite/quantization/quantization_passes.h
index 04b68eb..b843e62 100644
--- a/tensorflow/compiler/mlir/lite/quantization/quantization_passes.h
+++ b/tensorflow/compiler/mlir/lite/quantization/quantization_passes.h
@@ -28,13 +28,13 @@
// Creates an instance pass to import quantization stats to the operations in
// the function. A custom method to get the name from the op is used because
// different dialect ops might have different ways to assign the name.
-std::unique_ptr<OperationPass<FuncOp>> CreateImportQuantStatsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateImportQuantStatsPass(
OperationToName op_to_name, const std::string& stats_str);
// Creates an instance pass to import quantization stats to the operations in
// the function. A custom method to get the name from the op is used because
// different dialect ops might have different ways to assign the name.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateImportQuantStatsPassForTFControlDialect(const std::string& stats_str);
} // namespace quant
diff --git a/tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc b/tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
index 3fa232a..f4fb2a3 100644
--- a/tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/tensorflow/fallback_to_flex_ops.cc
@@ -171,7 +171,7 @@
// Fallbacks ops that are not supported by TF Quantization to TFLite Flex ops.
class FallbackToFlexOps
- : public PassWrapper<FallbackToFlexOps, OperationPass<FuncOp>> {
+ : public PassWrapper<FallbackToFlexOps, OperationPass<func::FuncOp>> {
public:
FallbackToFlexOps() {}
explicit FallbackToFlexOps(const std::string &mode) { mode_ = mode; }
@@ -273,7 +273,7 @@
void FallbackToFlexOps::runOnOperation() {
if (mode_.empty()) return;
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
MLIRContext *ctx = &getContext();
// Convert binary ops to BiasAdd ops if possible.
@@ -295,7 +295,7 @@
}
} // namespace internal
-std::unique_ptr<OperationPass<FuncOp>> CreateFallbackToFlexOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateFallbackToFlexOpsPass(
const std::string &mode) {
return std::make_unique<internal::FallbackToFlexOps>(mode);
}
diff --git a/tensorflow/compiler/mlir/lite/quantization/tensorflow/passes.h b/tensorflow/compiler/mlir/lite/quantization/tensorflow/passes.h
index c0217e5..f7bde2f 100644
--- a/tensorflow/compiler/mlir/lite/quantization/tensorflow/passes.h
+++ b/tensorflow/compiler/mlir/lite/quantization/tensorflow/passes.h
@@ -26,10 +26,10 @@
namespace TF {
// Legalize the tf ops to the quant ops, so the quantization passes can work.
-std::unique_ptr<OperationPass<FuncOp>> CreateLegalizeTFToQuantPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLegalizeTFToQuantPass();
// Fallbacks ops that are not supported by TF Quantization to TFLite Flex ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateFallbackToFlexOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateFallbackToFlexOpsPass(
const std::string &mode = "DEFAULT");
} // namespace TF
diff --git a/tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc b/tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
index 66a5b7d..2944a24 100644
--- a/tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
+++ b/tensorflow/compiler/mlir/lite/quantization/tensorflow/tf_to_quant.cc
@@ -28,7 +28,7 @@
// Legalize TF quantization emulation ops to that in Quant ops dialect.
struct LegalizeTFToQuant
- : public PassWrapper<LegalizeTFToQuant, OperationPass<FuncOp>> {
+ : public PassWrapper<LegalizeTFToQuant, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LegalizeTFToQuant)
explicit LegalizeTFToQuant() = default;
@@ -159,7 +159,7 @@
} // namespace
// Creates an instance of the TensorFlow dialect to QuantOps dialect pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateLegalizeTFToQuantPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLegalizeTFToQuantPass() {
return std::make_unique<LegalizeTFToQuant>();
}
diff --git a/tensorflow/compiler/mlir/lite/tf_tfl_passes.cc b/tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
index be8becb..c5dd5d1 100644
--- a/tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
+++ b/tensorflow/compiler/mlir/lite/tf_tfl_passes.cc
@@ -39,7 +39,7 @@
namespace mlir {
/// Create a pass to convert from the TFExecutor to the TF control dialect.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateTFExecutorToControlDialectConversion();
} // namespace mlir
@@ -402,7 +402,7 @@
// This does not yet include quantization passes.
void CreateTFLStandardPipeline(OpPassManager& pm,
const StandardPipelineOptions& options) {
- OpPassManager& func_pm = pm.nest<FuncOp>();
+ OpPassManager& func_pm = pm.nest<func::FuncOp>();
// tf_executor dialect passes - Cleaning up the IR.
mlir::TF::StandardPipelineOptions standard_pipeline_options;
diff --git a/tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc b/tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
index 61f6b4c..f5bb372 100644
--- a/tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/decompose_hybrid_quantization.cc
@@ -38,7 +38,7 @@
class DecomposeHybridQuantizationPass
: public PassWrapper<DecomposeHybridQuantizationPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DecomposeHybridQuantizationPass)
@@ -157,7 +157,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateDecomposeHybridQuantizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateDecomposeHybridQuantizationPass() {
return std::make_unique<DecomposeHybridQuantizationPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc b/tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
index b5209cd..6921de0 100644
--- a/tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/default_quant_params.cc
@@ -45,7 +45,7 @@
namespace {
class DefaultQuantParamsPass
- : public PassWrapper<DefaultQuantParamsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<DefaultQuantParamsPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DefaultQuantParamsPass)
@@ -104,7 +104,7 @@
} // namespace
void DefaultQuantParamsPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
OpBuilder builder(func);
std::vector<Value> activation_values;
@@ -236,7 +236,7 @@
}
// Creates an instance of the default quant parameters pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateDefaultQuantParamsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDefaultQuantParamsPass(
double default_min, double default_max, bool is_signed) {
return absl::make_unique<DefaultQuantParamsPass>(default_min, default_max,
is_signed);
diff --git a/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc b/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
index 918edd4..3621894 100644
--- a/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/dense_to_sparse.cc
@@ -290,7 +290,7 @@
// 4.2. If no matching block config is found, encode the weight with random
// sparsity, and add Densify() op to fall back to dense execution.
struct DenseToSparse
- : public PassWrapper<DenseToSparse, OperationPass<FuncOp>> {
+ : public PassWrapper<DenseToSparse, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DenseToSparse)
void runOnOperation() override;
@@ -307,7 +307,7 @@
};
void DenseToSparse::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
OpBuilder builder(func);
func.walk([&](SparseOpInterface sparse_op) {
@@ -441,7 +441,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect DenseToSparse pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateDenseToSparsePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDenseToSparsePass() {
return absl::make_unique<DenseToSparse>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/dilated_conv.cc b/tensorflow/compiler/mlir/lite/transforms/dilated_conv.cc
index 6b9c34d..aa858a3 100644
--- a/tensorflow/compiler/mlir/lite/transforms/dilated_conv.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/dilated_conv.cc
@@ -21,7 +21,7 @@
namespace {
struct IdentifyDilatedConvPass
- : public PassWrapper<IdentifyDilatedConvPass, OperationPass<FuncOp>> {
+ : public PassWrapper<IdentifyDilatedConvPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(IdentifyDilatedConvPass)
void runOnOperation() override;
diff --git a/tensorflow/compiler/mlir/lite/transforms/get_arithmetic_count.cc b/tensorflow/compiler/mlir/lite/transforms/get_arithmetic_count.cc
index f108e19..f800311 100644
--- a/tensorflow/compiler/mlir/lite/transforms/get_arithmetic_count.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/get_arithmetic_count.cc
@@ -35,7 +35,7 @@
namespace {
struct GetArithmeticCountPass
- : public PassWrapper<GetArithmeticCountPass, OperationPass<FuncOp>> {
+ : public PassWrapper<GetArithmeticCountPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(GetArithmeticCountPass)
void runOnOperation() override;
@@ -67,7 +67,7 @@
/// Creates an instance of the TensorFlow Lite dialect GetArithmeticCount
/// pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateGetArithmeticCountPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateGetArithmeticCountPass() {
return std::make_unique<GetArithmeticCountPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/insert_call_once_op.cc b/tensorflow/compiler/mlir/lite/transforms/insert_call_once_op.cc
index db260bee..2d3caf9 100644
--- a/tensorflow/compiler/mlir/lite/transforms/insert_call_once_op.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/insert_call_once_op.cc
@@ -61,7 +61,7 @@
SymbolTable symbol_table(module);
for (auto sym_ref : session_init_op.initializers()) {
- FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
+ func::FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
sym_ref.cast<FlatSymbolRefAttr>().getValue());
if (!init_func_op) {
@@ -69,7 +69,7 @@
return signalPassFailure();
}
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
auto dict_attr =
func->getAttrOfType<mlir::DictionaryAttr>("tf.entry_function");
if (!dict_attr) continue;
diff --git a/tensorflow/compiler/mlir/lite/transforms/legalize_jax_random.cc b/tensorflow/compiler/mlir/lite/transforms/legalize_jax_random.cc
index 1d2d406..8466c79 100644
--- a/tensorflow/compiler/mlir/lite/transforms/legalize_jax_random.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/legalize_jax_random.cc
@@ -54,7 +54,7 @@
namespace {
struct LegalizeJaxRandomPass
- : public PassWrapper<LegalizeJaxRandomPass, OperationPass<FuncOp>> {
+ : public PassWrapper<LegalizeJaxRandomPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(LegalizeJaxRandomPass)
@@ -124,7 +124,7 @@
static PassRegistration<LegalizeJaxRandomPass> pass;
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateLegalizeJaxRandomPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLegalizeJaxRandomPass() {
return std::make_unique<LegalizeJaxRandomPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
index cac052a..d42faa1 100644
--- a/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/legalize_tf.cc
@@ -74,7 +74,7 @@
constexpr char kTfLiteInputIndices[] = "_tflite_input_indices";
// Legalize operations in functions.
-class LegalizeTF : public PassWrapper<LegalizeTF, OperationPass<FuncOp>> {
+class LegalizeTF : public PassWrapper<LegalizeTF, OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<quant::QuantizationDialect, TFL::TensorFlowLiteDialect>();
}
@@ -935,7 +935,7 @@
LegalizeUnidirectionalSequenceRnn>(context);
}
-bool applyPatterns(FuncOp func, ConversionTarget& target,
+bool applyPatterns(func::FuncOp func, ConversionTarget& target,
FrozenRewritePatternSet& frozenPatterns) {
// Keep trying to convert.
// TODO(karimnosseir): This is similar to what apply greedy patterns does.
@@ -1018,7 +1018,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect LegalizeTF pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateLegalizeTFPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLegalizeTFPass(
bool run_tfl_runtime_verification, bool preserve_assert_op) {
return std::make_unique<LegalizeTF>(run_tfl_runtime_verification,
preserve_assert_op);
diff --git a/tensorflow/compiler/mlir/lite/transforms/legalize_tf_while.cc b/tensorflow/compiler/mlir/lite/transforms/legalize_tf_while.cc
index 8686fa9..d703237 100644
--- a/tensorflow/compiler/mlir/lite/transforms/legalize_tf_while.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/legalize_tf_while.cc
@@ -49,17 +49,17 @@
return "Legalize from TensorFlow While to TensorFlow Lite While";
}
- void RunOnFunction(FuncOp func);
+ void RunOnFunction(func::FuncOp func);
void runOnOperation() override {
- for (auto op : getOperation().getOps<FuncOp>()) RunOnFunction(op);
+ for (auto op : getOperation().getOps<func::FuncOp>()) RunOnFunction(op);
}
};
} // namespace
// Inserts call to the given function into the 'region'.
-void CreateRegionWithCall(FuncOp func, Region& region, Location loc) {
+void CreateRegionWithCall(func::FuncOp func, Region& region, Location loc) {
OpBuilder builder(region);
auto block = builder.createBlock(®ion);
SmallVector<Value, 4> new_operands;
@@ -85,7 +85,7 @@
op->erase();
}
-void LegalizeWhile::RunOnFunction(FuncOp func) {
+void LegalizeWhile::RunOnFunction(func::FuncOp func) {
// Convert all TF WhileOps inside the function body to TFL While ops.
func.getBody().walk([](TF::WhileOp while_op) { RunOnWhile(while_op); });
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc b/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc
index 96f862a..46b9e8b 100644
--- a/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc
@@ -226,7 +226,8 @@
};
class LiftTfliteFlexOpsPass
- : public mlir::PassWrapper<LiftTfliteFlexOpsPass, OperationPass<FuncOp>> {
+ : public mlir::PassWrapper<LiftTfliteFlexOpsPass,
+ OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry.insert<TF::TensorFlowDialect>();
}
@@ -243,7 +244,7 @@
void runOnOperation() override {
MLIRContext* context = &getContext();
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
mlir::RewritePatternSet patterns(context);
patterns.add<LiftFlexCustomOp>(context);
@@ -256,7 +257,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateLiftTfliteFlexOpsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLiftTfliteFlexOpsPass() {
return std::make_unique<LiftTfliteFlexOpsPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.h b/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.h
index e4b500b..56d5c80 100644
--- a/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.h
+++ b/tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.h
@@ -24,7 +24,7 @@
// Creates an instance of the lift TFLite Flex ops pass that lifts TFLite Flex
// ops into TF dialect operations.
-std::unique_ptr<OperationPass<FuncOp>> CreateLiftTfliteFlexOpsPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLiftTfliteFlexOpsPass();
} // namespace TFL
} // namespace mlir
diff --git a/tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc b/tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
index 14ce128..710c86f 100644
--- a/tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/lower_static_tensor_list.cc
@@ -184,7 +184,7 @@
// Gets the index of tensorlist arguments which size might get changed by the
// function.
llvm::SmallSet<int, 4> GetResizedTensorListIndexes(
- FuncOp func, const llvm::SmallSet<int, 4> &tensor_list_args) {
+ func::FuncOp func, const llvm::SmallSet<int, 4> &tensor_list_args) {
// `indexes` stores the argument index of tensorlists which size may get
// updated in the function.
llvm::SmallSet<int, 4> indexes;
@@ -786,22 +786,24 @@
// Create functions in a higher scope before restoring the insertion point.
// Additionally, create the SymbolTable before further modifying the module.
auto original_point = rewriter.saveInsertionPoint();
- rewriter.setInsertionPointAfter(op->getParentOfType<FuncOp>());
+ rewriter.setInsertionPointAfter(op->getParentOfType<func::FuncOp>());
SymbolTable manager(op->getParentOfType<ModuleOp>());
// Constructs `then_branch`, which is executed when `if_cond` evaluates to
// true.
- auto then_branch_op = rewriter.create<FuncOp>(loc, "cond_true", func_type);
+ auto then_branch_op =
+ rewriter.create<func::FuncOp>(loc, "cond_true", func_type);
CreateCondTrueBranch(op, shape_dtype, result_type, then_branch_op,
&rewriter);
- then_branch_op.setVisibility(FuncOp::Visibility::Private);
+ then_branch_op.setVisibility(func::FuncOp::Visibility::Private);
// Constructs `else_branch`, which is executed when `if_cond` evaluates to
// false.
- auto else_branch_op = rewriter.create<FuncOp>(loc, "cond_false", func_type);
+ auto else_branch_op =
+ rewriter.create<func::FuncOp>(loc, "cond_false", func_type);
CreateCondFalseBranch(loc, shape_dtype, result_type, else_branch_op,
&rewriter);
- else_branch_op.setVisibility(FuncOp::Visibility::Private);
+ else_branch_op.setVisibility(func::FuncOp::Visibility::Private);
// Inserts the two blocks' names into the symbol table held by the module.
// Using SymbolTable will ensure that the inserted symbol names are
@@ -828,7 +830,7 @@
// Create a new tensorlist of size 'size - input_size' and concat it
// with the input tensorlist.
void CreateCondTrueBranch(TF::TensorListResizeOp resize_op, Type shape_dtype,
- Type result_type, FuncOp branch_func,
+ Type result_type, func::FuncOp branch_func,
ConversionPatternRewriter *rewriter) const {
auto guard = OpBuilder::InsertionGuard(*rewriter);
auto inputs = branch_func.getFunctionType().getInputs();
@@ -866,7 +868,7 @@
}
void CreateCondFalseBranch(Location loc, Type shape_dtype, Type result_type,
- FuncOp branch_func,
+ func::FuncOp branch_func,
ConversionPatternRewriter *rewriter) const {
// When the input tensorlist's size is larger or equal than the requested
// size, the else branch is executed.
@@ -1138,7 +1140,7 @@
// Returns a set of integers that correspond to the tensorlist arguments in
// the function.
-llvm::SmallSet<int, 4> GetTensorListArgumentsIndex(FuncOp func) {
+llvm::SmallSet<int, 4> GetTensorListArgumentsIndex(func::FuncOp func) {
llvm::SmallSet<int, 4> set;
for (const auto &arg_and_idx : llvm::enumerate(func.getArguments())) {
if (IsTensorListType(arg_and_idx.value().getType(), arg_and_idx.value())) {
@@ -1150,7 +1152,7 @@
// Returns a set of integers that correspond to the tensorlist results in the
// function.
-llvm::SmallSet<int, 4> GetTensorListResultsIndex(FuncOp func) {
+llvm::SmallSet<int, 4> GetTensorListResultsIndex(func::FuncOp func) {
llvm::SmallSet<int, 4> set;
for (const auto &result_and_idx :
@@ -1210,7 +1212,7 @@
// Updates the specified function's type and region signature.
void UpdateFunctionAndRegionType(ConversionPatternRewriter &rewriter,
- FuncOp func,
+ func::FuncOp func,
llvm::ArrayRef<Type> updated_argument_types,
llvm::ArrayRef<Type> updated_result_types) {
// Change `func`'s argument type to `unranked_argument_types`. If its
@@ -1237,7 +1239,7 @@
const llvm::SmallSet<int, 4> &tensor_list_args,
const llvm::SmallSet<int, 4> &resized_tensor_lists) {
int func_index = 0;
- for (FuncOp func : {op.cond_function(), op.body_function()}) {
+ for (func::FuncOp func : {op.cond_function(), op.body_function()}) {
++func_index;
if (!func) continue;
@@ -1282,7 +1284,7 @@
const llvm::SmallSet<int, 4> &tensor_list_args,
const llvm::SmallSet<int, 4> &resized_tensor_lists,
llvm::ArrayRef<Type> updated_result_types) {
- for (FuncOp func : {op.else_function(), op.then_function()}) {
+ for (func::FuncOp func : {op.else_function(), op.then_function()}) {
if (!func) continue;
FunctionType func_type = func.getFunctionType();
@@ -1310,7 +1312,7 @@
// will let us konw which tensorlist result maps to which tensorlist in the
// arguments. Once we know this info it will help us decide the types of the
// result tensorlist based on the operand's of the `If` op.
-llvm::DenseMap<int, int> MapTensorListResultToArgument(FuncOp func) {
+llvm::DenseMap<int, int> MapTensorListResultToArgument(func::FuncOp func) {
// `map_fn` will trace upwards along the use-def chain of the ssa value. It
// starts from the last ssa value (returned by the function), and check its
// parent op iteratively. If the root ssa value appears in the function's
@@ -1532,7 +1534,7 @@
TF::TensorListResizeOp, TF::TensorListConcatV2Op>();
// TODO(hinsu): Use TFLite constant op for constants.
target.addLegalOp<arith::ConstantOp>();
- target.addLegalOp<FuncOp>();
+ target.addLegalOp<func::FuncOp>();
target.addDynamicallyLegalOp<func::ReturnOp>(is_legal);
target.addDynamicallyLegalOp<TF::YieldOp>(is_legal);
target.addLegalOp<TFL::CustomOp>();
diff --git a/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc b/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
index 7406270..744da46 100644
--- a/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/modify_io_nodes.cc
@@ -45,7 +45,7 @@
// to what are specified. The task was not just adding cast operations, but,
// instead, using tfl.quantize and tfl.dequantize ops to scale the tensors.
struct ModifyIONodesPass
- : public PassWrapper<ModifyIONodesPass, OperationPass<FuncOp>> {
+ : public PassWrapper<ModifyIONodesPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ModifyIONodesPass)
@@ -72,13 +72,13 @@
// Modifies the element types of entry block arguments to be user specified
// and returns the new argument types.
- LogicalResult ModifyInputNodes(FuncOp func,
+ LogicalResult ModifyInputNodes(func::FuncOp func,
llvm::SmallVectorImpl<Type>& new_input_types,
OpBuilder builder);
// Modifies the element types of entry block returns to be user specified
// and returns the new return types.
- LogicalResult ModifyOutputNodes(FuncOp func,
+ LogicalResult ModifyOutputNodes(func::FuncOp func,
llvm::SmallVectorImpl<Type>& new_output_types,
OpBuilder builder);
@@ -108,7 +108,7 @@
}
LogicalResult ModifyIONodesPass::ModifyInputNodes(
- FuncOp func, llvm::SmallVectorImpl<Type>& new_input_types,
+ func::FuncOp func, llvm::SmallVectorImpl<Type>& new_input_types,
OpBuilder builder) {
if (input_type.isa<FloatType>()) {
return success();
@@ -161,7 +161,7 @@
}
LogicalResult ModifyIONodesPass::ModifyOutputNodes(
- FuncOp func, llvm::SmallVectorImpl<Type>& new_output_types,
+ func::FuncOp func, llvm::SmallVectorImpl<Type>& new_output_types,
OpBuilder builder) {
Block& block = func.front();
auto* terminator = block.getTerminator();
@@ -245,7 +245,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite modify io nodes pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateModifyIONodesPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateModifyIONodesPass(
Type input_type, Type output_type) {
return std::make_unique<ModifyIONodesPass>(input_type, output_type);
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize.cc b/tensorflow/compiler/mlir/lite/transforms/optimize.cc
index d66ceb8..e51f6ea 100644
--- a/tensorflow/compiler/mlir/lite/transforms/optimize.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/optimize.cc
@@ -92,7 +92,8 @@
using ::llvm::cast;
// Optimize TFLite operations in functions.
-class OptimizePass : public PassWrapper<OptimizePass, OperationPass<FuncOp>> {
+class OptimizePass
+ : public PassWrapper<OptimizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OptimizePass)
@@ -1697,7 +1698,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect Optimize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateOptimizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizePass(
bool enable_canonicalization) {
return std::make_unique<OptimizePass>(enable_canonicalization);
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc b/tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc
index 1ef2006..a725813 100644
--- a/tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/optimize_functional_ops.cc
@@ -55,7 +55,7 @@
// op operands' types.
//
// Requires the function has exactly one block.
-void UpdateFuncType(FuncOp func) {
+void UpdateFuncType(func::FuncOp func) {
Operation* terminator = func.front().getTerminator();
auto return_types = llvm::to_vector<4>(terminator->getOperandTypes());
@@ -68,7 +68,7 @@
}
// TODO(jpienaar): Remove when recursive side-effect modeling is added.
-bool IsSideEffectFree(FuncOp func) {
+bool IsSideEffectFree(func::FuncOp func) {
return !func.getBody()
.walk([&](Operation* op) {
if (!MemoryEffectOpInterface::hasNoEffect(op) &&
@@ -92,12 +92,12 @@
// and therefore one terminator op. So, that function return type can be
// updated if operands' shapes change after inlining. Without this
// restriction, it would require tensor cast ops.
- FuncOp parent_op = op->getParentOfType<FuncOp>();
+ func::FuncOp parent_op = op->getParentOfType<func::FuncOp>();
if (!llvm::hasSingleElement(parent_op)) return failure();
// Find the then and else branch functions.
- FuncOp then_func = op.then_function();
- FuncOp else_func = op.else_function();
+ func::FuncOp then_func = op.then_function();
+ func::FuncOp else_func = op.else_function();
// If the If has no uses and its functions are side-effect free, then
// remove.
@@ -121,7 +121,7 @@
// Identify the branch to inline.
bool cond_value = (*cond.value_begin<APInt>()).getSExtValue();
- FuncOp func = cond_value ? then_func : else_func;
+ func::FuncOp func = cond_value ? then_func : else_func;
// Make sure that the function has exactly one block to simplify inlining.
// TFLite doesn't use control flow with blocks so functions with more than
diff --git a/tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc b/tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
index fb0afe3..bdafc4a 100644
--- a/tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/optimize_op_order.cc
@@ -103,7 +103,7 @@
// This transformation pass optimizes the op execution order of the ops in the
// model.
struct OptimizeOpOrderPass
- : public PassWrapper<OptimizeOpOrderPass, OperationPass<FuncOp>> {
+ : public PassWrapper<OptimizeOpOrderPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(OptimizeOpOrderPass)
void runOnOperation() override;
@@ -131,7 +131,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite optimize op order pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateOptimizeOpOrderPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizeOpOrderPass() {
return std::make_unique<OptimizeOpOrderPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/post_quantize.cc b/tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
index 110de59..7551c2a 100644
--- a/tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/post_quantize.cc
@@ -43,7 +43,7 @@
// Applies all the clean up steps after quantization.
class PostQuantizePass
- : public PassWrapper<PostQuantizePass, OperationPass<FuncOp>> {
+ : public PassWrapper<PostQuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PostQuantizePass)
@@ -83,7 +83,8 @@
// Cleans up unnecessary QDQ pattern for input/output ops.
class PostQuantizeRemoveQDQPass
- : public PassWrapper<PostQuantizeRemoveQDQPass, OperationPass<FuncOp>> {
+ : public PassWrapper<PostQuantizeRemoveQDQPass,
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PostQuantizeRemoveQDQPass)
@@ -104,7 +105,7 @@
};
// TODO(fengliuai): migrate to use modify_io_nodes pass.
-void RemoveQuantizationAdaptorOps(FuncOp func) {
+void RemoveQuantizationAdaptorOps(func::FuncOp func) {
mlir::OpBuilder builder(func.getBody());
auto& bb = func.front();
auto loc = func.getLoc();
@@ -293,7 +294,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect PostQuantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePostQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePostQuantizePass(
bool emit_quant_adaptor_ops, const quant::CustomOpMap& custom_op_map) {
return std::make_unique<PostQuantizePass>(emit_quant_adaptor_ops,
custom_op_map);
@@ -301,7 +302,7 @@
// Creates an instance of the TensorFlow Lite dialect PostQuantizeRemoveQDQ
// pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePostQuantizeRemoveQDQPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePostQuantizeRemoveQDQPass() {
return std::make_unique<PostQuantizeRemoveQDQPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc b/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
index 9187183..323d281 100644
--- a/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/prepare_composite_functions_tf.cc
@@ -108,7 +108,7 @@
// Convert func annotated with `tfl_fusable_op` attribute to tfl custom op.
LogicalResult ConvertTflFusableOp(
- FuncOp func, StringRef custom_op_name,
+ func::FuncOp func, StringRef custom_op_name,
ArrayRef<std::pair<StringRef, Attribute>> attrs) {
func.eraseBody();
func.addEntryBlock();
@@ -130,7 +130,7 @@
// Abstracts the conversion of the embedded lookup composite function.
class ConvertEmbeddedLookupFunc {
public:
- explicit ConvertEmbeddedLookupFunc(FuncOp func) : func_(func) {}
+ explicit ConvertEmbeddedLookupFunc(func::FuncOp func) : func_(func) {}
void RewriteFunc() {
func_->setAttr(kTFImplements,
@@ -160,7 +160,7 @@
}
private:
- FuncOp func_;
+ func::FuncOp func_;
};
// This pass uses mechanisms listed in RFC:
@@ -193,13 +193,15 @@
private:
// TODO(b/160915525): Consolidate FuncAttr and StringAttr into one.
- void ConvertTFImplements(FuncOp func, StringAttr attr);
- void ConvertTFImplementsWithAttributes(FuncOp func, FuncAttr attr);
- void ConvertTFAPIImplements(FuncOp func, StringAttr attr, ModuleOp module);
+ void ConvertTFImplements(func::FuncOp func, StringAttr attr);
+ void ConvertTFImplementsWithAttributes(func::FuncOp func, FuncAttr attr);
+ void ConvertTFAPIImplements(func::FuncOp func, StringAttr attr,
+ ModuleOp module);
void runOnOperation() override;
};
-LogicalResult CheckFusableLayerNormalizedLstmCellSimple(FuncOp lstm_func) {
+LogicalResult CheckFusableLayerNormalizedLstmCellSimple(
+ func::FuncOp lstm_func) {
for (int i = 0; i < 5; ++i) {
auto input = lstm_func.getArgument(i);
auto input_type = input.getType().dyn_cast_or_null<RankedTensorType>();
@@ -214,7 +216,7 @@
return success();
}
-LogicalResult CheckFusableLstmCellSimple(FuncOp lstm_func) {
+LogicalResult CheckFusableLstmCellSimple(func::FuncOp lstm_func) {
for (int i = 0; i < 4; ++i) {
auto input = lstm_func.getArgument(i);
auto input_type = input.getType().dyn_cast_or_null<RankedTensorType>();
@@ -245,11 +247,11 @@
return success();
}
-LogicalResult CheckFusableKerasLstm(FuncOp lstm_func, ModuleOp module) {
- for (auto func : module.getOps<FuncOp>()) {
+LogicalResult CheckFusableKerasLstm(func::FuncOp lstm_func, ModuleOp module) {
+ for (auto func : module.getOps<func::FuncOp>()) {
if (func == lstm_func) continue;
auto result = func.walk([&](CallOpInterface op) {
- if (dyn_cast<FuncOp>(op.resolveCallable()) == lstm_func) {
+ if (dyn_cast<func::FuncOp>(op.resolveCallable()) == lstm_func) {
// Keras LSTM have 5 outputs.
// We should make sure only the first or the second output are
// consumed.
@@ -312,7 +314,7 @@
return success();
}
-void PrepareCompositeFunctionsPass::ConvertTFImplements(FuncOp func,
+void PrepareCompositeFunctionsPass::ConvertTFImplements(func::FuncOp func,
StringAttr attr) {
if (attr.getValue() == "embedding_matmul") {
// Convert the composite embedding_matmul function body to a
@@ -359,7 +361,7 @@
}
void PrepareCompositeFunctionsPass::ConvertTFImplementsWithAttributes(
- FuncOp func, FuncAttr attr) {
+ func::FuncOp func, FuncAttr attr) {
StringRef api_name = attr.getName().getLeafReference().getValue();
bool enable_fuse_tftext =
fuse_tftext_flag || IsTFTextRegistered(tensorflow::OpRegistry::Global());
@@ -404,7 +406,7 @@
}
}
-void PrepareCompositeFunctionsPass::ConvertTFAPIImplements(FuncOp func,
+void PrepareCompositeFunctionsPass::ConvertTFAPIImplements(func::FuncOp func,
StringAttr attr,
ModuleOp module) {
// Keras lstm tf.api_implements usually has attribute like "lstm_abcde91...".
@@ -425,7 +427,7 @@
void PrepareCompositeFunctionsPass::runOnOperation() {
auto module = getOperation();
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
// We have three kinds of implements:
// 1) tf._implements, with string attributes.
// 2) tf._implements, with proto attributes.
diff --git a/tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc b/tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
index f121394..3c2f96b 100644
--- a/tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc
@@ -98,7 +98,7 @@
// making the quantization rule for some operations in the quantization-aware
// training quantization simpler.
class PrepareQuantizePass
- : public PassWrapper<PrepareQuantizePass, OperationPass<FuncOp>> {
+ : public PassWrapper<PrepareQuantizePass, OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry
.insert<TensorFlowLiteDialect, ::mlir::quant::QuantizationDialect>();
@@ -138,16 +138,16 @@
// non-float tensor types will be skipped because they are not quantizable.
// Return true if number of input nodes doesn't equal to that of the input
// ranges.
- bool SetInputNodesQuantizationParams(FuncOp func);
+ bool SetInputNodesQuantizationParams(func::FuncOp func);
// The function might contain more stats ops than required, and it will
// introduce requantize if the calibration stats have conflicts. This method
// tries to remove all the redundant stats ops.
- bool RemoveRedundantStats(FuncOp func);
+ bool RemoveRedundantStats(func::FuncOp func);
// Verify the quantization specification is expected for quantizing the
// current function.
- bool IsLegalQuantSpecs(FuncOp func) {
+ bool IsLegalQuantSpecs(func::FuncOp func) {
if (func.getName() == quant_specs_.target_func) {
return func.getNumArguments() == quant_specs_.input_ranges.size();
}
@@ -168,16 +168,16 @@
// Apply some sanity check and report some warnings for those who don't follow
// the best quantization practice. This also fixes some simple violations.
- void SanityCheckAndAdjustment(FuncOp func);
+ void SanityCheckAndAdjustment(func::FuncOp func);
// Whether the func contains Quantize ops. This is used to determine whether
// to use the quantization parameters from the fixed output range property.
- bool ContainsQuantizeOps(FuncOp func);
+ bool ContainsQuantizeOps(func::FuncOp func);
quant::QuantizationSpecs quant_specs_;
};
-bool PrepareQuantizePass::SetInputNodesQuantizationParams(FuncOp func) {
+bool PrepareQuantizePass::SetInputNodesQuantizationParams(func::FuncOp func) {
StringRef func_name = func.getName();
auto& target_func = quant_specs_.target_func;
// Skip this function because it isn't the target function from the spec or
@@ -259,7 +259,7 @@
#include "tensorflow/compiler/mlir/lite/utils/generated_op_quant_spec_getters.inc"
-bool PrepareQuantizePass::RemoveRedundantStats(FuncOp func) {
+bool PrepareQuantizePass::RemoveRedundantStats(func::FuncOp func) {
return RemoveRedundantStatsOps(func, GetOpQuantSpec);
}
@@ -273,7 +273,7 @@
return {};
}
-void PrepareQuantizePass::SanityCheckAndAdjustment(FuncOp func) {
+void PrepareQuantizePass::SanityCheckAndAdjustment(func::FuncOp func) {
// If an op output has two users: one of them is a quantize op and another
// one is returned directly, we decide to return the quantized result instead,
// so this op can be quantized. This is only applied on the returned result
@@ -353,7 +353,7 @@
});
}
-bool PrepareQuantizePass::ContainsQuantizeOps(FuncOp func) {
+bool PrepareQuantizePass::ContainsQuantizeOps(func::FuncOp func) {
for (const auto& op : func.getOps()) {
if (llvm::isa<quant::DequantizeCastOp>(op)) return true;
}
@@ -364,7 +364,7 @@
quant::ConvertStatsToQDQs<quant::QuantizeCastOp, quant::DequantizeCastOp>;
void PrepareQuantizePass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
MLIRContext* ctx = func.getContext();
ScopedTFLQuantOpsToMlirQuantOpsConverter converter(func);
@@ -441,7 +441,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect PrepareQuantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareQuantizePass(
const quant::QuantizationSpecs& quant_specs) {
return std::make_unique<PrepareQuantizePass>(quant_specs);
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc b/tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
index 12d4475..b96c685 100644
--- a/tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/prepare_quantize_dynamic_range.cc
@@ -77,7 +77,7 @@
// applicable.
class PrepareDynamicRangeQuantizePass
: public PassWrapper<PrepareDynamicRangeQuantizePass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry
.insert<TensorFlowLiteDialect, ::mlir::quant::QuantizationDialect>();
@@ -117,7 +117,7 @@
// dynamic range quantization. And stats ops may cause conflict while
// processing the function for dynamic range quantization. Therefore, this
// method preprocess the function to remove all stats ops.
- void removeAllStatsOp(FuncOp func);
+ void removeAllStatsOp(func::FuncOp func);
void runOnOperation() override;
@@ -431,7 +431,7 @@
};
// Remove all the stats ops which are redundant for dynamic range quantization.
-void PrepareDynamicRangeQuantizePass::removeAllStatsOp(FuncOp func) {
+void PrepareDynamicRangeQuantizePass::removeAllStatsOp(func::FuncOp func) {
func.walk([&](quant::StatisticsOp stats_op) {
stats_op.replaceAllUsesWith(stats_op.arg());
stats_op.erase();
@@ -439,7 +439,7 @@
}
void PrepareDynamicRangeQuantizePass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
MLIRContext* ctx = func.getContext();
ConvertTFLQuantOpsToMlirQuantOps(func);
@@ -456,7 +456,8 @@
// Creates an instance of the TensorFlow Lite dialect
// PrepareDynamicRangeQuantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareDynamicRangeQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreatePrepareDynamicRangeQuantizePass(
const quant::QuantizationSpecs& quant_specs) {
return std::make_unique<PrepareDynamicRangeQuantizePass>(quant_specs);
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc b/tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
index 05861a0..d64b74a 100644
--- a/tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc
@@ -98,7 +98,8 @@
namespace {
// Prepare TF operations in functions for subsequent legalization.
-class PrepareTFPass : public PassWrapper<PrepareTFPass, OperationPass<FuncOp>> {
+class PrepareTFPass
+ : public PassWrapper<PrepareTFPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PrepareTFPass)
@@ -1224,13 +1225,13 @@
// Converts a set of TF2XLA ops into pure TF ops for future legalizations as
// TF2XLA ops aren't supported by later stages.
-LogicalResult ConvertTf2XlaOps(FuncOp func, MLIRContext *context) {
+LogicalResult ConvertTf2XlaOps(func::FuncOp func, MLIRContext *context) {
ConversionTarget target(*context);
target.addLegalDialect<arith::ArithmeticDialect>();
target.addLegalDialect<func::FuncDialect>();
target.addLegalDialect<TF::TensorFlowDialect>();
target.addLegalOp<ModuleOp>();
- target.addLegalOp<FuncOp>();
+ target.addLegalOp<func::FuncOp>();
target.addIllegalOp<TF::XlaConvOp>();
target.addIllegalOp<TF::XlaGatherOp>();
@@ -1433,7 +1434,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect PrepareTF pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareTFPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareTFPass(
bool unfold_batch_matmul, bool allow_bf16_and_f16_type_legalization,
bool use_fake_quant_num_bits) {
return std::make_unique<PrepareTFPass>(unfold_batch_matmul,
diff --git a/tensorflow/compiler/mlir/lite/transforms/quantize.cc b/tensorflow/compiler/mlir/lite/transforms/quantize.cc
index a037f3e..fed664d3 100644
--- a/tensorflow/compiler/mlir/lite/transforms/quantize.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/quantize.cc
@@ -261,7 +261,8 @@
};
// Applies quantization on the model in TFL dialect.
-struct QuantizePass : public PassWrapper<QuantizePass, OperationPass<FuncOp>> {
+struct QuantizePass
+ : public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)
@@ -336,7 +337,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect QuantizeTFL pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
const quant::QuantizationSpecs& quant_specs, const StringSet& ops_blocklist,
const StringSet& nodes_blocklist) {
quant::QuantizationSpecs updated_quant_specs;
@@ -351,7 +352,7 @@
return std::make_unique<QuantizePass>(updated_quant_specs);
}
-std::unique_ptr<OperationPass<FuncOp>> CreateQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass(
bool verify_numeric, bool whole_model_verify, bool legacy_float_scale,
const StringSet& ops_blocklist, const StringSet& nodes_blocklist) {
quant::QuantizationSpecs quant_specs;
diff --git a/tensorflow/compiler/mlir/lite/transforms/raise_custom_ops.cc b/tensorflow/compiler/mlir/lite/transforms/raise_custom_ops.cc
index 1d01799..ec3f997 100644
--- a/tensorflow/compiler/mlir/lite/transforms/raise_custom_ops.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/raise_custom_ops.cc
@@ -44,7 +44,7 @@
// This transformation pass takes an operation with unknown op properties and
// wrap it by a TFL::CustomTfOp.
struct RaiseCustomOpsPass
- : public PassWrapper<RaiseCustomOpsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<RaiseCustomOpsPass, OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry ®istry) const final {
registry.insert<TensorFlowLiteDialect>();
}
@@ -122,7 +122,7 @@
} // namespace
// Creates an instance of the TensorFlow Lite dialect raise custom op pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateRaiseCustomOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateRaiseCustomOpsPass(
const std::vector<std::string> &target_ops) {
return std::make_unique<RaiseCustomOpsPass>(target_ops);
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/reduce_while_operands.cc b/tensorflow/compiler/mlir/lite/transforms/reduce_while_operands.cc
index a22eca3..0a2862f 100644
--- a/tensorflow/compiler/mlir/lite/transforms/reduce_while_operands.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/reduce_while_operands.cc
@@ -53,7 +53,7 @@
namespace {
struct ReduceWhileOperandsPass
- : public PassWrapper<ReduceWhileOperandsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<ReduceWhileOperandsPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ReduceWhileOperandsPass)
@@ -298,7 +298,7 @@
static PassRegistration<ReduceWhileOperandsPass> pass;
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateReduceWhileOperandsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateReduceWhileOperandsPass() {
return std::make_unique<ReduceWhileOperandsPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/runtime_verify.cc b/tensorflow/compiler/mlir/lite/transforms/runtime_verify.cc
index 7960640..6db9b9f 100644
--- a/tensorflow/compiler/mlir/lite/transforms/runtime_verify.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/runtime_verify.cc
@@ -24,7 +24,7 @@
// This pass verifies that the TFL ops meet the TFL runtime constraints.
class RuntimeVerifyPass
- : public mlir::PassWrapper<RuntimeVerifyPass, OperationPass<FuncOp>> {
+ : public mlir::PassWrapper<RuntimeVerifyPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RuntimeVerifyPass)
@@ -54,7 +54,7 @@
} // namespace
// Verifies TFL runtime constraints.
-std::unique_ptr<OperationPass<FuncOp>> CreateRuntimeVerifyPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateRuntimeVerifyPass() {
return std::make_unique<RuntimeVerifyPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc b/tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc
index 8ebd386..738777b 100644
--- a/tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/split_merged_operands.cc
@@ -66,7 +66,7 @@
namespace {
struct SplitMergedOperandsPass
- : public PassWrapper<SplitMergedOperandsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<SplitMergedOperandsPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(SplitMergedOperandsPass)
void runOnOperation() override;
@@ -131,7 +131,7 @@
/// Creates an instance of the TensorFlow Lite dialect SplitMergedOperands
/// pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateSplitMergedOperandsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateSplitMergedOperandsPass() {
return std::make_unique<SplitMergedOperandsPass>();
}
diff --git a/tensorflow/compiler/mlir/lite/transforms/trim_functions_tf.cc b/tensorflow/compiler/mlir/lite/transforms/trim_functions_tf.cc
index 940ad99..4cb5132 100644
--- a/tensorflow/compiler/mlir/lite/transforms/trim_functions_tf.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/trim_functions_tf.cc
@@ -86,8 +86,8 @@
// if no trim_funcs_allowlist_ is specified, this pass is a no-op.
if (trim_funcs_allowlist_.empty()) return false;
- llvm::SmallVector<FuncOp, 4> funcs_to_trim;
- for (auto func : getOperation().getOps<FuncOp>()) {
+ llvm::SmallVector<func::FuncOp, 4> funcs_to_trim;
+ for (auto func : getOperation().getOps<func::FuncOp>()) {
if (llvm::is_contained(trim_funcs_allowlist_, func.getName())) {
// If no main is specified in the allowlist, use the 1st func
// in trim_funcs_allowlist as the main.
@@ -117,10 +117,10 @@
// TODO(ashwinm): Instead, we should make sure that references to all
// SymbolRefAttrs of all ops are present.
SymbolTable symbol_table = SymbolTable(getOperation());
- llvm::SetVector<FuncOp> reachable_funcs;
- for (auto func : getOperation().getOps<FuncOp>()) {
+ llvm::SetVector<func::FuncOp> reachable_funcs;
+ for (auto func : getOperation().getOps<func::FuncOp>()) {
auto walk_result = func.walk([&](func::CallOp op) -> WalkResult {
- if (!symbol_table.lookup<FuncOp>(op.getCallee()))
+ if (!symbol_table.lookup<func::FuncOp>(op.getCallee()))
return getOperation().emitError()
<< func.getName() << " is not in the funcs allowlist";
return WalkResult::advance();
diff --git a/tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc b/tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc
index 1619789..a278215 100644
--- a/tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc
+++ b/tensorflow/compiler/mlir/lite/transforms/while_loop_outline.cc
@@ -113,10 +113,11 @@
return false;
}
-FuncOp CreateOutlineFunc(StringRef name, Region& region,
- bool passthru_extra_args, int num_loop_carried,
- const llvm::SetVector<Value>& extern_values,
- const SmallVectorImpl<Type>& types, Location loc) {
+func::FuncOp CreateOutlineFunc(StringRef name, Region& region,
+ bool passthru_extra_args, int num_loop_carried,
+ const llvm::SetVector<Value>& extern_values,
+ const SmallVectorImpl<Type>& types,
+ Location loc) {
MLIRContext* context = loc.getContext();
OpBuilder builder(context);
FunctionType type;
@@ -129,7 +130,7 @@
type = FunctionType::get(context, types, result_types);
}
- auto outlined_func = builder.create<FuncOp>(loc, name, type);
+ auto outlined_func = builder.create<func::FuncOp>(loc, name, type);
outlined_func.getBody().takeBody(region);
Region& func_region = outlined_func.getBody();
diff --git a/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc b/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
index ed4be7e..8fed72f 100644
--- a/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
+++ b/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.cc
@@ -25,7 +25,7 @@
// Moves the TF operations out from the tfl.TFCustomOps wrappers inside the
// function. This is a no-op for the ops which are not wrapped.
-LogicalResult UnwrapTFCustomOps(FuncOp fn, OpBuilder& builder) {
+LogicalResult UnwrapTFCustomOps(func::FuncOp fn, OpBuilder& builder) {
llvm::SmallVector<Operation*, 4> wrapped_ops;
fn.walk([&](TFL::CustomTfOp custom_op) {
auto* real_op = &custom_op.body().front().front();
@@ -67,7 +67,7 @@
// Removes the wrapper of the tf.FakeQuant* ops and creates the tfl.quantize
// and tfl.dequantize pairs before tf.FakeQuant* being folded.
-LogicalResult ConvertFakeQuantOps(FuncOp func, MLIRContext* ctx,
+LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
bool use_fake_quant_num_bits) {
OpBuilder builder(func);
if (failed(UnwrapTFCustomOps(func, builder))) {
diff --git a/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h b/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
index de84416..e97651a 100644
--- a/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
+++ b/tensorflow/compiler/mlir/lite/utils/fake_quant_utils.h
@@ -155,7 +155,7 @@
// Removes the wrapper of the tf.FakeQuant* ops and creates the tfl.quantize
// and tfl.dequantize pairs before tf.FakeQuant* being folded.
-LogicalResult ConvertFakeQuantOps(FuncOp func, MLIRContext *ctx,
+LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext *ctx,
bool use_fake_quant_num_bits = false);
// Returns the names of all the considered tf.FakeQuant* ops.
diff --git a/tensorflow/compiler/mlir/lite/utils/lstm_utils.h b/tensorflow/compiler/mlir/lite/utils/lstm_utils.h
index cac77dd..6749f82 100644
--- a/tensorflow/compiler/mlir/lite/utils/lstm_utils.h
+++ b/tensorflow/compiler/mlir/lite/utils/lstm_utils.h
@@ -102,7 +102,7 @@
virtual void SetOutputLayerNormCoefficients();
// specified state
- FuncOp fused_func_op_;
+ func::FuncOp fused_func_op_;
Value input_;
Value weight_;
Value bias_;
diff --git a/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc b/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
index af69c43..9692f63 100644
--- a/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
+++ b/tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc
@@ -43,7 +43,8 @@
namespace mlir {
namespace TFL {
-FuncOp createLstmCompositeFunc(mlir::Builder* builder, bool ln, bool cifg) {
+func::FuncOp createLstmCompositeFunc(mlir::Builder* builder, bool ln,
+ bool cifg) {
SmallVector<int64_t, 2> input_shape{1, 2};
SmallVector<int64_t, 2> weight_shape{3, 12};
SmallVector<int64_t, 1> bias_shape{2};
@@ -63,9 +64,9 @@
layer_norm_scale_type};
auto func_type = builder->getFunctionType(input_types, output_type);
- auto func =
- FuncOp::create(mlir::NameLoc::get(builder->getStringAttr("fused_func")),
- "fused_func", func_type, {});
+ auto func = func::FuncOp::create(
+ mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
+ func_type, {});
func.addEntryBlock();
std::vector<std::string> attributes;
@@ -109,9 +110,9 @@
builder_.reset();
}
- FuncOp fused_lstm_func_;
- FuncOp fused_lstm_func_cifg_;
- FuncOp fused_ln_lstm_func_;
+ func::FuncOp fused_lstm_func_;
+ func::FuncOp fused_lstm_func_cifg_;
+ func::FuncOp fused_ln_lstm_func_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
};
diff --git a/tensorflow/compiler/mlir/lite/utils/nms_utils.cc b/tensorflow/compiler/mlir/lite/utils/nms_utils.cc
index 14e0ff8..0207923 100644
--- a/tensorflow/compiler/mlir/lite/utils/nms_utils.cc
+++ b/tensorflow/compiler/mlir/lite/utils/nms_utils.cc
@@ -106,7 +106,8 @@
}
LogicalResult ConvertSSDPostProcessFunc::CreateNMSCustomOptions(
- FuncOp func, DictionaryAttr attrs, std::string& custom_option_buffer) {
+ func::FuncOp func, DictionaryAttr attrs,
+ std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
@@ -135,7 +136,7 @@
}
LogicalResult ConvertSSDPostProcessFunc::AddIntAttr(
- FuncOp func, DictionaryAttr attrs, const std::string& attribute,
+ func::FuncOp func, DictionaryAttr attrs, const std::string& attribute,
flexbuffers::Builder* builder) {
auto int_attr = attrs.get(attribute).dyn_cast_or_null<IntegerAttr>();
if (!int_attr) {
@@ -147,7 +148,7 @@
}
LogicalResult ConvertSSDPostProcessFunc::AddFloatAttr(
- FuncOp func, DictionaryAttr attrs, const std::string& attribute,
+ func::FuncOp func, DictionaryAttr attrs, const std::string& attribute,
flexbuffers::Builder* builder) {
auto float_attr = attrs.get(attribute).dyn_cast_or_null<FloatAttr>();
if (!float_attr) {
@@ -159,7 +160,7 @@
}
LogicalResult ConvertSSDPostProcessFunc::HasIntAttr(
- FuncOp func, DictionaryAttr attrs, const std::string& attribute) {
+ func::FuncOp func, DictionaryAttr attrs, const std::string& attribute) {
auto int_attr = attrs.get(attribute).dyn_cast_or_null<IntegerAttr>();
if (!int_attr) {
return func.emitWarning()
@@ -169,7 +170,7 @@
}
LogicalResult ConvertSSDPostProcessFunc::HasFloatAttr(
- FuncOp func, DictionaryAttr attrs, const std::string& attribute) {
+ func::FuncOp func, DictionaryAttr attrs, const std::string& attribute) {
auto float_attr = attrs.get(attribute).dyn_cast_or_null<FloatAttr>();
if (!float_attr) {
return func.emitWarning()
diff --git a/tensorflow/compiler/mlir/lite/utils/nms_utils.h b/tensorflow/compiler/mlir/lite/utils/nms_utils.h
index 842ea47..a0739ea 100644
--- a/tensorflow/compiler/mlir/lite/utils/nms_utils.h
+++ b/tensorflow/compiler/mlir/lite/utils/nms_utils.h
@@ -34,21 +34,21 @@
// Abstracts the conversion of the padded NMS composite function.
class ConvertNMSPaddedFunc {
public:
- explicit ConvertNMSPaddedFunc(FuncOp func) : func_(func) {}
+ explicit ConvertNMSPaddedFunc(func::FuncOp func) : func_(func) {}
void RewriteFunc();
LogicalResult VerifySignature();
private:
- FuncOp func_;
+ func::FuncOp func_;
};
// Abstracts the conversion of the SSD post-processing composite function to
// TFLite.
class ConvertSSDPostProcessFunc {
public:
- explicit ConvertSSDPostProcessFunc(FuncOp func, mlir::TF::FuncAttr attr)
+ explicit ConvertSSDPostProcessFunc(func::FuncOp func, mlir::TF::FuncAttr attr)
: func_(func), attr_(attr) {}
LogicalResult RewriteFunc();
@@ -56,24 +56,24 @@
LogicalResult VerifySignature();
private:
- LogicalResult CreateNMSCustomOptions(FuncOp func, DictionaryAttr attrs,
+ LogicalResult CreateNMSCustomOptions(func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer);
- LogicalResult AddIntAttr(FuncOp func, DictionaryAttr attrs,
+ LogicalResult AddIntAttr(func::FuncOp func, DictionaryAttr attrs,
const std::string& attribute,
flexbuffers::Builder* builder);
- LogicalResult AddFloatAttr(FuncOp func, DictionaryAttr attrs,
+ LogicalResult AddFloatAttr(func::FuncOp func, DictionaryAttr attrs,
const std::string& attribute,
flexbuffers::Builder* builder);
- LogicalResult HasIntAttr(FuncOp func, DictionaryAttr attrs,
+ LogicalResult HasIntAttr(func::FuncOp func, DictionaryAttr attrs,
const std::string& attribute);
- LogicalResult HasFloatAttr(FuncOp func, DictionaryAttr attrs,
+ LogicalResult HasFloatAttr(func::FuncOp func, DictionaryAttr attrs,
const std::string& attribute);
- FuncOp func_;
+ func::FuncOp func_;
mlir::TF::FuncAttr attr_;
};
diff --git a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc
index 6e1bee0..5ca0e6f 100644
--- a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc
+++ b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.cc
@@ -42,7 +42,7 @@
StringRef(content.data(), content.size()));
}
-inline LogicalResult HasIntegerArrayWithSize(FuncOp* func,
+inline LogicalResult HasIntegerArrayWithSize(func::FuncOp* func,
const DictionaryAttr& attrs,
const std::string& attr_name,
int N) {
@@ -64,8 +64,9 @@
}
inline LogicalResult GetIntegerArraySafe(
- FuncOp* func, const DictionaryAttr& attrs, const std::string& attr_name,
- llvm::SmallVectorImpl<int32_t>* results, int N) {
+ func::FuncOp* func, const DictionaryAttr& attrs,
+ const std::string& attr_name, llvm::SmallVectorImpl<int32_t>* results,
+ int N) {
ArrayAttr array_attr = attrs.get(attr_name).dyn_cast_or_null<ArrayAttr>();
if (array_attr == nullptr || array_attr.size() != N) {
return func->emitError()
diff --git a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h
index 7928dfa..b027394 100644
--- a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h
+++ b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils.h
@@ -28,7 +28,7 @@
// Fuse MaxUnpooling2D ops annotated by tf.function to a TFLite custom op.
class ConvertMaxUnpoolingFunc {
public:
- explicit ConvertMaxUnpoolingFunc(FuncOp func, mlir::TF::FuncAttr attr)
+ explicit ConvertMaxUnpoolingFunc(func::FuncOp func, mlir::TF::FuncAttr attr)
: func_(func), attr_(attr) {}
LogicalResult RewriteFunc();
@@ -38,21 +38,21 @@
private:
LogicalResult CreateCustomOptions(std::string& custom_option_buffer);
- FuncOp func_;
+ func::FuncOp func_;
mlir::TF::FuncAttr attr_;
};
// Fuse DenseImageWarp ops annotated by tf.function to a TFLite custom op.
class ConvertDenseImageWarpFunc {
public:
- explicit ConvertDenseImageWarpFunc(FuncOp func) : func_(func) {}
+ explicit ConvertDenseImageWarpFunc(func::FuncOp func) : func_(func) {}
LogicalResult RewriteFunc();
LogicalResult VerifySignature();
private:
- FuncOp func_;
+ func::FuncOp func_;
};
} // end namespace TFL
diff --git a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
index 3886947..960856d 100644
--- a/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
+++ b/tensorflow/compiler/mlir/lite/utils/perception_ops_utils_test.cc
@@ -34,13 +34,13 @@
namespace {
template <int NInput, int NOutput>
-FuncOp createMaxUnpoolingFunc(
+func::FuncOp createMaxUnpoolingFunc(
mlir::Builder* builder, const SmallVector<mlir::Type, NInput>& input_types,
const SmallVector<mlir::Type, NOutput>& output_types) {
auto func_type = builder->getFunctionType(input_types, output_types);
- auto func =
- FuncOp::create(mlir::NameLoc::get(builder->getStringAttr("fused_func")),
- "fused_func", func_type, {});
+ auto func = func::FuncOp::create(
+ mlir::NameLoc::get(builder->getStringAttr("fused_func")), "fused_func",
+ func_type, {});
func.addEntryBlock();
mlir::StringAttr attr_value = builder->getStringAttr("MaxUnpooling2D");
@@ -48,9 +48,9 @@
return func;
}
-FuncOp createMaxUnpoolingFunc(mlir::Builder* builder,
- const SmallVector<int64_t, 4>& input_shape,
- const SmallVector<int64_t, 4>& output_shape) {
+func::FuncOp createMaxUnpoolingFunc(
+ mlir::Builder* builder, const SmallVector<int64_t, 4>& input_shape,
+ const SmallVector<int64_t, 4>& output_shape) {
auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
auto indices_type = RankedTensorType::get(input_shape, builder->getI64Type());
auto output_type = RankedTensorType::get(output_shape, builder->getF32Type());
@@ -125,7 +125,7 @@
builder_.reset();
}
- FuncOp fused_max_unpooling_func_;
+ func::FuncOp fused_max_unpooling_func_;
mlir::TF::FuncAttr func_attr_;
std::unique_ptr<mlir::MLIRContext> context_;
std::unique_ptr<mlir::Builder> builder_;
diff --git a/tensorflow/compiler/mlir/lite/utils/tftext_utils.cc b/tensorflow/compiler/mlir/lite/utils/tftext_utils.cc
index 2f039e2..250996d 100644
--- a/tensorflow/compiler/mlir/lite/utils/tftext_utils.cc
+++ b/tensorflow/compiler/mlir/lite/utils/tftext_utils.cc
@@ -61,11 +61,11 @@
StringRef(content.data(), content.size()));
}
-inline TensorType GetInputType(FuncOp func, int idx) {
+inline TensorType GetInputType(func::FuncOp func, int idx) {
return func.getFunctionType().getInput(idx).dyn_cast_or_null<TensorType>();
}
-inline TensorType GetResultType(FuncOp func, int idx) {
+inline TensorType GetResultType(func::FuncOp func, int idx) {
return func.getFunctionType().getResult(idx).dyn_cast_or_null<TensorType>();
}
@@ -73,7 +73,7 @@
return type && type.hasRank() && type.getRank() == rank;
}
-LogicalResult VerifyWhitespaceTokenizer(FuncOp func) {
+LogicalResult VerifyWhitespaceTokenizer(func::FuncOp func) {
// In the case of input tensor with 0 rank.
// Whitespace tokenizer generates 1 output:
// * String tensor for tokens.
@@ -128,7 +128,7 @@
return success();
}
-LogicalResult ConvertWhitespaceTokenizer(FuncOp func, llvm::StringRef api,
+LogicalResult ConvertWhitespaceTokenizer(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
@@ -142,7 +142,7 @@
return success();
}
-LogicalResult VerifyNgrams(FuncOp func) {
+LogicalResult VerifyNgrams(func::FuncOp func) {
// The inputs and outputs should be the same:
// * A string tensor for tokens/ragged tensor values.
// * Zero or more row_split tensors.
@@ -206,7 +206,7 @@
return success();
}
-LogicalResult CreateNgramsCustomOption(FuncOp func, DictionaryAttr attrs,
+LogicalResult CreateNgramsCustomOption(func::FuncOp func, DictionaryAttr attrs,
std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
@@ -253,7 +253,8 @@
return success();
}
-LogicalResult ConvertNgrams(FuncOp func, llvm::StringRef api, FuncAttr attr) {
+LogicalResult ConvertNgrams(func::FuncOp func, llvm::StringRef api,
+ FuncAttr attr) {
func.eraseBody();
func.addEntryBlock();
func->setAttr(kTFImplements, attr);
@@ -270,7 +271,7 @@
return success();
}
-LogicalResult VerifySgnnProjection(FuncOp func, FuncAttr attr) {
+LogicalResult VerifySgnnProjection(func::FuncOp func, FuncAttr attr) {
if (func.getFunctionType().getNumInputs() != 2 ||
func.getFunctionType().getNumResults() != 1) {
return func.emitError() << "Mismatched number of inputs and outputs.";
@@ -310,7 +311,8 @@
}
LogicalResult CreateSgnnProjectionCustomOption(
- FuncOp func, DictionaryAttr attrs, std::string& custom_option_buffer) {
+ func::FuncOp func, DictionaryAttr attrs,
+ std::string& custom_option_buffer) {
flexbuffers::Builder fbb;
size_t start_map = fbb.StartMap();
@@ -331,7 +333,7 @@
return success();
}
-LogicalResult ConvertSgnnProjection(FuncOp func, llvm::StringRef api,
+LogicalResult ConvertSgnnProjection(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
// See more details in tensorflow_models/sequence_projection/sgnn/sgnn.py
func.eraseBody();
@@ -351,7 +353,7 @@
}
} // namespace
-LogicalResult ConvertTFTextAPI(FuncOp func, llvm::StringRef api,
+LogicalResult ConvertTFTextAPI(func::FuncOp func, llvm::StringRef api,
FuncAttr attr) {
if (api.str() == kWhitespaceTokenizer) {
if (succeeded(VerifyWhitespaceTokenizer(func))) {
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc
index 226e604..7e7817c 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_custom_aggregation_op_to_quant_stats.cc
@@ -37,7 +37,7 @@
class ConvertCustomAggregationOpToQuantStatsPass
: public PassWrapper<ConvertCustomAggregationOpToQuantStatsPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
ConvertCustomAggregationOpToQuantStatsPass)
@@ -102,7 +102,7 @@
void ConvertCustomAggregationOpToQuantStatsPass::runOnOperation() {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
patterns.add<ConvertCustomAggregationOpToQuantStats>(ctx);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
@@ -114,7 +114,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateConvertCustomAggregationOpToQuantStatsPass() {
return std::make_unique<ConvertCustomAggregationOpToQuantStatsPass>();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc
index 829a3f2..50b9995 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/convert_fake_quant_to_qdq.cc
@@ -27,7 +27,8 @@
namespace {
class ConvertFakeQuantToQdqPass
- : public PassWrapper<ConvertFakeQuantToQdqPass, OperationPass<FuncOp>> {
+ : public PassWrapper<ConvertFakeQuantToQdqPass,
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ConvertFakeQuantToQdqPass)
@@ -54,7 +55,7 @@
void ConvertFakeQuantToQdqPass::runOnOperation() {
MLIRContext* ctx = &getContext();
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (failed(
ConvertFakeQuantOps(func, ctx, /*use_fake_quant_num_bits=*/false))) {
@@ -69,7 +70,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateConvertFakeQuantToQdqPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateConvertFakeQuantToQdqPass() {
return std::make_unique<ConvertFakeQuantToQdqPass>();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
index c3c56cf..7441b68 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_custom_aggregation_ops.cc
@@ -37,7 +37,7 @@
class InsertCustomAggregationOpsPass
: public PassWrapper<InsertCustomAggregationOpsPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(InsertCustomAggregationOpsPass)
@@ -120,7 +120,7 @@
void InsertCustomAggregationOpsPass::runOnOperation() {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
patterns.add<AddCustomAggregationOp>(ctx);
if (failed(applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
@@ -131,7 +131,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateInsertCustomAggregationOpsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateInsertCustomAggregationOpsPass() {
return std::make_unique<InsertCustomAggregationOpsPass>();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
index 4c9423b..53f3def 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_main_function.cc
@@ -57,23 +57,25 @@
// Checks if the module has a main function.
bool HasMainFunction(ModuleOp& module) {
StringAttr main_func_id = StringAttr::get(module.getContext(), "main");
- for (auto function : module.getOps<FuncOp>()) {
+ for (auto function : module.getOps<func::FuncOp>()) {
if (function.getName() == main_func_id) return true;
}
return false;
}
// Checks if a FuncOp is exported.
-bool IsExported(FuncOp& op) {
+bool IsExported(func::FuncOp& op) {
auto exported_names = op->getAttrOfType<ArrayAttr>(kExportedNameAttr);
return exported_names && !exported_names.empty();
}
// Check if a function is an entry function.
-bool IsEntryFunction(FuncOp& op) { return op->hasAttr(kEntryFunctionAttr); }
+bool IsEntryFunction(func::FuncOp& op) {
+ return op->hasAttr(kEntryFunctionAttr);
+}
// Sets a function to be private so it can be referred internally.
-void SetFunctionPrivate(FuncOp& func) {
+void SetFunctionPrivate(func::FuncOp& func) {
func.setVisibility(SymbolTable::Visibility::Private);
// The `tf_saved_model` attributes can only be appied to public functions.
@@ -111,7 +113,7 @@
llvm::SmallVector<Location> arg_locs;
llvm::SmallVector<Type> arg_types, result_types;
std::vector<std::string> input_names, output_names;
- for (auto function : module.getOps<FuncOp>()) {
+ for (auto function : module.getOps<func::FuncOp>()) {
if (function.isPrivate() || !IsExported(function)) continue;
arg_types.append(function.getArgumentTypes().begin(),
function.getArgumentTypes().end());
@@ -148,7 +150,8 @@
// Creates a new main function.
auto func_type = FunctionType::get(context, arg_types, result_types);
- auto main_func = builder.create<FuncOp>(module.getLoc(), "main", func_type);
+ auto main_func =
+ builder.create<func::FuncOp>(module.getLoc(), "main", func_type);
builder.createBlock(&main_func.getBody(), main_func.begin(), arg_types,
arg_locs);
SmallVector<NamedAttribute> func_attrs;
@@ -190,7 +193,7 @@
int arg_idx = 0;
int result_idx = 0;
llvm::SmallVector<Value> returning_values;
- for (auto function : module.getOps<FuncOp>()) {
+ for (auto function : module.getOps<func::FuncOp>()) {
if (function.isPrivate() || !IsExported(function) ||
!IsEntryFunction(function)) {
continue;
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
index a71f241..4a23852 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/insert_quantized_functions.cc
@@ -89,9 +89,9 @@
}
// Copy all functions used by this signature to the final MLIR module.
- for (FuncOp func : module_ref->getOps<FuncOp>()) {
+ for (func::FuncOp func : module_ref->getOps<func::FuncOp>()) {
// Set the function to private.
- FuncOp new_func = func.clone();
+ func::FuncOp new_func = func.clone();
new_func.setPrivate();
// The insert here is a NO-OP if the function already exists.
symbol_table.insert(new_func);
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
index cc54a7e9..c29f5e3 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/lift_quantizable_spots_as_functions.cc
@@ -78,7 +78,7 @@
populateWithGenerated(patterns);
FrozenRewritePatternSet frozen_patterns(std::move(patterns));
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
if (failed(applyPatternsAndFoldGreedily(func, frozen_patterns))) {
func.emitError() << "quant-lift-quantizable-spots-as-functions failed.";
signalPassFailure();
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
index f406155..e006950 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/post_quantize.cc
@@ -37,7 +37,7 @@
// Applies all the clean up steps after quantization.
class PostQuantizePass
- : public PassWrapper<PostQuantizePass, OperationPass<FuncOp>> {
+ : public PassWrapper<PostQuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PostQuantizePass)
@@ -114,7 +114,7 @@
} // namespace
// Creates an instance of the TensorFlow dialect PostQuantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePostQuantizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePostQuantizePass() {
return std::make_unique<PostQuantizePass>();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
index c1bc84b..b341181 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc
@@ -28,7 +28,7 @@
namespace {
class PrepareLiftingPass
- : public PassWrapper<PrepareLiftingPass, OperationPass<FuncOp>> {
+ : public PassWrapper<PrepareLiftingPass, OperationPass<func::FuncOp>> {
public:
StringRef getArgument() const final {
// This is the argument used to refer to the pass in
@@ -63,7 +63,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareLiftingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareLiftingPass() {
return std::make_unique<PrepareLiftingPass>();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
index 14309f3..1354067 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.cc
@@ -69,7 +69,7 @@
// making the quantization rule for some operations in the quantization-aware
// training quantization simpler.
class PrepareQuantizePass
- : public PassWrapper<PrepareQuantizePass, OperationPass<FuncOp>> {
+ : public PassWrapper<PrepareQuantizePass, OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry& registry) const override {
registry
.insert<TF::TensorFlowDialect, ::mlir::quant::QuantizationDialect>();
@@ -113,16 +113,16 @@
// non-float tensor types will be skipped because they are not quantizable.
// Return true if number of input nodes doesn't equal to that of the input
// ranges.
- bool SetInputNodesQuantizationParams(FuncOp func);
+ bool SetInputNodesQuantizationParams(func::FuncOp func);
// The function might contain more stats ops than required, and it will
// introduce requantize if the calibration stats have conflicts. This method
// tries to remove all the redundant stats ops.
- bool RemoveRedundantStats(FuncOp func);
+ bool RemoveRedundantStats(func::FuncOp func);
// Verify the quantization specification is expected for quantizing the
// current function.
- bool IsLegalQuantSpecs(FuncOp func) {
+ bool IsLegalQuantSpecs(func::FuncOp func) {
if (func.getName() == quant_specs_.target_func) {
return func.getNumArguments() == quant_specs_.input_ranges.size();
}
@@ -143,16 +143,16 @@
// Apply some sanity check and report some warnings for those who don't follow
// the best quantization practice. This also fixes some simple violations.
- void SanityCheckAndAdjustment(FuncOp func);
+ void SanityCheckAndAdjustment(func::FuncOp func);
// Whether the func contains Quantize ops. This is used to determine whether
// to use the quantization parameters from the fixed output range property.
- bool ContainsQuantizeOps(FuncOp func);
+ bool ContainsQuantizeOps(func::FuncOp func);
QuantizationSpecs quant_specs_;
};
-bool PrepareQuantizePass::SetInputNodesQuantizationParams(FuncOp func) {
+bool PrepareQuantizePass::SetInputNodesQuantizationParams(func::FuncOp func) {
StringRef func_name = func.getName();
auto has_quantize_op = [&](const Value arg) {
return (arg.hasOneUse() &&
@@ -248,7 +248,7 @@
return spec;
}
-bool PrepareQuantizePass::RemoveRedundantStats(FuncOp func) {
+bool PrepareQuantizePass::RemoveRedundantStats(func::FuncOp func) {
return RemoveRedundantStatsOps(func, GetOpQuantSpec, GetTfQuantScaleSpec);
}
@@ -262,7 +262,7 @@
return {};
}
-void PrepareQuantizePass::SanityCheckAndAdjustment(FuncOp func) {
+void PrepareQuantizePass::SanityCheckAndAdjustment(func::FuncOp func) {
// If an op output has two users: one of them is a quantize op and another
// one is returned directly, we decide to return the quantized result instead,
// so this op can be quantized. This is only applied on the returned result
@@ -330,7 +330,7 @@
});
}
-bool PrepareQuantizePass::ContainsQuantizeOps(FuncOp func) {
+bool PrepareQuantizePass::ContainsQuantizeOps(func::FuncOp func) {
for (const auto& op : func.getOps()) {
if (llvm::isa<quant::DequantizeCastOp>(op)) return true;
}
@@ -343,7 +343,7 @@
#include "tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_quantize.inc"
void PrepareQuantizePass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
MLIRContext* ctx = func.getContext();
if (quant_specs_.post_training_quantization) {
@@ -394,7 +394,7 @@
} // namespace
// Creates an instance of the TensorFlow dialect PrepareQuantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareQuantizePass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareQuantizePass(
QuantizationMethod quantization_method) {
return std::make_unique<PrepareQuantizePass>(quantization_method);
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
index d5f2027..71db3ae 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize.cc
@@ -267,7 +267,8 @@
};
// Applies quantization on the model in TF dialect.
-struct QuantizePass : public PassWrapper<QuantizePass, OperationPass<FuncOp>> {
+struct QuantizePass
+ : public PassWrapper<QuantizePass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(QuantizePass)
@@ -316,7 +317,7 @@
} // namespace
// Creates an instance of the TensorFlow dialect Quantize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateQuantizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateQuantizePass() {
QuantizationSpecs quant_specs;
return std::make_unique<QuantizePass>(quant_specs);
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc b/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
index 821015c..54b8c1c 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc
@@ -249,7 +249,8 @@
// attr_name_1 is the name of the of the attribute needs to be set in the
// quantized function, attr_name_2 is the name of the attribute corresponding to
// the attribute identifier in the float function.
-LogicalResult TransferAttributes(FuncOp float_func, FuncOp quantized_func) {
+LogicalResult TransferAttributes(func::FuncOp float_func,
+ func::FuncOp quantized_func) {
// A map to find an attribute from its identifier.
llvm::StringMap<Attribute> identifier_to_attr;
for (Operation& inner_op : float_func.getBody().front().getOperations()) {
@@ -417,12 +418,13 @@
// Make a copy of the quantized function.
auto module = call_op->getParentOfType<ModuleOp>();
SymbolTable symbol_table(module);
- FuncOp float_func =
- dyn_cast<FuncOp>(symbol_table.lookup(f_attr.getValue()));
- FuncOp quantized_func =
- dyn_cast<FuncOp>(symbol_table.lookup(quantized_function_name.str()));
+ func::FuncOp float_func =
+ dyn_cast<func::FuncOp>(symbol_table.lookup(f_attr.getValue()));
+ func::FuncOp quantized_func = dyn_cast<func::FuncOp>(
+ symbol_table.lookup(quantized_function_name.str()));
rewriter.setInsertionPointAfter(float_func);
- FuncOp new_quantized_func = dyn_cast<FuncOp>(quantized_func->clone());
+ func::FuncOp new_quantized_func =
+ dyn_cast<func::FuncOp>(quantized_func->clone());
if (new_quantized_func == nullptr) {
return failure();
}
@@ -492,9 +494,10 @@
// This can be removed when the composite call supports quantized types.
pm.enableVerifier(false);
- pm.addNestedPass<FuncOp>(CreatePrepareQuantizePass(quantization_method_));
- pm.addNestedPass<FuncOp>(CreateQuantizePass());
- pm.addNestedPass<FuncOp>(CreatePostQuantizePass());
+ pm.addNestedPass<func::FuncOp>(
+ CreatePrepareQuantizePass(quantization_method_));
+ pm.addNestedPass<func::FuncOp>(CreateQuantizePass());
+ pm.addNestedPass<func::FuncOp>(CreatePostQuantizePass());
if (failed(pm.run(module))) {
signalPassFailure();
}
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc b/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc
index b927fa5..f4ab1a9 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.cc
@@ -36,7 +36,7 @@
// Removes the wrapper of the tf.FakeQuant* ops and creates the quant.qcast
// and quant.dcast pairs before tf.FakeQuant* ops are being foled.
-LogicalResult ConvertFakeQuantOps(FuncOp func, MLIRContext* ctx,
+LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
bool use_fake_quant_num_bits) {
OpBuilder builder(func);
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h b/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
index 7e4bf59..ce6a7ff 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/utils/fake_quant_utils.h
@@ -145,7 +145,7 @@
// Removes the wrapper of the tf.FakeQuant* ops and creates the quant.qcast
// and quant.dcast pairs before tf.FakeQuant* ops are being folded.
-LogicalResult ConvertFakeQuantOps(FuncOp func, MLIRContext* ctx,
+LogicalResult ConvertFakeQuantOps(func::FuncOp func, MLIRContext* ctx,
bool use_fake_quant_num_bits);
} // namespace quant
diff --git a/tensorflow/compiler/mlir/quantization/tensorflow/utils/lift_as_function_call_utils.cc b/tensorflow/compiler/mlir/quantization/tensorflow/utils/lift_as_function_call_utils.cc
index e8d9468..ce628fe 100644
--- a/tensorflow/compiler/mlir/quantization/tensorflow/utils/lift_as_function_call_utils.cc
+++ b/tensorflow/compiler/mlir/quantization/tensorflow/utils/lift_as_function_call_utils.cc
@@ -35,7 +35,7 @@
// Checks if the op is inside a lifted function.
bool IsInLiftedFunc(Operation *op) {
- return op->getParentOfType<FuncOp>()->hasAttr(kFusedFunctionAttr);
+ return op->getParentOfType<func::FuncOp>()->hasAttr(kFusedFunctionAttr);
}
// Inserts the function to the symbol table of the module thread-safely.
@@ -182,7 +182,7 @@
auto module = result_op->getParentOfType<ModuleOp>();
// Create a private function and copy all ops between arguments and results.
- auto current_func = result_op->getParentOfType<FuncOp>();
+ auto current_func = result_op->getParentOfType<func::FuncOp>();
auto guard = OpBuilder::InsertionGuard(builder);
builder.setInsertionPointAfter(current_func);
TypeRange arg_types(
@@ -194,7 +194,7 @@
for (const auto &arg : arguments) {
arg_locs.push_back(arg.getLoc());
}
- auto wrap_func = builder.create<FuncOp>(location, func_name, func_type);
+ auto wrap_func = builder.create<func::FuncOp>(location, func_name, func_type);
wrap_func.setVisibility(SymbolTable::Visibility::Private);
wrap_func->setAttr(kFusedFunctionAttr, builder.getUnitAttr());
builder.createBlock(&wrap_func.getBody(), wrap_func.begin(), arg_types,
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h
index 0be839c..5ba6590 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/per_function_aggregate_analysis.h
@@ -37,7 +37,7 @@
using Info = InfoT;
// Returns the analysis info for the given function.
- const Info& GetAnalysisForFunc(FuncOp func) const {
+ const Info& GetAnalysisForFunc(func::FuncOp func) const {
auto it = info_map_.find(func);
assert(it != info_map_.end());
return it->second;
@@ -46,14 +46,14 @@
protected:
// Since `InfoT` might be large, DenseMap is used instead of SmallDenseMap to
// avoid stack overflow.
- llvm::DenseMap<FuncOp, InfoT> info_map_;
+ llvm::DenseMap<func::FuncOp, InfoT> info_map_;
};
} // namespace detail
// Base CRTP class to help write passes that are consumes a per-function
// aggregate analysis and operate on all non-extern functions (similar to a
-// OperationPass<FuncOp>, but with no concurrency between functions). The
+// OperationPass<func::FuncOp>, but with no concurrency between functions). The
// derived classes need to provide a runOnFunction() method that accepts the
// function and the analysis information for that function.
template <typename DerivedT, typename AnalysisT>
@@ -73,7 +73,7 @@
DerivedT& derived = *static_cast<DerivedT*>(this);
auto& analysis = this->template getAnalysis<AnalysisT>();
- for (auto func : op.getOps<FuncOp>())
+ for (auto func : op.getOps<func::FuncOp>())
if (!func.isExternal())
derived.runOnFunction(func, analysis.GetAnalysisForFunc(func));
}
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc
index 3238e91..a28346d 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.cc
@@ -108,7 +108,7 @@
}
// Returns backtracking analysis for the given function.
- const InfoT& GetAnalysisForFunc(FuncOp func) const {
+ const InfoT& GetAnalysisForFunc(func::FuncOp func) const {
return GetAnalysisForRegion(func.getBody());
}
@@ -144,7 +144,7 @@
return &it->second;
}
- Optional<const InfoT*> GetAnalysisIfExists(FuncOp func) const {
+ Optional<const InfoT*> GetAnalysisIfExists(func::FuncOp func) const {
return GetAnalysisIfExists(func.getBody());
}
@@ -201,8 +201,8 @@
} else if (isa<IdentityNOp, IdentityOp>(op)) {
value = op->getOperand(res_index);
} else if (auto call = dyn_cast<CallOpInterface>(op)) {
- FuncOp func =
- dyn_cast<FuncOp>(call.resolveCallable(&symbol_table_collection_));
+ func::FuncOp func = dyn_cast<func::FuncOp>(
+ call.resolveCallable(&symbol_table_collection_));
if (!func) break;
// Check if the function being called has been analyzed. if not,
// we cannot backtrack the value further.
@@ -281,7 +281,7 @@
// Constructs the analysis info by analyzing the given function.
ResourceAliasAnalysisInfo::ResourceAliasAnalysisInfo(
- FuncOp func_op, const BacktrackAnalysis& backtrack_analysis,
+ func::FuncOp func_op, const BacktrackAnalysis& backtrack_analysis,
SymbolTableCollection& symbol_table_collection) {
// This function populates resource_value_to_ids_ and id_to_resource_values_.
@@ -386,7 +386,7 @@
AnalyzeWhileLoop(while_region, backtrack_analysis.GetAnalysisForRegion(
while_region.body()));
} else if (auto case_op = dyn_cast<CaseOp>(op)) {
- llvm::SmallVector<FuncOp, 4> functions;
+ llvm::SmallVector<func::FuncOp, 4> functions;
case_op.get_branch_functions(functions);
AnalyzeFunctionalCaseOrIfOp(case_op, functions, backtrack_analysis);
} else if (auto if_op = dyn_cast<IfOp>(op)) {
@@ -396,7 +396,7 @@
} else if (llvm::isa<CaseRegionOp, IfRegionOp>(op)) {
AnalyzeRegionCaseOrIfOp(op, backtrack_analysis);
} else if (auto call = dyn_cast<CallOpInterface>(op)) {
- FuncOp func = dyn_cast_or_null<FuncOp>(
+ func::FuncOp func = dyn_cast_or_null<func::FuncOp>(
call.resolveCallable(&symbol_table_collection));
if (!func) {
assign_unknown_id_to_all(op->getResults());
@@ -531,11 +531,11 @@
template <class CaseOrIfOp>
void ResourceAliasAnalysisInfo::AnalyzeFunctionalCaseOrIfOp(
- CaseOrIfOp case_or_if_op, llvm::ArrayRef<FuncOp> functions,
+ CaseOrIfOp case_or_if_op, llvm::ArrayRef<func::FuncOp> functions,
const BacktrackAnalysis& backtrack_analysis) {
llvm::SmallVector<const BacktrackAnalysisInfo*, 2> infos;
infos.reserve(functions.size());
- for (FuncOp func : functions)
+ for (func::FuncOp func : functions)
infos.push_back(&backtrack_analysis.GetAnalysisForFunc(func));
// If a result is a passthrough of all branches' inputs, merge the resource
@@ -638,7 +638,7 @@
detail::BacktrackAnalysis backtrack_analysis(module, symbol_table_collection);
// Analyze each function.
- for (auto func : module.getOps<FuncOp>())
+ for (auto func : module.getOps<func::FuncOp>())
this->info_map_.try_emplace(func, func, backtrack_analysis,
symbol_table_collection);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h
index 002e205..e8eb89f 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_alias_analysis.h
@@ -42,7 +42,7 @@
class ResourceAliasAnalysisInfo {
public:
// Constructs analysis info by analyzing the given function.
- ResourceAliasAnalysisInfo(FuncOp func,
+ ResourceAliasAnalysisInfo(func::FuncOp func,
const BacktrackAnalysis& backtrack_analysis,
SymbolTableCollection& symbol_table_collection);
@@ -89,7 +89,7 @@
// Analyzes tf.Case/tf.If ops to compute resource IDs.
template <class CaseOrIfOp>
void AnalyzeFunctionalCaseOrIfOp(CaseOrIfOp case_or_if_op,
- llvm::ArrayRef<FuncOp> functions,
+ llvm::ArrayRef<func::FuncOp> functions,
const BacktrackAnalysis& backtrack_analysis);
// Analyzes tf.CaseRegion/tf.IfRegion ops to compute resource IDs.
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc
index ac78488..13171815 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/resource_value_typed_analyzer.cc
@@ -38,11 +38,11 @@
// Helper that returns the FuncOp that is the SessionInit function which
// will be called to initialize all resources.
// Returns nullptr if no function is found.
-FuncOp GetSessionInitializerFunc(ModuleOp module) {
+func::FuncOp GetSessionInitializerFunc(ModuleOp module) {
auto session_init_op = tf_saved_model::GetSessionInitializerOp(module);
if (session_init_op && !session_init_op.initializers().empty()) {
SymbolTable symbol_table(module);
- FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
+ func::FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
session_init_op.initializers()[0].cast<FlatSymbolRefAttr>().getValue());
return init_func_op;
}
@@ -73,7 +73,7 @@
} // namespace
ResourceAnalyzer::ResourceAnalyzer(ModuleOp module, bool skip_session_init) {
auto session_init_func = GetSessionInitializerFunc(module);
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
if (skip_session_init && func == session_init_func) continue;
(void)AnalyzeRegion(func.getRegion());
}
@@ -120,7 +120,7 @@
return;
}
if (auto call = dyn_cast<CallOpInterface>(op)) {
- if (auto func = dyn_cast<FuncOp>(call.resolveCallable())) {
+ if (auto func = dyn_cast<func::FuncOp>(call.resolveCallable())) {
PropagatePotentiallyWrittenUpFromCallee(func.getRegion(),
call.getArgOperands());
}
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
index a3e80b2..399b124 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc
@@ -263,7 +263,7 @@
// populates `op_side_effect_map_`.
explicit OpSideEffectCollector(ModuleOp module) {
symbol_table_collection_.getSymbolTable(module);
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
CollectOpSideEffects(func);
}
}
@@ -301,11 +301,11 @@
// Propagate side effects from regions or functions attached to `op` for
// some special cases.
- if (auto func = llvm::dyn_cast<FuncOp>(op)) {
+ if (auto func = llvm::dyn_cast<func::FuncOp>(op)) {
AddRegionSideEffectsForOp(func.getBody(), op);
} else if (auto call = llvm::dyn_cast<CallOpInterface>(op)) {
- FuncOp func_op =
- dyn_cast<FuncOp>(call.resolveCallable(&symbol_table_collection_));
+ func::FuncOp func_op =
+ dyn_cast<func::FuncOp>(call.resolveCallable(&symbol_table_collection_));
if (func_op) {
AddRegionSideEffectsForOp(func_op.getBody(), op);
}
@@ -317,7 +317,7 @@
} else if (auto while_region_op = dyn_cast<WhileRegionOp>(op)) {
AddRegionSideEffectsForOp(while_region_op.body(), op);
} else if (auto case_op = dyn_cast<CaseOp>(op)) {
- llvm::SmallVector<FuncOp, 4> branch_funcs;
+ llvm::SmallVector<func::FuncOp, 4> branch_funcs;
case_op.get_branch_functions(branch_funcs);
for (auto branch_func : branch_funcs) {
AddRegionSideEffectsForOp(branch_func.getBody(), op);
@@ -469,7 +469,7 @@
}
}
-void SideEffectAnalysisInfo::AnalyzeFunction(FuncOp func_op) {
+void SideEffectAnalysisInfo::AnalyzeFunction(func::FuncOp func_op) {
// AnalyzeRegion() recursively analyzes the function body, and only populates
// control_predecessors_.
AnalyzeRegion(&func_op.getBody());
@@ -664,7 +664,7 @@
detail::OpSideEffectCollector op_side_effect_collector(module);
// Analyze all functions.
- for (auto func : module.getOps<FuncOp>())
+ for (auto func : module.getOps<func::FuncOp>())
this->info_map_.try_emplace(func, func,
op_side_effect_collector,
alias_analysis_.GetAnalysisForFunc(func));
diff --git a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
index 3171206..5599abd 100644
--- a/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
+++ b/tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.h
@@ -57,7 +57,7 @@
SideEffectAnalysisInfo() = default;
// Constructs analysis info by analyzing the given function.
- SideEffectAnalysisInfo(FuncOp func_op,
+ SideEffectAnalysisInfo(func::FuncOp func_op,
const OpSideEffectCollector& op_side_effect_collector,
const TF::ResourceAliasAnalysis::Info& alias_analysis)
: op_side_effect_collector_(op_side_effect_collector),
@@ -112,7 +112,7 @@
private:
// Runs the analysis and populates `sorted_control_predecessors_` and
// `sorted_control_successors_` for `func_op`. Clears `control_predecessors_`.
- void AnalyzeFunction(FuncOp func_op);
+ void AnalyzeFunction(func::FuncOp func_op);
// Runs the analysis and populates `control_predecessors_` for `region`.
void AnalyzeRegion(Region* region);
diff --git a/tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc b/tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc
index e2fd0dd..e7acc10 100644
--- a/tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc
+++ b/tensorflow/compiler/mlir/tensorflow/c/c_api_unified_experimental_mlir.cc
@@ -234,7 +234,7 @@
private:
std::unique_ptr<MLIRContext> context_;
OwningOpRef<mlir::ModuleOp> module_;
- FuncOp func_;
+ func::FuncOp func_;
std::unique_ptr<tensorflow::FunctionDef> fdef_;
};
@@ -247,8 +247,9 @@
RegisterDialects(*context_);
// TODO(aminim) figure out the location story here
module_ = ModuleOp::create(builder_.getUnknownLoc());
- func_ = FuncOp::create(builder_.getUnknownLoc(), name,
- builder_.getFunctionType(llvm::None, llvm::None));
+ func_ =
+ func::FuncOp::create(builder_.getUnknownLoc(), name,
+ builder_.getFunctionType(llvm::None, llvm::None));
module_->push_back(func_);
builder_ = OpBuilder::atBlockBegin(func_.addEntryBlock());
}
@@ -279,7 +280,7 @@
private:
std::unique_ptr<MLIRContext> context_;
OpBuilder builder_;
- FuncOp func_;
+ func::FuncOp func_;
OwningOpRef<mlir::ModuleOp> module_;
};
@@ -523,7 +524,8 @@
}
PassManager pm(func_.getContext());
::tensorflow::applyTensorflowAndCLOptions(pm);
- pm.addNestedPass<FuncOp>(CreateFunctionalToExecutorDialectConversionPass());
+ pm.addNestedPass<func::FuncOp>(
+ CreateFunctionalToExecutorDialectConversionPass());
pm.addPass(CreateBreakUpIslandsPass());
// In case of failure, the `diag_handler` converts MLIR errors emitted to
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
index 7a7f561..b443b0f 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_device_ops.td
@@ -365,8 +365,8 @@
let extraClassDeclaration = [{
// returns the function that this operation will launch.
- FuncOp getFunc() {
- return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, funcAttr());
+ func::FuncOp getFunc() {
+ return SymbolTable::lookupNearestSymbolFrom<func::FuncOp>(*this, funcAttr());
}
}];
}
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
index 2a549a7..cae6812 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_generated_ops.td
@@ -10506,8 +10506,8 @@
CallInterfaceCallable getCallableForCallee() { return fAttr(); }
// returns the callee of this operation.
- FuncOp func() {
- return SymbolTable::lookupNearestSymbolFrom<FuncOp>(*this, f());
+ func::FuncOp func() {
+ return SymbolTable::lookupNearestSymbolFrom<func::FuncOp>(*this, f());
}
}];
@@ -20839,7 +20839,7 @@
TF_DerivedResultTypeListAttr Toutputs = TF_DerivedResultTypeListAttr<0>;
let extraClassDeclaration = [{
- FuncOp GetHostFunc(mlir::OwningOpRef<mlir::ModuleOp>* mlir_module);
+ func::FuncOp GetHostFunc(mlir::OwningOpRef<mlir::ModuleOp>* mlir_module);
}];
let hasVerifier = 1;
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
index b0863f5..92cc5ec 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_a_m.cc
@@ -830,7 +830,7 @@
TypeRangeWithDesc result{op->getResultTypes(), "result"};
for (auto branch : llvm::enumerate(branches)) {
- auto branch_func = symbol_table.lookupNearestSymbolFrom<FuncOp>(
+ auto branch_func = symbol_table.lookupNearestSymbolFrom<func::FuncOp>(
op, branch.value().cast<SymbolRefAttr>());
if (!branch_func)
return op->emitOpError()
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
index a7efbc8..35799b5 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc
@@ -467,7 +467,7 @@
SymbolRefAttr func = op->getAttr("f").template cast<SymbolRefAttr>();
auto function =
- dyn_cast_or_null<FuncOp>(SymbolTable::lookupSymbolIn(module, func));
+ dyn_cast_or_null<func::FuncOp>(SymbolTable::lookupSymbolIn(module, func));
if (!function) {
return op.emitError("'f' attribute refers to an undefined function: ")
@@ -3072,9 +3072,9 @@
if (failed(WhileOpAdaptor(*this).verify(getLoc()))) return failure();
auto cond_fn =
- symbol_table.lookupNearestSymbolFrom<FuncOp>(*this, condAttr());
+ symbol_table.lookupNearestSymbolFrom<func::FuncOp>(*this, condAttr());
auto body_fn =
- symbol_table.lookupNearestSymbolFrom<FuncOp>(*this, bodyAttr());
+ symbol_table.lookupNearestSymbolFrom<func::FuncOp>(*this, bodyAttr());
if (!cond_fn) {
return emitOpError("cond refers to an undefined function : ") << cond();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_remaining_ops.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_remaining_ops.cc
index 2f0d561..da2a417 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_remaining_ops.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_remaining_ops.cc
@@ -98,7 +98,7 @@
<< status.error_message();
}
- FuncOp func = module_for_func->lookupSymbol<FuncOp>("host_func");
+ func::FuncOp func = module_for_func->lookupSymbol<func::FuncOp>("host_func");
if (!func)
return op.emitError()
<< "serialized module in attribute 'host_mlir_module' does not "
@@ -120,13 +120,13 @@
return success();
}
-FuncOp _XlaHostComputeMlirOp::GetHostFunc(
+func::FuncOp _XlaHostComputeMlirOp::GetHostFunc(
mlir::OwningOpRef<mlir::ModuleOp>* mlir_module) {
if (!tensorflow::DeserializeMlirModule(host_mlir_module().str(),
this->getContext(), mlir_module)
.ok())
return nullptr;
- return (*mlir_module)->lookupSymbol<FuncOp>("host_func");
+ return (*mlir_module)->lookupSymbol<func::FuncOp>("host_func");
}
//===----------------------------------------------------------------------===//
diff --git a/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc b/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc
index 0c19950..cc67216 100644
--- a/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc
+++ b/tensorflow/compiler/mlir/tensorflow/ir/tf_saved_model.cc
@@ -203,7 +203,7 @@
"reference a valid symbol, got invalid symbol '"
<< symbol_name << "'";
}
- auto arg_type = cast<FuncOp>(op).getArgument(arg_index).getType();
+ auto arg_type = cast<func::FuncOp>(op).getArgument(arg_index).getType();
return VerifyBoundInputArgType(op, arg_type, symbol_op);
}
if (named_attr.getName() == "tf_saved_model.index_path") {
@@ -225,7 +225,7 @@
<< named_attr.getName().getValue() << "'";
}
-static bool HasAnyTfSavedModelArgAttr(FuncOp func) {
+static bool HasAnyTfSavedModelArgAttr(func::FuncOp func) {
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, "tf_saved_model.index_path") ||
func.getArgAttr(i, "tf_saved_model.bound_input")) {
@@ -268,7 +268,7 @@
}
}
}
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
const bool is_exported = IsExported(func);
if (is_exported && !func.isPublic()) {
@@ -309,7 +309,7 @@
"have analyzable symbol uses";
}
for (auto symbol_use : *symbol_uses) {
- auto func = symbol_table.lookupNearestSymbolFrom<FuncOp>(
+ auto func = symbol_table.lookupNearestSymbolFrom<func::FuncOp>(
symbol_use.getUser(), symbol_use.getSymbolRef());
if (func && IsExported(func)) {
// If it is an init function, then it can be used by the unique
@@ -327,7 +327,7 @@
return success();
}
-LogicalResult VerifyExportedFunc(FuncOp func) {
+LogicalResult VerifyExportedFunc(func::FuncOp func) {
bool reached_bound_inputs = false;
auto module = func->getParentOfType<ModuleOp>();
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
@@ -377,7 +377,7 @@
LogicalResult TensorFlowSavedModelDialect::verifyOperationAttribute(
Operation *op, NamedAttribute named_attr) {
if (named_attr.getName() == "tf_saved_model.exported_names") {
- if (!isa<FuncOp, GlobalTensorOp>(op)) {
+ if (!isa<func::FuncOp, GlobalTensorOp>(op)) {
return op->emitError() << "'tf_saved_model.exported_names' must be on a "
"'func' or 'tf_saved_model.global_tensor' op";
}
@@ -391,7 +391,7 @@
"whose immediate parent has attribute "
"'tf_saved_model.semantics'";
}
- if (auto func = dyn_cast<FuncOp>(op)) {
+ if (auto func = dyn_cast<func::FuncOp>(op)) {
if (failed(VerifyExportedFunc(func))) {
return failure();
}
@@ -436,7 +436,7 @@
return module->getAttr("tf_saved_model.semantics") != nullptr;
}
-Operation *LookupBoundInput(FuncOp func, int arg_index,
+Operation *LookupBoundInput(func::FuncOp func, int arg_index,
const SymbolTable &symbol_table) {
auto attr = func.getArgAttrOfType<FlatSymbolRefAttr>(
arg_index, "tf_saved_model.bound_input");
@@ -459,7 +459,7 @@
PatternRewriter &rewriter) const override {
SymbolTable symbol_table(op->getParentOfType<ModuleOp>());
- SmallVector<FuncOp, 2> to_remove;
+ SmallVector<func::FuncOp, 2> to_remove;
SmallVector<mlir::Attribute, 2> to_keep;
for (auto sym_ref : op.initializers()) {
auto init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/annotate_parameter_replication.cc b/tensorflow/compiler/mlir/tensorflow/transforms/annotate_parameter_replication.cc
index 9fb4c9f..96ae182 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/annotate_parameter_replication.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/annotate_parameter_replication.cc
@@ -71,7 +71,7 @@
mirrored_index.cast<IntegerAttr>().getInt());
}
}
- auto func = llvm::cast<FuncOp>(m.lookupSymbol(cluster_func.func()));
+ auto func = llvm::cast<func::FuncOp>(m.lookupSymbol(cluster_func.func()));
for (auto entry : llvm::enumerate(cluster_func.getOperands())) {
auto operand = SkipIdentityAndReadVariable(entry.value());
auto block_arg = operand.dyn_cast<BlockArgument>();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc b/tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc
index 603c8cb..45ffc5a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/batchmatmul_to_einsum.cc
@@ -100,7 +100,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateBatchMatMulToEinsumPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateBatchMatMulToEinsumPass() {
return std::make_unique<BatchMatMulToEinsumPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/bridge.cc b/tensorflow/compiler/mlir/tensorflow/transforms/bridge.cc
index 7980866..39e9289 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/bridge.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/bridge.cc
@@ -82,29 +82,31 @@
const llvm::SmallVector<std::string, 4> ops_to_preserve = {
"tf.TPUReplicateMetadata", "tf.TPUCompilationResult",
"tf.TPUReplicatedOutput"};
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
// It is assumed at this stage there are no V1 control flow ops as Graph
// functionalization is run before import. Ops can be lifted out of
// tf_executor dialect islands/graphs.
- pm.addNestedPass<FuncOp>(CreateExecutorDialectToFunctionalConversionPass());
+ pm.addNestedPass<func::FuncOp>(
+ CreateExecutorDialectToFunctionalConversionPass());
// Guarantee all functions have one use, which enables more exact shape
// inference.
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
// Run shape inference so that tf_executor/tf_device ops created later will
// likely to inherit more concrete types.
pm.addPass(TF::CreateTFShapeInferencePass());
- pm.addNestedPass<FuncOp>(CreateTPUReorderReplicateAndPartitionedInputsPass());
+ pm.addNestedPass<func::FuncOp>(
+ CreateTPUReorderReplicateAndPartitionedInputsPass());
pm.addPass(CreateTPUClusterFormationPass());
// Run TPU cluster cleanup attributes so ops with no outside compiled
// attribute have no host device attribute.
pm.addPass(CreateTPUClusterCleanupAttributesPass());
pm.addPass(CreateOutsideCompiledToHostLaunchPass());
- pm.addNestedPass<FuncOp>(TFDevice::CreateDeviceAttributeToLaunchPass());
+ pm.addNestedPass<func::FuncOp>(TFDevice::CreateDeviceAttributeToLaunchPass());
// Running canonicalizer before decomposing resource ops in cluster helps the
// latter pass to converge faster as it does not have to spend time folding
// away dead ops.
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
// Place DecomposeResourceOpsPass before TFExecutorConstantSinking pass
// because DecomposeResourceOpsPass uses pattern rewriter which hoists
// changed constants out of tf_device.Launch.
@@ -112,7 +114,7 @@
// Encode this in its own scope so that func_pm is not mistakenly used
// later on.
{
- OpPassManager &func_pm = pm.nest<FuncOp>();
+ OpPassManager &func_pm = pm.nest<func::FuncOp>();
func_pm.addPass(CreateTPUHostComputationExpansionPass());
func_pm.addPass(CreateTPUUpdateEmbeddingEnqueueOpInputsPass());
}
@@ -126,7 +128,7 @@
// LaunchToDeviceAttributePass. This LaunchToDeviceAttribute pass needs to
// come before TPUClusterCleanupAttributes pass or else the device attribute
// will be removed from launch causing an error.
- pm.addNestedPass<FuncOp>(TFDevice::CreateLaunchToDeviceAttributePass());
+ pm.addNestedPass<func::FuncOp>(TFDevice::CreateLaunchToDeviceAttributePass());
// TODO(b/173622615): This can be removed once more passes support outside
// compilation represented by op and conversion back to attribute is removed.
@@ -135,27 +137,27 @@
// function call ops which get inlined by the subsequent inliner pass.
pm.addPass(TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
TF::CreateDropWhileShapeInvariantInDeviceClusterPass());
// Run another shape inference pass because resource decomposition might have
// created new partial types. Also, after dropping `shape_invariant` attribute
// from While/WhileRegion ops within cluster would lead to more precise
// shapes.
pm.addPass(TF::CreateTFShapeInferencePass());
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
pm.addPass(CreateTPUClusterCleanupAttributesPass());
pm.addPass(TFDevice::CreateResourceOpLiftingPass());
// Re-run the canonicalizer pass as some cleanup during resource op lifting
// pass opens up some opportunities for canonicalization of cluster ops.
// Specifically, we want to eliminate pass through results from the cluster
// op.
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
// TODO(b/173622615): This should incrementally be moved down as
// more passes support this representation and then can be removed once
// all passes support it.
pm.addPass(TFDevice::CreateHostLaunchToOutsideCompiledPass());
- pm.addNestedPass<FuncOp>(createCSEPass());
+ pm.addNestedPass<func::FuncOp>(createCSEPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_merge_control_flow_pass) {
pm.addPass(TFDevice::CreateMergeControlFlowPass());
@@ -165,35 +167,37 @@
pm.addPass(CreateTPUExtractHeadTailOutsideCompilationPass());
pm.addPass(CreateTPUExtractOutsideCompilationPass());
- pm.addNestedPass<FuncOp>(TFDevice::CreateClusterConstantSinkingPass());
+ pm.addNestedPass<func::FuncOp>(TFDevice::CreateClusterConstantSinkingPass());
pm.addPass(TF::CreateResourceDeviceInferencePass());
pm.addPass(TFDevice::CreateClusterOutliningPass());
pm.addPass(CreateTPUResourceReadForWritePass());
pm.addPass(TFDevice::CreateMarkInputOutputAliasesPass());
pm.addPass(CreateTPUShardingIdentificationPass());
- pm.addNestedPass<FuncOp>(CreateTPUResourceReadsWritesPartitioningPass());
+ pm.addNestedPass<func::FuncOp>(
+ CreateTPUResourceReadsWritesPartitioningPass());
pm.addPass(TFDevice::CreateAnnotateParameterReplicationPass());
pm.addPass(CreateTPURewritePass());
pm.addPass(createSymbolDCEPass());
- pm.addNestedPass<FuncOp>(TFDevice::CreateReplicateInvariantOpHoistingPass());
+ pm.addNestedPass<func::FuncOp>(
+ TFDevice::CreateReplicateInvariantOpHoistingPass());
pm.addPass(CreateTPUMergeVariablesWithExecutePass());
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
TF::CreateHoistReplicateInvariantResourceWritesPass());
- pm.addNestedPass<FuncOp>(CreateTPUColocateCompositeResourceOps());
+ pm.addNestedPass<func::FuncOp>(CreateTPUColocateCompositeResourceOps());
pm.addPass(CreateTPUVariableRuntimeReformattingPass());
pm.addPass(TF::CreateTFRegionControlFlowToFunctional());
}
} // namespace
void CreateTPUBridgePipeline(OpPassManager &pm) {
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
CreateCanonicalizeCompileAndReplicateAttributesPass());
CreateTPUBridgePipelineImpl(pm);
}
void CreateTPUBridgePipelineV1(OpPassManager &pm) {
// Convert to unified compilation and replication attributes.
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
CreateCanonicalizeCompileAndReplicateAttributesPass());
// Guarantee all functions have one use, which enables more exact shape
// inference.
@@ -213,7 +217,7 @@
// attributes like we do for the V2 pipeline, so we need to convert them from
// unified to legacy attributes before they get exposed to outside of the
// bridge.
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
CreateConvertToLegacyCompileAndReplicateAttributesPass());
}
@@ -242,7 +246,7 @@
void AddGraphExportLoweringPasses(OpPassManager &pm) {
auto add_pass = [&](std::unique_ptr<Pass> pass) {
- pm.addNestedPass<FuncOp>(std::move(pass));
+ pm.addNestedPass<func::FuncOp>(std::move(pass));
pm.addPass(CreateBreakUpIslandsPass());
};
@@ -251,7 +255,7 @@
add_pass(TFDevice::CreateReplicaIDToDeviceOrdinalPass());
add_pass(TFDevice::CreateParallelExecuteToIslandsPass());
add_pass(TFDevice::CreateLaunchToDeviceAttributePass());
- pm.addNestedPass<FuncOp>(TFTPU::CreateTPUDevicePropagationPass());
+ pm.addNestedPass<func::FuncOp>(TFTPU::CreateTPUDevicePropagationPass());
pm.addPass(createSymbolDCEPass());
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_convert_control_to_data_outputs_pass) {
@@ -290,19 +294,20 @@
// all graphs should have control dependencies to enforce this.
VLOG(2) << "Create TF XLA Bridge pipeline";
const llvm::SmallVector<std::string, 4> ops_to_preserve = {};
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
// It is assumed at this stage there are no V1 control flow ops as Graph
// functionalization is run before import. Ops can be lifted out of
// tf_executor dialect islands/graphs.
- pm.addNestedPass<FuncOp>(CreateExecutorDialectToFunctionalConversionPass());
+ pm.addNestedPass<func::FuncOp>(
+ CreateExecutorDialectToFunctionalConversionPass());
// Guarantee all functions have one use, which enables more exact shape
// inference.
pm.addPass(TF::CreateTFShapeInferencePass());
// Running canonicalizer before decomposing resource ops in cluster helps the
// latter pass to converge faster as it does not have to spend time folding
// away dead ops.
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
// Encapsulate StatefulPartitionedCallOp within a cluster so that the
// composite resource ops can be decomposed.
pm.addPass(TFDevice::CreateXlaClusterFormationPass());
@@ -313,7 +318,7 @@
// from While/WhileRegion ops within cluster would lead to more precise
// shapes.
pm.addPass(TF::CreateTFShapeInferencePass());
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
pm.addPass(TFDevice::CreateResourceOpLiftingPass());
// Inline the StatefulPartitionedCallOp op based in the parent region.
pm.addPass(TFDevice::CreateXlaInlineDeviceOpsPass());
@@ -321,9 +326,9 @@
// pass opens up some opportunities for canonicalization of cluster ops.
// Specifically, we want to eliminate pass through results from the cluster
// op.
- pm.addNestedPass<FuncOp>(createCanonicalizerPass());
+ pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
- pm.addNestedPass<FuncOp>(createCSEPass());
+ pm.addNestedPass<func::FuncOp>(createCSEPass());
pm.addPass(createSymbolDCEPass());
pm.addPass(TF::CreateTFRegionControlFlowToFunctional());
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize_compile_and_replicate_attributes.cc b/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize_compile_and_replicate_attributes.cc
index a122b71..e656b0e 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize_compile_and_replicate_attributes.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/canonicalize_compile_and_replicate_attributes.cc
@@ -42,7 +42,7 @@
};
void CanonicalizeCompileAndReplicateAttributesPass::runOnOperation() {
- FuncOp func_op = getOperation();
+ func::FuncOp func_op = getOperation();
ModuleOp module_op = func_op->getParentOfType<ModuleOp>();
mlir::OpBuilder builder(module_op.getContext());
func_op->walk([&](mlir::Operation* op) {
@@ -57,7 +57,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateCanonicalizeCompileAndReplicateAttributesPass() {
return std::make_unique<CanonicalizeCompileAndReplicateAttributesPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc
index 62a2efb..fbdac9d 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_formation.cc
@@ -233,7 +233,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateClusterFormationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateClusterFormationPass() {
return std::make_unique<ClusterFormationPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc
index 004a61f..56f594b 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.cc
@@ -705,7 +705,7 @@
});
}
-void EmitInputsConstraintsRemarks(FuncOp func,
+void EmitInputsConstraintsRemarks(func::FuncOp func,
const ValuesConstraintSet &constraints) {
constraints.Walk([&](Value value, ValueConstraint constraint) {
if (auto arg = value.dyn_cast<BlockArgument>())
@@ -716,7 +716,7 @@
}
LogicalResult InferFunctionBodyValuesConstraints(
- FuncOp func, ValuesConstraintSet &constraints) {
+ func::FuncOp func, ValuesConstraintSet &constraints) {
for (unsigned i = 0; i < func.getNumResults(); ++i) {
auto str = func.getResultAttrOfType<StringAttr>(i, "tf.constraint");
if (!str) continue;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.h b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.h
index 385e92d..668f298 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_ops_by_policy.h
@@ -272,7 +272,7 @@
// Emits constraints remarks for function inputs that are in the constraints
// set (entry block arguments have constraints).
-void EmitInputsConstraintsRemarks(FuncOp func,
+void EmitInputsConstraintsRemarks(func::FuncOp func,
const ValuesConstraintSet& constraints);
// Infers constraints for the values in the function body from the function
@@ -285,7 +285,7 @@
// return %v : tensor<?x?xf32>
// }
LogicalResult InferFunctionBodyValuesConstraints(
- FuncOp func, ValuesConstraintSet& constraints);
+ func::FuncOp func, ValuesConstraintSet& constraints);
} // namespace TFDevice
} // namespace mlir
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_outlining.cc b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_outlining.cc
index b31b0d6..6fd79e7 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_outlining.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_outlining.cc
@@ -55,8 +55,8 @@
// Builds a function that outlines region attached to cluster_op or launch_op,
// and inserts built function into given module.
template <typename ClusterOrLaunchOp>
-FuncOp BuildFunction(llvm::ArrayRef<Value> live_ins, ClusterOrLaunchOp op,
- SymbolTable* symbol_table, OpBuilder* builder) {
+func::FuncOp BuildFunction(llvm::ArrayRef<Value> live_ins, ClusterOrLaunchOp op,
+ SymbolTable* symbol_table, OpBuilder* builder) {
llvm::SmallVector<Type, 4> operand_types;
operand_types.reserve(live_ins.size());
for (Value v : live_ins) operand_types.emplace_back(v.getType());
@@ -65,7 +65,8 @@
// TODO(lyandy): Define better name for outlined function. Potentially some
// name can be added during cluster formation.
- FuncOp outlined_func = FuncOp::create(op.getLoc(), "_func", func_type);
+ func::FuncOp outlined_func =
+ func::FuncOp::create(op.getLoc(), "_func", func_type);
// This function is not externally visible and marking it private would allow
// symbol-dce pass to remove it when it is not referenced anymore.
@@ -105,7 +106,7 @@
llvm::SetVector<Value> live_ins;
getUsedValuesDefinedAbove(cluster_op.body(), cluster_op.body(), live_ins);
- FuncOp outlined_func =
+ func::FuncOp outlined_func =
BuildFunction(live_ins.getArrayRef(), cluster_op, symbol_table, builder);
cluster_op->setAttr(
builder->getStringAttr(kFuncAttr),
@@ -128,7 +129,7 @@
llvm::SetVector<Value> live_ins;
getUsedValuesDefinedAbove(launch_op.body(), launch_op.body(), live_ins);
- FuncOp outlined_func =
+ func::FuncOp outlined_func =
BuildFunction(live_ins.getArrayRef(), launch_op, symbol_table, builder);
launch_op->setAttr(
builder->getStringAttr(kFuncAttr),
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc
index ac50c3c..56c22ab 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/cluster_tf_ops_pass.cc
@@ -101,7 +101,7 @@
// The operations to be included in the body of the function.
llvm::SmallVector<Operation *, 4> ops;
- FuncOp partition_op;
+ func::FuncOp partition_op;
};
// Returns a map that maps the host address to the metadata of the function
@@ -109,7 +109,7 @@
// values, result values, result devices and the operations to be included in
// the function body.
llvm::Optional<llvm::StringMap<FunctionMetadata>> GetFunctionMetadatas(
- FuncOp func_op) {
+ func::FuncOp func_op) {
llvm::StringMap<FunctionMetadata> metadatas;
WalkResult result = func_op.getBody().walk([&](Operation *op) {
std::string op_host = GetHost(op);
@@ -206,7 +206,7 @@
FunctionType func_type =
FunctionType::get(context, input_types, result_types);
Location loc = metadata.ops.front()->getLoc();
- FuncOp func_op = FuncOp::create(loc, func_name, func_type);
+ func::FuncOp func_op = func::FuncOp::create(loc, func_name, func_type);
// Sets the device attribute for every input and every result of the
// function.
for (int i : llvm::seq<int>(0, metadata.input_devices.size())) {
@@ -303,8 +303,8 @@
void runOnOperation() override {
MLIRContext *context = &getContext();
ModuleOp module_op = getOperation();
- SmallVector<FuncOp, 4> original_func;
- for (auto func_op : module_op.getOps<FuncOp>()) {
+ SmallVector<func::FuncOp, 4> original_func;
+ for (auto func_op : module_op.getOps<func::FuncOp>()) {
original_func.push_back(func_op);
}
for (auto func_op : original_func) {
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc b/tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc
index 7d5e97a..4268d48 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/collection_ops_util.cc
@@ -192,7 +192,7 @@
infer_from_op);
if (type_from_else.hasValue()) return type_from_else;
} else if (auto call = llvm::dyn_cast<CallOpInterface>(use.getOwner())) {
- auto callee = dyn_cast<FuncOp>(call.resolveCallable());
+ auto callee = dyn_cast<func::FuncOp>(call.resolveCallable());
auto type_from_callee = GetElementTypeFromAccess(
callee.getArgument(use.getOperandNumber()), module, infer_from_op);
if (type_from_callee.hasValue()) return type_from_callee;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/convert_control_to_data_outputs.cc b/tensorflow/compiler/mlir/tensorflow/transforms/convert_control_to_data_outputs.cc
index 3fe52b6..64272d0 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/convert_control_to_data_outputs.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/convert_control_to_data_outputs.cc
@@ -57,7 +57,7 @@
// Returns a vector of all tf.WhileOp(s) which use func as while body. If any of
// the uses is as a while condition, an empty vector is returned.
-SmallVector<TF::WhileOp> GetWhileCallers(FuncOp func,
+SmallVector<TF::WhileOp> GetWhileCallers(func::FuncOp func,
SymbolUserMap& symbol_map) {
SmallVector<TF::WhileOp> while_callers;
for (auto user : symbol_map.getUsers(func)) {
@@ -77,7 +77,7 @@
// `resource_equivalence_classes`. Resources are equivalent if they are accessed
// by a common op, and equivalent resources will be assigned to the same chain.
void CollectChainResources(
- FuncOp func, ResourceToOpsMapTy& chain_resource_to_ops_map,
+ func::FuncOp func, ResourceToOpsMapTy& chain_resource_to_ops_map,
llvm::EquivalenceClasses<ResourceId>& resource_equivalence_classes,
const TF::SideEffectAnalysis::Info& side_effect_analysis) {
auto graph_op = cast<GraphOp>(func.front().front());
@@ -145,7 +145,7 @@
// Remove all control outputs of the function. Traverses NoOp control barrier
// chains from FetchOp to all NoOp control barriers. Returns true
// iff at least one control output is deleted.
-bool RemoveAllControlOutputs(FuncOp func) {
+bool RemoveAllControlOutputs(func::FuncOp func) {
auto graph_op = cast<GraphOp>(func.front().front());
FetchOp fetch = graph_op.GetFetch();
@@ -187,7 +187,7 @@
// Appends function arguments with `num_resources` number of arguments of
// requested type.
-void AppendFunctionArguments(FuncOp func, int num_resources,
+void AppendFunctionArguments(func::FuncOp func, int num_resources,
ShapedType chaining_data_type) {
for (int i = 0; i < num_resources; ++i) {
func.getRegion().addArgument(chaining_data_type, func.getLoc());
@@ -201,7 +201,7 @@
// Appends function results with `num_resources` number of results of requested
// type.
-void AppendFunctionResults(FuncOp func, int num_resources,
+void AppendFunctionResults(func::FuncOp func, int num_resources,
ShapedType chaining_data_type) {
Block& block = func.front();
auto graph_op = cast<GraphOp>(block.front());
@@ -260,7 +260,7 @@
// equivalence class, and (2) a control dependency from all the operations that
// read/write to a resource of the class to the chain_sink operation.
void ChainResourceOps(
- FuncOp func, ResourceToOpsMapTy& chain_resource_to_ops_map,
+ func::FuncOp func, ResourceToOpsMapTy& chain_resource_to_ops_map,
llvm::EquivalenceClasses<ResourceId>& resource_equivalence_classes,
int num_old_outputs) {
assert(num_old_outputs + resource_equivalence_classes.getNumClasses() ==
@@ -373,7 +373,7 @@
// Converts the control outputs of the while body to data outputs, thus
// removing control barrier at the end of while loop body.
void ConvertControlToDataOutputs(
- FuncOp while_body, SmallVectorImpl<TF::WhileOp>& while_callers,
+ func::FuncOp while_body, SmallVectorImpl<TF::WhileOp>& while_callers,
OperationSetTy& recompute_analysis_for_funcs,
const TF::SideEffectAnalysis::Info& side_effect_analysis) {
if (while_callers.empty()) return;
@@ -431,8 +431,9 @@
// If the while callers are modified as part of the optimization, then the
// side effect analysis of their parent functions are invalidated. They
// need to be recomputed.
- recompute_analysis_for_funcs.insert(while_op->getParentOfType<FuncOp>());
- FuncOp while_cond = while_op.cond_function();
+ recompute_analysis_for_funcs.insert(
+ while_op->getParentOfType<func::FuncOp>());
+ func::FuncOp while_cond = while_op.cond_function();
// Rewrite while op with extra chaining arguments and results.
while_op = RewriteWhileOp(while_op, num_chains, chaining_data_type);
bool first_visit = visited.insert(while_cond).second;
@@ -455,13 +456,13 @@
SymbolTableCollection table;
SymbolUserMap symbol_map(table, module);
- llvm::SmallDenseMap<FuncOp, SmallVector<TF::WhileOp>>
+ llvm::SmallDenseMap<func::FuncOp, SmallVector<TF::WhileOp>>
while_body_func_to_while_ops;
// Get all the while body functions and the corresponding while ops first
// because the symbol user map is invalidated once we start deleting while
// ops.
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
if (func.isExternal()) continue;
SmallVector<TF::WhileOp> while_callers = GetWhileCallers(func, symbol_map);
if (while_callers.empty()) continue;
@@ -472,7 +473,7 @@
OperationSetTy recompute_analysis_for_funcs;
for (auto& entry : while_body_func_to_while_ops) {
- FuncOp while_body = entry.getFirst();
+ func::FuncOp while_body = entry.getFirst();
SmallVector<TF::WhileOp>& while_callers = entry.getSecond();
if (recompute_analysis_for_funcs.contains(while_body)) {
// TODO(b/202540801): Recomputing side effect analysis for the entire
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/convert_to_legacy_compile_and_replicate_attributes.cc b/tensorflow/compiler/mlir/tensorflow/transforms/convert_to_legacy_compile_and_replicate_attributes.cc
index 864ac0e..5d75ade 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/convert_to_legacy_compile_and_replicate_attributes.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/convert_to_legacy_compile_and_replicate_attributes.cc
@@ -40,7 +40,7 @@
void runOnOperation() override;
};
-LogicalResult ConvertToLegacyAttributes(FuncOp func_op) {
+LogicalResult ConvertToLegacyAttributes(func::FuncOp func_op) {
auto result = func_op->walk([&](mlir::Operation* op) {
if (failed(TF::HasValidCompilationAndReplicationAttributes(*op)))
return WalkResult::interrupt();
@@ -55,13 +55,13 @@
}
void ConvertToLegacyCompileAndReplicateAttributesPass::runOnOperation() {
- FuncOp func_op = getOperation();
+ func::FuncOp func_op = getOperation();
if (failed(ConvertToLegacyAttributes(func_op))) return signalPassFailure();
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateConvertToLegacyCompileAndReplicateAttributesPass() {
return std::make_unique<ConvertToLegacyCompileAndReplicateAttributesPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops_pass.cc
index 3af0641..3ec2af3 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/decompose_resource_ops_pass.cc
@@ -46,18 +46,18 @@
SymbolUserMap symbol_map(table, module);
// Create map from caller to set of all callee(s).
- llvm::DenseMap<FuncOp, llvm::DenseSet<FuncOp>> caller_callee_map;
+ llvm::DenseMap<func::FuncOp, llvm::DenseSet<func::FuncOp>> caller_callee_map;
// Use worklist to populate the set of reachable functions.
- std::queue<FuncOp> function_worklist;
+ std::queue<func::FuncOp> function_worklist;
// Iterates over all functions within the module to (1) create caller-callee
// map, and (2) initialize function worklist with functions referenced from
// device cluster ops.
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
for (auto user : symbol_map.getUsers(func)) {
// Populate caller-callee map.
- if (FuncOp caller = user->getParentOfType<FuncOp>())
+ if (func::FuncOp caller = user->getParentOfType<func::FuncOp>())
caller_callee_map[caller].insert(func);
// Initialize function worklist with functions referenced in device
// cluster.
@@ -71,7 +71,7 @@
// Uses worklist algorithm to insert all functions reachable from device
// cluster ops.
while (!function_worklist.empty()) {
- FuncOp caller = function_worklist.front();
+ func::FuncOp caller = function_worklist.front();
function_worklist.pop();
for (auto callee : caller_callee_map[caller]) {
if (reachable_functions.insert(callee).second)
@@ -117,7 +117,7 @@
// Apply patterns to reachable functions.
for (Operation* op : reachable_functions) {
- assert(isa<FuncOp>(op));
+ assert(isa<func::FuncOp>(op));
if (failed(applyPatternsAndFoldGreedily(op, patterns))) {
return op->emitError() << kBadDecompositionMessage;
}
@@ -128,7 +128,7 @@
// collected many cluster ops when we were populating reachable functions. But
// we would still need to do a walk to find all clusters that do not
// reference any function.
- for (FuncOp func : module.getOps<FuncOp>()) {
+ for (func::FuncOp func : module.getOps<func::FuncOp>()) {
// If we have already applied patterns to a function then we can skip
// applying patterns to any device clusters it contains.
if (reachable_functions.contains(func)) continue;
@@ -182,7 +182,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateDecomposeResourceOpsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeResourceOpsPass() {
return std::make_unique<DecomposeResourceOpsPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/deduplicate_bound_input_bindings.cc b/tensorflow/compiler/mlir/tensorflow/transforms/deduplicate_bound_input_bindings.cc
index 53a9b6f..31a5d08 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/deduplicate_bound_input_bindings.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/deduplicate_bound_input_bindings.cc
@@ -34,7 +34,7 @@
};
void DedupBoundInputBindingPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (!mlir::tf_saved_model::IsExported(func)) return;
llvm::SmallDenseMap<Attribute, unsigned, 8> unique_bound_inputs;
llvm::BitVector arg_indices_to_erase(func.getNumArguments());
@@ -54,7 +54,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateDedupBoundInputBindingPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateDedupBoundInputBindingPass() {
return std::make_unique<DedupBoundInputBindingPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/device_attribute_to_launch.cc b/tensorflow/compiler/mlir/tensorflow/transforms/device_attribute_to_launch.cc
index a2ff2c1..4a4e1d3 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/device_attribute_to_launch.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/device_attribute_to_launch.cc
@@ -67,7 +67,8 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateDeviceAttributeToLaunchPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateDeviceAttributeToLaunchPass() {
return std::make_unique<DeviceAttributeToLaunch>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/device_index_selector.cc b/tensorflow/compiler/mlir/tensorflow/transforms/device_index_selector.cc
index 475cf76..04dfc3d 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/device_index_selector.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/device_index_selector.cc
@@ -52,7 +52,7 @@
} // namespace
void DeviceIndexSelector::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
// Convert all the DeviceIndex ops to constant values.
func.getBody().walk([](TF::DeviceIndexOp op) {
// This just selects the default in all cases where DeviceIndex feeds into
@@ -75,7 +75,7 @@
}
// Creates an instance of the TensorFlow DeviceIndex selector pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateDeviceIndexSelectorPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDeviceIndexSelectorPass() {
return std::make_unique<DeviceIndexSelector>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/drop_while_shape_invariant.cc b/tensorflow/compiler/mlir/tensorflow/transforms/drop_while_shape_invariant.cc
index 30189c3..77398aa 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/drop_while_shape_invariant.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/drop_while_shape_invariant.cc
@@ -52,11 +52,12 @@
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateDropWhileShapeInvariantPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateDropWhileShapeInvariantPass() {
return std::make_unique<DropWhileShapeInvariantPass>();
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateDropWhileShapeInvariantInDeviceClusterPass() {
return std::make_unique<DropWhileShapeInvariantInDeviceClusterPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc b/tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
index 00083f8..12eb368 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/einsum.cc
@@ -543,7 +543,7 @@
return rewriter.notifyMatchFailure(op, "unsupported einsum lowering");
}
-std::unique_ptr<OperationPass<FuncOp>> CreateTransformEinsumPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTransformEinsumPass() {
return std::make_unique<TransformEinsumPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/executor_island_coarsening.cc b/tensorflow/compiler/mlir/tensorflow/transforms/executor_island_coarsening.cc
index 6f58f0a..17c4257 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/executor_island_coarsening.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/executor_island_coarsening.cc
@@ -502,7 +502,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTFExecutorIslandCoarseningPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTFExecutorIslandCoarseningPass() {
return std::make_unique<ExecutorIslandCoarseningPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc
index 20dd808..93c9f5b 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_inline_tpu_island.cc
@@ -63,7 +63,7 @@
auto call_interface = cast<CallOpInterface>(call_op.getOperation());
auto called_func =
- dyn_cast_or_null<FuncOp>(call_interface.resolveCallable());
+ dyn_cast_or_null<func::FuncOp>(call_interface.resolveCallable());
if (failed(inlineCall(inliner, call_interface,
cast<CallableOpInterface>(called_func.getOperation()),
@@ -79,8 +79,8 @@
if (walk_result.wasInterrupted()) return signalPassFailure();
// Move all remaining nested functions back into the parent module.
Block &nested_block = nested_module->getRegion(0).front();
- for (FuncOp func_op :
- llvm::make_early_inc_range(nested_block.getOps<FuncOp>())) {
+ for (func::FuncOp func_op :
+ llvm::make_early_inc_range(nested_block.getOps<func::FuncOp>())) {
if (!symbol_table.lookupSymbolIn(getOperation(), func_op.getName())) {
nested_block.getOperations().remove(func_op.getOperation());
symbol_table.insert(func_op.getOperation());
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc
index 296e5c6..4ce2354 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_island_coarsening.cc
@@ -284,18 +284,18 @@
ModuleOp module) {
SymbolTableCollection table;
SymbolUserMap symbol_map(table, module);
- llvm::DenseMap<FuncOp, llvm::DenseSet<FuncOp>> caller_callee_map;
+ llvm::DenseMap<func::FuncOp, llvm::DenseSet<func::FuncOp>> caller_callee_map;
// Creates work queue for determining reachability below.
- std::queue<FuncOp> function_worklist;
+ std::queue<func::FuncOp> function_worklist;
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
for (auto user : symbol_map.getUsers(func)) {
// Populates work queue with func ops called from TPUPartitionedCall.
if (llvm::isa<TF::TPUPartitionedCallOp>(user)) {
function_worklist.push(func);
}
// Populates caller to called func map.
- if (FuncOp caller = user->getParentOfType<FuncOp>()) {
+ if (func::FuncOp caller = user->getParentOfType<func::FuncOp>()) {
caller_callee_map[caller].insert(func);
}
}
@@ -305,7 +305,7 @@
// and iteratively descending through called ops.
SmallPtrSet<Operation*, 16> reachable_functions;
while (!function_worklist.empty()) {
- FuncOp caller = function_worklist.front();
+ func::FuncOp caller = function_worklist.front();
function_worklist.pop();
if (reachable_functions.insert(caller).second) {
for (auto callee : caller_callee_map[caller]) {
@@ -321,8 +321,8 @@
// Map tpu cluster names to the functions that contain operations for this
// cluster.
- DenseMap<StringRef, DenseSet<FuncOp>> tpu_funcs;
- for (FuncOp func_op : getOperation().getOps<FuncOp>()) {
+ DenseMap<StringRef, DenseSet<func::FuncOp>> tpu_funcs;
+ for (func::FuncOp func_op : getOperation().getOps<func::FuncOp>()) {
func_op.walk([&](Operation* op) {
StringAttr cluster_name =
op->getAttrOfType<StringAttr>(TF::kReplicationInfoAttr);
@@ -343,7 +343,8 @@
for (NamedAttribute attr : op->getAttrs()) {
auto symbol_ref = attr.getValue().dyn_cast<FlatSymbolRefAttr>();
if (!symbol_ref) continue;
- FuncOp callee = symbol_table.lookup<FuncOp>(symbol_ref.getValue());
+ func::FuncOp callee =
+ symbol_table.lookup<func::FuncOp>(symbol_ref.getValue());
if (!callee) continue;
if (funcs_for_cluster->second.count(callee)) return true;
}
@@ -353,7 +354,7 @@
// Populates skip set with functions reachable from TPUPartitionedCall ops.
const auto functions_to_skip =
FindTPUPartitionedCallReachableFunctions(getOperation());
- for (FuncOp func_op : getOperation().getOps<FuncOp>()) {
+ for (func::FuncOp func_op : getOperation().getOps<func::FuncOp>()) {
if (functions_to_skip.contains(func_op)) {
continue;
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
index 7400e9c..2ba6ddd 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/executor_tpuv1_outline_tpu_island.cc
@@ -53,8 +53,8 @@
// Move FuncOp referenced by `symbol_ref` from one symbol table to another.
void MoveFuncOp(FlatSymbolRefAttr &symbol_ref, SymbolTable &from,
SymbolTable &to) {
- if (to.lookup<FuncOp>(symbol_ref.getValue())) return;
- FuncOp callee = from.lookup<FuncOp>(symbol_ref.getValue());
+ if (to.lookup<func::FuncOp>(symbol_ref.getValue())) return;
+ func::FuncOp callee = from.lookup<func::FuncOp>(symbol_ref.getValue());
callee.getOperation()->getBlock()->getOperations().remove(
callee.getOperation());
to.insert(callee);
@@ -106,8 +106,8 @@
// Create the outlined function
SmallString<32> name = kOutlinedFuncPrefix;
name += llvm::Twine(prefix_id++).str();
- auto outlined_func =
- OpBuilder(ctx).create<FuncOp>(island_op.getLoc(), name, func_type);
+ auto outlined_func = OpBuilder(ctx).create<func::FuncOp>(island_op.getLoc(),
+ name, func_type);
outlined_symbol_table.insert(outlined_func);
outlined_func.setNested();
@@ -153,7 +153,7 @@
// Outlined all the transitively called functions by moving them in the
// outlined module.
- for (FuncOp func : outlined_module.getOps<FuncOp>()) {
+ for (func::FuncOp func : outlined_module.getOps<func::FuncOp>()) {
func.walk([&](Operation *op) {
for (NamedAttribute attr : op->getAttrs()) {
if (auto symbol_ref = attr.getValue().dyn_cast<FlatSymbolRefAttr>()) {
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc b/tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
index 83275ed..bef5a0f 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/fold_broadcast.cc
@@ -198,7 +198,7 @@
} // namespace
namespace TF {
-std::unique_ptr<OperationPass<FuncOp>> CreateBroadcastFoldPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateBroadcastFoldPass() {
return absl::make_unique<BroadcastFoldPass>();
}
} // namespace TF
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/freeze_global_tensors.cc b/tensorflow/compiler/mlir/tensorflow/transforms/freeze_global_tensors.cc
index d23a89c..cfb8be2 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/freeze_global_tensors.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/freeze_global_tensors.cc
@@ -48,7 +48,8 @@
}
static ResourceLatticeValue getPessimisticValueState(Value value) {
if (auto barg = value.dyn_cast<BlockArgument>()) {
- if (FuncOp func = dyn_cast<FuncOp>(barg.getOwner()->getParentOp())) {
+ if (func::FuncOp func =
+ dyn_cast<func::FuncOp>(barg.getOwner()->getParentOp())) {
SymbolTable symbol_table(func->getParentOfType<ModuleOp>());
auto global_tensor = LookupBoundInputOfType<GlobalTensorOp>(
func, barg.getArgNumber(), symbol_table);
@@ -126,7 +127,7 @@
// Collect all those freezable. This is an extra scan but allows for the
// partial behavior from `allow_mutable_tensor`.
DenseMap<BlockArgument, bool> freezeable;
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
for (BlockArgument val : func.getArguments()) {
if (!getElementTypeOrSelf(val.getType()).isa<TF::ResourceType>())
continue;
@@ -161,7 +162,7 @@
}
DenseSet<GlobalTensorOp> frozen_global_tensors;
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
llvm::BitVector args_to_erase(func.getNumArguments());
DenseMap<Operation *, llvm::BitVector> remove_operands;
OpBuilder builder(func.getBody());
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/freeze_saved_model_assets.cc b/tensorflow/compiler/mlir/tensorflow/transforms/freeze_saved_model_assets.cc
index 28290a7..aea2250 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/freeze_saved_model_assets.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/freeze_saved_model_assets.cc
@@ -57,7 +57,7 @@
}
SymbolTable symbol_table(module);
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
llvm::BitVector args_to_erase(func.getNumArguments());
OpBuilder builder(func.getBody());
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc b/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc
index e6b70aa..86ffd3a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_cfg.cc
@@ -56,7 +56,7 @@
// Requires the function to provide arguments for each of the `fn` operands
// that is compatible for tensor cast.
static Operation* CallFn(Location loc, const std::function<Value(int)>& get_arg,
- FuncOp fn, OpBuilder* builder) {
+ func::FuncOp fn, OpBuilder* builder) {
FunctionType fn_type = fn.getFunctionType();
llvm::SmallVector<Value, 4> operands;
int num_operands = fn_type.getNumInputs();
@@ -302,7 +302,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTFFunctionalControlFlowToCFG() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTFFunctionalControlFlowToCFG() {
return std::make_unique<FunctionalControlFlowToCFG>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc b/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc
index 61cdafd..eaec96f 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc
@@ -55,7 +55,7 @@
// the input arguments are used as is (for IfOp) or block arguments of the same
// type as the input arguments are created and then used as call arguments (for
// While).
-YieldOp CreateCall(Operation* op, FuncOp func, Region& caller_region,
+YieldOp CreateCall(Operation* op, func::FuncOp func, Region& caller_region,
ValueRange args, bool use_region_args) {
assert(caller_region.empty() &&
"Expected empty region for newly created ops");
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc b/tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
index 3c3cece..4f64990 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/fused_kernel_matcher.cc
@@ -112,7 +112,7 @@
// We do support fusion only if the contraction operation is inside one of
// the expected operations with regions. Other operations can have semantics
// that is not compatible with fusion (e.g. region compilation).
- if (!isa<FuncOp, IfOp, WhileOp>(contraction->getParentOp())) {
+ if (!isa<func::FuncOp, IfOp, WhileOp>(contraction->getParentOp())) {
return rewriter.notifyMatchFailure(
contraction,
"fused operation must be nested inside a function, If or While");
@@ -255,7 +255,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateFusedKernelMatcherPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateFusedKernelMatcherPass() {
return std::make_unique<FusedKernelMatcherPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc b/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
index 8fed559..56318d0 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/gpu_fusion.cc
@@ -118,7 +118,7 @@
};
void GpuOpFusionPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
patterns.add<ReluToFusedBatchNorm>(&getContext());
(void)applyPatternsAndFoldGreedily(func, std::move(patterns));
@@ -126,7 +126,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateGpuOpFusionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateGpuOpFusionPass() {
return std::make_unique<GpuOpFusionPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc
index 7b185d5..a235b2c 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/graph_optimization_pass.cc
@@ -49,13 +49,14 @@
// Run island coarsening before shape inference to allow more exact shape
// inference using constant folding within islands.
- pm.addNestedPass<FuncOp>(tf_executor::CreateTFExecutorIslandCoarseningPass());
+ pm.addNestedPass<func::FuncOp>(
+ tf_executor::CreateTFExecutorIslandCoarseningPass());
pm.addPass(CreateTFShapeInferencePass());
// Assign optimal data layout to layout sensitive operations and delete
// redundant transposes from the IR.
LayoutOptimizationPipelineOptions layout_optimization_options;
- CreateLayoutOptimizationPipeline(pm.nest<FuncOp>(),
+ CreateLayoutOptimizationPipeline(pm.nest<func::FuncOp>(),
layout_optimization_options);
// Prepare IR for exporting.
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc b/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc
index fb6ac71..8e97af3 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/graph_pruning.cc
@@ -56,7 +56,7 @@
// feeds/fetches/targets we should not attempt to prune. The best approximation
// here is to check if the graph is of the "main" function and does not have the
// "tf.entry_function" attribute defined.
-bool CanPruneGraph(FuncOp func) {
+bool CanPruneGraph(func::FuncOp func) {
return func.getName() != "main" ||
func->getAttrOfType<DictionaryAttr>("tf.entry_function") != nullptr;
}
@@ -186,7 +186,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTFExecutorGraphPruningPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTFExecutorGraphPruningPass(
llvm::ArrayRef<std::string> ops_to_preserve) {
return std::make_unique<GraphPruningPass>(ops_to_preserve);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/guarantee_all_funcs_one_use.cc b/tensorflow/compiler/mlir/tensorflow/transforms/guarantee_all_funcs_one_use.cc
index 83d5ae8..7b7fa0f 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/guarantee_all_funcs_one_use.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/guarantee_all_funcs_one_use.cc
@@ -78,7 +78,8 @@
SymbolUserMap symbol_users(symbol_table_collection, module);
made_changes = false;
- for (auto func : llvm::make_early_inc_range(module.getOps<FuncOp>())) {
+ for (auto func :
+ llvm::make_early_inc_range(module.getOps<func::FuncOp>())) {
ArrayRef<Operation *> users = symbol_users.getUsers(func);
if (users.size() <= 1) {
continue;
@@ -93,7 +94,7 @@
"repeated diamond-like call structure "
"or just very large program)";
}
- FuncOp new_func = func.clone();
+ func::FuncOp new_func = func.clone();
symbol_table.insert(new_func);
new_func.setPrivate();
if (failed(SymbolTable::replaceAllSymbolUses(
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc b/tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc
index 613ea0d..e667755 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/hoist_replicate_invariant_resource_writes.cc
@@ -140,7 +140,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateHoistReplicateInvariantResourceWritesPass() {
return std::make_unique<HoistReplicateInvariantResourceWritesPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import.cc b/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import.cc
index 56283e7..4bb8245 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import.cc
@@ -140,7 +140,7 @@
void InitTextFileToImportPass::runOnOperation() {
RewritePatternSet patterns(&getContext());
MLIRContext* context = &getContext();
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
patterns.add<ConvertInitializeTableFromTextFileV2>(
context, StringRef(saved_model_dir_));
@@ -150,11 +150,10 @@
} // namespace
// Replace InitializeTableFromTextFileV2Ops with LookupTableImportV2Ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateInitTextFileToImportPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateInitTextFileToImportPass(
std::string saved_model_dir) {
return std::make_unique<InitTextFileToImportPass>(saved_model_dir);
}
-
} // namespace TF
} // namespace mlir
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import_test_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import_test_pass.cc
index 52272bd..374e267 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import_test_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/init_text_file_to_import_test_pass.cc
@@ -74,7 +74,7 @@
// Replace filename constant ops to use the temporary file.
MLIRContext* context = &getContext();
- for (FuncOp func : module.getOps<FuncOp>()) {
+ for (func::FuncOp func : module.getOps<func::FuncOp>()) {
llvm::SmallVector<arith::ConstantOp, 4> constant_ops(
func.getOps<arith::ConstantOp>());
for (auto op : constant_ops) {
@@ -97,7 +97,7 @@
// Run the lowering pass.
PassManager pm(context);
- pm.addNestedPass<FuncOp>(CreateInitTextFileToImportPass(""));
+ pm.addNestedPass<func::FuncOp>(CreateInitTextFileToImportPass(""));
if (failed(pm.run(module))) return signalPassFailure();
}
@@ -138,7 +138,7 @@
// Run the lowering pass.
MLIRContext* context = &getContext();
PassManager pm(context);
- pm.addNestedPass<FuncOp>(
+ pm.addNestedPass<func::FuncOp>(
CreateInitTextFileToImportPass(std::string(tempdir)));
if (failed(pm.run(module))) return signalPassFailure();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.cc b/tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.cc
index c260af5..a40c484 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/initialize_variables_in_session_init.cc
@@ -35,8 +35,8 @@
namespace {
void InitializeVariable(TF::VarHandleOp var_handle_op,
- tensorflow::Tensor* tensor, FuncOp session_init_func,
- OpBuilder builder) {
+ tensorflow::Tensor* tensor,
+ func::FuncOp session_init_func, OpBuilder builder) {
tensorflow::StatusOr<ElementsAttr> tensor_attr_or =
tensorflow::ConvertTensor(*tensor, &builder);
assert(tensor_attr_or.ok() && "Expect valid tensor");
@@ -57,14 +57,14 @@
constexpr char kTfSavedModelExportedNameAttr[] =
"tf_saved_model.exported_names";
-FuncOp CreateSessionInitFunc(ModuleOp module) {
+func::FuncOp CreateSessionInitFunc(ModuleOp module) {
constexpr char kSessionInitFuncName[] = "SessionInitializerFunction";
mlir::OpBuilder builder(module.getBodyRegion());
auto func_type =
FunctionType::get(module.getContext(), /*inputs=*/{}, /*results=*/{});
- auto func =
- builder.create<FuncOp>(module->getLoc(), kSessionInitFuncName, func_type);
+ auto func = builder.create<func::FuncOp>(module->getLoc(),
+ kSessionInitFuncName, func_type);
func->setAttr(kTfSavedModelExportedNameAttr,
builder.getStrArrayAttr({kSessionInitFuncName}));
func.setVisibility(mlir::func::FuncOp::Visibility::Public);
@@ -85,13 +85,13 @@
return func;
}
-FuncOp GetOrCreateSessionInitFunc(ModuleOp module) {
+func::FuncOp GetOrCreateSessionInitFunc(ModuleOp module) {
SessionInitializerOp session_init_op = GetSessionInitializerOp(module);
if (!session_init_op) return CreateSessionInitFunc(module);
SymbolTable symbol_table(module);
if (!session_init_op.initializers().empty()) {
- FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
+ func::FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
session_init_op.initializers()[0].cast<FlatSymbolRefAttr>().getValue());
return init_func_op;
}
@@ -113,7 +113,7 @@
// Fetch all VarHandleOp.
llvm::StringSet<> variable_names;
llvm::SmallVector<TF::VarHandleOp, 4> var_ops;
- for (auto func_op : module.getOps<FuncOp>()) {
+ for (auto func_op : module.getOps<func::FuncOp>()) {
for (auto var_handle_op : func_op.getOps<TF::VarHandleOp>()) {
auto variable_name = GetVariableName(var_handle_op);
if (variable_names.count(variable_name)) continue;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc b/tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc
index ff67baa..fcb4ec9 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/launch_to_device_attribute.cc
@@ -110,7 +110,8 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateLaunchToDeviceAttributePass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateLaunchToDeviceAttributePass() {
return std::make_unique<LaunchToDeviceAttributePass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc b/tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
index dc1ba62..75a0a51 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/layout_optimization.cc
@@ -101,7 +101,7 @@
using Permutation = SmallVector<int64_t, 4>;
void LayoutAssignmentPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
// Get runtime devices information from the closest parent module.
RuntimeDevices devices;
@@ -433,7 +433,7 @@
}
void MoveTransposesPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
SmallVector<Operation*, 8> work_list;
@@ -487,7 +487,7 @@
MoveTransposeDirection::kEnd, !options.skip_fold_transpose_in_ops));
}
-std::unique_ptr<OperationPass<FuncOp>> CreateLayoutAssignmentPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLayoutAssignmentPass() {
// This static is kind of hack, it hooks the pipeline registration for the
// command line and piggy-back to the TableGen generated registration code.
static mlir::PassPipelineRegistration<LayoutOptimizationPipelineOptions>
@@ -498,7 +498,7 @@
return std::make_unique<LayoutAssignmentPass>();
}
-std::unique_ptr<OperationPass<FuncOp>> CreateMoveTransposesPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateMoveTransposesPass() {
return std::make_unique<MoveTransposesPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/legalize_hlo.cc b/tensorflow/compiler/mlir/tensorflow/transforms/legalize_hlo.cc
index 832adb8..17ac3a2 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/legalize_hlo.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/legalize_hlo.cc
@@ -3031,7 +3031,7 @@
populateWithGenerated(*patterns);
}
-std::unique_ptr<OperationPass<FuncOp>> CreateLegalizeHloToTfPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLegalizeHloToTfPass() {
return std::make_unique<LegalizeHloToTf>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.cc b/tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.cc
index a08296c..20efc06 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/lift_variables.cc
@@ -154,7 +154,7 @@
SmallSet<StringRef, 4> resource_names;
- for (FuncOp func : module.getOps<FuncOp>()) {
+ for (func::FuncOp func : module.getOps<func::FuncOp>()) {
for (int i = 0, e = func.getNumArguments(); i < e; ++i) {
auto resource_arg =
func.getArgAttrOfType<StringAttr>(i, kResourceNameArgAttr);
@@ -182,7 +182,7 @@
// Now that we have all global tensors created, we set the corresponding
// bound_inputs' types correctly.
SymbolTable symbol_table(module);
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
for (auto arg : func.getArguments()) {
unsigned arg_number = arg.getArgNumber();
auto global_tensor = LookupBoundInputOfType<GlobalTensorOp>(
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc b/tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc
index 908dd34..6bd9250 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/lower_quantized.cc
@@ -37,7 +37,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateLowerQuantizedPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateLowerQuantizedPass() {
return std::make_unique<LowerQuantizedPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/lower_tf_test_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/lower_tf_test_pass.cc
index 5c8272d..d7698cf 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/lower_tf_test_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/lower_tf_test_pass.cc
@@ -42,7 +42,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTestTFLowerTFPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTestTFLowerTFPass() {
return std::make_unique<LowerTF>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.cc b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.cc
index fa15134..25c9e4c 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.cc
@@ -48,7 +48,7 @@
return is_initialized;
}
-LogicalResult MarkInitializedVariablesInFunction(FuncOp function,
+LogicalResult MarkInitializedVariablesInFunction(func::FuncOp function,
tensorflow::Session* session) {
if (!session || !llvm::hasSingleElement(function)) return success();
Block& block = function.front();
@@ -89,10 +89,10 @@
LogicalResult MarkInitializedVariablesInFunction(ModuleOp module,
tensorflow::Session* session) {
- auto functions_range = module.getOps<FuncOp>();
+ auto functions_range = module.getOps<func::FuncOp>();
return mlir::failableParallelForEach(
module.getContext(), functions_range.begin(), functions_range.end(),
- [&](FuncOp function) {
+ [&](func::FuncOp function) {
return MarkInitializedVariablesInFunction(function, session);
});
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.h b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.h
index ad00e62..4a18c09 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables.h
@@ -28,7 +28,7 @@
// If 'session' is NULL the function is no-op.
// Returns failure in case fetching variables from session failed, success
// otherwise.
-LogicalResult MarkInitializedVariablesInFunction(FuncOp function,
+LogicalResult MarkInitializedVariablesInFunction(func::FuncOp function,
tensorflow::Session* session);
// Apply `MarkInitializedVariablesInFunction` to every non-empty function in the
// module.
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables_test_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables_test_pass.cc
index 80fbbf6..be3c9b1 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables_test_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/mark_initialized_variables_test_pass.cc
@@ -25,7 +25,7 @@
// This pass is only available in the tf-opt binary for testing.
class MarkInitializedVariablesTestPass
: public PassWrapper<MarkInitializedVariablesTestPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(MarkInitializedVariablesTestPass)
@@ -48,7 +48,7 @@
// This pass is only available in the tf-opt binary for testing.
class MarkInitializedVariablesInvalidSessionTestPass
: public PassWrapper<MarkInitializedVariablesInvalidSessionTestPass,
- OperationPass<FuncOp>> {
+ OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
MarkInitializedVariablesInvalidSessionTestPass)
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc b/tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc
index d97fde1..1939e62 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/mark_input_output_aliases.cc
@@ -98,7 +98,7 @@
}
void AddAliasingAttributeToDeviceFunc(
- FuncOp device_func,
+ func::FuncOp device_func,
llvm::DenseMap<Value, AliasInfo>& resource_alias_info_map) {
OpBuilder builder(device_func.getContext());
for (const auto& resource_alias_entry : resource_alias_info_map) {
@@ -133,7 +133,8 @@
}
FlatSymbolRefAttr func_attr = cluster_func.funcAttr();
- FuncOp device_func = module.lookupSymbol<FuncOp>(func_attr.getValue());
+ func::FuncOp device_func =
+ module.lookupSymbol<func::FuncOp>(func_attr.getValue());
AddAliasingAttributeToDeviceFunc(device_func, resource_alias_info_map);
});
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/materialize_mlir_passthrough_op.cc b/tensorflow/compiler/mlir/tensorflow/transforms/materialize_mlir_passthrough_op.cc
index 86b864f..79056c6 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/materialize_mlir_passthrough_op.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/materialize_mlir_passthrough_op.cc
@@ -54,7 +54,8 @@
op->emitError() << "could not parse attached MLIR module";
return;
}
- FuncOp main = dyn_cast<FuncOp>(nested_module->lookupSymbol("main"));
+ func::FuncOp main =
+ dyn_cast<func::FuncOp>(nested_module->lookupSymbol("main"));
if (!main) {
op->emitError() << "MLIR Opaque Op expects a main() entry point\n";
return;
@@ -100,7 +101,8 @@
} // namespace
namespace TF {
-std::unique_ptr<OperationPass<FuncOp>> CreateMaterializePassthroughOpPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateMaterializePassthroughOpPass() {
return std::make_unique<MaterializePassthroughOpPass>();
}
} // namespace TF
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/merge_control_flow.cc b/tensorflow/compiler/mlir/tensorflow/transforms/merge_control_flow.cc
index e84c3a6..524434e 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/merge_control_flow.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/merge_control_flow.cc
@@ -201,7 +201,7 @@
for (auto iter = std::prev(last); std::next(iter) != first; iter--) {
TF::IfRegionOp first_if_op = *iter;
- FuncOp func = first_if_op->getParentOfType<FuncOp>();
+ func::FuncOp func = first_if_op->getParentOfType<func::FuncOp>();
const TF::SideEffectAnalysis::Info& analysis =
side_effect_analysis->GetAnalysisForFunc(func);
auto all_ops = GetAllOpsFromIf(*(std::next(iter)));
@@ -246,7 +246,7 @@
absl::flat_hash_set<Operation*> visited;
absl::flat_hash_set<Operation*> moved_ops;
- FuncOp func = result_op->getParentOfType<FuncOp>();
+ func::FuncOp func = result_op->getParentOfType<func::FuncOp>();
const TF::SideEffectAnalysis::Info& analysis =
side_effect_analysis->GetAnalysisForFunc(func);
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc b/tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
index df85dcb..54bd98a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/optimize.cc
@@ -154,7 +154,7 @@
void CreateTFStandardPipeline(OpPassManager &pm,
const StandardPipelineOptions &options) {
- OpPassManager &func_pm = pm.nest<FuncOp>();
+ OpPassManager &func_pm = pm.nest<func::FuncOp>();
// First operates on the executor dialect:
// - remove dead islands.
@@ -174,11 +174,11 @@
pm.addPass(createInlinerPass());
}
pm.addPass(createSymbolDCEPass());
- pm.addNestedPass<FuncOp>(CreateTFOptimizePass());
- pm.addNestedPass<FuncOp>(createCSEPass());
+ pm.addNestedPass<func::FuncOp>(CreateTFOptimizePass());
+ pm.addNestedPass<func::FuncOp>(createCSEPass());
}
-std::unique_ptr<OperationPass<FuncOp>> CreateTFOptimizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTFOptimizePass() {
return std::make_unique<TensorFlowOptimizePass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/optimize_global_tensors.cc b/tensorflow/compiler/mlir/tensorflow/transforms/optimize_global_tensors.cc
index afdec42..35893f2 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/optimize_global_tensors.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/optimize_global_tensors.cc
@@ -50,7 +50,7 @@
// This struct tracks which funcs (and which argument to that func) the global
// tensor is bound to.
struct GlobalTensorUse {
- mutable FuncOp func;
+ mutable func::FuncOp func;
size_t arg_index;
};
@@ -84,7 +84,7 @@
GlobalTensorUsesMap global_tensor_uses;
SymbolTable symbol_table(module);
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
for (size_t i = 0, e = func.getNumArguments(); i < e; i++) {
auto sym =
func.getArgAttrOfType<SymbolRefAttr>(i, "tf_saved_model.bound_input");
@@ -135,7 +135,7 @@
}
void EraseUnusedBoundInputs(ModuleOp module) {
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
llvm::BitVector args_to_erase(func.getNumArguments());
for (int i = 0, e = func.getNumArguments(); i < e; i++) {
if (func.getArgAttr(i, "tf_saved_model.bound_input") &&
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc b/tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc
index ba9bac0..04456be 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/parallel_execute_to_islands.cc
@@ -201,7 +201,8 @@
}
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateParallelExecuteToIslandsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateParallelExecuteToIslandsPass() {
return std::make_unique<ParallelExecuteToIslandsPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/passes.h b/tensorflow/compiler/mlir/tensorflow/transforms/passes.h
index e1a79af..459e1ba 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/passes.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/passes.h
@@ -404,12 +404,12 @@
namespace TFTPU {
// Creates a pass that canonicalizes legacy compilation and replication
// attributes.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateCanonicalizeCompileAndReplicateAttributesPass();
// Creates a pass that converts unified compilation and replication
// attributes back to legacy attributes.
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateConvertToLegacyCompileAndReplicateAttributesPass();
// Creates a pass that forms clusters from operations of the same
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc b/tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
index 3741822..1cd0644 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc
@@ -74,18 +74,18 @@
// Clone the `host_func` in the `host_mlir_module` attribute if it exists
// and use it for `shape_inference_graph` attribute on XlaHostCompute.
- FuncOp cloned_func;
+ func::FuncOp cloned_func;
SymbolTable manager(op->getParentOfType<ModuleOp>());
StringRef host_module = op.host_mlir_module();
if (!host_module.empty()) {
mlir::OwningOpRef<mlir::ModuleOp> module_for_func;
- FuncOp func = op.GetHostFunc(&module_for_func);
+ func::FuncOp func = op.GetHostFunc(&module_for_func);
OpBuilder::InsertionGuard guard(rewriter);
- rewriter.setInsertionPointAfter(op->getParentOfType<FuncOp>());
- cloned_func =
- llvm::dyn_cast_or_null<FuncOp>(rewriter.clone(*func.getOperation()));
+ rewriter.setInsertionPointAfter(op->getParentOfType<func::FuncOp>());
+ cloned_func = llvm::dyn_cast_or_null<func::FuncOp>(
+ rewriter.clone(*func.getOperation()));
manager.insert(cloned_func);
rewriter.setInsertionPointToStart(&cloned_func.getBody().front());
auto result_type =
@@ -206,7 +206,7 @@
// If the parent is not a FuncOp, then add the parent op containing a region
// to worklist.
Operation* parent = region->getParentOp();
- if (!isa<FuncOp>(parent)) {
+ if (!isa<func::FuncOp>(parent)) {
if (ops_with_tokens.insert(parent).second) {
worklist.push_back(parent);
}
@@ -255,7 +255,7 @@
void PrepareTpuComputationForTfExportPass::runOnOperation() {
ModuleOp module = getOperation();
- for (FuncOp func : module.getOps<FuncOp>()) {
+ for (func::FuncOp func : module.getOps<func::FuncOp>()) {
UpdateArgAttributes(func);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/promote_resources_to_args.cc b/tensorflow/compiler/mlir/tensorflow/transforms/promote_resources_to_args.cc
index e7c5df5..cf9ef36 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/promote_resources_to_args.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/promote_resources_to_args.cc
@@ -47,7 +47,7 @@
constexpr char kResourceNameArgAttr[] = "tf.resource_name";
// Checks if a function has only one block.
-mlir::LogicalResult CheckSingleBlockFunction(FuncOp function) {
+mlir::LogicalResult CheckSingleBlockFunction(func::FuncOp function) {
if (!llvm::hasSingleElement(function)) {
return function.emitError()
<< "expects function '" << function.getName()
@@ -87,7 +87,7 @@
// Checks if resource argument has a valid resource subtype and its users are of
// `tf.ReadVariableOp` and `tf.AssignVariableOp` only.
-mlir::LogicalResult ValidateResourceArgument(FuncOp function,
+mlir::LogicalResult ValidateResourceArgument(func::FuncOp function,
BlockArgument resource_arg,
TF::ResourceType resource_type) {
if (resource_type.getSubtypes().size() != 1)
@@ -122,7 +122,7 @@
// returned in `var_handle_shared_names` based on the ordering of added resource
// arguments.
mlir::LogicalResult PromoteVarHandlesToArguments(
- FuncOp function, bool add_validation,
+ func::FuncOp function, bool add_validation,
llvm::SmallVectorImpl<std::string>* var_handle_shared_names) {
Block& block = function.front();
auto func_type = function.getFunctionType();
@@ -168,7 +168,8 @@
};
LogicalResult PromoteResourcesToArguments(
- FuncOp function, llvm::ArrayRef<std::string> var_handle_shared_names) {
+ func::FuncOp function,
+ llvm::ArrayRef<std::string> var_handle_shared_names) {
Block& block = function.front();
auto return_op =
@@ -360,7 +361,7 @@
}
SymbolTable symbolTable(module);
for (const std::string& f : functions_) {
- FuncOp func = symbolTable.lookup<FuncOp>(f);
+ func::FuncOp func = symbolTable.lookup<func::FuncOp>(f);
if (!func) continue;
// This routine should only be called when control flow operations are still
@@ -386,7 +387,7 @@
void PromoteVarHandlesToArgsPass::runOnOperation() {
ModuleOp module = getOperation();
MLIRContext* context = module.getContext();
- for (auto function : module.getOps<FuncOp>()) {
+ for (auto function : module.getOps<func::FuncOp>()) {
if (failed(CheckSingleBlockFunction(function))) return signalPassFailure();
llvm::SmallVector<std::string, 4> var_handle_shared_names;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/readonly_references_to_resources.cc b/tensorflow/compiler/mlir/tensorflow/transforms/readonly_references_to_resources.cc
index 0d36aab..14b25c8 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/readonly_references_to_resources.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/readonly_references_to_resources.cc
@@ -111,7 +111,7 @@
void ConvertReadonlyReferenceVariablesToResourceVariablesPass::
runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
OpBuilder builder(func.getContext());
SmallVector<VariableV2Op, 4> variable_v2s_to_replace;
@@ -184,7 +184,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateConvertReadonlyReferenceVariablesToResourceVariablesPass() {
return std::make_unique<
ConvertReadonlyReferenceVariablesToResourceVariablesPass>();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc b/tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
index 1c145a4..708bcfc 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/region_control_flow_to_functional.cc
@@ -64,7 +64,7 @@
std::string GetName(Operation* op, StringRef suffix);
tensorflow::OpOrArgLocNameMapper mapper;
- llvm::SmallVector<FuncOp, 4> worklist;
+ llvm::SmallVector<func::FuncOp, 4> worklist;
};
std::string RegionControlFlowToFunctional::GetName(Operation* op,
@@ -122,7 +122,7 @@
// are also added as return values from the function
void ExtractSingleBlockRegion(Region& region, StringRef name,
llvm::SmallVectorImpl<Value>& extern_values,
- llvm::SmallVectorImpl<FuncOp>& worklist,
+ llvm::SmallVectorImpl<func::FuncOp>& worklist,
bool extern_values_passthrough) {
ModuleOp module = region.getParentOfType<ModuleOp>();
auto builder = OpBuilder::atBlockBegin(module.getBody());
@@ -145,7 +145,7 @@
auto type = FunctionType::get(region.getContext(), input_types, return_types);
// Create new function and extract region body into the function.
- auto outlined_func = builder.create<FuncOp>(loc, name, type);
+ auto outlined_func = builder.create<func::FuncOp>(loc, name, type);
Region& func_region = outlined_func.getBody();
func_region.takeBody(region);
Block& first_block = func_region.front();
@@ -461,9 +461,9 @@
ModuleOp module = getOperation();
// Seed worklist with all functions in the module.
- worklist = llvm::to_vector<4>(module.getOps<FuncOp>());
+ worklist = llvm::to_vector<4>(module.getOps<func::FuncOp>());
while (!worklist.empty()) {
- FuncOp function = worklist.pop_back_val();
+ func::FuncOp function = worklist.pop_back_val();
auto result = function.walk([&](Operation* op) {
if (auto if_region = llvm::dyn_cast<IfRegionOp>(op)) {
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/remove_vars_in_session_initializer.cc b/tensorflow/compiler/mlir/tensorflow/transforms/remove_vars_in_session_initializer.cc
index 5565661..ab943d1 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/remove_vars_in_session_initializer.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/remove_vars_in_session_initializer.cc
@@ -86,7 +86,7 @@
SymbolTable symbol_table(module);
for (auto sym_ref : session_init_op.initializers()) {
- FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
+ func::FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
sym_ref.cast<FlatSymbolRefAttr>().getValue());
if (!init_func_op) {
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc b/tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc
index f44aefc..e34b1ef 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/replica_id_to_device_ordinal.cc
@@ -99,7 +99,8 @@
}
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateReplicaIDToDeviceOrdinalPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateReplicaIDToDeviceOrdinalPass() {
return std::make_unique<ReplicaIDToDeviceOrdinalPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc b/tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc
index c0ebf5a..7e3cbfb 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/replicate_invariant_op_hoisting.cc
@@ -149,7 +149,7 @@
}
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateReplicateInvariantOpHoistingPass() {
return std::make_unique<ReplicateInvariantOpHoistingPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc b/tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc
index 6d7307b..3f5dc44 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/replicate_to_island.cc
@@ -328,7 +328,7 @@
}
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateReplicateToIslandPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateReplicateToIslandPass() {
return std::make_unique<ReplicateToIslandPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/resource_device_inference.cc b/tensorflow/compiler/mlir/tensorflow/transforms/resource_device_inference.cc
index 11f3b97..379293f 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/resource_device_inference.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/resource_device_inference.cc
@@ -69,7 +69,8 @@
class PerFunctionResult {
public:
explicit PerFunctionResult(
- FuncOp func_op, const TF::ResourceAliasAnalysis::Info& alias_analysis)
+ func::FuncOp func_op,
+ const TF::ResourceAliasAnalysis::Info& alias_analysis)
: alias_analysis_(alias_analysis) {}
// Returns the recorded device assignment for a resource, if any.
@@ -127,7 +128,7 @@
}
// Extracts and canonicalizes the device attribute.
-inline StringRef GetDeviceAttr(FuncOp func, int arg_no) {
+inline StringRef GetDeviceAttr(func::FuncOp func, int arg_no) {
auto device_attr =
func.getArgAttrOfType<mlir::StringAttr>(arg_no, kFuncDeviceAttr);
return device_attr ? device_attr.getValue() : "";
@@ -147,7 +148,7 @@
}
// Propagates device assignment inside a function.
-LogicalResult ComputeResourceDevicesInComputation(FuncOp func_op,
+LogicalResult ComputeResourceDevicesInComputation(func::FuncOp func_op,
PerFunctionResult* result) {
OpBuilder builder(func_op);
// Function arguments.
@@ -230,9 +231,9 @@
const auto& resource_alias_analysis =
getAnalysis<TF::ResourceAliasAnalysis>();
- llvm::SmallDenseMap<FuncOp, PerFunctionResult, 4> per_function_results;
- llvm::SetVector<FuncOp> worklist;
- for (auto func_op : module.getOps<FuncOp>()) {
+ llvm::SmallDenseMap<func::FuncOp, PerFunctionResult, 4> per_function_results;
+ llvm::SetVector<func::FuncOp> worklist;
+ for (auto func_op : module.getOps<func::FuncOp>()) {
worklist.insert(func_op);
per_function_results.try_emplace(
func_op, func_op, resource_alias_analysis.GetAnalysisForFunc(func_op));
@@ -241,8 +242,8 @@
// called function's arguments.
auto propagate_operands_to_callee_arguments =
[&](Operation* caller, Operation::operand_range caller_operands,
- ArrayRef<FuncOp> callees, const PerFunctionResult& caller_res) {
- for (FuncOp callee : callees) {
+ ArrayRef<func::FuncOp> callees, const PerFunctionResult& caller_res) {
+ for (func::FuncOp callee : callees) {
assert(callee);
auto& callee_res = per_function_results.find(callee)->getSecond();
bool callee_needs_recompute = false;
@@ -287,7 +288,7 @@
{if_op.then_function(), if_op.else_function()}, func_res)))
return WalkResult::interrupt();
} else if (auto call = dyn_cast<CallOpInterface>(op)) {
- auto func = dyn_cast<FuncOp>(call.resolveCallable());
+ auto func = dyn_cast<func::FuncOp>(call.resolveCallable());
if (!func) {
op->emitError(
"Cannot propagate device attribute to callee: Unable to resolve "
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
index ff8a5ed..b7b94ad9 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting.cc
@@ -282,7 +282,7 @@
bool is_func = false;
// For functions, the resources to analyze are the function arguments.
// Otherwise, its the region captures.
- if (FuncOp func = dyn_cast<FuncOp>(op_)) {
+ if (func::FuncOp func = dyn_cast<func::FuncOp>(op_)) {
is_func = true;
Region& body = func.getBody();
for (BlockArgument arg : body.getArguments()) {
@@ -610,7 +610,8 @@
// as a use. This doesn't support nesting of ops, so before calling this, nested
// ops/functions need to be already resource-lifted.
LogicalResult FindResourceArgUseInfo(
- FuncOp func_op, llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>* result) {
+ func::FuncOp func_op,
+ llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>* result) {
auto return_op = func_op.front().getTerminator();
for (auto arg : TF::filter_resources(func_op.getArguments())) {
ResourceArgUseInfo info;
@@ -677,7 +678,7 @@
// removing unused ones.
void RemoveUnusedResourceArgumentsAndForwardedRetvals(
const llvm::SmallDenseMap<int64_t, ResourceArgUseInfo>& infos,
- FuncOp func_op,
+ func::FuncOp func_op,
llvm::SmallVector<int64_t, 4>* old_to_new_arg_indices = nullptr,
llvm::SmallDenseMap<int64_t, Type>* remaining_resource_data_types =
nullptr) {
@@ -727,7 +728,7 @@
// resource argument. handle_updated_arg_value is a caller-provided function
// that handles the updated value for an resource argument.
LogicalResult LiftArgRetResourcesForFunction(
- FuncOp func_op,
+ func::FuncOp func_op,
const llvm::SmallDenseMap<int64_t, Type>& resource_data_types,
llvm::function_ref<void(int64_t, Value)> handle_updated_arg_value) {
RegionResourceHoister hoister(func_op);
@@ -825,7 +826,8 @@
}
// Lifts loads/stores from while loop's body and cond functions.
-LogicalResult HandleWhileLoop(TF::WhileOp while_op, FuncOp body, FuncOp cond) {
+LogicalResult HandleWhileLoop(TF::WhileOp while_op, func::FuncOp body,
+ func::FuncOp cond) {
auto return_op = body.front().getTerminator();
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> body_use_info;
llvm::SmallDenseMap<int64_t, ResourceArgUseInfo> cond_use_info;
@@ -889,7 +891,7 @@
// Lifts loads/stores from an IfOp or CaseOp's branches.
template <class CaseOrIfOp>
-LogicalResult HandleCaseOrIfOp(CaseOrIfOp op, ArrayRef<FuncOp> branches) {
+LogicalResult HandleCaseOrIfOp(CaseOrIfOp op, ArrayRef<func::FuncOp> branches) {
// For canonicalized If/Case, there should not be any resource outputs
int64_t non_resource_results = op.getNumResults();
@@ -963,7 +965,7 @@
auto new_operands =
FilterRange<Value, OperandRange>(op.input(), resource_arg_uses);
new_operands.insert(new_operands.begin(), op.getOperand(0));
- FuncOp first_func = branches.front();
+ func::FuncOp first_func = branches.front();
auto new_op = builder.create<CaseOrIfOp>(
op.getLoc(), first_func.getFunctionType().getResults(), new_operands,
op->getAttrs());
@@ -990,7 +992,7 @@
// information about the lifting changes.
struct PartitionedCallLiftingInfo {
// Function with resources lifted. Can be nullptr if nothing needs to change.
- FuncOp lifted_callee;
+ func::FuncOp lifted_callee;
// Mapping from old resource outputs to their aliasing output inputs.
llvm::SmallDenseMap<int64_t, int64_t> old_outputs_aliasing_old_inputs;
// Mapping from old to new output indices in case any output is removed.
@@ -1006,7 +1008,7 @@
// needs to be changed, the original function will be preserved, and the lifting
// happens on a clone, which will be stored in `result`.
LogicalResult HandlePartitionedCallOpCallee(
- FuncOp callee, PartitionedCallLiftingInfo* result) {
+ func::FuncOp callee, PartitionedCallLiftingInfo* result) {
// Sanity check: return of resources should be aliases of inputs. Such outputs
// will be removed later.
int64_t non_resource_results = 0;
@@ -1138,7 +1140,8 @@
// flow, then performs lifting on the callee.
template <typename CallOpType>
LogicalResult HandlePartitionedCallOp(
- CallOpType call_op, FuncOp callee, ModuleOp module, bool vars_initialized,
+ CallOpType call_op, func::FuncOp callee, ModuleOp module,
+ bool vars_initialized,
llvm::SmallDenseMap<llvm::StringRef, PartitionedCallLiftingInfo>*
lifted_callees) {
auto emplace_res = lifted_callees->try_emplace(callee.getName(),
@@ -1187,9 +1190,9 @@
if (failed(HandleCaseOrIfOp(if_op, {then_branch, else_branch})))
return failure();
} else if (auto case_op = llvm::dyn_cast<TF::CaseOp>(&op)) {
- SmallVector<FuncOp, 4> branch_functions;
+ SmallVector<func::FuncOp, 4> branch_functions;
case_op.get_branch_functions(branch_functions);
- for (FuncOp func : branch_functions) {
+ for (func::FuncOp func : branch_functions) {
// Recursively handle the nested control flow.
(void)HoistForControlFlow(&func.front(), module, vars_initialized,
lifted_partitioned_call_callees);
@@ -1241,7 +1244,7 @@
if (failed(TF::CleanupAndCanonicalizeForResourceOpLifting(module)))
return signalPassFailure();
- auto walk_result = module.walk([&](FuncOp func_op) {
+ auto walk_result = module.walk([&](func::FuncOp func_op) {
return func_op.walk([&](tf_device::ClusterOp cluster) {
LogicalResult result = HoistForControlFlow(
&cluster.GetBody(), module, /*vars_initialized=*/true,
@@ -1264,7 +1267,7 @@
void ResourceOpLiftingForMainFunctionPass::runOnOperation() {
ModuleOp module = getOperation();
- FuncOp main_func = module.lookupSymbol<FuncOp>("main");
+ func::FuncOp main_func = module.lookupSymbol<func::FuncOp>("main");
if (!main_func) {
return;
}
@@ -1289,7 +1292,7 @@
} // namespace TFDevice
namespace TF {
-LogicalResult ResourceLiftingForFunctionalControlFlow(FuncOp function) {
+LogicalResult ResourceLiftingForFunctionalControlFlow(func::FuncOp function) {
// This routine should only be called when control flow operations are still
// represented with TF IfOp and WhileOp operations. In this case, there should
// be only one basic blocks in the MLIR representation.
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc
index 3013761..b1130d7 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.cc
@@ -117,12 +117,12 @@
// Clones a function if it cannot be patched in place. Clone if there are
// multiple uses or unknown uses (for external functions). The cloned function
// will be marked as private.
-FuncOp CloneFunctionIfNeeded(FuncOp func) {
+func::FuncOp CloneFunctionIfNeeded(func::FuncOp func) {
ModuleOp module = func->getParentOfType<ModuleOp>();
auto func_uses = SymbolTable::getSymbolUses(func, &module.getBodyRegion());
if (func_uses.hasValue() && llvm::hasSingleElement(func_uses.getValue()))
return func;
- FuncOp cloned = func.clone();
+ func::FuncOp cloned = func.clone();
cloned.setPrivate();
cloned.setName(
StringAttr::get(func.getContext(), func.getName().str() + "_lifted"));
@@ -134,12 +134,13 @@
// branch functions to (a) drop the ununsed return values, and (b) as a result
// if some argument becomes unused in all branches, drop that argument and the
// corresponding if/case input operand.
-void EliminateUnusedResultsForIfCase(Operation *op, ArrayRef<FuncOp> branches) {
+void EliminateUnusedResultsForIfCase(Operation *op,
+ ArrayRef<func::FuncOp> branches) {
// Clone branch functions if needed since we will be mutating them.
- SmallVector<FuncOp, 2> cloned_branches;
+ SmallVector<func::FuncOp, 2> cloned_branches;
cloned_branches.reserve(branches.size());
- for (FuncOp func : branches) {
- FuncOp cloned = CloneFunctionIfNeeded(func);
+ for (func::FuncOp func : branches) {
+ func::FuncOp cloned = CloneFunctionIfNeeded(func);
cloned_branches.push_back(cloned);
if (cloned == func) continue;
// Patch up the op attribute to point to the new function.
@@ -157,7 +158,7 @@
for (OpResult result : llvm::reverse(op->getResults())) {
if (!result.use_empty()) continue;
int result_idx = result.getResultNumber();
- for (FuncOp func : cloned_branches)
+ for (func::FuncOp func : cloned_branches)
func.front().getTerminator()->eraseOperand(result_idx);
}
@@ -165,7 +166,7 @@
// those as well.
int num_args = cloned_branches[0].getNumArguments();
llvm::BitVector used_args(num_args);
- for (FuncOp func : branches) {
+ for (func::FuncOp func : branches) {
for (BlockArgument arg : func.getArguments()) {
if (!arg.use_empty()) used_args.set(arg.getArgNumber());
}
@@ -177,7 +178,7 @@
// Traverse arguments backward so that indices to be deleted stay unchanged.
for (int idx = num_args - 1; idx >= 0; --idx) {
if (used_args.test(idx)) continue;
- for (FuncOp func : cloned_branches) func.eraseArgument(idx);
+ for (func::FuncOp func : cloned_branches) func.eraseArgument(idx);
// For if/case, arg #i of attached function corresponds to operand #i+1
op->eraseOperand(idx + 1);
}
@@ -185,7 +186,7 @@
// Patch up function types (with less number of return values and potentially
// less number of arguments)
- for (FuncOp func : cloned_branches) {
+ for (func::FuncOp func : cloned_branches) {
func.setType(
FunctionType::get(func.getContext(), func.front().getArgumentTypes(),
func.front().getTerminator()->getOperandTypes()));
@@ -196,8 +197,8 @@
// Eliminated unused results from a functional while.
void EliminateUnusedResultsForWhile(TF::WhileOp op) {
- FuncOp cond = op.cond_function();
- FuncOp body = op.body_function();
+ func::FuncOp cond = op.cond_function();
+ func::FuncOp body = op.body_function();
llvm::BitVector can_eliminate(op.getNumResults());
for (OpResult result : llvm::reverse(op.getResults())) {
@@ -217,8 +218,8 @@
if (can_eliminate.empty()) return;
- FuncOp cloned_cond = CloneFunctionIfNeeded(cond);
- FuncOp cloned_body = CloneFunctionIfNeeded(body);
+ func::FuncOp cloned_cond = CloneFunctionIfNeeded(cond);
+ func::FuncOp cloned_body = CloneFunctionIfNeeded(body);
op.condAttr(FlatSymbolRefAttr::get(op.getContext(), cloned_cond.getName()));
op.bodyAttr(FlatSymbolRefAttr::get(op.getContext(), cloned_body.getName()));
@@ -233,7 +234,7 @@
}
// Patch up branch function types.
- for (FuncOp func : {cloned_cond, cloned_body}) {
+ for (func::FuncOp func : {cloned_cond, cloned_body}) {
func.setType(
FunctionType::get(func.getContext(), func.front().getArgumentTypes(),
func.front().getTerminator()->getOperandTypes()));
@@ -244,7 +245,8 @@
// For resource results, replace all uses with the resource input to which the
// result is tied to. After this, resource outputs of this op are expected to be
// unused.
-LogicalResult ForwardCommonArgToOutput(Operation *op, ArrayRef<FuncOp> branches,
+LogicalResult ForwardCommonArgToOutput(Operation *op,
+ ArrayRef<func::FuncOp> branches,
ValueRange branch_args,
bool &has_resource_result) {
// For while, the branch inputs and outputs need to match.
@@ -258,7 +260,7 @@
has_resource_result = true;
int result_idx = result.getResultNumber();
Optional<int> common_arg_index;
- for (FuncOp func : branches) {
+ for (func::FuncOp func : branches) {
auto ret = func.front().getTerminator();
auto block_arg = ret->getOperand(result_idx).dyn_cast<BlockArgument>();
if (!block_arg) {
@@ -290,9 +292,9 @@
// Canonicalizes a function if. Forwards input argument to resource results and
// then deletes the resource results.
LogicalResult CanonicalizeFunctionalIfCase(Operation *op,
- ArrayRef<FuncOp> branches,
+ ArrayRef<func::FuncOp> branches,
ValueRange branch_args) {
- for (FuncOp func : branches) {
+ for (func::FuncOp func : branches) {
if (failed(CleanupAndCanonicalize(func))) return failure();
}
@@ -312,7 +314,7 @@
// Canonicalizes a functional while. Forwards common argument to results and
// drop resource results if posible.
LogicalResult CanonicalizeFunctionalWhile(TF::WhileOp op) {
- for (FuncOp func : {op.cond_function(), op.body_function()}) {
+ for (func::FuncOp func : {op.cond_function(), op.body_function()}) {
if (failed(CleanupAndCanonicalize(func))) return failure();
}
@@ -423,7 +425,7 @@
result = CanonicalizeFunctionalIfCase(
op, {if_op.then_function(), if_op.else_function()}, if_op.input());
} else if (auto case_op = dyn_cast<TF::CaseOp>(op)) {
- SmallVector<FuncOp, 4> branches;
+ SmallVector<func::FuncOp, 4> branches;
case_op.get_branch_functions(branches);
result = CanonicalizeFunctionalIfCase(case_op, branches, case_op.input());
} else if (auto while_op = dyn_cast<TF::WhileOp>(op)) {
@@ -439,7 +441,7 @@
// For while region, the body input and output arg should match.
result = CanonicalizeWhileRegion(while_region);
} else if (auto call = dyn_cast<CallOpInterface>(op)) {
- FuncOp func = dyn_cast<FuncOp>(call.resolveCallable());
+ func::FuncOp func = dyn_cast<func::FuncOp>(call.resolveCallable());
if (!func) return WalkResult::interrupt();
result = CleanupAndCanonicalize(func);
}
@@ -453,7 +455,7 @@
namespace TF {
-LogicalResult CleanupAndCanonicalizeForResourceOpLifting(FuncOp func) {
+LogicalResult CleanupAndCanonicalizeForResourceOpLifting(func::FuncOp func) {
return CleanupAndCanonicalize(func);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.h b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.h
index b632876..f526acc 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/resource_op_lifting_cleanup.h
@@ -39,7 +39,7 @@
namespace mlir {
namespace TF {
LogicalResult CleanupAndCanonicalizeForResourceOpLifting(ModuleOp module);
-LogicalResult CleanupAndCanonicalizeForResourceOpLifting(FuncOp func);
+LogicalResult CleanupAndCanonicalizeForResourceOpLifting(func::FuncOp func);
} // namespace TF
} // namespace mlir
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc b/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc
index e39ceab..e08270e 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/rewrite_tpu_embedding_ops.cc
@@ -101,7 +101,7 @@
}
void RewriteTPUEmbeddingOps::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (failed(RunOnRegion(&func.getBody()))) return signalPassFailure();
func.walk([&](Operation* op) {
@@ -113,7 +113,8 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateRewriteTPUEmbeddingOpsPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateRewriteTPUEmbeddingOpsPass() {
return std::make_unique<RewriteTPUEmbeddingOps>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
index a5447d1..ae475c1 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc
@@ -79,6 +79,7 @@
LLVM_DEBUG(OP->print(llvm::dbgs() << MSG << " "); llvm::dbgs() << "\n")
using ::int64_t;
+using mlir::func::FuncOp;
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
@@ -631,7 +632,7 @@
// reached convergence, false otherwise.
FailureOr<bool> PropagateShapeToFunctions(ModuleOp module,
TypeRange input_types,
- ArrayRef<FuncOp> functions,
+ ArrayRef<func::FuncOp> functions,
int64_t max_iterations);
// Propagates shapes to regions given the shapes of the inputs of the regions.
@@ -687,10 +688,10 @@
bool InferShapeForNonTFDialectOperation(Operation* op);
// Infers shape for function return type and returns whether changed.
- LogicalResult InferShapeForFunctionReturnType(FuncOp func);
+ LogicalResult InferShapeForFunctionReturnType(func::FuncOp func);
// Enqueues function for processing.
- void enqueue(FuncOp fn) {
+ void enqueue(func::FuncOp fn) {
LLVM_DEBUG(llvm::dbgs()
<< "enqueue " << fn.getName() << " ("
<< (queue_set_.count(fn) ? "already inserted" : "newly inserted")
@@ -699,24 +700,26 @@
}
// Enqueues callers on functions.
- void EnqueueCallers(FuncOp fn);
+ void EnqueueCallers(func::FuncOp fn);
// Returns the function at the front of the queue.
- FuncOp front() { return queue_.front(); }
+ func::FuncOp front() { return queue_.front(); }
// Returns whether work queue is empty.
bool EmptyQueue() const { return queue_.empty(); }
// Returns function from the front of the work queue.
- FuncOp pop_front() {
- FuncOp ret = queue_.front();
+ func::FuncOp pop_front() {
+ func::FuncOp ret = queue_.front();
queue_.pop();
queue_set_.erase(ret);
return ret;
}
// Returns the current size of the queue.
- std::queue<FuncOp>::size_type QueueSize() const { return queue_.size(); }
+ std::queue<func::FuncOp>::size_type QueueSize() const {
+ return queue_.size();
+ }
Dialect* const tf_dialect_;
@@ -796,7 +799,7 @@
// with insertions to the callers map. This could occur if GetCallers is
// called with two separate functions, the 2nd one incurs a resize and then
// both first and 2nd stored callers are used.
- ArrayRef<Operation*> GetCallers(FuncOp fn);
+ ArrayRef<Operation*> GetCallers(func::FuncOp fn);
// Mapping between ValuePort (which corresponds to an OpResult or smaller,
// e.g., first element of OpResult produced) to an Attribute if the ValuePort
@@ -808,8 +811,8 @@
SymbolUserMap symbol_users_;
// Queue of functions being processed.
- llvm::DenseSet<FuncOp> queue_set_;
- std::queue<FuncOp> queue_;
+ llvm::DenseSet<func::FuncOp> queue_set_;
+ std::queue<func::FuncOp> queue_;
int64_t graph_version_;
@@ -828,12 +831,13 @@
symbol_table_.getSymbolTable(module);
}
-ArrayRef<Operation*> ShapeInference::GetCallers(FuncOp fn) {
+ArrayRef<Operation*> ShapeInference::GetCallers(func::FuncOp fn) {
return symbol_users_.getUsers(fn);
}
-void ShapeInference::EnqueueCallers(FuncOp fn) {
- for (auto user : GetCallers(fn)) enqueue(user->getParentOfType<FuncOp>());
+void ShapeInference::EnqueueCallers(func::FuncOp fn) {
+ for (auto user : GetCallers(fn))
+ enqueue(user->getParentOfType<func::FuncOp>());
}
bool ShapeInference::UpdateTypeAndInsertIncompatibleUseCasts(Type new_type,
@@ -862,7 +866,7 @@
result.setType(new_type);
if (enqueue_callers)
- EnqueueCallers(result.getDefiningOp()->getParentOfType<FuncOp>());
+ EnqueueCallers(result.getDefiningOp()->getParentOfType<func::FuncOp>());
return true;
}
@@ -878,8 +882,8 @@
// Infers the shape from a (Stateful)PartionedCall operation by looking up the
// called function and propagating the return type.
bool ShapeInference::InferShapeForCall(CallOpInterface call_op) {
- FuncOp func =
- dyn_cast_or_null<FuncOp>(call_op.resolveCallable(&symbol_table_));
+ func::FuncOp func =
+ dyn_cast_or_null<func::FuncOp>(call_op.resolveCallable(&symbol_table_));
if (!func) return false;
DCOMMENT("Infer shape for call " << func.getName());
@@ -965,7 +969,7 @@
if (host_module.getValue().empty()) return false;
mlir::OwningOpRef<mlir::ModuleOp> module_for_func;
- FuncOp func = host_compute_op.GetHostFunc(&module_for_func);
+ func::FuncOp func = host_compute_op.GetHostFunc(&module_for_func);
// Update/use input shapes for function.
FunctionType func_type = func.getFunctionType();
@@ -1101,7 +1105,7 @@
// op. The MapDataset op always has N+1 inputs.
// TODO(jpienaar): Avoid this lookup.
auto module = op->getParentOfType<ModuleOp>();
- auto f = module.lookupSymbol<FuncOp>(op.f());
+ auto f = module.lookupSymbol<func::FuncOp>(op.f());
// Skip if function is not found or more than one caller.
if (!f || !llvm::hasSingleElement(GetCallers(f))) return false;
return InferShapeForDatasetOpCommon(op, f, max_iterations);
@@ -1115,7 +1119,7 @@
// TakeWhileDataset op. The TakeWhileDataset op always has N+1 inputs.
// TODO(jpienaar): Avoid this lookup.
auto module = op->getParentOfType<ModuleOp>();
- auto f = module.lookupSymbol<FuncOp>(op.predicate());
+ auto f = module.lookupSymbol<func::FuncOp>(op.predicate());
// Skip if function is not found or more than one caller.
if (!f || !llvm::hasSingleElement(GetCallers(f))) return false;
return InferShapeForDatasetOpCommon(op, f, max_iterations);
@@ -1133,7 +1137,7 @@
// TODO(jpienaar): Avoid this lookup.
auto module = op->getParentOfType<ModuleOp>();
- auto f = module.lookupSymbol<FuncOp>(op.f());
+ auto f = module.lookupSymbol<func::FuncOp>(op.f());
// Skip if function is not found or it has more than one caller.
if (!f || !llvm::hasSingleElement(GetCallers(f))) return false;
@@ -2145,14 +2149,14 @@
}
FailureOr<bool> ShapeInference::PropagateShapeToFunctions(
- ModuleOp module, TypeRange input_types, ArrayRef<FuncOp> functions,
+ ModuleOp module, TypeRange input_types, ArrayRef<func::FuncOp> functions,
int64_t max_iterations) {
bool any_failure = false;
bool any_nonconvergence = false;
// If shape propagation fails for one function, return failure, but do not
// early exit and attempt to propagate shapes for all provided functions to
// have a best-effort propagation.
- for (FuncOp func : functions) {
+ for (func::FuncOp func : functions) {
DCOMMENT("Propating shape to " << func.getName());
ArrayRef<Operation*> callers = GetCallers(func);
if (!llvm::hasSingleElement(callers) &&
@@ -2223,7 +2227,8 @@
}
void ShapeInference::PropagateConstantToCallee(CallOpInterface call_op,
- FuncOp func, ModuleOp module) {
+ func::FuncOp func,
+ ModuleOp module) {
auto callers = GetCallers(func);
if (!llvm::hasSingleElement(callers)) return;
@@ -2251,7 +2256,8 @@
}
void ShapeInference::PropagateConstantFromCallee(CallOpInterface call_op,
- FuncOp func, ModuleOp module) {
+ func::FuncOp func,
+ ModuleOp module) {
// If the return value is a constant, use the constant as the value of
// the call return.
Operation* op = call_op.getOperation();
@@ -2345,7 +2351,7 @@
if_op.ResolveElseFunction(&symbol_table_)},
max_iterations);
} else if (auto case_op = dyn_cast<TF::CaseOp>(op)) {
- SmallVector<FuncOp, 4> branches;
+ SmallVector<func::FuncOp, 4> branches;
case_op.get_branch_functions(branches);
return PropagateShapeToFunctions(module, case_op.input().getTypes(),
branches, max_iterations);
@@ -2372,7 +2378,8 @@
while_op.ResolveBodyFunction(&symbol_table_)},
max_iterations);
} else if (auto call_op = dyn_cast<CallOpInterface>(op)) {
- if (auto func = dyn_cast<FuncOp>(call_op.resolveCallable(&symbol_table_))) {
+ if (auto func =
+ dyn_cast<func::FuncOp>(call_op.resolveCallable(&symbol_table_))) {
PropagateConstantToCallee(call_op, func, module);
FailureOr<bool> failure_or_converged = PropagateShapeToFunctions(
module, call_op.getArgOperands().getTypes(), {func}, max_iterations);
@@ -2503,7 +2510,8 @@
return success();
}
-LogicalResult ShapeInference::InferShapeForFunctionReturnType(FuncOp func) {
+LogicalResult ShapeInference::InferShapeForFunctionReturnType(
+ func::FuncOp func) {
LLVM_DEBUG(llvm::dbgs() << "Inferring return type for: " << func.getName()
<< "\n");
@@ -2631,7 +2639,7 @@
}
static FailureOr<bool> InferShapeForFunction(ShapeInference& context,
- FuncOp func,
+ func::FuncOp func,
int64_t max_iterations) {
FailureOr<bool> failure_or_converged =
context.InferShapeUntilFixPoint(&func.getBody(), max_iterations);
@@ -2643,7 +2651,7 @@
return true;
}
-FailureOr<bool> InferShapeForFunction(FuncOp func,
+FailureOr<bool> InferShapeForFunction(func::FuncOp func,
ArrayRef<ArrayRef<int64_t>> arg_shapes,
int64_t graph_version,
int64_t max_iterations) {
@@ -2714,12 +2722,12 @@
/*propagate_caller_callee_constants=*/false);
if (auto main = module.lookupSymbol<mlir::func::FuncOp>("main"))
context.enqueue(main);
- for (auto func : module.getOps<FuncOp>()) context.enqueue(func);
+ for (auto func : module.getOps<func::FuncOp>()) context.enqueue(func);
// Arbitrarily upper bound the maximum number of functions that get processed
// just to avoid pathological cases.
auto max_iteration = context.QueueSize() * 4;
while (!context.EmptyQueue()) {
- FuncOp func = context.front();
+ func::FuncOp func = context.front();
FailureOr<bool> failure_or_converged =
InferShapeForFunction(context, func, max_iterations);
if (failed(failure_or_converged) || !failure_or_converged.getValue())
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h
index f1891de..e12c470 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.h
@@ -42,7 +42,7 @@
// function being inferred.
// Returns a failure() on error, otherwise returns true to indicate that it
// reached convergence, false otherwise.
-FailureOr<bool> InferShapeForFunction(FuncOp func,
+FailureOr<bool> InferShapeForFunction(func::FuncOp func,
ArrayRef<ArrayRef<int64_t>> arg_shapes,
int64_t graph_version,
int64_t max_iterations = 10);
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/sink_constant.cc b/tensorflow/compiler/mlir/tensorflow/transforms/sink_constant.cc
index 6476ea0..a77cac7 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/sink_constant.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/sink_constant.cc
@@ -95,7 +95,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateClusterConstantSinkingPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateClusterConstantSinkingPass(
llvm::function_ref<bool(tf_device::ClusterOp, ElementsAttr)> filter) {
return std::make_unique<ClusterConstantSinkingPass>(filter);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/stack_ops_decomposition.cc b/tensorflow/compiler/mlir/tensorflow/transforms/stack_ops_decomposition.cc
index a4b89e9..0defb84 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/stack_ops_decomposition.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/stack_ops_decomposition.cc
@@ -70,7 +70,7 @@
// Returns the aliasing argument number of a fucntion return value if it simply
// forwards the argument. Otherwise, returns -1.
-int64_t FindAliasedInput(FuncOp func, int64_t return_index) {
+int64_t FindAliasedInput(func::FuncOp func, int64_t return_index) {
Value return_val = func.front().getTerminator()->getOperand(return_index);
auto maybe_arg = return_val.dyn_cast<BlockArgument>();
if (!maybe_arg) return -1;
@@ -88,7 +88,7 @@
// If handle_new_size_vars is provided, it will be invoked on the list of new
// size variables before finally changing the function type.
void ModifyFunctionSignature(
- FuncOp func, llvm::SmallDenseMap<Value, Value>* stack_var_to_size_var,
+ func::FuncOp func, llvm::SmallDenseMap<Value, Value>* stack_var_to_size_var,
llvm::function_ref<llvm::Optional<Type>(int64_t)> arg_to_stack_type,
llvm::function_ref<void(ArrayRef<BlockArgument>)> handle_new_size_vars =
nullptr) {
@@ -118,7 +118,7 @@
// partitioned call ops.
struct PartitionedCallStackOpsInfo {
bool signature_change;
- FuncOp decomposed_callee;
+ func::FuncOp decomposed_callee;
llvm::SmallDenseMap<int64_t, int64_t> stack_var_arg_to_size_arg;
};
@@ -255,7 +255,7 @@
// and performs stack ops decomposition on it.
template <typename CallOp>
LogicalResult HandlePartitionedCallOp(
- CallOp call, FuncOp callee, ModuleOp module,
+ CallOp call, func::FuncOp callee, ModuleOp module,
const llvm::SmallDenseMap<Value, Value>& data_var_to_size_var,
llvm::StringMap<PartitionedCallStackOpsInfo>*
decomposed_partitioned_call_callees) {
@@ -283,7 +283,7 @@
new_call->setAttr(
"f", SymbolRefAttr::get(
builder.getContext(),
- const_cast<FuncOp&>(info.decomposed_callee).getName()));
+ const_cast<func::FuncOp&>(info.decomposed_callee).getName()));
for (int64_t i = 0; i < call.getNumResults(); ++i) {
auto result = call.getResult(i);
if (!getElementTypeOrSelf(result.getType())
@@ -306,7 +306,7 @@
return recreate_caller();
}
llvm::SmallDenseMap<Value, Value> callee_map;
- FuncOp lowered_callee = callee;
+ func::FuncOp lowered_callee = callee;
if (!callee.isPrivate()) {
// Clone non-private callee in case of signature change.
lowered_callee = callee.clone();
@@ -550,7 +550,7 @@
void StackOpsDecompositionPass::runOnOperation() {
auto module = getOperation();
- auto main = module.lookupSymbol<FuncOp>("main");
+ auto main = module.lookupSymbol<func::FuncOp>("main");
if (!main) return;
if (failed(DecomposeStackOps(&main.front(), module))) {
signalPassFailure();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/strip_noinline_attribute.cc b/tensorflow/compiler/mlir/tensorflow/transforms/strip_noinline_attribute.cc
index 5feb56d..2af6af7 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/strip_noinline_attribute.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/strip_noinline_attribute.cc
@@ -28,7 +28,7 @@
// void runOnOperation() override;
void runOnOperation() override {
// Strip the "tf._noinline" attribute from top-level functions.
- for (auto func_op : getOperation().getOps<FuncOp>())
+ for (auto func_op : getOperation().getOps<func::FuncOp>())
func_op->removeAttr("tf._noinline");
}
};
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc
index 0fc1206..f432d2a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_array_ops_decomposition.cc
@@ -176,7 +176,7 @@
}
Operation* old_op = old_val.getDefiningOp();
Operation* terminator_op =
- old_op->getParentOfType<FuncOp>().front().getTerminator();
+ old_op->getParentOfType<func::FuncOp>().front().getTerminator();
llvm::SmallPtrSet<mlir::Operation*, 1> exceptions = {terminator_op};
old_val.replaceAllUsesExcept(new_val, exceptions);
}
@@ -457,7 +457,7 @@
}
// Updates func's type according to its current arguments and return values.
-void UpdateFuncType(FuncOp func) {
+void UpdateFuncType(func::FuncOp func) {
llvm::SmallVector<Type, 8> arg_types;
for (auto arg : func.getArguments()) arg_types.push_back(arg.getType());
func.setType(
@@ -467,7 +467,7 @@
// Finds the accessed gradient sources for each tensor array argument.
llvm::SmallDenseMap<int64_t, llvm::SmallVector<string, 4>> AccessedGradients(
- ArrayRef<FuncOp> funcs, ModuleOp module) {
+ ArrayRef<func::FuncOp> funcs, ModuleOp module) {
llvm::SmallDenseMap<int64_t, llvm::SmallVector<string, 4>> result;
llvm::SmallDenseMap<int64_t, llvm::StringSet<>> result_sets;
auto insert = [&](Value v, const string& source, const Block& func_block) {
@@ -477,7 +477,7 @@
if (!insert_res.second) return;
result[arg.getArgNumber()].push_back(source);
};
- for (FuncOp func : funcs) {
+ for (func::FuncOp func : funcs) {
const Block& func_block = func.front();
// Walk all operations and nested regions to find accessed gradient sources
// for function arguments.
@@ -499,7 +499,7 @@
for (const string& source : entry.getSecond())
insert(if_op.getOperand(entry.getFirst() + 1), source, func_block);
} else if (auto call = llvm::dyn_cast<CallOpInterface>(op)) {
- auto callee = dyn_cast<FuncOp>(call.resolveCallable());
+ auto callee = dyn_cast<func::FuncOp>(call.resolveCallable());
for (const auto& entry : AccessedGradients({callee}, module))
for (const string& source : entry.getSecond())
insert(call.getArgOperands()[entry.getFirst()], source, func_block);
@@ -513,7 +513,7 @@
// partitioned call ops.
struct PartitionedCallTensorArrayOpsInfo {
bool signature_change;
- FuncOp decomposed_callee;
+ func::FuncOp decomposed_callee;
llvm::SmallVector<std::pair<int64_t, llvm::SmallVector<string, 4>>, 4>
arg_grads;
llvm::SmallVector<std::pair<int64_t, int64_t>, 4> ret_forward_input;
@@ -522,7 +522,7 @@
// Updates a called function's input signature by adjusting resource types, and
// adding required gradient arguments.
void ChangeFunctionInputSignature(
- FuncOp func,
+ func::FuncOp func,
const llvm::SmallDenseMap<int64_t, llvm::SmallVector<string, 4>>& grads,
llvm::function_ref<Type(int64_t)> ta_arg_buffer_type,
llvm::function_ref<bool(int64_t)> ta_accumulate_on_write,
@@ -693,7 +693,7 @@
auto new_if = builder.create<TF::IfOp>(
if_op.getLoc(), then_branch.getFunctionType().getResults(), operands,
if_op->getAttrs());
- auto ret_forwards_input = [](FuncOp f, int64_t ret_ind) -> int64_t {
+ auto ret_forwards_input = [](func::FuncOp f, int64_t ret_ind) -> int64_t {
auto retval = f.front().getTerminator()->getOperand(ret_ind);
auto arg = retval.dyn_cast<BlockArgument>();
if (!arg) return -1;
@@ -720,7 +720,7 @@
template <typename CallOp>
LogicalResult HandlePartitionedCallOp(
- CallOp call, FuncOp callee, ModuleOp module,
+ CallOp call, func::FuncOp callee, ModuleOp module,
llvm::SmallDenseMap<Value, TensorArrayStats>* stats,
llvm::StringMap<PartitionedCallTensorArrayOpsInfo>*
decomposed_partitioned_call_callees) {
@@ -755,7 +755,7 @@
new_call->setAttr(
"f", SymbolRefAttr::get(
builder.getContext(),
- const_cast<FuncOp&>(info.decomposed_callee).getName()));
+ const_cast<func::FuncOp&>(info.decomposed_callee).getName()));
for (const auto& entry : info.ret_forward_input) {
call.getResult(entry.first)
.replaceAllUsesWith(call.getOperand(entry.second));
@@ -782,7 +782,7 @@
if (it == stats->end()) return false;
return it->getSecond().accumulate_on_write;
};
- FuncOp lowered_callee = callee;
+ func::FuncOp lowered_callee = callee;
if (!callee.isPrivate()) {
// Clone non-private callee in case of signature change.
lowered_callee = callee.clone();
@@ -936,7 +936,7 @@
void TensorArrayOpsDecompositionPass::runOnOperation() {
auto module = getOperation();
- auto main = module.lookupSymbol<FuncOp>("main");
+ auto main = module.lookupSymbol<func::FuncOp>("main");
if (!main) return;
llvm::SmallDenseMap<Value, TensorArrayStats> stats;
llvm::StringMap<PartitionedCallTensorArrayOpsInfo>
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc
index f742340..a41b9b5 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_device_copy_conversion.cc
@@ -46,7 +46,7 @@
// Folds tf.IdentityOp and tf.IdentityNOp if op device and the argument devices
// from the defining ops match.
void TensorDeviceCopyConversionPass::runOnOperation() {
- FuncOp func_op = getOperation();
+ func::FuncOp func_op = getOperation();
auto should_fold_op_func = [&func_op](const Value &arg,
const StringAttr &op_device) {
@@ -108,7 +108,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTensorDeviceCopyConversionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTensorDeviceCopyConversionPass() {
return std::make_unique<TensorDeviceCopyConversionPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
index 6653492..7551372 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tensor_list_ops_decomposition.cc
@@ -52,7 +52,7 @@
};
// Updates func's type according to its current arguments and return values.
-void UpdateFuncType(FuncOp func) {
+void UpdateFuncType(func::FuncOp func) {
llvm::SmallVector<Type, 8> arg_types;
for (auto arg : func.getArguments()) arg_types.push_back(arg.getType());
func.setType(
@@ -70,7 +70,7 @@
// Modifies a function's signature to rewrite tensor list arguments to buffers
// and sizes.
void ModifyFunctionSignature(
- FuncOp func, Type size_type,
+ func::FuncOp func, Type size_type,
llvm::SmallDenseMap<Value, SizeInfo>* buffer_to_size,
llvm::function_ref<llvm::Optional<Type>(int64_t)> arg_to_buffer_type,
llvm::function_ref<bool(int64_t)> arg_buffer_size_is_fixed) {
@@ -96,7 +96,7 @@
// PartitionedCall/StatefulPartitionedCall.
struct PartitionedCallDecompositionInfo {
bool signature_change;
- FuncOp decomposed_callee;
+ func::FuncOp decomposed_callee;
llvm::SmallDenseMap<int64_t, int64_t> buffer_arg_to_size_arg;
// Each element is a tuple of (buffer_return_index, size_return_index,
// fixed_size).
@@ -138,7 +138,8 @@
// the added size indices, which is a list of tuples (buffer_return_index,
// size_return_index, fixed_size).
llvm::SmallVector<std::tuple<int64_t, int64_t, bool>, 8> ModifyFunctionReturn(
- FuncOp func, const llvm::SmallDenseMap<Value, SizeInfo>& buffer_to_size) {
+ func::FuncOp func,
+ const llvm::SmallDenseMap<Value, SizeInfo>& buffer_to_size) {
auto output_buffer_to_size = AddTensorListSizesToTerminator<func::ReturnOp>(
func.front(), buffer_to_size);
UpdateFuncType(func);
@@ -206,7 +207,7 @@
template <class CaseOrIfOp>
LogicalResult HandleCaseOrIfOp(
- CaseOrIfOp op, ArrayRef<FuncOp> branches, ModuleOp module,
+ CaseOrIfOp op, ArrayRef<func::FuncOp> branches, ModuleOp module,
llvm::SmallDenseMap<Value, SizeInfo>* buffer_to_size,
llvm::StringMap<PartitionedCallDecompositionInfo>*
decomposed_partitioned_call_callees) {
@@ -224,7 +225,7 @@
};
OpBuilder builder(op);
for (const auto& pair : llvm::zip(branches, branch_maps)) {
- FuncOp branch = std::get<0>(pair);
+ func::FuncOp branch = std::get<0>(pair);
llvm::SmallDenseMap<Value, SizeInfo>& branch_map = std::get<1>(pair);
ModifyFunctionSignature(branch, cutil::GetSizeType(builder), &branch_map,
find_arg_buffer_type, arg_buffer_size_is_fixed);
@@ -250,7 +251,7 @@
if (it == buffer_to_size->end()) continue;
new_operands.push_back(it->getSecond().size);
}
- FuncOp first_branch = branches.front();
+ func::FuncOp first_branch = branches.front();
auto new_op = OpBuilder(op).create<CaseOrIfOp>(
op.getLoc(), first_branch.getFunctionType().getResults(), new_operands,
op->getAttrs());
@@ -418,7 +419,7 @@
template <typename CallOp>
LogicalResult HandlePartitionedCallOp(
- CallOp call, FuncOp callee, ModuleOp module,
+ CallOp call, func::FuncOp callee, ModuleOp module,
llvm::SmallDenseMap<Value, SizeInfo>* buffer_to_size,
llvm::StringMap<PartitionedCallDecompositionInfo>*
decomposed_partitioned_call_callees) {
@@ -446,7 +447,7 @@
new_call->setAttr(
"f", SymbolRefAttr::get(
builder.getContext(),
- const_cast<FuncOp&>(info.decomposed_callee).getName()));
+ const_cast<func::FuncOp&>(info.decomposed_callee).getName()));
for (const auto& entry : info.buffer_ret_to_size_ret) {
(*buffer_to_size)[new_call.getResult(std::get<0>(entry))] = {
new_call.getResult(std::get<1>(entry)), std::get<2>(entry)};
@@ -463,7 +464,7 @@
}
// Rewrite the callee.
llvm::SmallDenseMap<Value, SizeInfo> callee_map;
- FuncOp lowered_callee = callee;
+ func::FuncOp lowered_callee = callee;
if (!callee.isPrivate()) {
// Clone non-private callee in case of signature change.
lowered_callee = callee.clone();
@@ -886,7 +887,7 @@
return failure();
}
} else if (auto case_op = llvm::dyn_cast<TF::CaseOp>(&op)) {
- SmallVector<FuncOp, 2> branches;
+ SmallVector<func::FuncOp, 2> branches;
case_op.get_branch_functions(branches);
if (failed(HandleCaseOrIfOp(case_op, branches, module, buffer_to_size,
decomposed_partitioned_call_callees))) {
@@ -940,7 +941,7 @@
void TensorListOpsDecompositionPass::runOnOperation() {
auto module = getOperation();
- auto main = module.lookupSymbol<FuncOp>("main");
+ auto main = module.lookupSymbol<func::FuncOp>("main");
if (!main) return;
if (failed(DecomposeTensorListOps(&main.front(), module))) {
signalPassFailure();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/test_cluster_ops_by_policy.cc b/tensorflow/compiler/mlir/tensorflow/transforms/test_cluster_ops_by_policy.cc
index 9353ba7..16a9469 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/test_cluster_ops_by_policy.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/test_cluster_ops_by_policy.cc
@@ -56,7 +56,7 @@
};
void TestClusteringPolicyPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
ValuesConstraintSet constraints;
ClusteringPolicySet policies;
@@ -78,7 +78,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTestClusteringPolicyPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTestClusteringPolicyPass() {
return std::make_unique<TestClusteringPolicyPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/test_passes.h b/tensorflow/compiler/mlir/tensorflow/transforms/test_passes.h
index a212e41..54959c9 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/test_passes.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/test_passes.h
@@ -28,14 +28,15 @@
std::unique_ptr<OperationPass<ModuleOp>> CreateFreezeVariableTestPass();
// Test pass for applying TF->TF lowering patterns.
-std::unique_ptr<OperationPass<FuncOp>> CreateTestTFLowerTFPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTestTFLowerTFPass();
// Test passes for visitor util.
-std::unique_ptr<OperationPass<FuncOp>> CreateTestVisitorUtilPass();
-std::unique_ptr<OperationPass<FuncOp>> CreateTestVisitorUtilInterruptPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTestVisitorUtilPass();
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTestVisitorUtilInterruptPass();
// Test operation clustering based on user defined policy.
-std::unique_ptr<OperationPass<FuncOp>> CreateTestClusteringPolicyPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTestClusteringPolicyPass();
// Test pass for analyzing side-effect analysis result.
std::unique_ptr<OperationPass<ModuleOp>> CreateTestSideEffectAnalysisPass();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/test_resource_alias_analysis.cc b/tensorflow/compiler/mlir/tensorflow/transforms/test_resource_alias_analysis.cc
index 53c034d..22577b4 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/test_resource_alias_analysis.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/test_resource_alias_analysis.cc
@@ -50,7 +50,7 @@
"purpose.";
}
- void runOnFunction(FuncOp func,
+ void runOnFunction(func::FuncOp func,
const TF::ResourceAliasAnalysis::Info& analysis) {
int64_t next_id = 0;
llvm::SmallDenseMap<Value, int64_t, 8> ids;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/test_side_effect_analysis.cc b/tensorflow/compiler/mlir/tensorflow/transforms/test_side_effect_analysis.cc
index 48112dc..2ad0e6b 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/test_side_effect_analysis.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/test_side_effect_analysis.cc
@@ -43,7 +43,7 @@
TestSideEffectAnalysisPass, TF::SideEffectAnalysis> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSideEffectAnalysisPass)
- void runOnFunction(FuncOp func,
+ void runOnFunction(func::FuncOp func,
const TF::SideEffectAnalysis::Info& analysis) {
int64_t next_id = 0;
llvm::SmallDenseMap<Operation*, int64_t, 8> ids;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization_pass.cc
index e0fabb4..a36a79c 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_data_optimization_pass.cc
@@ -38,7 +38,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTFDataOptimizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTFDataOptimizationPass() {
return std::make_unique<TFDataOptimization>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_device_assignment.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tf_device_assignment.cc
index 42309c9..19826da 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_device_assignment.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_device_assignment.cc
@@ -52,7 +52,7 @@
};
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateSimpleTFDeviceAssignmentPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateSimpleTFDeviceAssignmentPass(
llvm::StringRef default_device) {
return std::make_unique<SimpleTFDeviceAssignmentPass>(default_device);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td b/tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
index 3cb0108..f2679ac 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_passes.td
@@ -2111,7 +2111,7 @@
}];
}
-def DeviceIndexSelectorPass : Pass<"tf-device-index-selector", "FuncOp"> {
+def DeviceIndexSelectorPass : Pass<"tf-device-index-selector", "mlir::func::FuncOp"> {
let summary = "Fold tf.DeviceIndex to constant.";
let constructor = "TF::CreateDeviceIndexSelectorPass()";
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.cc
index e86cd6d..7ff06b9 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_freeze_variables.cc
@@ -106,7 +106,7 @@
PropagateUsage(read_variable_op, value);
} else if (auto call = dyn_cast<CallOpInterface>(user_op)) {
(*arguments_to_erase)[call].push_back(argument_index);
- if (auto func = dyn_cast<FuncOp>(call.resolveCallable())) {
+ if (auto func = dyn_cast<func::FuncOp>(call.resolveCallable())) {
(*arguments_to_erase)[func].push_back(argument_index);
work_list->push_back(std::make_pair(&func.getRegion(), argument_index));
}
@@ -179,11 +179,11 @@
// Helper that returns the FuncOp that is the SessionInit function which
// will be called to initialize all resources.
// Returns nullptr if no function is found.
-FuncOp GetSessionInitializerFunc(ModuleOp module) {
+func::FuncOp GetSessionInitializerFunc(ModuleOp module) {
auto session_init_op = tf_saved_model::GetSessionInitializerOp(module);
SymbolTable symbol_table(module);
if (session_init_op && !session_init_op.initializers().empty()) {
- FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
+ func::FuncOp init_func_op = symbol_table.lookup<mlir::func::FuncOp>(
session_init_op.initializers()[0].cast<FlatSymbolRefAttr>().getValue());
return init_func_op;
}
@@ -216,7 +216,7 @@
// the session init function 'sesion_init_func'
void RemoveVariablesInitializations(
const llvm::SmallVector<TF::VarHandleOp, 4>& var_handle_ops,
- FuncOp sesion_init_func) {
+ func::FuncOp sesion_init_func) {
// We identify the variables using (device, container, shared_name) of the
// resource. Capture them here and use them to identify the useless
// initializations.
@@ -314,12 +314,12 @@
return failure();
}
- FuncOp session_init_func = GetSessionInitializerFunc(module);
+ func::FuncOp session_init_func = GetSessionInitializerFunc(module);
TF::ResourceAnalyzer analyzer(module, /*skip_session_init=*/true);
llvm::SmallVector<TF::VarHandleOp, 4> variables;
// Capture list of all read only variables.
- for (auto func : module.getOps<FuncOp>()) {
+ for (auto func : module.getOps<func::FuncOp>()) {
if (func == session_init_func) continue;
for (auto var_handle_op : func.getOps<TF::VarHandleOp>()) {
if (!analyzer.IsPotentiallyWritten(var_handle_op.resource())) {
@@ -364,7 +364,7 @@
for (auto& items : arguments_to_erase) {
auto* user_op = items.first;
auto& args_to_erase = items.second;
- if (auto func = dyn_cast<FuncOp>(user_op)) {
+ if (auto func = dyn_cast<func::FuncOp>(user_op)) {
// To update a function we will need to:
// 1) Remove the unused arguments from the function itself.
// 2) Remove any returns that are not needed from the function terminator
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h b/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h
index 6f56439..4618489 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_saved_model_passes.h
@@ -48,7 +48,7 @@
CreateRemoveVariablesInSessionInitializerPass();
// Creates a pass that removes duplicate 'tf_saved_model.bound_input' bindings.
-std::unique_ptr<OperationPass<FuncOp>> CreateDedupBoundInputBindingPass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDedupBoundInputBindingPass();
#define GEN_PASS_REGISTRATION
#include "tensorflow/compiler/mlir/tensorflow/transforms/tf_savedmodel_passes.h.inc"
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tf_test_passes.td b/tensorflow/compiler/mlir/tensorflow/transforms/tf_test_passes.td
index 0297937..b780ae4 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tf_test_passes.td
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tf_test_passes.td
@@ -40,7 +40,7 @@
let constructor = "::mlir::tf_test::CreateInitTextFileToImportSavedModelTestPass()";
}
-def TestTensorFlowLowerTFPass : Pass<"test-tf-lower-tf", "FuncOp"> {
+def TestTensorFlowLowerTFPass : Pass<"test-tf-lower-tf", "mlir::func::FuncOp"> {
let summary = "Test pass for TF->TF lowerings";
let constructor = "::mlir::tf_test::CreateTestTFLowerTFPass()";
@@ -52,13 +52,13 @@
];
}
-def TestResourceAliasAnalysis : Pass<"tf-test-resource-alias-analysis", "FuncOp"> {
+def TestResourceAliasAnalysis : Pass<"tf-test-resource-alias-analysis", "mlir::func::FuncOp"> {
let summary = "Add remarks based on resource alias analysis result, for "
"testing purpose.";
let constructor = "::mlir::tf_test::CreateTestResourceAliasAnalysisPass()";
}
-def TestClusteringPolicyPass : Pass<"tf-test-clustering-policy", "FuncOp"> {
+def TestClusteringPolicyPass : Pass<"tf-test-clustering-policy", "mlir::func::FuncOp"> {
let summary = "Test pass for clustering based on the user defined policy";
let constructor = "::mlir::tf_test::CreateTestClusteringPolicyPass()";
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc
index 4320efe..5759c37 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tfg-to-tfe.cc
@@ -145,8 +145,8 @@
// will be the operations inside the function body rather than representing
// them in the function signature.
FunctionType func_type = rewriter.getFunctionType({}, {});
- FuncOp func = rewriter.create<FuncOp>(loc, kImportModelDefaultGraphFuncName,
- func_type);
+ func::FuncOp func = rewriter.create<func::FuncOp>(
+ loc, kImportModelDefaultGraphFuncName, func_type);
rewriter.setInsertionPointToStart(func.addEntryBlock());
auto executor_graph =
rewriter.create<tf_executor::GraphOp>(loc, func_type.getResults());
@@ -178,7 +178,7 @@
Location loc = graph_func.getLoc();
FunctionType ftype = graph_func.getFunctionType();
- FuncOp func = rewriter.create<FuncOp>(
+ func::FuncOp func = rewriter.create<func::FuncOp>(
graph_func.getLoc(),
graph_func->getAttrOfType<StringAttr>(SymbolTable::getSymbolAttrName())
.getValue(),
@@ -502,7 +502,7 @@
target.addLegalDialect<TF::TensorFlowDialect>();
target.addLegalDialect<tf_executor::TensorFlowExecutorDialect>();
target.addLegalOp<ModuleOp>();
- target.addLegalOp<FuncOp>();
+ target.addLegalOp<func::FuncOp>();
target.addLegalOp<func::ReturnOp>();
RewritePatternSet patterns(&context);
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc
index 541a36b..084158c 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_cluster_formation.cc
@@ -622,7 +622,8 @@
}
LogicalResult FormClustersInFunction(
- FuncOp func, const TF::SideEffectAnalysis::Info& side_effect_analysis) {
+ func::FuncOp func,
+ const TF::SideEffectAnalysis::Info& side_effect_analysis) {
if (!llvm::hasSingleElement(func))
return func.emitOpError("Expecting a single block function");
@@ -659,7 +660,7 @@
void TPUClusterFormationPass::runOnOperation() {
auto& side_effect_analysis = getAnalysis<TF::SideEffectAnalysis>();
- for (auto func : getOperation().getOps<FuncOp>())
+ for (auto func : getOperation().getOps<func::FuncOp>())
if (!func.isExternal() &&
failed(FormClustersInFunction(
func, side_effect_analysis.GetAnalysisForFunc(func))))
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_colocate_composite_resource_ops.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_colocate_composite_resource_ops.cc
index 4cd6c20..45597bc 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_colocate_composite_resource_ops.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_colocate_composite_resource_ops.cc
@@ -128,7 +128,8 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTPUColocateCompositeResourceOps() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTPUColocateCompositeResourceOps() {
return std::make_unique<TPUColocateCompositeResourceOps>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc
index a1ffd39..78b3f10 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_device_propagation.cc
@@ -41,7 +41,7 @@
constexpr char kFuncDeviceAttr[] = "tf.device";
// Checks if a function only contains a tf_executor.graph.
-bool IsSupportedGraph(FuncOp func) {
+bool IsSupportedGraph(func::FuncOp func) {
if (!llvm::hasSingleElement(func)) return false;
Block& block = func.front();
@@ -140,7 +140,8 @@
// Propagates devices from function arguments.
void PropagateDevicesFromArguments(
- FuncOp func, llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
+ func::FuncOp func,
+ llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
for (BlockArgument& arg : func.getArguments()) {
auto arg_device_attr =
func.getArgAttrOfType<StringAttr>(arg.getArgNumber(), kFuncDeviceAttr);
@@ -211,7 +212,7 @@
// Propagates devices to function results.
void PropagateDevicesToResults(
- FuncOp func, tf_executor::FetchOp fetch,
+ func::FuncOp func, tf_executor::FetchOp fetch,
const llvm::DenseMap<Value, llvm::StringRef>& value_to_device) {
for (OpOperand& operand : fetch.getOperation()->getOpOperands()) {
if (operand.get().getType().isa<tf_executor::ControlType>()) break;
@@ -232,7 +233,7 @@
};
void TPUDevicePropagation::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (!IsSupportedGraph(func)) return;
llvm::DenseMap<Value, llvm::StringRef> value_to_device;
@@ -244,7 +245,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTPUDevicePropagationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTPUDevicePropagationPass() {
return std::make_unique<TPUDevicePropagation>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc
index 7469a9e..c09499f 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_dynamic_layout_pass.cc
@@ -59,7 +59,7 @@
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TPUDynamicLayoutPass)
void runOnFunction(
- FuncOp func,
+ func::FuncOp func,
const TF::ResourceAliasAnalysis::Info& resource_alias_analysis);
StringRef getArgument() const final { return "tf-tpu-dynamic-layout-pass"; }
@@ -93,7 +93,7 @@
};
// Check all generator aliases (ops or function argument) are on CPU.
- FuncOp func = iterator_op->getParentOfType<FuncOp>();
+ func::FuncOp func = iterator_op->getParentOfType<func::FuncOp>();
return llvm::all_of(aliases, [&](Value alias) {
// Ignore non-generator aliases.
if (!is_generator(alias)) return true;
@@ -261,7 +261,7 @@
}
void TPUDynamicLayoutPass::runOnFunction(
- FuncOp func,
+ func::FuncOp func,
const TF::ResourceAliasAnalysis::Info& resource_alias_analysis) {
func.walk([&](TF::_TPUCompileMlirOp compile) {
// Detect tf._TPUCompileMlir -> tf.TPUExecute(s).
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_head_tail_outside_compilation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_head_tail_outside_compilation.cc
index 8210819..730cb7c 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_head_tail_outside_compilation.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_head_tail_outside_compilation.cc
@@ -134,7 +134,7 @@
const TF::SideEffectAnalysis& side_effect_analysis,
tf_device::ClusterOp cluster) {
const auto& analysis = side_effect_analysis.GetAnalysisForFunc(
- cluster->getParentOfType<FuncOp>());
+ cluster->getParentOfType<func::FuncOp>());
Region* cluster_region = &cluster.body();
llvm::SmallSetVector<Operation*, 4> head_outside_compiled_ops;
@@ -232,7 +232,7 @@
llvm::SmallVectorImpl<Operation*>* tail_outside_compiled_ops,
llvm::SmallVectorImpl<Value>* cluster_results) {
const auto& analysis = side_effect_analysis.GetAnalysisForFunc(
- cluster->getParentOfType<FuncOp>());
+ cluster->getParentOfType<func::FuncOp>());
Region* cluster_region = &cluster.body();
llvm::SmallSetVector<Operation*, 4> tail_outside_compiled_ops_set;
Operation* terminator = cluster.GetBody().getTerminator();
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc
index 88f840d..314176a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_extract_outside_compilation.cc
@@ -64,9 +64,9 @@
// Build a function containing `ops` with `inputs` and `outputs` using
// `builder`. The `ops` are cloned and modified to use the function arguments
// as inputs.
-FuncOp BuildFunction(llvm::ArrayRef<Operation*> ops,
- llvm::ArrayRef<Value> inputs,
- llvm::ArrayRef<Value> outputs, OpBuilder* builder) {
+func::FuncOp BuildFunction(llvm::ArrayRef<Operation*> ops,
+ llvm::ArrayRef<Value> inputs,
+ llvm::ArrayRef<Value> outputs, OpBuilder* builder) {
llvm::SmallVector<Type, 4> operand_types;
operand_types.reserve(inputs.size());
for (Value v : inputs) operand_types.emplace_back(v.getType());
@@ -76,8 +76,8 @@
auto func_type = builder->getFunctionType(operand_types, output_types);
- FuncOp outlined_func =
- FuncOp::create(ops.front()->getLoc(), kHostFunctionAttr, func_type);
+ func::FuncOp outlined_func =
+ func::FuncOp::create(ops.front()->getLoc(), kHostFunctionAttr, func_type);
// Create function body.
Block* outlined_func_block = outlined_func.addEntryBlock();
@@ -102,7 +102,7 @@
// Encapsulates `func` in a module and serializes that module.
// `serialized_func_module` is set to the serialized module.
-void EncapsulateFuncAndSerialize(FuncOp func,
+void EncapsulateFuncAndSerialize(func::FuncOp func,
std::string* serialized_func_module) {
// Create a new module to hold func and all referenced functions.
OwningOpRef<mlir::ModuleOp> module_for_func =
@@ -529,9 +529,9 @@
std::string serialized_func_module;
if (HasDynamicOutputs(external_outputs.getArrayRef())) {
- FuncOp shape_op = BuildFunction(clustered_ops.getArrayRef(),
- external_operands.getArrayRef(),
- external_outputs.getArrayRef(), &builder);
+ func::FuncOp shape_op = BuildFunction(
+ clustered_ops.getArrayRef(), external_operands.getArrayRef(),
+ external_outputs.getArrayRef(), &builder);
EncapsulateFuncAndSerialize(shape_op, &serialized_func_module);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc
index 8f3f37a..b8fbc6a 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_host_computation_expansion.cc
@@ -125,7 +125,8 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateTPUHostComputationExpansionPass() {
+std::unique_ptr<OperationPass<func::FuncOp>>
+CreateTPUHostComputationExpansionPass() {
return std::make_unique<TPUHostComputationExpansionPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc
index 8919a42..5b77d68 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_identity_pruning.cc
@@ -48,24 +48,25 @@
};
// Collects all reachable functions (via call ops) from a given region.
-SmallVector<FuncOp, 4> CollectReachableFunctions(Region& region) {
- llvm::SmallPtrSet<FuncOp, 4> reachable_funcs;
+SmallVector<func::FuncOp, 4> CollectReachableFunctions(Region& region) {
+ llvm::SmallPtrSet<func::FuncOp, 4> reachable_funcs;
auto collect_reachable_funcs =
- [&reachable_funcs](Region& src, SmallVectorImpl<FuncOp>& funcs_to_visit) {
+ [&reachable_funcs](Region& src,
+ SmallVectorImpl<func::FuncOp>& funcs_to_visit) {
src.walk([&reachable_funcs, &funcs_to_visit](CallOpInterface call_op) {
- auto func = dyn_cast_or_null<FuncOp>(call_op.resolveCallable());
+ auto func = dyn_cast_or_null<func::FuncOp>(call_op.resolveCallable());
if (func && reachable_funcs.insert(func).second)
funcs_to_visit.push_back(func);
});
};
- SmallVector<FuncOp, 4> funcs_to_visit;
+ SmallVector<func::FuncOp, 4> funcs_to_visit;
collect_reachable_funcs(region, funcs_to_visit);
while (!funcs_to_visit.empty()) {
- SmallVector<FuncOp, 4> new_funcs_to_visit;
- for (FuncOp func_to_visit : funcs_to_visit) {
+ SmallVector<func::FuncOp, 4> new_funcs_to_visit;
+ for (func::FuncOp func_to_visit : funcs_to_visit) {
if (!func_to_visit.getCallableRegion()) continue;
collect_reachable_funcs(*func_to_visit.getCallableRegion(),
new_funcs_to_visit);
@@ -95,7 +96,7 @@
for (tf_device::ClusterOp cluster : clusters) {
RemoveIdentityFromRegion(cluster.body());
auto reachable_funcs = CollectReachableFunctions(cluster.body());
- for (FuncOp reachable_func : reachable_funcs)
+ for (func::FuncOp reachable_func : reachable_funcs)
RemoveIdentityFromRegion(*reachable_func.getCallableRegion());
}
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_merge_variables_with_execute.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_merge_variables_with_execute.cc
index 6aa271a..8912e78 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_merge_variables_with_execute.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_merge_variables_with_execute.cc
@@ -557,7 +557,7 @@
void TPUMergeVariablesWithExecutePass::runOnOperation() {
ModuleOp module = getOperation();
mlir::TF::ResourceAliasAnalysis resource_analysis(module);
- module.walk([&](FuncOp func) {
+ module.walk([&](func::FuncOp func) {
const auto& resource_analysis_info =
resource_analysis.GetAnalysisForFunc(func);
// Find all the executes first, since we will mutate the nodes around each
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
index 8821f8f..16370d4 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_parallel_execute_sink_resource_write.cc
@@ -155,7 +155,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateTPUParallelExecuteSinkResourceWritePass() {
return std::make_unique<TPUParallelExecuteSinkResourceWrite>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc
index 3e4f143..056a1c7 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_reorder_replicate_and_partitioned_inputs.cc
@@ -133,7 +133,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateTPUReorderReplicateAndPartitionedInputsPass() {
return std::make_unique<TPUReorderReplicateAndPartitionedInputsPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc
index c6760e8..b2d83b4 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_partitioning.cc
@@ -151,7 +151,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateTPUResourceReadsWritesPartitioningPass() {
return std::make_unique<TPUResourceReadsWritesPartitioningPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc
index 0b2ad17..e5a2038 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_resource_read_for_write.cc
@@ -115,7 +115,7 @@
auto new_cluster_func = builder.create<tf_device::ClusterFuncOp>(
loc, cluster_func.getResultTypes(), operands, cluster_func->getAttrs());
cluster_func.replaceAllUsesWith(new_cluster_func);
- FuncOp func = cluster_func.getFunc();
+ func::FuncOp func = cluster_func.getFunc();
Block& block = func.front();
for (Value read_operand : read_operands)
block.addArgument(read_operand.getType(), loc);
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_rewrite_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_rewrite_pass.cc
index a5ac9b7..ec53f08 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_rewrite_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_rewrite_pass.cc
@@ -84,11 +84,11 @@
return llvm::formatv("requires attribute '{0}'", attribute).str();
}
-LogicalResult EncapsulateFuncAndSerialize(FuncOp entry_func,
+LogicalResult EncapsulateFuncAndSerialize(func::FuncOp entry_func,
std::string* serialized_func_module) {
ModuleOp module = entry_func->getParentOfType<ModuleOp>();
SymbolTable entry_module_table(module);
- llvm::SmallVector<FuncOp, 4> referenced({entry_func});
+ llvm::SmallVector<func::FuncOp, 4> referenced({entry_func});
// Create a new module to hold func and all referenced functions.
OwningOpRef<mlir::ModuleOp> module_for_func =
@@ -105,7 +105,7 @@
auto func = referenced.pop_back_val();
// Skip functions that have already been cloned into new module.
- if (symbol_table.lookup<FuncOp>(func.getName())) continue;
+ if (symbol_table.lookup<func::FuncOp>(func.getName())) continue;
// Find any SymbolRefAttr in func that maps to a FuncOp. We need to clone
// all found FuncOps to new_module to make sure new_module is
@@ -113,7 +113,7 @@
Optional<SymbolTable::UseRange> uses = SymbolTable::getSymbolUses(func);
assert(uses && "expected to be able to collect symbol uses");
for (SymbolTable::SymbolUse use : *uses) {
- FuncOp referenced_func = entry_module_table.lookup<FuncOp>(
+ func::FuncOp referenced_func = entry_module_table.lookup<func::FuncOp>(
use.getSymbolRef().cast<FlatSymbolRefAttr>().getValue());
// Skip Symbols that do not map to a function.
@@ -355,8 +355,9 @@
}
FlatSymbolRefAttr func_attr = cluster_func.funcAttr();
- FuncOp func = cluster_func->getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(
- func_attr.getValue());
+ func::FuncOp func =
+ cluster_func->getParentOfType<ModuleOp>().lookupSymbol<func::FuncOp>(
+ func_attr.getValue());
std::string txt_module;
if (failed(EncapsulateFuncAndSerialize(func, &txt_module))) return nullptr;
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_sharding_identification_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_sharding_identification_pass.cc
index c9e25e5..76256cf 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_sharding_identification_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_sharding_identification_pass.cc
@@ -158,7 +158,8 @@
}
if (auto call_op = llvm::dyn_cast<CallOpInterface>(owner)) {
- FuncOp func = llvm::dyn_cast<FuncOp>(call_op.resolveCallable());
+ func::FuncOp func =
+ llvm::dyn_cast<func::FuncOp>(call_op.resolveCallable());
if (!func) continue;
next_values_to_visit.push_back(
func.getArgument(use.getOperandNumber()));
@@ -180,7 +181,7 @@
void IdentifyXlaShardingForComputationInputs(
StringRef logical_core_0_sharding, bool use_spmd,
bool infer_from_computation, tf_device::ClusterFuncOp cluster_func,
- FuncOp func, Builder* builder,
+ func::FuncOp func, Builder* builder,
llvm::SmallVectorImpl<llvm::StringRef>& sharding_for_args) {
// Look up function definition from module.
Block& function_block = func.front();
@@ -256,7 +257,7 @@
}
// Looks up arg->retval aliases for every argument, and builds a reverse map.
-void ExtractAliases(FuncOp func, llvm::SmallVectorImpl<int>& aliases) {
+void ExtractAliases(func::FuncOp func, llvm::SmallVectorImpl<int>& aliases) {
aliases.resize(func.getNumResults(), -1);
for (int i = 0; i < func.getNumArguments(); i++) {
if (auto v = func.getArgAttrOfType<mlir::IntegerAttr>(i, kAliasingAttr)) {
@@ -328,7 +329,8 @@
}
if (auto call_op = llvm::dyn_cast_or_null<CallOpInterface>(def)) {
- FuncOp func = llvm::dyn_cast<FuncOp>(call_op.resolveCallable());
+ func::FuncOp func =
+ llvm::dyn_cast<func::FuncOp>(call_op.resolveCallable());
if (!func) continue;
value_to_visit = func.front().getTerminator()->getOperand(
value_to_visit.cast<OpResult>().getResultNumber());
@@ -345,7 +347,7 @@
void IdentifyXlaShardingForComputationOutputs(
StringRef logical_core_0_sharding, bool use_spmd,
bool infer_from_computation, tf_device::ClusterFuncOp cluster_func,
- FuncOp func, Builder* builder,
+ func::FuncOp func, Builder* builder,
const llvm::SmallVectorImpl<llvm::StringRef>& sharding_for_args,
llvm::SmallVectorImpl<llvm::StringRef>& sharding_for_rets) {
Block& function_block = func.front();
@@ -406,8 +408,9 @@
void IdentifyXlaShardingForTPUComputation(
Builder* builder, tf_device::ClusterFuncOp cluster_func) {
// Look up function definition from module.
- FuncOp func = cluster_func->getParentOfType<ModuleOp>().lookupSymbol<FuncOp>(
- cluster_func.func());
+ func::FuncOp func =
+ cluster_func->getParentOfType<ModuleOp>().lookupSymbol<func::FuncOp>(
+ cluster_func.func());
// By default inputs/outputs have maximal sharding and are assigned to logical
// core 0 if no sharding is defined.
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
index 0267507..e5580af 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_space_to_depth_pass.cc
@@ -69,14 +69,14 @@
};
// Updates func argument type to have the updated input shape.
-void UpdateFuncType(FuncOp func) {
+void UpdateFuncType(func::FuncOp func) {
auto arg_types = func.front().getArgumentTypes();
auto result_types = func.front().getTerminator()->getOperandTypes();
func.setType(FunctionType::get(func.getContext(), arg_types, result_types));
}
void HandleFuncOp(Operation* op) {
- auto func = llvm::cast<FuncOp>(op);
+ auto func = llvm::cast<func::FuncOp>(op);
UpdateFuncType(func);
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_update_embedding_enqueue_op_inputs.cc b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_update_embedding_enqueue_op_inputs.cc
index 62768bd..8b06c75 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/tpu_update_embedding_enqueue_op_inputs.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/tpu_update_embedding_enqueue_op_inputs.cc
@@ -63,7 +63,7 @@
}
LogicalResult FindTPUEmbeddingOps(
- FuncOp func_op, llvm::StringMap<Operation*>* enqueue_op_map,
+ func::FuncOp func_op, llvm::StringMap<Operation*>* enqueue_op_map,
llvm::StringMap<Operation*>* recv_activation_op_map,
llvm::StringMap<Operation*>* send_gradient_op_map) {
auto walk_result = func_op.walk([&](Operation* op) {
@@ -172,7 +172,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateTPUUpdateEmbeddingEnqueueOpInputsPass() {
return std::make_unique<TPUUpdateEmbeddingEnqueueOpInputsPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc b/tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
index da9815c..2e01a42 100644
--- a/tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
+++ b/tensorflow/compiler/mlir/tensorflow/transforms/unroll_batch_matmul.cc
@@ -283,7 +283,7 @@
return success();
}
-std::unique_ptr<OperationPass<FuncOp>> CreateUnrollBatchMatMulPassPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateUnrollBatchMatMulPassPass() {
return std::make_unique<UnrollBatchMatMulPass>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc b/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc
index 5277f23..d0d0a69 100644
--- a/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/breakup-islands.cc
@@ -56,7 +56,7 @@
return "Transform from TF control dialect to TF executor dialect.";
}
- void runOnFunction(FuncOp func,
+ void runOnFunction(func::FuncOp func,
const TF::SideEffectAnalysis::Info& side_effect_analysis);
void BreakUpIsland(tf_executor::IslandOp island_op,
@@ -66,7 +66,8 @@
};
void BreakUpIslands::runOnFunction(
- FuncOp func, const TF::SideEffectAnalysis::Info& side_effect_analysis) {
+ func::FuncOp func,
+ const TF::SideEffectAnalysis::Info& side_effect_analysis) {
auto graph_op_range = func.front().without_terminator();
tf_executor::GraphOp graph_op;
diff --git a/tensorflow/compiler/mlir/tensorflow/translate/tf_executor_to_functional.cc b/tensorflow/compiler/mlir/tensorflow/translate/tf_executor_to_functional.cc
index 71b5593..d5c1fe0 100644
--- a/tensorflow/compiler/mlir/tensorflow/translate/tf_executor_to_functional.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/tf_executor_to_functional.cc
@@ -80,7 +80,7 @@
}
} // end anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateExecutorDialectToFunctionalConversionPass() {
return std::make_unique<ExecutorDialectToFunctionalConversion>();
}
diff --git a/tensorflow/compiler/mlir/tensorflow/translate/tf_functional_to_executor.cc b/tensorflow/compiler/mlir/tensorflow/translate/tf_functional_to_executor.cc
index 1e33c8b..cb8e798 100644
--- a/tensorflow/compiler/mlir/tensorflow/translate/tf_functional_to_executor.cc
+++ b/tensorflow/compiler/mlir/tensorflow/translate/tf_functional_to_executor.cc
@@ -99,7 +99,7 @@
}
}
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreateFunctionalToExecutorDialectConversionPass() {
return std::make_unique<FunctionalToExecutorDialectConversion>();
}
diff --git a/tensorflow/compiler/mlir/tfjs/transforms/optimize.cc b/tensorflow/compiler/mlir/tfjs/transforms/optimize.cc
index 8d2ff29..7289e88 100644
--- a/tensorflow/compiler/mlir/tfjs/transforms/optimize.cc
+++ b/tensorflow/compiler/mlir/tfjs/transforms/optimize.cc
@@ -57,7 +57,7 @@
} // namespace
// Creates an instance of the TensorFlow.js dialect Optimize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateOptimizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizePass() {
return std::make_unique<Optimize>();
}
diff --git a/tensorflow/compiler/mlir/tfjs/transforms/passes.h b/tensorflow/compiler/mlir/tfjs/transforms/passes.h
index 5aa4533..df75d221 100644
--- a/tensorflow/compiler/mlir/tfjs/transforms/passes.h
+++ b/tensorflow/compiler/mlir/tfjs/transforms/passes.h
@@ -25,7 +25,7 @@
namespace tfjs {
// Creates an instance of the TensorFlow Lite dialect Optimize pass.
-std::unique_ptr<OperationPass<FuncOp>> CreateOptimizePass();
+std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizePass();
#define GEN_PASS_REGISTRATION
#include "tensorflow/compiler/mlir/tfjs/transforms/passes.h.inc"
diff --git a/tensorflow/compiler/mlir/tfr/passes/canonicalize.cc b/tensorflow/compiler/mlir/tfr/passes/canonicalize.cc
index f04d23c..e7cd160 100644
--- a/tensorflow/compiler/mlir/tfr/passes/canonicalize.cc
+++ b/tensorflow/compiler/mlir/tfr/passes/canonicalize.cc
@@ -156,7 +156,7 @@
} // namespace
-void populateCanonicalizationPatterns(FuncOp func,
+void populateCanonicalizationPatterns(func::FuncOp func,
RewritePatternSet &patterns) {
MLIRContext *context = func.getContext();
mlir::Dialect *tf = context->getLoadedDialect<mlir::TF::TensorFlowDialect>();
diff --git a/tensorflow/compiler/mlir/tfr/passes/decompose.cc b/tensorflow/compiler/mlir/tfr/passes/decompose.cc
index 9b1c349..5f7d860 100644
--- a/tensorflow/compiler/mlir/tfr/passes/decompose.cc
+++ b/tensorflow/compiler/mlir/tfr/passes/decompose.cc
@@ -95,7 +95,7 @@
// Decompose the TF ops with the registered composition library.
class DecomposeTFOpsPass
- : public PassWrapper<DecomposeTFOpsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<DecomposeTFOpsPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DecomposeTFOpsPass)
@@ -128,7 +128,7 @@
#include "tensorflow/compiler/mlir/tfr/passes/generated_decompose.inc"
void DecomposeTFOpsPass::ApplyCanonicalization() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
populateWithGenerated(patterns);
@@ -138,7 +138,7 @@
}
LogicalResult DecomposeTFOpsPass::RewriteUnregisteredTFOps() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
SymbolTable table(external_tfr_module_.hasValue()
? *external_tfr_module_
: func->getParentOfType<ModuleOp>());
@@ -280,7 +280,7 @@
LogicalResult DecomposeTFOpsPass::InlineTFRFuncCalls() {
// The Inliner will automatically use the registered dialect inliner.
InlinerInterface inliner(&getContext());
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
SymbolTable table(external_tfr_module_.hasValue()
? *external_tfr_module_
: func->getParentOfType<ModuleOp>());
@@ -353,7 +353,7 @@
} // namespace
// Creates an instance of the pass to decompose the TF ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateDecomposeTFOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass(
llvm::Optional<ModuleOp> tfr_module) {
return std::make_unique<DecomposeTFOpsPass>(tfr_module);
}
diff --git a/tensorflow/compiler/mlir/tfr/passes/passes.h b/tensorflow/compiler/mlir/tfr/passes/passes.h
index 748534d..3f8736a 100644
--- a/tensorflow/compiler/mlir/tfr/passes/passes.h
+++ b/tensorflow/compiler/mlir/tfr/passes/passes.h
@@ -28,10 +28,11 @@
// Scans the func op and adds all the canonicalization patterns of the ops
// except the tf ops, inside the function.
-void populateCanonicalizationPatterns(FuncOp func, RewritePatternSet &patterns);
+void populateCanonicalizationPatterns(func::FuncOp func,
+ RewritePatternSet &patterns);
// Decompose ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateDecomposeTFOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateDecomposeTFOpsPass(
llvm::Optional<ModuleOp> tfr_module = llvm::None);
// Rewrites quantized operands and results with their storage types.
@@ -40,7 +41,7 @@
std::unique_ptr<OperationPass<ModuleOp>> CreateRewriteQuantizedIOPass();
// Raise to TF ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateRaiseToTFOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateRaiseToTFOpsPass(
llvm::Optional<ModuleOp> tfr_module = llvm::None,
bool materialize_derived_attrs = false);
diff --git a/tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc b/tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
index d50b4ee..e5d3b42 100644
--- a/tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
+++ b/tensorflow/compiler/mlir/tfr/passes/raise_to_tf.cc
@@ -465,7 +465,7 @@
// Raise TFR call ops to the TF ops.
class RaiseToTFOpsPass
- : public PassWrapper<RaiseToTFOpsPass, OperationPass<FuncOp>> {
+ : public PassWrapper<RaiseToTFOpsPass, OperationPass<func::FuncOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(RaiseToTFOpsPass)
@@ -493,7 +493,7 @@
};
void RaiseToTFOpsPass::runOnOperation() {
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
MLIRContext* ctx = &getContext();
SymbolTable table(external_tfr_module_.hasValue()
? *external_tfr_module_
@@ -509,7 +509,7 @@
} // namespace
// Creates an instance of the pass to raise TFR call ops to the TF ops.
-std::unique_ptr<OperationPass<FuncOp>> CreateRaiseToTFOpsPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateRaiseToTFOpsPass(
llvm::Optional<ModuleOp> tfr_module, bool materialize_derived_attrs) {
return std::make_unique<RaiseToTFOpsPass>(tfr_module,
materialize_derived_attrs);
diff --git a/tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc b/tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
index a2fdc47..07c8a8b 100644
--- a/tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
+++ b/tensorflow/compiler/mlir/tfr/passes/rewrite_quantized_io.cc
@@ -45,7 +45,7 @@
void RewriteQuantizedIOPass::runOnOperation() {
ModuleOp module = getOperation();
OpBuilder builder(module);
- module.walk([&](FuncOp func) {
+ module.walk([&](func::FuncOp func) {
Block& block = func.front();
Operation* terminator = block.getTerminator();
diff --git a/tensorflow/compiler/mlir/tfrt/jit/opdefs/tf_jitrt_ops.cc b/tensorflow/compiler/mlir/tfrt/jit/opdefs/tf_jitrt_ops.cc
index 2b3cfaf..8c613ee 100644
--- a/tensorflow/compiler/mlir/tfrt/jit/opdefs/tf_jitrt_ops.cc
+++ b/tensorflow/compiler/mlir/tfrt/jit/opdefs/tf_jitrt_ops.cc
@@ -99,7 +99,8 @@
Operation* self = getOperation();
// Find the referenced kernel function.
- auto kernel_fn = SymbolTable::lookupNearestSymbolFrom<FuncOp>(self, kernel());
+ auto kernel_fn =
+ SymbolTable::lookupNearestSymbolFrom<func::FuncOp>(self, kernel());
if (!kernel_fn) return 1;
int64_t cost = 0;
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/corert_converter.cc b/tensorflow/compiler/mlir/tfrt/transforms/corert_converter.cc
index 1d3cb0b..1e3b414 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/corert_converter.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/corert_converter.cc
@@ -167,7 +167,7 @@
ConversionPatternRewriter::InsertionGuard insertion_guard(*rewriter);
rewriter->setInsertionPointToStart(block);
- FuncOp func_op = op->getParentOfType<mlir::func::FuncOp>();
+ func::FuncOp func_op = op->getParentOfType<mlir::func::FuncOp>();
mlir::Value in_chain = func_op.getArgument(0);
auto get_op_handler_op = rewriter->create<tfrt::corert::GetOpHandler>(
block->getParent()->getLoc(), op_handler_type(), in_chain,
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/cross_device_transfer.cc b/tensorflow/compiler/mlir/tfrt/transforms/cross_device_transfer.cc
index 59c7c75..2b1e29c 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/cross_device_transfer.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/cross_device_transfer.cc
@@ -92,7 +92,7 @@
}
// Return the device of the given value.
-static std::string GetDevice(mlir::Value value, FuncOp parent_func_op) {
+static std::string GetDevice(mlir::Value value, func::FuncOp parent_func_op) {
std::string device = "";
if (BlockArgument block_arg = value.dyn_cast<BlockArgument>()) {
if (StringAttr device_attr = parent_func_op.getArgAttrOfType<StringAttr>(
@@ -107,7 +107,7 @@
}
struct CrossDeviceTransferPass
- : public PassWrapper<CrossDeviceTransferPass, OperationPass<FuncOp>> {
+ : public PassWrapper<CrossDeviceTransferPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(CrossDeviceTransferPass)
void runOnOperation() override;
@@ -123,7 +123,7 @@
};
void CrossDeviceTransferPass::runOnOperation() {
- FuncOp func_op = getOperation();
+ func::FuncOp func_op = getOperation();
llvm::DenseMap<mlir::Value, llvm::StringMap<mlir::Value>>
transferred_value_by_value_and_device;
@@ -182,7 +182,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateCrossDeviceTransferPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateCrossDeviceTransferPass() {
return std::make_unique<CrossDeviceTransferPass>();
}
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/deduplicate_batch_function.cc b/tensorflow/compiler/mlir/tfrt/transforms/deduplicate_batch_function.cc
index 28145bf..970fa7a 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/deduplicate_batch_function.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/deduplicate_batch_function.cc
@@ -44,10 +44,9 @@
using ::mlir::SymbolTable;
using ::mlir::SymbolTableCollection;
using ::mlir::SymbolUserMap;
-using ::mlir::func::FuncOp;
// This only includes some preliminary checks as this is a short term solution.
-bool AreEquivalent(FuncOp& lhs, FuncOp& rhs) {
+bool AreEquivalent(mlir::func::FuncOp& lhs, mlir::func::FuncOp& rhs) {
if (lhs.getFunctionType() != rhs.getFunctionType()) return false;
for (auto arg_pair : llvm::zip(lhs.getArguments(), rhs.getArguments())) {
@@ -118,9 +117,11 @@
SymbolUserMap symbol_users(symbol_table_collection, module);
// Categorize the functions invoked by BatchFunctionOp by its shared_name.
- llvm::StringMap<llvm::SmallVector<FuncOp, 2>> shared_name_to_func_ops;
+ llvm::StringMap<llvm::SmallVector<mlir::func::FuncOp, 2>>
+ shared_name_to_func_ops;
- for (auto func : llvm::make_early_inc_range(module.getOps<FuncOp>())) {
+ for (auto func :
+ llvm::make_early_inc_range(module.getOps<mlir::func::FuncOp>())) {
ArrayRef<Operation*> users = symbol_users.getUsers(func);
llvm::StringRef shared_name;
// Deduplicate the function only if all users are BatchFunctionOp and have
@@ -141,8 +142,8 @@
for (auto& it : shared_name_to_func_ops) {
auto& func_ops = it.second;
- FuncOp& func_op_to_keep = func_ops.front();
- for (FuncOp& func_op_to_remove : llvm::drop_begin(func_ops)) {
+ mlir::func::FuncOp& func_op_to_keep = func_ops.front();
+ for (mlir::func::FuncOp& func_op_to_remove : llvm::drop_begin(func_ops)) {
if (!AreEquivalent(func_op_to_keep, func_op_to_remove)) {
return func_op_to_remove.emitError(
"func_ops for BatchFunctionOp with the same shared name are "
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/lmhlo_to_gpu/lmhlo_to_gpu_while.cc b/tensorflow/compiler/mlir/tfrt/transforms/lmhlo_to_gpu/lmhlo_to_gpu_while.cc
index e14b449..01fb68e 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/lmhlo_to_gpu/lmhlo_to_gpu_while.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/lmhlo_to_gpu/lmhlo_to_gpu_while.cc
@@ -135,7 +135,7 @@
};
// Insert while_cond function.
- rewriter.setInsertionPoint(while_op->getParentOfType<FuncOp>());
+ rewriter.setInsertionPoint(while_op->getParentOfType<func::FuncOp>());
auto cond_func_type = rewriter.getFunctionType(argument_types, i1_type);
auto cond_func = rewriter.create<func::FuncOp>(while_op.cond().getLoc(),
"while_cond", cond_func_type);
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/remote_run_encapsulate.cc b/tensorflow/compiler/mlir/tfrt/transforms/remote_run_encapsulate.cc
index ebff0e0..82e893e 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/remote_run_encapsulate.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/remote_run_encapsulate.cc
@@ -70,11 +70,11 @@
}
};
-LogicalResult EncapsulateFuncAndSerialize(FuncOp entry_func,
+LogicalResult EncapsulateFuncAndSerialize(func::FuncOp entry_func,
std::string* serialized_func_module) {
ModuleOp module = entry_func->getParentOfType<ModuleOp>();
SymbolTable entry_module_table(module);
- SmallVector<FuncOp, 4> referenced({entry_func});
+ SmallVector<func::FuncOp, 4> referenced({entry_func});
// Create a new module to hold func and all referenced functions.
OwningOpRef<mlir::ModuleOp> module_for_func =
@@ -82,10 +82,10 @@
SymbolTable symbol_table(module_for_func.get());
while (!referenced.empty()) {
- FuncOp func = referenced.pop_back_val();
+ func::FuncOp func = referenced.pop_back_val();
// Skip functions that have already been cloned into new module.
- if (symbol_table.lookup<FuncOp>(func.getName())) continue;
+ if (symbol_table.lookup<func::FuncOp>(func.getName())) continue;
// Find any SymbolRefAttr in func that maps to a FuncOp. We need to clone
// all found FuncOps to new_module to make sure new_module is
@@ -93,7 +93,7 @@
Optional<SymbolTable::UseRange> uses = SymbolTable::getSymbolUses(func);
assert(uses && "expected to be able to collect symbol uses");
for (SymbolTable::SymbolUse use : *uses) {
- FuncOp referenced_func = entry_module_table.lookup<FuncOp>(
+ func::FuncOp referenced_func = entry_module_table.lookup<func::FuncOp>(
use.getSymbolRef().cast<FlatSymbolRefAttr>().getValue());
// Skip Symbols that do not map to a function.
@@ -102,7 +102,7 @@
referenced.emplace_back(referenced_func);
}
- FuncOp clone = func.clone();
+ func::FuncOp clone = func.clone();
if (clone.getName() == entry_func.getName()) {
clone.setPublic();
} else {
@@ -125,7 +125,7 @@
Type tensor_handle_ty = tfrt::corert::TensorHandleType::get(&getContext());
module.walk([&](tfrt::dist::RemoteExecuteFuncOp remote_exec_op) {
FlatSymbolRefAttr callee_sym = remote_exec_op.calleeAttr();
- FuncOp callee = symtab.lookup<FuncOp>(callee_sym.getValue());
+ func::FuncOp callee = symtab.lookup<func::FuncOp>(callee_sym.getValue());
if (!callee) {
remote_exec_op.emitOpError("callee function ")
<< callee_sym.getValue() << " is not found";
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc
index b39e605..c08df19 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt.cc
@@ -486,7 +486,7 @@
rewriter.getType<tfrt::dist::RemoteObjectIdType>();
ModuleOp module = op->getParentOfType<ModuleOp>();
SymbolTable symtab(module);
- FuncOp callee = symtab.lookup<FuncOp>(op.callee());
+ func::FuncOp callee = symtab.lookup<func::FuncOp>(op.callee());
if (!callee) {
op.emitOpError("callee function ") << op.callee() << " is not found";
return failure();
@@ -1487,7 +1487,8 @@
target->addIllegalDialect<tf_device::TensorFlowDeviceDialect>();
target->addIllegalDialect<tfrt::jitrt::JitRuntimeDialect>();
target->addDynamicallyLegalOp<mlir::func::FuncOp>([func_type_converter,
- chain_type](FuncOp op) {
+ chain_type](
+ func::FuncOp op) {
auto func_type = op.getFunctionType();
if (func_type.getNumInputs() == 0 || func_type.getInput(0) != chain_type)
return false;
@@ -1967,7 +1968,7 @@
private:
struct CompiledModule {
ModuleOp module;
- FuncOp entrypoint;
+ func::FuncOp entrypoint;
llvm::SetVector<Value> operands;
};
@@ -1990,7 +1991,7 @@
// Mapping from the outlined module string representation to the module itself
// and an entrypoint function. Used to deduplicate identical modules during
// the `tf_device.cluster` outlining.
- llvm::StringMap<std::pair<ModuleOp, FuncOp>> outlined_;
+ llvm::StringMap<std::pair<ModuleOp, func::FuncOp>> outlined_;
};
OutlineJitRtClustersPass::CompiledModule
@@ -2021,7 +2022,7 @@
// Create a function in the compiled module.
auto compiled_func_type =
FunctionType::get(ctx, operand_types, cluster->getResultTypes());
- auto compiled_func = FuncOp::create(loc, "compute", compiled_func_type);
+ auto compiled_func = func::FuncOp::create(loc, "compute", compiled_func_type);
compiled_module_symbol_table.insert(compiled_func);
// Replace uses of live-in values within cluster region with block arguments.
@@ -2075,7 +2076,7 @@
LogicalResult OutlineJitRtClustersPass::SetEntrypointConstraints(
CompiledModule &compiled) {
- FuncOp func = compiled.entrypoint;
+ func::FuncOp func = compiled.entrypoint;
// Functions outlined from jitrt device clusters must have a single block.
assert(func.getBody().getBlocks().size() == 1 && "expected single block");
@@ -2110,7 +2111,7 @@
CompiledModule compiled_module =
CreateCompiledModule(cluster, max_arg_size, symbol_table);
- FuncOp compiled_func = compiled_module.entrypoint;
+ func::FuncOp compiled_func = compiled_module.entrypoint;
// Add constraints to the entrypoint arguments.
if (failed(SetEntrypointConstraints(compiled_module))) return failure();
diff --git a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt_data.cc b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt_data.cc
index 61e7928..b251511 100644
--- a/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt_data.cc
+++ b/tensorflow/compiler/mlir/tfrt/transforms/tf_to_tfrt_data.cc
@@ -250,13 +250,13 @@
target.addLegalDialect<tfrt::data::DataDialect>();
target.addLegalDialect<tfrt::compiler::TFRTDialect>();
target.addDynamicallyLegalOp<mlir::func::FuncOp>(
- [&data_converter](FuncOp op) {
+ [&data_converter](func::FuncOp op) {
return data_converter.isSignatureLegal(op.getFunctionType());
});
mlir::RewritePatternSet patterns(&getContext());
patterns.add<RangeDatasetOpConversion, BatchDatasetV2OpConversion,
ConstOpConversion, ReturnOpConversion>(context);
- mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(
+ mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
patterns, data_converter);
auto result =
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/buffer_reuse_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/buffer_reuse_pass.cc
index c7df17c..42fd620 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/buffer_reuse_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/buffer_reuse_pass.cc
@@ -47,7 +47,7 @@
class BufferReuseAnalysis {
public:
- explicit BufferReuseAnalysis(FuncOp f) { build(f); }
+ explicit BufferReuseAnalysis(func::FuncOp f) { build(f); }
static constexpr int32_t kIndexAmbiguous = -1;
@@ -64,13 +64,13 @@
}
private:
- void build(FuncOp &f) {
+ void build(func::FuncOp &f) {
BufferViewFlowAnalysis aliases(f);
find_output_indices(f, aliases);
find_reuse_candiates(f, aliases);
}
- void find_output_indices(FuncOp &f, BufferViewFlowAnalysis &aliases) {
+ void find_output_indices(func::FuncOp &f, BufferViewFlowAnalysis &aliases) {
f.walk([&](memref::AllocOp alloc_op) {
int32_t output_index = kIndexAmbiguous;
int count_return_uses = 0;
@@ -90,7 +90,7 @@
});
}
- void find_reuse_candiates(FuncOp &f, BufferViewFlowAnalysis &aliases) {
+ void find_reuse_candiates(func::FuncOp &f, BufferViewFlowAnalysis &aliases) {
Liveness liveness(f);
f.walk([&](Block *block) {
find_reuse_candiates(block, aliases, liveness.getLiveness(block),
@@ -173,7 +173,7 @@
return first_use;
}
- std::vector<Value> get_buffer_arguments(FuncOp &f) {
+ std::vector<Value> get_buffer_arguments(func::FuncOp &f) {
std::vector<Value> buffer_arguments;
for (BlockArgument arg : f.getArguments()) {
if (arg.getType().isa<BaseMemRefType>()) buffer_arguments.push_back(arg);
@@ -263,7 +263,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateBufferReusePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateBufferReusePass() {
return std::make_unique<BufferReusePass>();
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/bufferize_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/bufferize_pass.cc
index 06dcc8c..b9d2f23 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/bufferize_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/bufferize_pass.cc
@@ -324,7 +324,7 @@
tf_framework::TFFrameworkDialect, AffineDialect, shape::ShapeDialect,
lmhlo::LmhloDialect, linalg::LinalgDialect, math::MathDialect,
vector::VectorDialect>();
- target.addLegalOp<FuncOp, ModuleOp>();
+ target.addLegalOp<func::FuncOp, ModuleOp>();
target.addIllegalDialect<mhlo::MhloDialect>();
target.addIllegalOp<tensor::GenerateOp, tensor::ExtractOp,
@@ -358,7 +358,7 @@
return std::make_unique<ComputeOpAndFuncBufferizePass>();
}
-std::unique_ptr<OperationPass<FuncOp>> CreateTiledLoopBufferizePass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTiledLoopBufferizePass() {
return std::make_unique<TiledLoopBufferizePass>();
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/convert_to_signless_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/convert_to_signless_pass.cc
index 544b8e5..a3655b8 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/convert_to_signless_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/convert_to_signless_pass.cc
@@ -82,15 +82,15 @@
return converter.isLegal(op->getOperandTypes()) &&
converter.isLegal(op->getResultTypes());
});
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType());
});
RewritePatternSet patterns(&getContext());
patterns.add<ConvertToSignless>(converter, &context);
// FuncOp is special as it has type encoding via attributes.
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
- converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
auto module = getOperation();
if (failed(applyFullConversion(module, target, std::move(patterns)))) {
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/copy_cleanup_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/copy_cleanup_pass.cc
index 2586a55..9d17ea7 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/copy_cleanup_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/copy_cleanup_pass.cc
@@ -39,7 +39,7 @@
// Handles the pattern where an input operand of a linalg generic is copied
// even though the producer is not mutated.
-void RemoveCopyIfTargetOnlyRead(FuncOp func) {
+void RemoveCopyIfTargetOnlyRead(func::FuncOp func) {
llvm::SmallVector<memref::AllocOp, 8> allocs_to_remove;
llvm::SmallVector<memref::CopyOp, 8> copies_to_remove;
@@ -124,7 +124,7 @@
// Handles the case where the last instructions of a function implements a copy
// back to a function argument.
-void RemoveCopyIfTargetIsFunctionArg(FuncOp func) {
+void RemoveCopyIfTargetIsFunctionArg(func::FuncOp func) {
// For now only support this on functions with a single block.
if (!func.getBody().hasOneBlock()) return;
@@ -136,7 +136,7 @@
if (auto copy = dyn_cast<memref::CopyOp>(op)) {
auto block_arg = copy.getTarget().dyn_cast<BlockArgument>();
if (!block_arg) break;
- if (!isa<FuncOp>(block_arg.getOwner()->getParentOp()) ||
+ if (!isa<func::FuncOp>(block_arg.getOwner()->getParentOp()) ||
!block_arg.hasOneUse())
break;
auto alloc = copy.getSource().getDefiningOp<memref::AllocOp>();
@@ -165,7 +165,7 @@
}
};
-std::unique_ptr<OperationPass<FuncOp>> CreateCopyCleanupPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateCopyCleanupPass() {
return std::make_unique<CopyCleanupPass>();
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_memref_prints.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_memref_prints.cc
index 82701db..4f8c84e 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_memref_prints.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_memref_prints.cc
@@ -53,11 +53,11 @@
"Did not find a print function for the element type");
auto caller_func =
- b->getInsertionBlock()->getParent()->getParentOfType<FuncOp>();
+ b->getInsertionBlock()->getParent()->getParentOfType<func::FuncOp>();
auto func_name_attr = b->getStringAttr(func_name);
- auto callee_func =
- SymbolTable::lookupNearestSymbolFrom<FuncOp>(caller_func, func_name_attr);
+ auto callee_func = SymbolTable::lookupNearestSymbolFrom<func::FuncOp>(
+ caller_func, func_name_attr);
if (!callee_func) {
OpBuilder::InsertionGuard insertGuard(*b);
@@ -65,7 +65,8 @@
b->setInsertionPointToStart(module.getBody());
auto func_type = FunctionType::get(b->getContext(), arg.getType(),
/*results=*/llvm::None);
- callee_func = b->create<FuncOp>(module.getLoc(), func_name, func_type);
+ callee_func =
+ b->create<func::FuncOp>(module.getLoc(), func_name, func_type);
callee_func.setPrivate();
}
return b->create<func::CallOp>(loc, callee_func, arg);
@@ -153,7 +154,7 @@
: public EmbedMemRefPrintsPassBase<EmbedMemRefPrintsPass> {
void runOnOperation() override {
ModuleOp module = getOperation();
- module.walk([&](FuncOp func) {
+ module.walk([&](func::FuncOp func) {
if (func.isDeclaration()) return;
Block* body = &func.getBody().front();
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework.cc
index e2f5695..228860e 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework.cc
@@ -29,12 +29,12 @@
namespace {
// Prepends argument type list of the function with an OpKernelContextType arg.
-class FuncOpConverter : public OpConversionPattern<FuncOp> {
+class FuncOpConverter : public OpConversionPattern<func::FuncOp> {
public:
- using OpConversionPattern<FuncOp>::OpConversionPattern;
+ using OpConversionPattern<func::FuncOp>::OpConversionPattern;
LogicalResult matchAndRewrite(
- FuncOp func, OpAdaptor /*adaptor*/,
+ func::FuncOp func, OpAdaptor /*adaptor*/,
ConversionPatternRewriter &rewriter) const override {
// Convert function arguments using the provided TypeConverter.
auto func_type = func.getFunctionType();
@@ -57,7 +57,7 @@
};
llvm::Optional<Value> FindOpKernelContext(Operation *op) {
- auto func = op->getParentOfType<FuncOp>();
+ auto func = op->getParentOfType<func::FuncOp>();
if (func.getNumArguments() == 0) {
return llvm::None;
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework_pass.cc
index ef65d23..31572af 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/embed_tf_framework_pass.cc
@@ -33,7 +33,7 @@
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/kernel_gen_passes.h.inc"
bool IsNotInsideTfEntryFunction(Operation* op) {
- auto func = op->getParentOfType<FuncOp>();
+ auto func = op->getParentOfType<func::FuncOp>();
return !func->hasAttrOfType<UnitAttr>(TFFrameworkDialect::kTFEntryAttrName);
}
@@ -64,7 +64,7 @@
ConversionTarget target(getContext());
target.addLegalDialect<tf_framework::TFFrameworkDialect>();
- target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
+ target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
if (!op->hasAttrOfType<UnitAttr>(TFFrameworkDialect::kTFEntryAttrName)) {
return true;
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/rewrite_tf_framework_assert.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/rewrite_tf_framework_assert.cc
index 8fbe45c..d375c16 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/rewrite_tf_framework_assert.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/rewrite_tf_framework_assert.cc
@@ -41,7 +41,7 @@
Block *split_block = rewriter.splitBlock(
rewriter.getInsertionBlock(), std::next(rewriter.getInsertionPoint()));
- auto func = op->getParentOfType<FuncOp>();
+ auto func = op->getParentOfType<func::FuncOp>();
Block *error_reporting_block =
rewriter.createBlock(&func.getRegion(), {}, {});
rewriter.create<ReportErrorOp>(loc, adaptor.ctx(), adaptor.error_code(),
@@ -65,7 +65,7 @@
#include "tensorflow/compiler/mlir/tools/kernel_gen/transforms/kernel_gen_passes.h.inc"
bool IsNotInsideTfEntryFunction(Operation *op) {
- auto func = op->getParentOfType<FuncOp>();
+ auto func = op->getParentOfType<func::FuncOp>();
return !func->hasAttrOfType<UnitAttr>(TFFrameworkDialect::kTFEntryAttrName);
}
// All contained `tf_framework.assert` operations are rewritten into calls to
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/same_shape_propagation.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/same_shape_propagation.cc
index f7685f5..d6e82ee 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/same_shape_propagation.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/same_shape_propagation.cc
@@ -196,7 +196,7 @@
public:
/// Checks all operations for potential shape equality of their respective
/// results.
- void build(FuncOp function) {
+ void build(func::FuncOp function) {
function.walk([&](Operation *op) {
if (auto reshape = dyn_cast<memref::ReshapeOp>(op)) {
registerAssociation(ShapeValue{reshape.shape()}, reshape.result());
@@ -374,7 +374,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreatePropagateShapeKnowledgeToKernels() {
return std::make_unique<PropagateShapeKnowledgeToKernels>();
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tensorflow_abi_knowledge_propagation.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tensorflow_abi_knowledge_propagation.cc
index edcfb3d..218e039 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tensorflow_abi_knowledge_propagation.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tensorflow_abi_knowledge_propagation.cc
@@ -45,7 +45,7 @@
: public PropagateTfAbiKnowledgeToKernelsBase<
PropagateTfAbiKnowledgeToKernelsPass> {
void runOnOperation() override {
- FuncOp function = getOperation();
+ func::FuncOp function = getOperation();
llvm::SmallVector<Value, 4> worklist;
// We currently only handle entry functions and do not propagate across
// functions.
@@ -210,7 +210,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>>
+std::unique_ptr<OperationPass<func::FuncOp>>
CreatePropagateTfAbiKnowledgeToKernels() {
return std::make_unique<PropagateTfAbiKnowledgeToKernelsPass>();
}
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_to_jit_invocations.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_to_jit_invocations.cc
index e5e051c..e8aab38 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_to_jit_invocations.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/tf_to_jit_invocations.cc
@@ -183,7 +183,7 @@
OpBuilder tmp_module_builder(getContext(), rewriter.getListener());
auto jit_module = tmp_module_builder.create<ModuleOp>(loc);
tmp_module_builder.setInsertionPointToStart(jit_module.getBody());
- auto jit_function = tmp_module_builder.create<FuncOp>(
+ auto jit_function = tmp_module_builder.create<func::FuncOp>(
loc, tf_framework::JITCompileFromStrOp::kJITEntryFunctionName,
tmp_module_builder.getFunctionType(body->getArgumentTypes(),
yield_op->getOperandTypes()));
@@ -264,7 +264,8 @@
LogicalResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
- if (!IsUnaryTFOperation(op) || !llvm::isa<FuncOp>(op->getParentOp())) {
+ if (!IsUnaryTFOperation(op) ||
+ !llvm::isa<func::FuncOp>(op->getParentOp())) {
return failure();
}
@@ -345,7 +346,7 @@
index_64bit_if_jit_compiling, cpu_codegen);
}
-std::unique_ptr<OperationPass<FuncOp>> CreateTFToJITInvocationPass(
+std::unique_ptr<OperationPass<func::FuncOp>> CreateTFToJITInvocationPass(
llvm::ArrayRef<int64_t> tile_sizes, llvm::ArrayRef<int64_t> unroll_factors,
int64_t max_supported_rank, bool enable_ftz, bool index_64bit,
bool cpu_codegen, bool jit_i64_indexed_for_large_tensors) {
diff --git a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/vectorization_pass.cc b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/vectorization_pass.cc
index 2175124..9ced1f3 100644
--- a/tensorflow/compiler/mlir/tools/kernel_gen/transforms/vectorization_pass.cc
+++ b/tensorflow/compiler/mlir/tools/kernel_gen/transforms/vectorization_pass.cc
@@ -255,7 +255,7 @@
// is determined by confirming all consumers of all aliases are only creating an
// alias or writing data to an alias but never reading from or interacting with
// the memref in other ways.
-void RemoveDeadMemrefCode(FuncOp func) {
+void RemoveDeadMemrefCode(func::FuncOp func) {
BufferViewFlowAnalysis baa(func);
llvm::SmallSet<Operation *, 8> to_remove;
@@ -350,7 +350,7 @@
} // namespace
-std::unique_ptr<OperationPass<FuncOp>> CreateVectorizationPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateVectorizationPass() {
return std::make_unique<VectorizationPass>();
}
@@ -368,7 +368,7 @@
}
};
-std::unique_ptr<OperationPass<FuncOp>> CreateVectorizationCleanupPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreateVectorizationCleanupPass() {
return std::make_unique<VectorizationCleanupPass>();
}
diff --git a/tensorflow/compiler/mlir/tosa/transforms/convert_tfl_uint8.cc b/tensorflow/compiler/mlir/tosa/transforms/convert_tfl_uint8.cc
index 1e3f7aa..ce78ee6 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/convert_tfl_uint8.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/convert_tfl_uint8.cc
@@ -342,7 +342,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> createConvertTFLUint8Pass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createConvertTFLUint8Pass() {
return std::make_unique<ConvertUint8ToInt8>();
}
diff --git a/tensorflow/compiler/mlir/tosa/transforms/fuse_bias_tf.cc b/tensorflow/compiler/mlir/tosa/transforms/fuse_bias_tf.cc
index d6bc763..1f7abe8 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/fuse_bias_tf.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/fuse_bias_tf.cc
@@ -119,7 +119,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> createFuseBiasTFPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createFuseBiasTFPass() {
return std::make_unique<FuseBiasTF>();
}
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
index 110cc60..df2c45e 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf.cc
@@ -2352,7 +2352,7 @@
}
// Creates an instance of the TensorFlow dialect LegalizeTF pass.
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFPass() {
return std::make_unique<LegalizeTF>();
}
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf_tfl.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf_tfl.cc
index 3dfc4f8..c54f57c 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_tf_tfl.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_tf_tfl.cc
@@ -47,7 +47,7 @@
populateLegalizeTFPatterns(ctx, patterns);
populateLegalizeTFLPatterns(ctx, patterns);
- FuncOp func = getOperation();
+ func::FuncOp func = getOperation();
if (ApplyPatternsWithShapeResolution(func, std::move(patterns)).failed()) {
signalPassFailure();
}
@@ -56,7 +56,7 @@
} // anonymous namespace
// Creates an instance of the TensorFlow Lite dialect LegalizeTFL pass.
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFTFLPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFTFLPass() {
return std::make_unique<LegalizeTFTFL>();
}
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
index aa0c02c..79c68d2 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_tfl.cc
@@ -3354,7 +3354,7 @@
}
// Creates an instance of the TensorFlow Lite dialect LegalizeTFL pass.
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFLPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFLPass(
ArrayRef<std::string> disabled_patterns,
ArrayRef<std::string> enabled_patterns) {
return std::make_unique<LegalizeTFL>(disabled_patterns, enabled_patterns);
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc b/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc
index f987749..e77fa22 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.cc
@@ -542,7 +542,7 @@
}
LogicalResult ApplyPatternsWithShapeResolution(
- FuncOp func, const FrozenRewritePatternSet& patterns) {
+ func::FuncOp func, const FrozenRewritePatternSet& patterns) {
// We use top-down traversal so that shape inference can fully infer types
// during pattern rewrite.
GreedyRewriteConfig config;
@@ -567,7 +567,7 @@
// the FuncOp type.
IRRewriter rewriter(func.getContext());
func.walk([&](func::ReturnOp op) {
- FuncOp parent = dyn_cast<FuncOp>(op->getParentOp());
+ func::FuncOp parent = dyn_cast<func::FuncOp>(op->getParentOp());
if (parent != func) return;
rewriter.setInsertionPoint(op);
diff --git a/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h b/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h
index cc77cfb..0bb4973 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h
+++ b/tensorflow/compiler/mlir/tosa/transforms/legalize_utils.h
@@ -128,7 +128,7 @@
// means patterns can performed shape inference while not altering immutable
// types.
LogicalResult ApplyPatternsWithShapeResolution(
- FuncOp func, const FrozenRewritePatternSet& patterns);
+ func::FuncOp func, const FrozenRewritePatternSet& patterns);
// Creates a TOSA operation and performs shape inference on the individual
// op. This allows shape inference during the TFLite to TOSA lowering.
diff --git a/tensorflow/compiler/mlir/tosa/transforms/passes.h b/tensorflow/compiler/mlir/tosa/transforms/passes.h
index 1cecb02..74680ea 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/passes.h
+++ b/tensorflow/compiler/mlir/tosa/transforms/passes.h
@@ -29,20 +29,20 @@
void populateLegalizeTFPatterns(MLIRContext* ctx, RewritePatternSet& patterns);
void populateLegalizeTFLPatterns(MLIRContext* ctx, RewritePatternSet& patterns);
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFPass();
-std::unique_ptr<OperationPass<FuncOp>> createFuseBiasTFPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createFuseBiasTFPass();
// `disabledPatterns` is a set of labels used to filter out input patterns with
// a debug label or debug name in this set.
// `enabledPatterns` is a set of labels used to filter out input patterns that
// do not have one of the labels in this set.
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFLPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFLPass(
ArrayRef<std::string> disabled_patterns = llvm::None,
ArrayRef<std::string> enabled_patterns = llvm::None);
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFTFLPass();
-std::unique_ptr<OperationPass<FuncOp>> createConvertTFLUint8Pass();
-std::unique_ptr<OperationPass<FuncOp>> createStripQuantTypesPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFTFLPass();
+std::unique_ptr<OperationPass<func::FuncOp>> createConvertTFLUint8Pass();
+std::unique_ptr<OperationPass<func::FuncOp>> createStripQuantTypesPass();
#define GEN_PASS_REGISTRATION
#include "tensorflow/compiler/mlir/tosa/transforms/passes.h.inc"
diff --git a/tensorflow/compiler/mlir/tosa/transforms/strip_quant_types.cc b/tensorflow/compiler/mlir/tosa/transforms/strip_quant_types.cc
index ed37a01..94997d8 100644
--- a/tensorflow/compiler/mlir/tosa/transforms/strip_quant_types.cc
+++ b/tensorflow/compiler/mlir/tosa/transforms/strip_quant_types.cc
@@ -95,7 +95,7 @@
Operation* op, ArrayRef<Value> operands,
ConversionPatternRewriter& rewriter) const override {
llvm::SmallVector<Type, 4> newResults;
- if (isa<FuncOp>(op)) {
+ if (isa<func::FuncOp>(op)) {
return failure();
}
@@ -131,7 +131,7 @@
target.addIllegalDialect<quant::QuantizationDialect>();
// Operations are legal if they don't contain any illegal type.
target.markUnknownOpDynamicallyLegal([](Operation* op) {
- if (auto funcOp = dyn_cast<FuncOp>(op)) {
+ if (auto funcOp = dyn_cast<func::FuncOp>(op)) {
for (Type type : funcOp.getFunctionType().getInputs()) {
if (isIllegalType(type)) return false;
}
@@ -153,7 +153,8 @@
RewritePatternSet patterns(&getContext());
patterns.add<GenericTypeConvert>(ctx, converter);
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns, converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
if (failed(applyFullConversion(func, target, std::move(patterns)))) {
signalPassFailure();
@@ -162,7 +163,7 @@
} // anonymous namespace
-std::unique_ptr<OperationPass<FuncOp>> createStripQuantTypesPass() {
+std::unique_ptr<OperationPass<func::FuncOp>> createStripQuantTypesPass() {
return std::make_unique<StripQuantTypes>();
}
} // namespace tosa
diff --git a/tensorflow/compiler/mlir/xla/hlo_function_importer.cc b/tensorflow/compiler/mlir/xla/hlo_function_importer.cc
index 07d39b3..b44e3b4 100644
--- a/tensorflow/compiler/mlir/xla/hlo_function_importer.cc
+++ b/tensorflow/compiler/mlir/xla/hlo_function_importer.cc
@@ -254,7 +254,7 @@
return importer.ImportAsRegion(computation, region, flatten_region_arg_tuple);
}
-StatusOr<mlir::func::FuncOp> HloFunctionImporter::ImportAsFunc(
+StatusOr<FuncOp> HloFunctionImporter::ImportAsFunc(
const HloComputation& computation) {
auto& imported = (*function_map_)[&computation];
if (imported) return imported;
@@ -270,8 +270,8 @@
// Construct the MLIR function and map arguments.
llvm::ArrayRef<mlir::NamedAttribute> attrs;
- auto function = mlir::func::FuncOp::create(
- mlir::UnknownLoc::get(context_), computation_name, func_type, attrs);
+ auto function = FuncOp::create(mlir::UnknownLoc::get(context_),
+ computation_name, func_type, attrs);
auto visibility = computation_name == "main" ? FuncOp::Visibility::Public
: FuncOp::Visibility::Private;
function.setVisibility(visibility);
diff --git a/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc b/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
index 8837771..3790326 100644
--- a/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
+++ b/tensorflow/compiler/mlir/xla/mlir_hlo_to_hlo.cc
@@ -552,7 +552,7 @@
return module_.emitError(
"conversion requires module with `main` function");
- for (auto func : module_.getOps<FuncOp>()) {
+ for (auto func : module_.getOps<func::FuncOp>()) {
if (func.empty()) continue;
if (failed(RunOnFunction(func))) return failure();
}
@@ -595,12 +595,12 @@
// Look up a symbol with the specified name, returning null if no such name
// exists.
- FuncOp LookUpSymbol(FlatSymbolRefAttr symbol) {
+ func::FuncOp LookUpSymbol(FlatSymbolRefAttr symbol) {
return module_.lookupSymbol<mlir::func::FuncOp>(symbol);
}
// Get Reference to lowered XLA computation for a function.
- xla::XlaComputation& GetLoweredComputation(FuncOp func) {
+ xla::XlaComputation& GetLoweredComputation(func::FuncOp func) {
return lowered_computation_[func];
}
diff --git a/tensorflow/compiler/mlir/xla/transforms/adjust_layout.cc b/tensorflow/compiler/mlir/xla/transforms/adjust_layout.cc
index 7968dbb..8e29b56 100644
--- a/tensorflow/compiler/mlir/xla/transforms/adjust_layout.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/adjust_layout.cc
@@ -128,7 +128,8 @@
}
namespace {
-class AdjustLayout : public PassWrapper<AdjustLayout, OperationPass<FuncOp>> {
+class AdjustLayout
+ : public PassWrapper<AdjustLayout, OperationPass<func::FuncOp>> {
void getDependentDialects(DialectRegistry ®istry) const override {
registry.insert<mhlo::MhloDialect>();
}
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
index 8029359..93532e7 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf.cc
@@ -854,7 +854,7 @@
SymbolRefAttr func) {
auto module = op->getParentOfType<ModuleOp>();
auto function =
- dyn_cast_or_null<FuncOp>(SymbolTable::lookupSymbolIn(module, func));
+ dyn_cast_or_null<func::FuncOp>(SymbolTable::lookupSymbolIn(module, func));
FunctionType function_ty = function.getFunctionType();
for (auto arg_in : llvm::zip(args, function_ty.getInputs())) {
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_communication.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_communication.cc
index f12fcb4..bdbf738 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_communication.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_communication.cc
@@ -45,6 +45,9 @@
#include "tensorflow/compiler/xla/side_effect_util.h"
namespace mlir {
+
+using func::FuncOp;
+
namespace mhlo {
namespace {
@@ -78,11 +81,11 @@
llvm::SmallPtrSetImpl<Block*>& control_flow_blocks) {
Block* block = op->getBlock();
Operation* parent = block->getParentOp();
- while (block && parent && !isa<FuncOp>(parent)) {
+ while (block && parent && !isa<func::FuncOp>(parent)) {
if (!IsControlFlowOp(parent))
return op->emitOpError()
<< "expects ancestor(s) to be of ['" << IfOp::getOperationName()
- << "', '" << FuncOp::getOperationName() << "']";
+ << "', '" << func::FuncOp::getOperationName() << "']";
if (!llvm::hasSingleElement(block->getParent()->getBlocks()))
return op->emitOpError() << "expects single block region ancestor(s)";
@@ -100,7 +103,7 @@
// `control_flow_blocks` will be populated with control flow op ancestors for
// every communication op.
LogicalResult FindCommunicationOps(
- FuncOp func, llvm::SmallPtrSetImpl<Operation*>& control_flow_ops,
+ func::FuncOp func, llvm::SmallPtrSetImpl<Operation*>& control_flow_ops,
llvm::SmallPtrSetImpl<Block*>& control_flow_blocks,
bool& has_communication_ops) {
auto result = func.walk([&](Operation* op) {
@@ -119,10 +122,10 @@
// (transitively), and an optional clone of itself. If `clone` is set, function
// calls to `original` will be replaced with `clone`.
struct FuncToRewrite {
- FuncOp original;
+ func::FuncOp original;
llvm::SmallPtrSet<Operation*, 4> control_flow_ops;
llvm::SmallPtrSet<Block*, 4> control_flow_blocks;
- FuncOp clone;
+ func::FuncOp clone;
};
// Finds all functions that need to be rewritten with communication ops and
@@ -131,8 +134,8 @@
ModuleOp module,
llvm::SmallDenseMap<StringRef, FuncToRewrite>& funcs_to_rewrite) {
// Find functions containing communication ops.
- SmallVector<FuncOp, 4> funcs_to_visit;
- for (FuncOp func : module.getOps<FuncOp>()) {
+ SmallVector<func::FuncOp, 4> funcs_to_visit;
+ for (func::FuncOp func : module.getOps<func::FuncOp>()) {
FuncToRewrite func_to_rewrite{/*original=*/func, /*control_flow_ops=*/{},
/*control_flow_blocks=*/{},
/*clone=*/nullptr};
@@ -149,15 +152,16 @@
// Find functions that call functions with communication ops, transitively.
while (!funcs_to_visit.empty()) {
- SmallVector<FuncOp, 4> new_funcs_to_visit;
- for (FuncOp& func : funcs_to_visit) {
+ SmallVector<func::FuncOp, 4> new_funcs_to_visit;
+ for (func::FuncOp& func : funcs_to_visit) {
auto uses = func.getSymbolUses(module);
if (!uses) continue;
for (auto& use : *uses) {
// Only `mlir::func::CallOp` is supported as this requires knowing how
// to rewrite arguments and results to a function.
if (!isa<mlir::func::CallOp>(use.getUser())) continue;
- auto caller_parent_func = use.getUser()->getParentOfType<FuncOp>();
+ auto caller_parent_func =
+ use.getUser()->getParentOfType<func::FuncOp>();
if (!caller_parent_func) continue;
FuncToRewrite func_to_rewrite{/*original=*/caller_parent_func,
@@ -771,7 +775,8 @@
// Updates function type based on current function body block arguments and
// terminator operand types.
-void UpdateFunctionType(OpBuilder& builder, FuncOp func, Block& func_body) {
+void UpdateFunctionType(OpBuilder& builder, func::FuncOp func,
+ Block& func_body) {
auto new_argument_types = llvm::to_vector(func_body.getArgumentTypes());
auto new_result_types =
llvm::to_vector(func_body.getTerminator()->getOperandTypes());
@@ -842,7 +847,7 @@
// rewrite arguments and results to a function.
auto it = funcs.find(call.getCallee());
if (it != funcs.end()) {
- FuncOp clone = it->getSecond().clone;
+ func::FuncOp clone = it->getSecond().clone;
Optional<StringRef> symbol_name =
clone ? Optional<StringRef>(clone.getName()) : llvm::None;
// If the function being called is to be cloned, update the call to also
@@ -908,7 +913,7 @@
// Collects all control flow op ancestors of communication ops or function calls
// with communication ops (transitively).
void GetCommunicationControlFlowOps(
- FuncOp func,
+ func::FuncOp func,
const llvm::SmallDenseMap<StringRef, FuncToRewrite>& funcs_to_rewrite,
llvm::SmallPtrSetImpl<Operation*>& control_flow_ops,
llvm::SmallPtrSetImpl<Block*>& control_flow_blocks) {
@@ -934,7 +939,7 @@
OpBuilder builder(&getContext());
for (const auto& func_and_name : funcs_to_rewrite) {
const auto& func_to_rewrite = func_and_name.getSecond();
- FuncOp func = func_to_rewrite.original;
+ func::FuncOp func = func_to_rewrite.original;
if (failed(RewriteFunction(builder, channel_id, module, func,
funcs_to_rewrite,
func_to_rewrite.control_flow_ops,
@@ -942,7 +947,7 @@
/*is_clone=*/false)))
return signalPassFailure();
- FuncOp clone = func_and_name.getSecond().clone;
+ func::FuncOp clone = func_and_name.getSecond().clone;
if (!clone) continue;
llvm::SmallPtrSet<Operation*, 4> clone_control_flow_ops;
llvm::SmallPtrSet<Block*, 4> clone_control_flow_blocks;
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_types.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_types.cc
index cff0c74..078ec73 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_types.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_types.cc
@@ -102,7 +102,7 @@
markUnknownOpDynamicallyLegal([this](Operation *op) {
// The FuncOp type can contain types that the op's operand and result
// types do not contain.
- if (auto func = dyn_cast<FuncOp>(op)) {
+ if (auto func = dyn_cast<func::FuncOp>(op)) {
if (!converter_.isSignatureLegal(func.getFunctionType())) return false;
}
return converter_.isLegal(op);
@@ -156,7 +156,8 @@
TfTypeConverter converter;
RewritePatternSet patterns(&getContext());
patterns.add<TfTypePattern>(&getContext(), converter);
- populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns, converter);
+ populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
+ converter);
TfTypeConversionTarget target(getContext(), converter);
if (failed(applyFullConversion(getOperation(), target, std::move(patterns))))
return signalPassFailure();
diff --git a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc
index f98f9d3..62188da 100644
--- a/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/legalize_tf_with_tf2xla.cc
@@ -760,7 +760,7 @@
/*legalize_test_only_ops=*/false);
}
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTfWithTf2XlaPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTfWithTf2XlaPass(
llvm::StringRef device_type, bool prefer_tf2xla) {
return std::make_unique<LegalizeTF>(device_type, prefer_tf2xla);
}
diff --git a/tensorflow/compiler/mlir/xla/transforms/mhlo_to_lhlo_with_xla.cc b/tensorflow/compiler/mlir/xla/transforms/mhlo_to_lhlo_with_xla.cc
index 367131e..2d5efb9 100644
--- a/tensorflow/compiler/mlir/xla/transforms/mhlo_to_lhlo_with_xla.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/mhlo_to_lhlo_with_xla.cc
@@ -1494,8 +1494,8 @@
// Create the function as () -> (), we'll compute the arguments from the
// buffer allocation and update the type then.
- auto func_op = FuncOp::create(builder_.getUnknownLoc(), function_name,
- builder_.getFunctionType({}, {}));
+ auto func_op = func::FuncOp::create(builder_.getUnknownLoc(), function_name,
+ builder_.getFunctionType({}, {}));
{
// This is an optional attribute used by the XLA backend. If the resulting
diff --git a/tensorflow/compiler/mlir/xla/transforms/outline_with_xla_framework.cc b/tensorflow/compiler/mlir/xla/transforms/outline_with_xla_framework.cc
index 7e8aa2e..127f412 100644
--- a/tensorflow/compiler/mlir/xla/transforms/outline_with_xla_framework.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/outline_with_xla_framework.cc
@@ -68,7 +68,7 @@
// }
struct OutlineXLAFunc : public RewritePattern {
explicit OutlineXLAFunc(MLIRContext *context, PatternBenefit benefit = 1)
- : RewritePattern(FuncOp::getOperationName(), benefit, context) {}
+ : RewritePattern(func::FuncOp::getOperationName(), benefit, context) {}
static void filterFuncAttributes(ArrayRef<NamedAttribute> attrs,
bool argAttrs,
@@ -77,7 +77,7 @@
if (attr.getName() == SymbolTable::getSymbolAttrName() ||
attr.getName() == FunctionOpInterface::getTypeAttrName() ||
attr.getName() == "std.varargs" ||
- (argAttrs && attr.getName() == FuncOp::getArgDictAttrName()))
+ (argAttrs && attr.getName() == func::FuncOp::getArgDictAttrName()))
continue;
result.push_back(attr);
}
@@ -85,7 +85,7 @@
LogicalResult matchAndRewrite(Operation *op,
PatternRewriter &rewriter) const override {
- auto func = dyn_cast<FuncOp>(op);
+ auto func = dyn_cast<func::FuncOp>(op);
auto ctx = rewriter.getContext();
auto loc = func.getLoc();
SmallVector<Location> locs(func.getFunctionType().getNumInputs(), loc);
@@ -113,9 +113,9 @@
// The wrapper function will have the same name but with _xla_framework
// appended and will be annotated with the attribute "xla_entry".
- auto outline_func =
- rewriter.create<FuncOp>(loc, func.getSymName().str() + "_xla_framework",
- func_type, attrs, arg_attrs);
+ auto outline_func = rewriter.create<func::FuncOp>(
+ loc, func.getSymName().str() + "_xla_framework", func_type, attrs,
+ arg_attrs);
outline_func->setAttr("outlined", BoolAttr::get(ctx, true));
outline_func->setAttr("xla_entry", BoolAttr::get(ctx, true));
auto *b = rewriter.createBlock(&outline_func.getBody(), {},
@@ -165,7 +165,7 @@
if (failed(applyPatternsAndFoldGreedily(m, std::move(patterns)))) {
signalPassFailure();
}
- m->walk([](FuncOp f) {
+ m->walk([](func::FuncOp f) {
if (f->hasAttr("outlined")) f->removeAttr("outlined");
});
}
diff --git a/tensorflow/compiler/mlir/xla/transforms/prepare_for_export.cc b/tensorflow/compiler/mlir/xla/transforms/prepare_for_export.cc
index fec0820..f727fa3 100644
--- a/tensorflow/compiler/mlir/xla/transforms/prepare_for_export.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/prepare_for_export.cc
@@ -161,7 +161,7 @@
});
}
-std::unique_ptr<OperationPass<FuncOp>> CreatePrepareForExport() {
+std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareForExport() {
return std::make_unique<PrepareForExportPass>();
}
diff --git a/tensorflow/compiler/mlir/xla/transforms/xla_framework_to_llvm_pass.cc b/tensorflow/compiler/mlir/xla/transforms/xla_framework_to_llvm_pass.cc
index 34ecea4..f377356 100644
--- a/tensorflow/compiler/mlir/xla/transforms/xla_framework_to_llvm_pass.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/xla_framework_to_llvm_pass.cc
@@ -77,8 +77,8 @@
// Convert to the expected function signature and offer unwrapping for each of
// the original arguments.
-struct BarePtrFuncOpConversion : public ConvertOpToLLVMPattern<FuncOp> {
- using ConvertOpToLLVMPattern<FuncOp>::ConvertOpToLLVMPattern;
+struct BarePtrFuncOpConversion : public ConvertOpToLLVMPattern<func::FuncOp> {
+ using ConvertOpToLLVMPattern<func::FuncOp>::ConvertOpToLLVMPattern;
Value LoadValue(ConversionPatternRewriter &rewriter, Location loc,
Value pointer, Value index) const {
@@ -91,7 +91,7 @@
}
mlir::func::FuncOp convertFuncOpToLLVMFuncOp(
- FuncOp funcOp, ConversionPatternRewriter &rewriter) const {
+ func::FuncOp funcOp, ConversionPatternRewriter &rewriter) const {
auto loc = funcOp.getLoc();
// This signature is predetermined by
@@ -204,7 +204,7 @@
}
LogicalResult matchAndRewrite(
- FuncOp funcOp, OpAdaptor,
+ func::FuncOp funcOp, OpAdaptor,
ConversionPatternRewriter &rewriter) const override {
// Only outline functions that are globally available.
if (!funcOp->hasAttr("xla_entry")) return failure();
diff --git a/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf.cc b/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf.cc
index 628843e..34fe09f 100644
--- a/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf.cc
@@ -307,7 +307,7 @@
if (!allow_partial_conversion) {
// Fully qualify ReturnOp here as mhlo dialect also defines a ReturnOp.
- target.addLegalOp<ModuleOp, FuncOp, ::mlir::func::ReturnOp>();
+ target.addLegalOp<ModuleOp, ::mlir::func::FuncOp, ::mlir::func::ReturnOp>();
DenseSet<Operation *> nonlegalized_ops;
LogicalResult result = applyPartialConversion(
op, target, std::move(patterns), &nonlegalized_ops);
@@ -338,7 +338,7 @@
} // end namespace
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFPass(
bool allow_partial_conversion, bool legalize_chlo,
llvm::Optional<StringRef> tf2xla_fallback_device_type, bool prefer_tf2xla) {
return std::make_unique<LegalizeTF>(allow_partial_conversion, legalize_chlo,
diff --git a/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf_no_fallback.cc b/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf_no_fallback.cc
index 2e60b79..9c2f83c 100644
--- a/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf_no_fallback.cc
+++ b/tensorflow/compiler/mlir/xla/transforms/xla_legalize_tf_no_fallback.cc
@@ -72,7 +72,7 @@
TF::PopulateTFLoweringBeforeHLOPatterns(context, &patterns);
if (!allow_partial_conversion_) {
// Fully qualify ReturnOp here as mhlo dialect also defines a ReturnOp.
- target.addLegalOp<ModuleOp, FuncOp, ::mlir::func::ReturnOp>();
+ target.addLegalOp<ModuleOp, func::FuncOp, ::mlir::func::ReturnOp>();
llvm::DenseSet<Operation *> nonlegalized_ops;
LogicalResult result = applyPartialConversion(
op, target, std::move(patterns), &nonlegalized_ops);
@@ -88,7 +88,7 @@
} // end namespace
-std::unique_ptr<OperationPass<FuncOp>> createLegalizeTFNoFallbackPass(
+std::unique_ptr<OperationPass<func::FuncOp>> createLegalizeTFNoFallbackPass(
bool allow_partial_conversion) {
return std::make_unique<LegalizeTFNoFallback>(allow_partial_conversion);
}