[NFC] Update TF MLIR tests to use the visibility keyword on functions instead of the sym_visibility attribute. This is in preparation for https://reviews.llvm.org/D94200
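
For reference, the rewrite applied throughout is purely syntactic. A minimal sketch on a hypothetical function @example (illustrative only, not taken from any test below):

  // Before: visibility expressed via the sym_visibility attribute.
  func @example(%arg0: tensor<f32>) -> tensor<f32> attributes {sym_visibility = "private"} {
    return %arg0 : tensor<f32>
  }

  // After: visibility spelled as a keyword on the op; functions with
  // sym_visibility = "nested" are rewritten to `func nested` the same way.
  func private @example(%arg0: tensor<f32>) -> tensor<f32> {
    return %arg0 : tensor<f32>
  }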

PiperOrigin-RevId: 350786273
Change-Id: Iacac85c76c204459e381e9dfe67ce97f686ca461
diff --git a/tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir b/tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir
index e79be58..52d6212 100644
--- a/tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir
+++ b/tensorflow/compiler/mlir/lite/tests/optimize_functional_ops.mlir
@@ -13,12 +13,12 @@
   return %3 : tensor<f32>
 }
 
-func @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = "tf.Add"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %0 : tensor<*xf32>
 }
 
-func @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = "tf.Sub"(%arg0, %arg1) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %0 : tensor<*xf32>
 }
@@ -40,23 +40,23 @@
   return %3 : tensor<f32>
 }
 
-func @addormul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @addormul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = constant dense<false> : tensor<i1>
   %1 = "tf.If"(%0, %arg1, %arg0) {else_branch = @mul, then_branch = @add, is_stateless = true} : (tensor<i1>, tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %1 : tensor<*xf32>
 }
 
-func @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @sub(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = "tf.Sub"(%arg0, %arg1) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %0 : tensor<*xf32>
 }
 
-func @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @add(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = "tf.Add"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %0 : tensor<*xf32>
 }
 
-func @mul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> attributes {sym_visibility = "private"} {
+func private @mul(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32>  {
   %0 = "tf.Multiply"(%arg0, %arg1): (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
   return %0 : tensor<*xf32>
 }
@@ -82,12 +82,12 @@
   return %4 : tensor<3x15x14x8xf32>
 }
 
-func @_functionalize_if_else_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %cst = constant dense<false> : tensor<i1>
   return %cst : tensor<i1>
 }
 
-func @_functionalize_if_then_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_00(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %cst = constant dense<true> : tensor<i1>
   return %cst : tensor<i1>
 }
@@ -115,12 +115,12 @@
   return %4 : tensor<3x15x14x8xf32>
 }
 
-func @_functionalize_if_else_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %cst = constant dense<false> : tensor<i1>
   return %cst : tensor<i1>
 }
 
-func @_functionalize_if_then_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_01(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %0 = "tf.blah"() : () -> tensor<i1>
   return %0 : tensor<i1>
 }
@@ -151,12 +151,12 @@
   return %4 : tensor<3x15x14x8xf32>
 }
 
-func @_functionalize_if_else_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_else_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %cst = constant dense<false> : tensor<i1>
   return %cst : tensor<i1>
 }
 
-func @_functionalize_if_then_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1> attributes {sym_visibility = "private"} {
+func private @_functionalize_if_then_branch_02(%arg0: tensor<*xi1>, %arg1: tensor<*xf32>, %arg2: tensor<*xf32>) -> tensor<i1>  {
   %0 = "tf.blah"() : () -> tensor<i1>
   return %0 : tensor<i1>
 }
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/executor_tpuv1_inline_tpu_island.mlir b/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/executor_tpuv1_inline_tpu_island.mlir
index b7bdf50..9f766db 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/executor_tpuv1_inline_tpu_island.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/executor_tpuv1_inline_tpu_island.mlir
@@ -35,11 +35,11 @@
   }
 // CHECK-NOT: _tpu_v1_compat_outlined
   module @_tpu_v1_compat_outlined {
-    func @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1> attributes {sym_visibility = "nested"} {
+    func nested @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> tensor<i1> {
       %0 = "tf.opA"(%arg0) : (tensor<i1>) -> tensor<i1>
       return %0 : tensor<i1>
     }
-    func @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>) attributes {sym_visibility = "nested"}  {
+    func nested @_tpu_v1_compat_outlined_func1(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<i1>, tensor<i32>) {
       %0 = "tf.opA"(%arg0) : (tensor<i1>) -> tensor<i1>
       %1 = "tf.opA"(%0) : (tensor<i1>) -> tensor<i1>
       %2 = "tf.SomeOp"(%arg0, %arg1) : (tensor<i1>, tensor<f32>) -> tensor<i32>
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/while_op.mlir b/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/while_op.mlir
index 6724033..ec1c879 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/while_op.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/executor_tpuv1_island_inlining/while_op.mlir
@@ -12,7 +12,7 @@
     return %0#0 : tensor<i32>
   }
   module @_tpu_v1_compat_outlined {
-    func @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) attributes {sym_visibility = "nested"} {
+    func nested @_tpu_v1_compat_outlined_func0(%arg0: tensor<i1>) -> (tensor<i32>, tensor<i32>, tensor<i32>, tensor<i32>) {
       "tf.TPUReplicateMetadata"() {_tpu_replicate = "cluster", device = "device", num_replicas = 1 : i64, topology = "topology"} : () -> ()
       %0 = "tf.opA"(%arg0) {_tpu_replicate = "cluster"} : (tensor<i1>) -> tensor<i32>
       %1 = "tf.While"(%0) {body = @while_body_with_cluster_attr, cond = @while_cond_with_cluster_attr, is_stateless = false, name = "A", parallel_iterations = 10 : i64} : (tensor<i32>) -> tensor<i32>
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/inlining.mlir b/tensorflow/compiler/mlir/tensorflow/tests/inlining.mlir
index 1405ac6..0ff39a5 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/inlining.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/inlining.mlir
@@ -2,7 +2,7 @@
 
 // Test that simple TF operations can be inlined.
 
-func @inline_simple_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_simple_callee() -> tensor<2xi32>  {
   %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
   return %cst : tensor<2xi32>
 }
@@ -17,7 +17,7 @@
 
 // Test that TPUPartitionedCallOp is not inlined.
 
-func @simple_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @simple_callee() -> tensor<2xi32>  {
   %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
   return %cst : tensor<2xi32>
 }
@@ -35,7 +35,7 @@
 // Check that TF call operations can be inlined, even when the shape of the
 // argument or result is different than the called function.
 
-func @inline_shape_cast_callee(%arg : tensor<*xi32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+func private @inline_shape_cast_callee(%arg : tensor<*xi32>) -> tensor<*xi32>  {
   return %arg : tensor<*xi32>
 }
 
@@ -51,12 +51,12 @@
 
 // Check that functions can be inlined into islands.
 
-func @inline_simple_callee1() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_simple_callee1() -> tensor<2xi32>  {
   %cst = "tf.Const"() { value = dense<2> : tensor<2xi32> } : () -> tensor<2xi32>
   return %cst : tensor<2xi32>
 }
 
-func @inline_into_island_multi_block_callee() -> tensor<2xi32> attributes {sym_visibility = "private"} {
+func private @inline_into_island_multi_block_callee() -> tensor<2xi32>  {
   br ^bb1
 
 ^bb1:
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/case.mlir b/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/case.mlir
index 2f2ee6f..45e73fc 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/case.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/mlir2graphdef/case.mlir
@@ -13,7 +13,7 @@
     return
   }
 
-  func @indexed_case_branch0_40(%arg0: tensor<i32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+  func private @indexed_case_branch0_40(%arg0: tensor<i32>) -> tensor<*xi32> {
     %0 = tf_executor.graph {
       %outputs, %control = tf_executor.island wraps "tf.Const"() {device = "", value = dense<1> : tensor<i32>} : () -> tensor<i32>
       %outputs_0, %control_1 = tf_executor.island wraps "tf.AddV2"(%arg0, %outputs) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<*xi32>
@@ -22,7 +22,7 @@
     return %0 : tensor<*xi32>
   }
 
-  func @indexed_case_branch1_50(%arg0: tensor<i32>) -> tensor<*xi32> attributes {sym_visibility = "private"} {
+  func private @indexed_case_branch1_50(%arg0: tensor<i32>) -> tensor<*xi32> {
     %0 = tf_executor.graph {
       %outputs, %control = tf_executor.island wraps "tf.Const"() {device = "", value = dense<2> : tensor<i32>} : () -> tensor<i32>
       %outputs_0, %control_1 = tf_executor.island wraps "tf.AddV2"(%arg0, %outputs) {device = ""} : (tensor<i32>, tensor<i32>) -> tensor<*xi32>
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
index ddd5960..493a96a 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tensor_array_ops_decomposition.mlir
@@ -557,7 +557,7 @@
   %call = "tf.PartitionedCall"() {config = "", config_proto = "", executor_type = "", f = @callee} : () -> (tensor<*xf32>)
   return
 }
-func @callee() -> (tensor<*xf32>) attributes {sym_visibility = "private"} {
+func private @callee() -> (tensor<*xf32>) {
   %size = "tf.Const"() {value = dense<5> : tensor<i32>} : () -> tensor<i32>
   // CHECK: %[[LOCAL_VAR:.*]] = "tf.MlirLocalVarOp"() : () -> tensor<!tf.resource<tensor<5x3xf32>>>
   %ta:2 = "tf.TensorArrayV3"(%size) {dtype = f32, element_shape = #tf.shape<*>, dynamic_size = false, clear_after_read = true, identical_element_shapes = true, tensor_array_name = "ta"} : (tensor<i32>) -> (tensor<!tf.resource<tensor<*xf32>>>, tensor<f32>)
@@ -598,7 +598,7 @@
 
 // CHECK-LABEL: func private @callee
 // CHECK-SAME:  %[[VAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>, %[[GVAR:.*]]: tensor<!tf.resource<tensor<5x3xf32>>>
-func @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> attributes {sym_visibility = "private"} {
+func private @callee(%arg0: tensor<!tf.resource>) -> tensor<!tf.resource> {
   %index = "tf.Const"() {value = dense<1> : tensor<i32>} : () -> tensor<i32>
   %elem = "tf._SomeOp"() : () -> tensor<3xf32>
   %flow = "tf.Const"() {value = dense<1.0> : tensor<f32>} : () -> tensor<f32>
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
index 0599f68..f51aa1a 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tensor_list_ops_decomposition.mlir
@@ -515,7 +515,7 @@
 }
 
 // CHECK: func private @callee(%[[ARG0:.*]]: tensor<10xf32>, %[[ARG1:.*]]: tensor<i1>, %[[ARG2:.*]]: tensor<1xi32>) -> (tensor<10xf32>, tensor<1xi32>)
-func @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> attributes {sym_visibility = "private"} {
+func private @callee(%arg0: tensor<!tf.variant<tensor<f32>>>, %arg1: tensor<i1>) -> tensor<!tf.variant<tensor<f32>>> {
   %elem = "tf._SomeOp"(%arg1) : (tensor<i1>) -> tensor<f32>
 
   // CHECK-NOT: "tf.TensorListPushBack"
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_global_tensors.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_global_tensors.mlir
index 6c32a3b..3392b56 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_global_tensors.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_freeze_global_tensors.mlir
@@ -64,7 +64,7 @@
     return
   }
 
-  func @f_callee(%arg0: tensor<!tf.resource<tensor<f32>>>) attributes {sym_visibility = "private"} {
+  func private @f_callee(%arg0: tensor<!tf.resource<tensor<f32>>>) {
     return
   }
 }
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops.mlir
index 60330d0..da7daac 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops.mlir
@@ -46,7 +46,7 @@
     return %arg0 : tensor<f32>
   }
 
-  func @f() attributes {sym_visibility = "private"} {
+  func private @f() {
     return
   }
 
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops_invalid.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops_invalid.mlir
index de7cbc3..733c99a 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops_invalid.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_ops_invalid.mlir
@@ -3,7 +3,7 @@
 module attributes {tf_saved_model.semantics} {
 
   // expected-error@+1 {{unknown tf_saved_model dialect arg attribute 'tf_saved_model.not_a_real_arg_attr'}}
-  func @f(%arg0: tensor<f32> {tf_saved_model.not_a_real_arg_attr = 1 : i32}) attributes {sym_visibility = "private"} {
+  func private @f(%arg0: tensor<f32> {tf_saved_model.not_a_real_arg_attr = 1 : i32}) {
     return
   }
 
@@ -245,8 +245,8 @@
 
   "tf_saved_model.global_tensor"() { is_mutable, sym_name = "v", type = tensor<?xf32>, value = dense<1.> : tensor<1xf32> } : () -> ()
   // expected-error@+1 {{can only apply 'tf_saved_model' argument attributes to exported functions}}
-  func @f(%arg0: tensor<!tf.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
-  -> (tensor<?xf32> {tf_saved_model.index_path = []}) attributes {sym_visibility = "private"} {
+  func private @f(%arg0: tensor<!tf.resource<tensor<?xf32>>> {tf_saved_model.bound_input = @v})
+  -> (tensor<?xf32> {tf_saved_model.index_path = []}) {
     %0 = "tf.ReadVariableOp"(%arg0) : (tensor<!tf.resource<tensor<?xf32>>>) -> tensor<?xf32>
     return %0 : tensor<?xf32>
   }
@@ -286,7 +286,7 @@
 
   // expected-error@+1 {{the initializer function should have no output}}
   "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-  func @init() -> tensor<1xf32> attributes {sym_visibility = "private"} {
+  func private @init() -> tensor<1xf32> {
     %0 = "tf.Const"() {value = dense<[1.0]> : tensor<1xf32> } : () -> tensor<1xf32>
     return %0 : tensor<1xf32>
   }
@@ -299,7 +299,7 @@
   "tf_saved_model.session_initializer"() { initializer = @init } : () -> ()
   // expected-error@+1 {{there must be no more than one session_initializer op}}
   "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-  func @init() -> tensor<1xf32> attributes {sym_visibility = "private"} {
+  func private @init() -> tensor<1xf32> {
     %0 = "tf.Const"() {value = dense<[1.0]> : tensor<1xf32> } : () -> tensor<1xf32>
     return %0 : tensor<1xf32>
   }
@@ -310,9 +310,9 @@
 module attributes {tf_saved_model.semantics, tf_saved_model.under_construction} {
 
   // expected-error@+1 {{exported function @f should be public}}
-  func @f(
+  func private @f(
     %arg0: tensor<f32> {tf.resource_name = "resource"}
-  ) attributes { sym_visibility = "private", tf_saved_model.exported_names = ["foo.some_func"] } {
+  ) attributes {tf_saved_model.exported_names = ["foo.some_func"] } {
     return
   }
 
@@ -372,7 +372,7 @@
 
   // expected-error@+1 {{the initializer function should be exported}}
   "tf_saved_model.session_initializer"() { initializers = [@init] } : () -> ()
-  func @init() attributes {sym_visibility = "private"} {
+  func private @init() {
     return
   }
 }
diff --git a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_optimize_global_tensors_interprocedural.mlir b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_optimize_global_tensors_interprocedural.mlir
index 636bd60..d86209a 100644
--- a/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_optimize_global_tensors_interprocedural.mlir
+++ b/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model_optimize_global_tensors_interprocedural.mlir
@@ -20,12 +20,12 @@
     return %val : tensor<f32>
   }
 
-  func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+  func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>  {
     %val = "tf.PartitionedCall"(%arg0) {config = "", config_proto = "", executor_type = "", f = @f_callee_callee} : (tensor<*x!tf.resource>) -> (tensor<f32>)
     return %val : tensor<f32>
   }
 
-  func @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+  func private @f_callee_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>  {
     %val = "tf.ReadVariableOp"(%arg0) : (tensor<*x!tf.resource>) -> tensor<f32>
     return %val : tensor<f32>
   }
@@ -59,7 +59,7 @@
     return %val : tensor<f32>
   }
 
-  func @f_common(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+  func private @f_common(%arg0: tensor<*x!tf.resource>) -> tensor<f32>  {
     %val = "tf.ReadVariableOp"(%arg0) : (tensor<*x!tf.resource>) -> tensor<f32>
     return %val : tensor<f32>
   }
@@ -85,7 +85,7 @@
     return %val_2 : tensor<f32>
   }
 
-  func @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32> attributes {sym_visibility = "private"} {
+  func private @f_callee(%arg0: tensor<*x!tf.resource>) -> tensor<f32>  {
     %cst_1 = constant dense<2.0> : tensor<f32>
     return %cst_1 : tensor<f32>
   }