nir: Add a lowering pass to split 64bit phis

Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6313>
diff --git a/src/compiler/nir/nir.h b/src/compiler/nir/nir.h
index 8371ae9..3bfc2d1 100644
--- a/src/compiler/nir/nir.h
+++ b/src/compiler/nir/nir.h
@@ -4611,6 +4611,7 @@
 bool nir_lower_bit_size(nir_shader *shader,
                         nir_lower_bit_size_callback callback,
                         void *callback_data);
+bool nir_lower_64bit_phis(nir_shader *shader);
 
 nir_lower_int64_options nir_lower_int64_op_to_options_mask(nir_op opcode);
 bool nir_lower_int64(nir_shader *shader);
diff --git a/src/compiler/nir/nir_lower_bit_size.c b/src/compiler/nir/nir_lower_bit_size.c
index 6e7d57e..71a0f6f 100644
--- a/src/compiler/nir/nir_lower_bit_size.c
+++ b/src/compiler/nir/nir_lower_bit_size.c
@@ -127,3 +127,93 @@
 
    return progress;
 }
+
+static void
+split_phi(nir_builder *b, nir_phi_instr *phi)   /* Split one 64-bit phi into two 32-bit phis. */
+{
+   nir_phi_instr *lowered[2] = {                /* [0] carries the low dwords, [1] the high dwords */
+      nir_phi_instr_create(b->shader),
+      nir_phi_instr_create(b->shader)
+   };
+   int num_components = phi->dest.ssa.num_components;
+   assert(phi->dest.ssa.bit_size == 64);        /* caller only passes phis wider than 32 bits */
+
+   nir_foreach_phi_src(src, phi) {
+      assert(num_components == src->src.ssa->num_components);
+
+      b->cursor = nir_before_src(&src->src, false);   /* place the unpacks in the predecessor block, not here */
+
+      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);   /* low 32 bits of each component */
+      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);   /* high 32 bits of each component */
+
+      nir_phi_src *xsrc = rzalloc(lowered[0], nir_phi_src);   /* sources built by hand: the new phis are not in a block yet */
+      xsrc->pred = src->pred;
+      xsrc->src = nir_src_for_ssa(x);
+      exec_list_push_tail(&lowered[0]->srcs, &xsrc->node);
+
+      nir_phi_src *ysrc = rzalloc(lowered[1], nir_phi_src);
+      ysrc->pred = src->pred;
+      ysrc->src = nir_src_for_ssa(y);
+      exec_list_push_tail(&lowered[1]->srcs, &ysrc->node);
+   }
+
+   nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
+                     num_components, 32, NULL);   /* same vector width, halved bit size */
+   nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
+                     num_components, 32, NULL);
+
+   b->cursor = nir_before_instr(&phi->instr);   /* insert the new phis into the block's phi group */
+   nir_builder_instr_insert(b, &lowered[0]->instr);
+   nir_builder_instr_insert(b, &lowered[1]->instr);
+
+   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));   /* the pack is a regular ALU op, so it must follow all phis */
+   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
+   nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(merged));   /* all old users now read the re-packed value */
+   nir_instr_remove(&phi->instr);
+}
+
+static bool
+lower_64bit_phi_impl(nir_function_impl *impl)   /* Split every >32-bit phi in one function; returns progress. */
+{
+   nir_builder b;
+   nir_builder_init(&b, impl);
+   bool progress = false;
+
+   nir_foreach_block(block, impl) {
+      nir_foreach_instr_safe(instr, block) {
+         if (instr->type != nir_instr_type_phi)
+            break;   /* phis are grouped at the top of a block; nothing past the first non-phi */
+
+         nir_phi_instr *phi = nir_instr_as_phi(instr);
+         assert(phi->dest.is_ssa);
+
+         if (phi->dest.ssa.bit_size <= 32)
+            continue;   /* already narrow enough; leave it alone */
+
+         split_phi(&b, phi);   /* replaces phi with a pair of 32-bit phis + pack (safe under _safe iteration) */
+         progress = true;
+      }
+   }
+
+   if (progress) {
+      nir_metadata_preserve(impl, nir_metadata_block_index |   /* only instructions changed, the CFG did not */
+                                  nir_metadata_dominance);
+   } else {
+      nir_metadata_preserve(impl, nir_metadata_all);
+   }
+
+   return progress;
+}
+
+bool
+nir_lower_64bit_phis(nir_shader *shader)   /* Shader-wide entry point; true if any phi was split. */
+{
+   bool progress = false;
+
+   nir_foreach_function(function, shader) {
+      if (function->impl)   /* skip function declarations that have no body */
+         progress |= lower_64bit_phi_impl(function->impl);
+   }
+
+   return progress;
+}