gpu_display: refactor gpu_main_display_tube.

After go/playcl/83699, the WndProc thread is the only thread that
accesses this tube, so we change its type from Arc<Mutex<Tube>> to
Rc<Tube>; with no lock to take, there is no risk of deadlock.
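As a bonus, Rc is !Send, so the compiler now enforces statically that
the tube never leaves its thread. A minimal sketch of the idea, using a
stand-in Tube type rather than the real base::Tube:

    use std::rc::Rc;

    struct Tube; // stand-in for base::Tube, for illustration only

    fn main() {
        let tube = Rc::new(Tube);
        let clone = Rc::clone(&tube); // cheap pointer copy; no lock, no atomics

        // Rc<Tube> is !Send, so moving a clone to another thread is a
        // compile error; the "WndProc thread only" invariant is checked
        // by the compiler instead of by a Mutex at runtime:
        // std::thread::spawn(move || drop(clone)); // does not compile

        drop(clone);
    }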

This tube used to be a field of struct DisplayParameters. That struct
is used for CLI arg parsing, but this tube doesn't come from the CLI,
so keeping it there was awkward. Moreover, DisplayParameters describes
each individual window, while we will likely have a single tube shared
among multiple windows once we support multi-windowing. So, we remove
the tube from DisplayParameters, and WindowProcedureThread distributes
clones of it to surfaces instead.
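A simplified sketch of that distribution path, with types pared down to
the relevant fields (see MessageHandlerResources in
window_message_processor.rs in the diff below):

    use std::rc::Rc;

    struct Tube; // stand-in for base::Tube

    // Bundle handed to each surface on creation; mirrors
    // MessageHandlerResources in window_message_processor.rs.
    struct MessageHandlerResources {
        gpu_main_display_tube: Option<Rc<Tube>>,
    }

    struct WindowMessageDispatcher {
        gpu_main_display_tube: Option<Rc<Tube>>,
    }

    impl WindowMessageDispatcher {
        // Each new surface receives a cheap Rc clone of the single
        // tube, so one tube can back multiple windows later.
        fn resources_for_new_surface(&self) -> MessageHandlerResources {
            MessageHandlerResources {
                gpu_main_display_tube: self.gpu_main_display_tube.clone(),
            }
        }
    }

    fn main() {
        let dispatcher = WindowMessageDispatcher {
            gpu_main_display_tube: Some(Rc::new(Tube)),
        };
        let _resources = dispatcher.resources_for_new_surface();
    }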

Bug: 257486521
Test: Ran GPG

Change-Id: Ic7adeb4a41c89d79e40820ad4d50e8d85c4e385c
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/4804685
Reviewed-by: Noah Gold <nkgold@google.com>
Commit-Queue: Kaiyi Li <kaiyili@google.com>
diff --git a/devices/src/virtio/gpu/mod.rs b/devices/src/virtio/gpu/mod.rs
index e18c041..a63cb06 100644
--- a/devices/src/virtio/gpu/mod.rs
+++ b/devices/src/virtio/gpu/mod.rs
@@ -1640,7 +1640,10 @@
 #[cfg(windows)]
 #[inline]
 pub fn start_wndproc_thread(
-    vm_tube: Option<Arc<Mutex<Tube>>>,
+    #[cfg(feature = "kiwi")] gpu_main_display_tube: Option<Tube>,
 ) -> anyhow::Result<WindowProcedureThread> {
-    WindowProcedureThread::start_thread(vm_tube)
+    WindowProcedureThread::start_thread(
+        #[cfg(feature = "kiwi")]
+        gpu_main_display_tube,
+    )
 }
diff --git a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
index 6f5d8fd..59c92cb 100644
--- a/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
+++ b/devices/src/virtio/vhost/user/device/gpu/sys/windows.rs
@@ -177,7 +177,7 @@
     )];
 
     let wndproc_thread =
-        virtio::gpu::start_wndproc_thread(None).context("failed to start wndproc_thread")?;
+        virtio::gpu::start_wndproc_thread().context("failed to start wndproc_thread")?;
 
     let mut gpu_params = config.params.clone();
 
diff --git a/gpu_display/src/gpu_display_win/mod.rs b/gpu_display/src/gpu_display_win/mod.rs
index 620bee0..4c506da 100644
--- a/gpu_display/src/gpu_display_win/mod.rs
+++ b/gpu_display/src/gpu_display_win/mod.rs
@@ -12,8 +12,6 @@
 
 use std::num::NonZeroU32;
 use std::sync::mpsc::channel;
-#[cfg(feature = "kiwi")]
-use std::sync::Arc;
 use std::sync::Weak;
 use std::time::Duration;
 
@@ -25,15 +23,11 @@
 use base::Event;
 use base::EventWaitResult;
 use base::RawDescriptor;
-#[cfg(feature = "kiwi")]
-use base::Tube;
 use euclid::size2;
 use euclid::Size2D;
 use math_util::Size2DCheckedCast;
 use metrics::Metrics;
 pub use surface::NoopSurface as Surface;
-#[cfg(feature = "kiwi")]
-use sync::Mutex;
 use vm_control::gpu::DisplayMode;
 use vm_control::gpu::DisplayParameters;
 use window_message_processor::DisplaySendToWndProc;
diff --git a/gpu_display/src/gpu_display_win/surface.rs b/gpu_display/src/gpu_display_win/surface.rs
index 4c381bc..c59b906 100644
--- a/gpu_display/src/gpu_display_win/surface.rs
+++ b/gpu_display/src/gpu_display_win/surface.rs
@@ -9,8 +9,8 @@
 use metrics::Metrics;
 
 use super::window::Window;
-use super::window_message_dispatcher::DisplayEventDispatcher;
 use super::window_message_processor::HandleWindowMessage;
+use super::window_message_processor::MessageHandlerResources;
 use super::DisplayProperties;
 use super::VirtualDisplaySpace;
 
@@ -22,7 +22,7 @@
         _virtual_display_size: &Size2D<i32, VirtualDisplaySpace>,
         _metrics: Option<Weak<Metrics>>,
         _display_properties: &DisplayProperties,
-        _display_event_dispatcher: DisplayEventDispatcher,
+        _resources: MessageHandlerResources,
     ) -> Result<Self> {
         Ok(Self {})
     }
diff --git a/gpu_display/src/gpu_display_win/window_message_dispatcher.rs b/gpu_display/src/gpu_display_win/window_message_dispatcher.rs
index 429b195..11284b5 100644
--- a/gpu_display/src/gpu_display_win/window_message_dispatcher.rs
+++ b/gpu_display/src/gpu_display_win/window_message_dispatcher.rs
@@ -16,6 +16,7 @@
 use base::debug;
 use base::error;
 use base::info;
+use base::Tube;
 use linux_input_sys::virtio_input_event;
 #[cfg(feature = "kiwi")]
 use vm_control::ServiceSendToGpu;
@@ -80,6 +81,7 @@
 pub(crate) struct WindowMessageDispatcher<T: HandleWindowMessage> {
     message_processor: Option<WindowMessageProcessor<T>>,
     display_event_dispatcher: DisplayEventDispatcher,
+    gpu_main_display_tube: Option<Rc<Tube>>,
     // The dispatcher is pinned so that its address in the memory won't change, and it is always
     // safe to use the pointer to it stored in the window.
     _pinned_marker: PhantomPinned,
@@ -90,10 +92,14 @@
     /// of the `Window` object, and drop it before the underlying window is completely gone.
     /// TODO(b/238680252): This should be good enough for supporting multi-windowing, but we should
     /// revisit it if we also want to manage some child windows of the crosvm window.
-    pub fn create(window: Window) -> Result<Pin<Box<Self>>> {
+    pub fn create(
+        window: Window,
+        gpu_main_display_tube: Option<Rc<Tube>>,
+    ) -> Result<Pin<Box<Self>>> {
         let mut dispatcher = Box::pin(Self {
             message_processor: Default::default(),
             display_event_dispatcher: DisplayEventDispatcher::new(),
+            gpu_main_display_tube,
             _pinned_marker: PhantomPinned,
         });
         dispatcher
@@ -213,12 +219,16 @@
         create_handler_func: CreateMessageHandlerFunction<T>,
     ) -> bool {
         match &mut self.message_processor {
-            Some(processor) => match processor
-                .create_message_handler(create_handler_func, self.display_event_dispatcher.clone())
-            {
-                Ok(_) => return true,
-                Err(e) => error!("Failed to create message handler: {:?}", e),
-            },
+            Some(processor) => {
+                let resources = MessageHandlerResources {
+                    display_event_dispatcher: self.display_event_dispatcher.clone(),
+                    gpu_main_display_tube: self.gpu_main_display_tube.clone(),
+                };
+                match processor.create_message_handler(create_handler_func, resources) {
+                    Ok(_) => return true,
+                    Err(e) => error!("Failed to create message handler: {:?}", e),
+                }
+            }
             None => {
                 error!("Cannot create message handler because there is no message processor!")
             }
diff --git a/gpu_display/src/gpu_display_win/window_message_processor.rs b/gpu_display/src/gpu_display_win/window_message_processor.rs
index d3e568f..d8f1ba3 100644
--- a/gpu_display/src/gpu_display_win/window_message_processor.rs
+++ b/gpu_display/src/gpu_display_win/window_message_processor.rs
@@ -2,10 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+use std::rc::Rc;
+
 use anyhow::Context;
 use anyhow::Result;
 use base::error;
 use base::warn;
+use base::Tube;
 #[cfg(feature = "kiwi")]
 use vm_control::ServiceSendToGpu;
 use winapi::shared::minwindef::LPARAM;
@@ -38,8 +41,14 @@
 /// destructing the message.
 pub(crate) const WM_USER_HANDLE_DISPLAY_MESSAGE_INTERNAL: UINT = WM_USER + 2;
 
+/// Struct for resources used for window message handler creation.
+pub struct MessageHandlerResources {
+    pub display_event_dispatcher: DisplayEventDispatcher,
+    pub gpu_main_display_tube: Option<Rc<Tube>>,
+}
+
 pub type CreateMessageHandlerFunction<T> =
-    Box<dyn FnOnce(&Window, DisplayEventDispatcher) -> Result<T>>;
+    Box<dyn FnOnce(&Window, MessageHandlerResources) -> Result<T>>;
 
 /// Called after the handler creation finishes. The argument indicates whether that was successful.
 pub type CreateMessageHandlerCallback = Box<dyn FnOnce(bool)>;
@@ -162,9 +171,9 @@
     pub fn create_message_handler(
         &mut self,
         create_handler_func: CreateMessageHandlerFunction<T>,
-        display_event_dispatcher: DisplayEventDispatcher,
+        handler_resources: MessageHandlerResources,
     ) -> Result<()> {
-        create_handler_func(&self.window, display_event_dispatcher)
+        create_handler_func(&self.window, handler_resources)
             .map(|handler| {
                 self.message_handler.replace(handler);
                 if let Some(handler) = &mut self.message_handler {
diff --git a/gpu_display/src/gpu_display_win/window_procedure_thread.rs b/gpu_display/src/gpu_display_win/window_procedure_thread.rs
index 909f2bd..c2e8792 100644
--- a/gpu_display/src/gpu_display_win/window_procedure_thread.rs
+++ b/gpu_display/src/gpu_display_win/window_procedure_thread.rs
@@ -8,6 +8,7 @@
 use std::os::windows::io::RawHandle;
 use std::pin::Pin;
 use std::ptr::null_mut;
+use std::rc::Rc;
 use std::sync::atomic::AtomicI32;
 use std::sync::atomic::Ordering;
 use std::sync::mpsc::channel;
@@ -28,7 +29,6 @@
 use base::ReadNotifier;
 use base::Tube;
 use euclid::size2;
-use sync::Mutex;
 #[cfg(feature = "kiwi")]
 use vm_control::ServiceSendToGpu;
 use win_util::syscall_bail;
@@ -156,7 +156,9 @@
 }
 
 impl<T: HandleWindowMessage> WindowProcedureThread<T> {
-    pub fn start_thread(vm_tube: Option<Arc<Mutex<Tube>>>) -> Result<Self> {
+    pub fn start_thread(
+        #[cfg(feature = "kiwi")] gpu_main_display_tube: Option<Tube>,
+    ) -> Result<Self> {
         let (thread_id_sender, thread_id_receiver) = channel();
         let message_loop_state = Arc::new(AtomicI32::new(MessageLoopState::NotStarted as i32));
         let thread_terminated_event = Event::new().unwrap();
@@ -166,13 +168,19 @@
             .try_clone()
             .map_err(|e| anyhow!("Failed to clone thread_terminated_event: {}", e))?;
 
+        #[cfg(not(feature = "kiwi"))]
+        let gpu_main_display_tube = None;
         let thread = match ThreadBuilder::new()
             .name("gpu_display_wndproc".into())
             .spawn(move || {
                 // Must be called before any other threads post messages to the WndProc thread.
                 thread_message_util::force_create_message_queue();
 
-                Self::run_message_loop(thread_id_sender, message_loop_state_clone, vm_tube);
+                Self::run_message_loop(
+                    thread_id_sender,
+                    message_loop_state_clone,
+                    gpu_main_display_tube,
+                );
 
                 if let Err(e) = thread_terminated_event_clone.signal() {
                     error!("Failed to signal thread terminated event: {}", e);
@@ -222,11 +230,14 @@
     fn run_message_loop(
         thread_id_sender: Sender<Result<u32>>,
         message_loop_state: Arc<AtomicI32>,
-        vm_tube: Option<Arc<Mutex<Tube>>>,
+        gpu_main_display_tube: Option<Tube>,
     ) {
+        let gpu_main_display_tube = gpu_main_display_tube.map(Rc::new);
         // Safe because the dispatcher will take care of the lifetime of the `Window` object.
         let create_window_res = unsafe { Self::create_window() };
-        match create_window_res.and_then(|window| WindowMessageDispatcher::<T>::create(window)) {
+        match create_window_res.and_then(|window| {
+            WindowMessageDispatcher::<T>::create(window, gpu_main_display_tube.clone())
+        }) {
             Ok(dispatcher) => {
                 info!("WndProc thread entering message loop");
                 message_loop_state.store(MessageLoopState::Running as i32, Ordering::SeqCst);
@@ -235,7 +246,7 @@
                     error!("Failed to send WndProc thread ID: {}", e);
                 }
 
-                let exit_state = Self::run_message_loop_body(dispatcher, vm_tube);
+                let exit_state = Self::run_message_loop_body(dispatcher, gpu_main_display_tube);
                 message_loop_state.store(exit_state as i32, Ordering::SeqCst);
             }
             Err(e) => {
@@ -253,12 +264,11 @@
 
     fn run_message_loop_body(
         mut message_dispatcher: Pin<Box<WindowMessageDispatcher<T>>>,
-        vm_tube: Option<Arc<Mutex<Tube>>>,
+        gpu_main_display_tube: Option<Rc<Tube>>,
     ) -> MessageLoopState {
         let mut msg_wait_ctx = MsgWaitContext::new();
-        if let Some(tube) = &vm_tube {
-            if let Err(e) = msg_wait_ctx.add(tube.lock().get_read_notifier(), Token::ServiceMessage)
-            {
+        if let Some(tube) = &gpu_main_display_tube {
+            if let Err(e) = msg_wait_ctx.add(tube.get_read_notifier(), Token::ServiceMessage) {
                 error!(
                     "Failed to add service message read notifier to MsgWaitContext: {:?}",
                     e
@@ -277,15 +287,14 @@
                             return MessageLoopState::ExitedNormally;
                         }
                     }
-                    Token::ServiceMessage => {
-                        #[cfg(feature = "kiwi")]
-                        Self::read_and_dispatch_service_message(
-                            &mut message_dispatcher,
-                            // We never use this token if `vm_tube` is None, so `expect()` should
-                            // always succeed.
-                            vm_tube.as_ref().expect("Service message tube is None"),
-                        );
-                    }
+                    Token::ServiceMessage => Self::read_and_dispatch_service_message(
+                        &mut message_dispatcher,
+                        // We never use this token if `gpu_main_display_tube` is None, so `expect()`
+                        // should always succeed.
+                        gpu_main_display_tube
+                            .as_ref()
+                            .expect("Service message tube is None"),
+                    ),
                 },
                 Err(e) => {
                     error!(
@@ -356,20 +365,23 @@
     #[cfg(feature = "kiwi")]
     fn read_and_dispatch_service_message(
         message_dispatcher: &mut Pin<Box<WindowMessageDispatcher<T>>>,
-        vm_tube: &Arc<Mutex<Tube>>,
+        gpu_main_display_tube: &Tube,
     ) {
-        // We might want to send messages via `vm_tube` while processing service messages, so we
-        // have to make sure the mutex on this tube has been released before processing the message.
-        let message = match vm_tube.lock().recv::<ServiceSendToGpu>() {
-            Ok(message) => message,
+        match gpu_main_display_tube.recv::<ServiceSendToGpu>() {
+            Ok(message) => message_dispatcher
+                .as_mut()
+                .process_service_message(&message),
             Err(e) => {
-                error!("Failed to receive service message through the tube: {}", e);
-                return;
+                error!("Failed to receive service message through the tube: {}", e)
             }
-        };
-        message_dispatcher
-            .as_mut()
-            .process_service_message(&message);
+        }
+    }
+
+    #[cfg(not(feature = "kiwi"))]
+    fn read_and_dispatch_service_message(
+        _: &mut Pin<Box<WindowMessageDispatcher<T>>>,
+        _gpu_main_display_tube: &Tube,
+    ) {
     }
 
     fn is_message_loop_running(&self) -> bool {
diff --git a/src/sys/windows/generic.rs b/src/sys/windows/generic.rs
index 0eb50ec..d9d07ab 100644
--- a/src/sys/windows/generic.rs
+++ b/src/sys/windows/generic.rs
@@ -207,7 +207,7 @@
     _product_args: GpuBackendConfigProduct,
 ) -> Result<Gpu> {
     let wndproc_thread =
-        virtio::gpu::start_wndproc_thread(None).expect("Failed to start wndproc_thread!");
+        virtio::gpu::start_wndproc_thread().expect("Failed to start wndproc_thread!");
 
     Ok(Gpu::new(
         vm_evt_wrtube