Use cvd::Subprocess::Stop() for all launcher subprocesses.

This simplifies the stopping procedure by letting the process monitor
stop all processes, including the VMM, so vm_manager->Stop() no longer
needs to be called. The process monitor now calls each subprocess'
stop routine, which defaults to what the process monitor did before
(killing the process) but can be customized when the command is
created. This lays the groundwork for a cleaner shutdown mechanism
(stopping instead of killing) by simply specifying custom stopper
functions.
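
As an illustration only (a sketch using a hypothetical service binary
and a hypothetical SendQuitRequest() helper; KillSubprocess() is the
existing fallback), a custom stopper could be attached when a command
is created:

  cvd::Command example_cmd("/path/to/example_service",
                           [](cvd::Subprocess* proc) {
                             // Try a (hypothetical) graceful quit
                             // request first.
                             if (SendQuitRequest()) {
                               return true;
                             }
                             // Fall back to the default behavior of
                             // killing the subprocess.
                             return KillSubprocess(proc);
                           });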

Bug: 140302661
Test: locally
Change-Id: Ia984c5bff33a9bc89ebfc219bdb4bfb99fee3b14
(cherry picked from commit 883685c7298b349733c73cdf62285cf5db5b8094)
diff --git a/host/commands/launch/main.cc b/host/commands/launch/main.cc
index 88853fa..d9029b1 100644
--- a/host/commands/launch/main.cc
+++ b/host/commands/launch/main.cc
@@ -37,7 +37,6 @@
 
 #include <gflags/gflags.h>
 #include <glog/logging.h>
-#include <host/libs/vm_manager/crosvm_manager.h>
 
 #include "common/libs/fs/shared_fd.h"
 #include "common/libs/fs/shared_select.h"
@@ -57,6 +56,7 @@
 #include "host/commands/launch/vsoc_shared_memory.h"
 #include "host/libs/config/cuttlefish_config.h"
 #include "host/commands/kernel_log_monitor/kernel_log_server.h"
+#include <host/libs/vm_manager/crosvm_manager.h>
 #include "host/libs/vm_manager/vm_manager.h"
 #include "host/libs/vm_manager/qemu_manager.h"
 
@@ -284,25 +284,27 @@
 }
 
 void ServerLoop(cvd::SharedFD server,
-                vm_manager::VmManager* vm_manager,
                 cvd::ProcessMonitor* process_monitor) {
   while (true) {
     // TODO: use select to handle simultaneous connections.
     auto client = cvd::SharedFD::Accept(*server);
     cvd::LauncherAction action;
-    auto response = cvd::LauncherResponse::kSuccess;
     while (client->IsOpen() && client->Read(&action, sizeof(action)) > 0) {
       switch (action) {
         case cvd::LauncherAction::kStop:
-          vm_manager->Stop();
-          process_monitor->StopMonitoredProcesses();
-          client->Write(&response, sizeof(response));
-          std::exit(0);
+          if (process_monitor->StopMonitoredProcesses()) {
+            auto response = cvd::LauncherResponse::kSuccess;
+            client->Write(&response, sizeof(response));
+            std::exit(0);
+          } else {
+            auto response = cvd::LauncherResponse::kError;
+            client->Write(&response, sizeof(response));
+          }
           break;
         default:
           LOG(ERROR) << "Unrecognized launcher action: "
                      << static_cast<char>(action);
-          response = cvd::LauncherResponse::kError;
+          auto response = cvd::LauncherResponse::kError;
           client->Write(&response, sizeof(response));
       }
     }
@@ -430,7 +432,7 @@
                              GetOnSubprocessExitCallback(*config));
   LaunchAdbConnectorIfEnabled(&process_monitor, *config, adbd_events_pipe);
 
-  ServerLoop(launcher_monitor_socket, vm_manager, &process_monitor); // Should not return
+  ServerLoop(launcher_monitor_socket, &process_monitor); // Should not return
   LOG(ERROR) << "The server loop returned, it should never happen!!";
   return cvd::LauncherExitCodes::kServerError;
 }
diff --git a/host/commands/launch/process_monitor.cc b/host/commands/launch/process_monitor.cc
index 477ef8f..6486935 100644
--- a/host/commands/launch/process_monitor.cc
+++ b/host/commands/launch/process_monitor.cc
@@ -92,45 +92,33 @@
   NotifyThread(thread_comm_main_);
 }
 
-void ProcessMonitor::StopMonitoredProcesses() {
+bool ProcessMonitor::StopMonitoredProcesses() {
+  // Because the mutex is held while this function executes, the restarter
+  // thread stays blocked; by the time it resumes execution there are no
+  // processes left to monitor.
   std::lock_guard<std::mutex> lock(processes_mutex_);
-  for (auto& entry : monitored_processes_) {
-    auto pid = entry.proc->pid();
-    if (pid > 0) {
-      auto pgid = getpgid(pid);
-      if (pgid < 0) {
-        auto error = errno;
-        LOG(WARNING) << "Error obtaining process group id of process with pid="
-                     << pid << ": " << strerror(error);
-        // Send the kill signal anyways, because pgid will be -1 it will be sent
-        // to the process and not a (non-existent) group
-      }
-      bool is_group_head = pid == pgid;
-      if (is_group_head) {
-        killpg(pid, SIGKILL);
-      } else {
-        kill(pid, SIGKILL);
-      }
-    }
+  bool result = true;
+  // Processes were started in the order they appear in the vector; stop them
+  // in reverse order for symmetry.
+  for (auto entry_it = monitored_processes_.rbegin();
+       entry_it != monitored_processes_.rend(); ++entry_it) {
+    auto& entry = *entry_it;
+    result = result && entry.proc->Stop();
   }
+  // Wait for all processes to actually exit.
   for (auto& entry : monitored_processes_) {
-    auto pid = entry.proc->pid();
-    if (pid > 0) {
-      int wstatus;
-      pid_t ret_pid;
-      do {
-        errno = 0;
-        ret_pid = waitpid(pid, &wstatus, 0);
-      } while (ret_pid < 0 && errno == EINTR);
-      if (ret_pid < 0) {
-        auto error = errno;
-        LOG(WARNING) << "Failed to wait for process with pid=" << pid
-                     << ": " << strerror(error);
-      }
+    // Most processes are killed by signals; calling Wait(void) here would be
+    // too verbose in the logs.
+    int wstatus;
+    auto ret = entry.proc->Wait(&wstatus, 0);
+    if (ret < 0) {
+      LOG(WARNING) << "Failed to wait for process "
+                   << entry.cmd->GetShortName();
     }
   }
   // Clear the list to ensure they are not started again
   monitored_processes_.clear();
+  return result;
 }
 
 bool ProcessMonitor::RestartOnExitCb(MonitorEntry* entry) {
diff --git a/host/commands/launch/process_monitor.h b/host/commands/launch/process_monitor.h
index cf1f753..5683c61 100644
--- a/host/commands/launch/process_monitor.h
+++ b/host/commands/launch/process_monitor.h
@@ -48,7 +48,7 @@
   void MonitorExistingSubprocess(Command cmd, Subprocess sub_process,
                                  OnSocketReadyCb on_control_socket_ready_cb);
   // Stops all monitored subprocesses.
-  void StopMonitoredProcesses();
+  bool StopMonitoredProcesses();
   static bool RestartOnExitCb(MonitorEntry* entry);
   static bool DoNotMonitorCb(MonitorEntry* entry);
 
diff --git a/host/libs/vm_manager/crosvm_manager.cpp b/host/libs/vm_manager/crosvm_manager.cpp
index 33cedc7..a60d268 100644
--- a/host/libs/vm_manager/crosvm_manager.cpp
+++ b/host/libs/vm_manager/crosvm_manager.cpp
@@ -44,6 +44,17 @@
   }
 }
 
+bool Stop() {
+  auto config = vsoc::CuttlefishConfig::Get();
+  cvd::Command command(config->crosvm_binary());
+  command.AddParameter("stop");
+  command.AddParameter(GetControlSocketPath(config));
+
+  auto process = command.Start();
+
+  return process.Wait() == 0;
+}
+
 }  // namespace
 
 const std::string CrosvmManager::name() { return "crosvm"; }
@@ -81,7 +92,14 @@
     : VmManager(config) {}
 
 std::vector<cvd::Command> CrosvmManager::StartCommands(bool with_frontend) {
-  cvd::Command crosvm_cmd(config_->crosvm_binary());
+  cvd::Command crosvm_cmd(config_->crosvm_binary(), [](cvd::Subprocess* proc) {
+    auto stopped = Stop();
+    if (stopped) {
+      return true;
+    }
+    LOG(WARNING) << "Failed to stop VMM nicely, attempting to KILL";
+    return KillSubprocess(proc);
+  });
   crosvm_cmd.AddParameter("run");
 
   if (config_->gpu_mode() != vsoc::kGpuModeGuestSwiftshader) {
@@ -156,14 +174,4 @@
   return ret;
 }
 
-bool CrosvmManager::Stop() {
-  cvd::Command command(config_->crosvm_binary());
-  command.AddParameter("stop");
-  command.AddParameter(GetControlSocketPath(config_));
-
-  auto process = command.Start();
-
-  return process.Wait() == 0;
-}
-
 }  // namespace vm_manager
diff --git a/host/libs/vm_manager/crosvm_manager.h b/host/libs/vm_manager/crosvm_manager.h
index 41ddb0d..74ed8c5 100644
--- a/host/libs/vm_manager/crosvm_manager.h
+++ b/host/libs/vm_manager/crosvm_manager.h
@@ -35,7 +35,6 @@
   virtual ~CrosvmManager() = default;
 
   std::vector<cvd::Command> StartCommands(bool with_frontend) override;
-  bool Stop() override;
 };
 
 }  // namespace vm_manager
diff --git a/host/libs/vm_manager/qemu_manager.cpp b/host/libs/vm_manager/qemu_manager.cpp
index 831a156..155f680 100644
--- a/host/libs/vm_manager/qemu_manager.cpp
+++ b/host/libs/vm_manager/qemu_manager.cpp
@@ -66,6 +66,36 @@
   return output.str();
 }
 
+bool Stop() {
+  auto config = vsoc::CuttlefishConfig::Get();
+  auto monitor_path = GetMonitorPath(config);
+  auto monitor_sock = cvd::SharedFD::SocketLocalClient(
+      monitor_path.c_str(), false, SOCK_STREAM);
+
+  if (!monitor_sock->IsOpen()) {
+    LOG(ERROR) << "The connection to qemu is closed, is it still running?";
+    return false;
+  }
+  char msg[] = "{\"execute\":\"qmp_capabilities\"}{\"execute\":\"quit\"}";
+  ssize_t len = sizeof(msg) - 1;
+  while (len > 0) {
+    int tmp = monitor_sock->Write(msg, len);
+    if (tmp < 0) {
+      LOG(ERROR) << "Error writing to socket: " << monitor_sock->StrError();
+      return false;
+    }
+    len -= tmp;
+  }
+  // Log the reply
+  char buff[1000];
+  while ((len = monitor_sock->Read(buff, sizeof(buff) - 1)) > 0) {
+    buff[len] = '\0';
+    LOG(INFO) << "From qemu monitor: " << buff;
+  }
+
+  return true;
+}
+
 }  // namespace
 
 const std::string QemuManager::name() { return "qemu_cli"; }
@@ -124,39 +154,19 @@
   LogAndSetEnv("vsock_guest_cid", std::to_string(config_->vsock_guest_cid()));
   LogAndSetEnv("logcat_mode", config_->logcat_mode());
 
-  cvd::Command qemu_cmd(vsoc::DefaultHostArtifactsPath("bin/cf_qemu.sh"));
+  cvd::Command qemu_cmd(vsoc::DefaultHostArtifactsPath("bin/cf_qemu.sh"),
+                        [](cvd::Subprocess* proc) {
+                          auto stopped = Stop();
+                          if (stopped) {
+                            return true;
+                          }
+                          LOG(WARNING) << "Failed to stop VMM nicely, "
+                                       << "attempting to KILL";
+                          return KillSubprocess(proc);
+                        });
   std::vector<cvd::Command> ret;
   ret.push_back(std::move(qemu_cmd));
   return ret;
 }
 
-bool QemuManager::Stop() {
-  auto monitor_path = GetMonitorPath(config_);
-  auto monitor_sock = cvd::SharedFD::SocketLocalClient(
-      monitor_path.c_str(), false, SOCK_STREAM);
-
-  if (!monitor_sock->IsOpen()) {
-    LOG(ERROR) << "The connection to qemu is closed, is it still running?";
-    return false;
-  }
-  char msg[] = "{\"execute\":\"qmp_capabilities\"}{\"execute\":\"quit\"}";
-  ssize_t len = sizeof(msg) - 1;
-  while (len > 0) {
-    int tmp = monitor_sock->Write(msg, len);
-    if (tmp < 0) {
-      LOG(ERROR) << "Error writing to socket: " << monitor_sock->StrError();
-      return false;
-    }
-    len -= tmp;
-  }
-  // Log the reply
-  char buff[1000];
-  while ((len = monitor_sock->Read(buff, sizeof(buff) - 1)) > 0) {
-    buff[len] = '\0';
-    LOG(INFO) << "From qemu monitor: " << buff;
-  }
-
-  return true;
-}
-
 }  // namespace vm_manager
diff --git a/host/libs/vm_manager/qemu_manager.h b/host/libs/vm_manager/qemu_manager.h
index 540bb67..9b0df49 100644
--- a/host/libs/vm_manager/qemu_manager.h
+++ b/host/libs/vm_manager/qemu_manager.h
@@ -33,7 +33,6 @@
   virtual ~QemuManager() = default;
 
   std::vector<cvd::Command> StartCommands(bool with_frontend) override;
-  bool Stop() override;
 };
 
 }  // namespace vm_manager
diff --git a/host/libs/vm_manager/vm_manager.h b/host/libs/vm_manager/vm_manager.h
index 083520d..79d5bbd 100644
--- a/host/libs/vm_manager/vm_manager.h
+++ b/host/libs/vm_manager/vm_manager.h
@@ -47,7 +47,6 @@
   // command_starter function allows to customize the way vmm commands are
   // started/tracked/etc.
   virtual std::vector<cvd::Command> StartCommands(bool with_frontend) = 0;
-  virtual bool Stop() = 0;
 
   virtual bool ValidateHostConfiguration(
       std::vector<std::string>* config_commands) const;