RPC binder: set TCP_NODELAY

By default, the system buffers outgoing TCP data (Nagle's algorithm) to
pack more data into each segment before putting it on the wire, which
adds latency to RPC communication.
We already set TCP_NODELAY in RpcSession, but not in RpcServer.
This CL sets TCP_NODELAY in RpcServer as well.
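
For reference, a minimal sketch of the general pattern for disabling
Nagle's algorithm on a TCP socket (the disableNagle helper name is
illustrative only; the actual change calls setsockopt directly in
RpcServer, as shown in the diff below):

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    // Sketch only: disable Nagle's algorithm so small writes are sent
    // immediately instead of being coalesced into larger segments.
    static bool disableNagle(int fd) {
        int noDelay = 1;
        return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &noDelay,
                          sizeof(noDelay)) == 0;
    }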

Bug: 304823925
Test: manual
Change-Id: I8bfda370a78eff9b8e0ab75e6bc2fd0a5ba743ed
diff --git a/libs/binder/RpcServer.cpp b/libs/binder/RpcServer.cpp
index 55fc16d..03dad2b 100644
--- a/libs/binder/RpcServer.cpp
+++ b/libs/binder/RpcServer.cpp
@@ -17,6 +17,7 @@
 #define LOG_TAG "RpcServer"
 
 #include <inttypes.h>
+#include <netinet/tcp.h>
 #include <poll.h>
 #include <sys/socket.h>
 #include <sys/un.h>
@@ -572,6 +573,17 @@
         return -savedErrno;
     }
 
+    if (addr.addr()->sa_family == AF_INET || addr.addr()->sa_family == AF_INET6) {
+        int noDelay = 1;
+        int result =
+                setsockopt(socket_fd.get(), IPPROTO_TCP, TCP_NODELAY, &noDelay, sizeof(noDelay));
+        if (result < 0) {
+            int savedErrno = errno;
+            ALOGE("Could not set TCP_NODELAY on socket: %s", strerror(savedErrno));
+            return -savedErrno;
+        }
+    }
+
     {
         RpcMutexLockGuard _l(mLock);
         if (mServerSocketModifier != nullptr) {