Sync up ring_buffer implementation

bug: 140112486

+ Add potentially missing errno.h include to qemu_pipe_host.cpp
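+ Add ring_buffer_advance_(read|write), ring_buffer_copy_contents,
  and ring_buffer_(read|write)_fully_with_abort
+ Add struct ring_buffer_with_view; export ring_buffer_view_get_ring_pos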

Change-Id: Ibd58696d0be9c9af319f52cb6ce5a50618b93832
diff --git a/android-emu/android/base/ring_buffer.c b/android-emu/android/base/ring_buffer.c
index ef26d1d..f6b3b78 100644
--- a/android-emu/android/base/ring_buffer.c
+++ b/android-emu/android/base/ring_buffer.c
@@ -156,6 +156,40 @@
     return (long)steps;
 }
 
+long ring_buffer_advance_write(
+    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_can_write(r, step_size)) {
+            errno = -EAGAIN;
+            return (long)i;
+        }
+
+        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
+long ring_buffer_advance_read(
+    struct ring_buffer* r, uint32_t step_size, uint32_t steps) {
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_can_read(r, step_size)) {
+            errno = -EAGAIN;
+            return (long)i;
+        }
+
+        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
 uint32_t ring_buffer_calc_shift(uint32_t size) {
     uint32_t shift = 0;
     while ((1 << shift) < size) {
@@ -196,7 +230,7 @@
     v->mask = (1 << shift) - 1;
 }
 
-static uint32_t ring_buffer_view_get_ring_pos(
+uint32_t ring_buffer_view_get_ring_pos(
     const struct ring_buffer_view* v,
     uint32_t index) {
     return index & v->mask;
@@ -235,6 +269,60 @@
     }
 }
 
+int ring_buffer_copy_contents(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t wanted_bytes,
+    uint8_t* res) {
+
+    uint32_t total_available =
+        ring_buffer_available_read(r, v);
+    uint32_t available_at_end = 0;
+
+    if (v) {
+        available_at_end =
+            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);
+    } else {
+        available_at_end =
+            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
+    }
+
+    if (total_available < wanted_bytes) {
+        return -1;
+    }
+
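+    // Copy in one or two chunks depending on whether the requested range
+    // wraps past the end of the backing buffer.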
+    if (v) {
+        if (wanted_bytes > available_at_end) {
+            uint32_t remaining = wanted_bytes - available_at_end;
+            memcpy(res,
+                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
+                   available_at_end);
+            memcpy(res + available_at_end,
+                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
+                   remaining);
+        } else {
+            memcpy(res,
+                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
+                   wanted_bytes);
+        }
+    } else {
+        if (wanted_bytes > available_at_end) {
+            uint32_t remaining = wanted_bytes - available_at_end;
+            memcpy(res,
+                   &r->buf[get_ring_pos(r->read_pos)],
+                   available_at_end);
+            memcpy(res + available_at_end,
+                   &r->buf[get_ring_pos(r->read_pos + available_at_end)],
+                   remaining);
+        } else {
+            memcpy(res,
+                   &r->buf[get_ring_pos(r->read_pos)],
+                   wanted_bytes);
+        }
+    }
+    return 0;
+}
+
 long ring_buffer_view_write(
     struct ring_buffer* r,
     struct ring_buffer_view* v,
@@ -319,9 +407,7 @@
 
 void ring_buffer_yield() {
 #ifdef _WIN32
-    if (!SwitchToThread()) {
-        Sleep(0);
-    }
+    ring_buffer_pause();
 #else
     sched_yield();
 #endif
@@ -352,6 +438,9 @@
     uint32_t bytes,
     uint64_t timeout_us) {
 
+    uint64_t start_us = ring_buffer_curr_us();
+    uint64_t curr_wait_us;
+
     bool can_write =
         v ? ring_buffer_view_can_write(r, v, bytes) :
             ring_buffer_can_write(r, bytes);
@@ -404,6 +493,24 @@
     struct ring_buffer_view* v,
     const void* data,
     uint32_t bytes) {
+    ring_buffer_write_fully_with_abort(r, v, data, bytes, 0, 0);
+}
+
+void ring_buffer_read_fully(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data,
+    uint32_t bytes) {
+    ring_buffer_read_fully_with_abort(r, v, data, bytes, 0, 0);
+}
+
+uint32_t ring_buffer_write_fully_with_abort(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data,
+    uint32_t bytes,
+    uint32_t abort_value,
+    const volatile uint32_t* abort_ptr) {
 
     uint32_t candidate_step = get_step_size(r, v, bytes);
     uint32_t processed = 0;
@@ -425,14 +532,22 @@
         }
 
         processed += processed_here ? candidate_step : 0;
+
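+        // Stop early if the caller-supplied abort flag matches abort_value.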
+        if (abort_ptr && (abort_value == *abort_ptr)) {
+            return processed;
+        }
     }
+
+    return processed;
 }
 
-void ring_buffer_read_fully(
+uint32_t ring_buffer_read_fully_with_abort(
     struct ring_buffer* r,
     struct ring_buffer_view* v,
     void* data,
-    uint32_t bytes) {
+    uint32_t bytes,
+    uint32_t abort_value,
+    const volatile uint32_t* abort_ptr) {
 
     uint32_t candidate_step = get_step_size(r, v, bytes);
     uint32_t processed = 0;
@@ -455,7 +570,13 @@
         }
 
         processed += processed_here ? candidate_step : 0;
+
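+        // Stop early if the caller-supplied abort flag matches abort_value.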
+        if (abort_ptr && (abort_value == *abort_ptr)) {
+            return processed;
+        }
     }
+
+    return processed;
 }
 
 void ring_buffer_sync_init(struct ring_buffer* r) {
diff --git a/android-emu/android/base/ring_buffer.h b/android-emu/android/base/ring_buffer.h
index 34c0666..390a758 100644
--- a/android-emu/android/base/ring_buffer.h
+++ b/android-emu/android/base/ring_buffer.h
@@ -52,6 +52,13 @@
     struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps);
 long ring_buffer_read(
     struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps);
+// Like ring_buffer_write / ring_buffer_read, but only advances the write/read
+// position without copying any data. Returns the number of step_size steps
+// advanced.
+long ring_buffer_advance_write(
+    struct ring_buffer* r, uint32_t step_size, uint32_t steps);
+long ring_buffer_advance_read(
+    struct ring_buffer* r, uint32_t step_size, uint32_t steps);
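+//
+// Example (illustrative sketch; |r| is assumed to point to an
+// already-initialized ring_buffer and |bytes| is a caller-chosen size):
+//
+//   // Discard |bytes| bytes the consumer no longer needs, without copying:
+//   if (ring_buffer_can_read(r, bytes)) {
+//       ring_buffer_advance_read(r, bytes, 1 /* one step of |bytes| bytes */);
+//   }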
 
 // If we want to work with dynamically allocated buffers, a separate struct is
 // needed; the host and guest are in different address spaces and thus have
@@ -63,6 +70,13 @@
     uint32_t mask;
 };
 
+// Convenience struct that holds a pointer to a ring along with a view.  It's a
+// common pattern for the ring and the buffer of the view to be shared between
+// two entities (in this case, usually guest and host).
+struct ring_buffer_with_view {
+    struct ring_buffer* ring;
+    struct ring_buffer_view view;
+};
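+
+// Example use of ring_buffer_with_view (illustrative sketch; |shared_ring|,
+// |shared_view|, |dst|, and |bytes| stand in for objects the caller already
+// has, with the ring and the view's buffer living in shared memory):
+//
+//   struct ring_buffer_with_view rv;
+//   rv.ring = shared_ring;
+//   rv.view = shared_view;
+//   ring_buffer_read_fully(rv.ring, &rv.view, dst, bytes);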
 
 // Calculates the highest power of 2 so that
 // (1 << shift) <= size.
@@ -122,6 +136,29 @@
     void* data,
     uint32_t bytes);
 
+// Like ring_buffer_(read|write)_fully, but with an abort mechanism: each time
+// a chunk is processed, the value at |abort_ptr| is compared against
+// |abort_value|, and the transfer stops early on a match. If |abort_ptr| is
+// null, these behave the same as ring_buffer_(read|write)_fully.
+// Returns the number of bytes actually sent or received.
+uint32_t ring_buffer_write_fully_with_abort(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data,
+    uint32_t bytes,
+    uint32_t abort_value,
+    const volatile uint32_t* abort_ptr);
+uint32_t ring_buffer_read_fully_with_abort(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data,
+    uint32_t bytes,
+    uint32_t abort_value,
+    const volatile uint32_t* abort_ptr);
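+//
+// Example (illustrative sketch; |cancel_flag| is a caller-owned flag that
+// another thread sets to 1 to cancel the transfer):
+//
+//   volatile uint32_t cancel_flag = 0;
+//   uint32_t sent = ring_buffer_write_fully_with_abort(
+//       r, v, data, bytes, 1 /* abort_value */, &cancel_flag);
+//   // sent < bytes means the transfer was aborted partway through.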
+
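+// Maps a monotonically increasing position (e.g., r->read_pos or
+// r->write_pos) to an offset within the view's backing buffer.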
+uint32_t ring_buffer_view_get_ring_pos(
+    const struct ring_buffer_view* v,
+    uint32_t index);
+
 bool ring_buffer_can_write(
     const struct ring_buffer* r, uint32_t bytes);
 bool ring_buffer_can_read(
@@ -137,6 +174,16 @@
 uint32_t ring_buffer_available_read(
     const struct ring_buffer* r,
     const struct ring_buffer_view* v);
+// Copies out contents from the consumer (read) side of
+// ring buffer/view |r,v| without advancing the read position.
+// If fewer than |wanted_bytes| bytes are available to read,
+// returns -1.
+// On success, copies |wanted_bytes| bytes into |res| and returns 0.
+int ring_buffer_copy_contents(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t wanted_bytes,
+    uint8_t* res);
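+//
+// Example (illustrative sketch; |scratch| is a caller-owned buffer of at
+// least |n| bytes):
+//
+//   if (ring_buffer_copy_contents(r, v, n, scratch) == 0) {
+//       // |scratch| now holds the next |n| readable bytes; the read position
+//       // is unchanged, so a subsequent read returns the same data.
+//   }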
 
 // Lockless synchronization where the consumer is allowed to hang up and go to
 // sleep. This can be considered a sort of asymmetric lock for two threads,
diff --git a/shared/OpenglCodecCommon/qemu_pipe_host.cpp b/shared/OpenglCodecCommon/qemu_pipe_host.cpp
index 1249eb1..c53a8eb 100644
--- a/shared/OpenglCodecCommon/qemu_pipe_host.cpp
+++ b/shared/OpenglCodecCommon/qemu_pipe_host.cpp
@@ -21,6 +21,8 @@
 #include <log/log.h>
 #endif
 
+#include <errno.h>
+
 using android::HostGoldfishPipeDevice;
 
 QEMU_PIPE_HANDLE qemu_pipe_open(const char* pipeName) {