[ring-buffer] add ring buffer library to guest android-emu

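Adds a lockless single-producer / single-consumer ring buffer
(android/base/ring_buffer.{c,h}) that can be shared between host and
guest, plus a view variant for dynamically allocated backing buffers
and a simple producer/consumer hangup handshake.
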
Bug: 140112486

Change-Id: I8c724352652746bdd1198b64b70565f05b6d06ff
diff --git a/android-emu/Android.mk b/android-emu/Android.mk
index 9ef32cd..6de495e 100644
--- a/android-emu/Android.mk
+++ b/android-emu/Android.mk
@@ -16,6 +16,7 @@
     android/base/files/Stream.cpp \
     android/base/files/StreamSerializing.cpp \
     android/base/Pool.cpp \
+    android/base/ring_buffer.c \
     android/base/StringFormat.cpp \
     android/base/SubAllocator.cpp \
     android/base/synchronization/AndroidMessageChannel.cpp \
diff --git a/android-emu/CMakeLists.txt b/android-emu/CMakeLists.txt
index ffa1fd2..e6482d8 100644
--- a/android-emu/CMakeLists.txt
+++ b/android-emu/CMakeLists.txt
@@ -1,8 +1,8 @@
 # This is an autogenerated file! Do not edit!
 # instead run make from .../device/generic/goldfish-opengl
 # which will re-generate this file.
-android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/android-emu/Android.mk" "74426692eff285e93994e82bdc6a648312c8a6e7f0b2daf22fe255ac5bfd3b64")
-set(androidemu_src android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/StringFormat.cpp android/base/SubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
+android_validate_sha256("${GOLDFISH_DEVICE_ROOT}/android-emu/Android.mk" "3654f06acfa661c75b91fe271337a63a77326a69b4247026996c40bff8138bde")
+set(androidemu_src android/base/AlignedBuf.cpp android/base/files/MemStream.cpp android/base/files/Stream.cpp android/base/files/StreamSerializing.cpp android/base/Pool.cpp android/base/ring_buffer.c android/base/StringFormat.cpp android/base/SubAllocator.cpp android/base/synchronization/AndroidMessageChannel.cpp android/base/threads/AndroidFunctorThread.cpp android/base/threads/AndroidThreadStore.cpp android/base/threads/AndroidThread_pthread.cpp android/base/threads/AndroidWorkPool.cpp android/base/Tracing.cpp android/utils/debug.c)
 android_add_shared_library(androidemu)
 target_include_directories(androidemu PRIVATE ${GOLDFISH_DEVICE_ROOT}/android-emu ${GOLDFISH_DEVICE_ROOT}/./host/include/libOpenglRender ${GOLDFISH_DEVICE_ROOT}/./system/include ${GOLDFISH_DEVICE_ROOT}/./../../../external/qemu/android/android-emugl/guest)
 target_compile_definitions(androidemu PRIVATE "-DWITH_GLES2" "-DPLATFORM_SDK_VERSION=29" "-DGOLDFISH_HIDL_GRALLOC" "-DEMULATOR_OPENGL_POST_O=1" "-DHOST_BUILD" "-DANDROID" "-DGL_GLEXT_PROTOTYPES" "-DPAGE_SIZE=4096" "-DGOLDFISH_VULKAN" "-DLOG_TAG=\"androidemu\"")
diff --git a/android-emu/android/base/ring_buffer.c b/android-emu/android/base/ring_buffer.c
new file mode 100644
index 0000000..9fc8b01
--- /dev/null
+++ b/android-emu/android/base/ring_buffer.c
@@ -0,0 +1,505 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include "android/base/ring_buffer.h"
+
+#include <errno.h>
+#include <string.h>
+#ifdef _MSC_VER
+#include "msvc-posix.h"
+#else
+#include <sys/time.h>
+#endif
+
+// _mm_pause() is an x86 spin hint; guard it so non-x86 guest builds
+// (e.g. ARM) still compile.
+#if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)
+#include <emmintrin.h>
+#endif
+
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <sched.h>
+#include <unistd.h>
+#endif
+
+#define RING_BUFFER_MASK (RING_BUFFER_SIZE - 1)
+
+#define RING_BUFFER_VERSION 1
+
+void ring_buffer_init(struct ring_buffer* r) {
+    r->guest_version = RING_BUFFER_VERSION;
+    r->write_pos = 0;
+    r->read_pos = 0;
+
+    r->read_live_count = 0;
+    r->read_yield_count = 0;
+    r->read_sleep_us_count = 0;
+
+    r->state = 0;
+}
+
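+// Positions are free-running 32-bit counters; masking with
+// RING_BUFFER_MASK maps them into the buffer. With the default
+// RING_BUFFER_SHIFT of 11 (size 2048), get_ring_pos(2050) == 2, for
+// example. Unsigned wraparound keeps differences such as
+// write_pos - read_pos correct even after the counters overflow.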
+static uint32_t get_ring_pos(uint32_t index) {
+    return index & RING_BUFFER_MASK;
+}
+
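+// One byte of the ring is intentionally kept unused: if write_pos could
+// fully catch up to read_pos, a full ring and an empty ring would be
+// indistinguishable. Hence the "- 1" below.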
+bool ring_buffer_can_write(const struct ring_buffer* r, uint32_t bytes) {
+    uint32_t read_view;
+    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
+    return get_ring_pos(read_view - r->write_pos - 1) >= bytes;
+}
+
+bool ring_buffer_can_read(const struct ring_buffer* r, uint32_t bytes) {
+    uint32_t write_view;
+    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
+    return get_ring_pos(write_view - r->read_pos) >= bytes;
+}
+
+long ring_buffer_write(
+    struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps) {
+    const uint8_t* data_bytes = (const uint8_t*)data;
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_can_write(r, step_size)) {
+            errno = EAGAIN;
+            return (long)i;
+        }
+
+        // A write that straddles the end of the buffer is split in two.
+        uint32_t available_at_end =
+            RING_BUFFER_SIZE - get_ring_pos(r->write_pos);
+
+        if (step_size > available_at_end) {
+            uint32_t remaining = step_size - available_at_end;
+            memcpy(
+                &r->buf[get_ring_pos(r->write_pos)],
+                data_bytes + i * step_size,
+                available_at_end);
+            memcpy(
+                &r->buf[get_ring_pos(r->write_pos + available_at_end)],
+                data_bytes + i * step_size + available_at_end,
+                remaining);
+        } else {
+            memcpy(
+                &r->buf[get_ring_pos(r->write_pos)],
+                data_bytes + i * step_size,
+                step_size);
+        }
+
+        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
+long ring_buffer_read(
+    struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps) {
+    uint8_t* data_bytes = (uint8_t*)data;
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_can_read(r, step_size)) {
+            errno = EAGAIN;
+            return (long)i;
+        }
+
+        // A read that straddles the end of the buffer is split in two.
+        uint32_t available_at_end =
+            RING_BUFFER_SIZE - get_ring_pos(r->read_pos);
+
+        if (step_size > available_at_end) {
+            uint32_t remaining = step_size - available_at_end;
+            memcpy(
+                data_bytes + i * step_size,
+                &r->buf[get_ring_pos(r->read_pos)],
+                available_at_end);
+            memcpy(
+                data_bytes + i * step_size + available_at_end,
+                &r->buf[get_ring_pos(r->read_pos + available_at_end)],
+                remaining);
+        } else {
+            memcpy(
+                data_bytes + i * step_size,
+                &r->buf[get_ring_pos(r->read_pos)],
+                step_size);
+        }
+
+        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
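+// A couple of worked cases: ring_buffer_calc_shift(1024) returns 10,
+// and ring_buffer_calc_shift(1000) returns 9 (the usable size rounds
+// down to 512).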
+uint32_t ring_buffer_calc_shift(uint32_t size) {
+    uint32_t shift = 0;
+    while ((1 << shift) < size) {
+        ++shift;
+    }
+
+    // If size is not a power of 2, round down to the next-lower power of 2.
+    if ((1 << shift) > size) {
+        --shift;
+    }
+    return shift;
+}
+
+void ring_buffer_view_init(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    uint8_t* buf,
+    uint32_t size) {
+
+    uint32_t shift = ring_buffer_calc_shift(size);
+
+    ring_buffer_init(r);
+
+    v->buf = buf;
+    v->size = (1 << shift);
+    v->mask = (1 << shift) - 1;
+}
+
+void ring_buffer_init_view_only(
+    struct ring_buffer_view* v,
+    uint8_t* buf,
+    uint32_t size) {
+
+    uint32_t shift = ring_buffer_calc_shift(size);
+
+    v->buf = buf;
+    v->size = (1 << shift);
+    v->mask = (1 << shift) - 1;
+}
+
+static uint32_t ring_buffer_view_get_ring_pos(
+    const struct ring_buffer_view* v,
+    uint32_t index) {
+    return index & v->mask;
+}
+
+bool ring_buffer_view_can_write(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes) {
+    uint32_t read_view;
+    __atomic_load(&r->read_pos, &read_view, __ATOMIC_SEQ_CST);
+    return ring_buffer_view_get_ring_pos(
+            v, read_view - r->write_pos - 1) >= bytes;
+}
+
+bool ring_buffer_view_can_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes) {
+    uint32_t write_view;
+    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
+    return ring_buffer_view_get_ring_pos(
+            v, write_view - r->read_pos) >= bytes;
+}
+
+uint32_t ring_buffer_available_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v) {
+    uint32_t write_view;
+    __atomic_load(&r->write_pos, &write_view, __ATOMIC_SEQ_CST);
+    if (v) {
+        return ring_buffer_view_get_ring_pos(
+                v, write_view - r->read_pos);
+    } else {
+        return get_ring_pos(write_view - r->read_pos);
+    }
+}
+
+long ring_buffer_view_write(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data, uint32_t step_size, uint32_t steps) {
+
+    const uint8_t* data_bytes = (const uint8_t*)data;
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_view_can_write(r, v, step_size)) {
+            errno = EAGAIN;
+            return (long)i;
+        }
+
+        // A write that straddles the end of the buffer is split in two.
+        uint32_t available_at_end =
+            v->size - ring_buffer_view_get_ring_pos(v, r->write_pos);
+
+        if (step_size > available_at_end) {
+            uint32_t remaining = step_size - available_at_end;
+            memcpy(
+                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
+                data_bytes + i * step_size,
+                available_at_end);
+            memcpy(
+                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos + available_at_end)],
+                data_bytes + i * step_size + available_at_end,
+                remaining);
+        } else {
+            memcpy(
+                &v->buf[ring_buffer_view_get_ring_pos(v, r->write_pos)],
+                data_bytes + i * step_size,
+                step_size);
+        }
+
+        __atomic_add_fetch(&r->write_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
+long ring_buffer_view_read(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data, uint32_t step_size, uint32_t steps) {
+    uint8_t* data_bytes = (uint8_t*)data;
+    uint32_t i;
+
+    for (i = 0; i < steps; ++i) {
+        if (!ring_buffer_view_can_read(r, v, step_size)) {
+            errno = EAGAIN;
+            return (long)i;
+        }
+
+        // A read that straddles the end of the buffer is split in two.
+        uint32_t available_at_end =
+            v->size - ring_buffer_view_get_ring_pos(v, r->read_pos);
+
+        if (step_size > available_at_end) {
+            uint32_t remaining = step_size - available_at_end;
+            memcpy(
+                data_bytes + i * step_size,
+                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
+                available_at_end);
+            memcpy(
+                data_bytes + i * step_size + available_at_end,
+                &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos + available_at_end)],
+                remaining);
+        } else {
+            memcpy(data_bytes + i * step_size,
+                   &v->buf[ring_buffer_view_get_ring_pos(v, r->read_pos)],
+                   step_size);
+        }
+        __atomic_add_fetch(&r->read_pos, step_size, __ATOMIC_SEQ_CST);
+    }
+
+    errno = 0;
+    return (long)steps;
+}
+
+void ring_buffer_yield(void) {
+#ifdef _WIN32
+    if (!SwitchToThread()) {
+        Sleep(0);
+    }
+#else
+    sched_yield();
+#endif
+}
+
+static void ring_buffer_sleep(void) {
+#ifdef _WIN32
+    Sleep(2);
+#else
+    usleep(2000);
+#endif
+}
+
+static uint64_t ring_buffer_curr_us(void) {
+    uint64_t res;
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    res = tv.tv_sec * 1000000ULL + tv.tv_usec;
+    return res;
+}
+
+static const uint32_t yield_backoff_us = 1000;
+static const uint32_t sleep_backoff_us = 2000;
+
+bool ring_buffer_wait_write(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes,
+    uint64_t timeout_us) {
+
+    uint64_t start_us = ring_buffer_curr_us();
+
+    bool can_write =
+        v ? ring_buffer_view_can_write(r, v, bytes) :
+            ring_buffer_can_write(r, bytes);
+
+    while (!can_write) {
+        uint64_t waited_us = ring_buffer_curr_us() - start_us;
+        if (waited_us >= timeout_us) {
+            return false;
+        }
+        // Back off progressively: spin below |yield_backoff_us|, yield
+        // below |sleep_backoff_us|, then sleep.
+        if (waited_us > sleep_backoff_us) {
+            ring_buffer_sleep();
+        } else if (waited_us > yield_backoff_us) {
+            ring_buffer_yield();
+        }
+        can_write =
+            v ? ring_buffer_view_can_write(r, v, bytes) :
+                ring_buffer_can_write(r, bytes);
+    }
+
+    return true;
+}
+
+bool ring_buffer_wait_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes,
+    uint64_t timeout_us) {
+
+    uint64_t start_us = ring_buffer_curr_us();
+
+    bool can_read =
+        v ? ring_buffer_view_can_read(r, v, bytes) :
+            ring_buffer_can_read(r, bytes);
+
+    while (!can_read) {
+        uint64_t waited_us = ring_buffer_curr_us() - start_us;
+        if (waited_us >= timeout_us) {
+            return false;
+        }
+        // Same backoff ladder as ring_buffer_wait_write, plus stats.
+        if (waited_us > sleep_backoff_us) {
+            ring_buffer_sleep();
+            // ring_buffer_sleep() sleeps ~2ms.
+            ((struct ring_buffer*)r)->read_sleep_us_count += 2000;
+        } else if (waited_us > yield_backoff_us) {
+            ring_buffer_yield();
+            ((struct ring_buffer*)r)->read_yield_count++;
+        }
+        can_read =
+            v ? ring_buffer_view_can_read(r, v, bytes) :
+                ring_buffer_can_read(r, bytes);
+    }
+
+    ((struct ring_buffer*)r)->read_live_count++;
+    return true;
+}
+
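+// Cap each chunk at half the ring (or half the view). A chunk the size
+// of the whole ring would never fit (one byte is always kept free), and
+// half-sized chunks let the producer and consumer overlap.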
+static uint32_t get_step_size(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    uint32_t bytes) {
+    (void)r;
+
+    uint32_t available = v ? (v->size >> 1) : (RING_BUFFER_SIZE >> 1);
+
+    return available < bytes ? available : bytes;
+}
+
+void ring_buffer_write_fully(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data,
+    uint32_t bytes) {
+
+    uint32_t candidate_step = get_step_size(r, v, bytes);
+    uint32_t processed = 0;
+
+    const uint8_t* src = (const uint8_t*)data;
+
+    while (processed < bytes) {
+        if (bytes - processed < candidate_step) {
+            candidate_step = bytes - processed;
+        }
+
+        long processed_here = 0;
+        ring_buffer_wait_write(r, v, candidate_step, (uint64_t)(-1));
+
+        if (v) {
+            processed_here = ring_buffer_view_write(r, v, src + processed, candidate_step, 1);
+        } else {
+            processed_here = ring_buffer_write(r, src + processed, candidate_step, 1);
+        }
+
+        processed += processed_here ? candidate_step : 0;
+    }
+}
+
+void ring_buffer_read_fully(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data,
+    uint32_t bytes) {
+
+    uint32_t candidate_step = get_step_size(r, v, bytes);
+    uint32_t processed = 0;
+
+    uint8_t* dst = (uint8_t*)data;
+
+    while (processed < bytes) {
+#if defined(__x86_64__) || defined(__i386__) || defined(_M_X64) || defined(_M_IX86)
+        _mm_pause(); // spin hint; no-op on non-x86 guests
+#endif
+        if (bytes - processed < candidate_step) {
+            candidate_step = bytes - processed;
+        }
+
+        long processed_here = 0;
+        ring_buffer_wait_read(r, v, candidate_step, (uint64_t)(-1));
+
+        if (v) {
+            processed_here = ring_buffer_view_read(r, v, dst + processed, candidate_step, 1);
+        } else {
+            processed_here = ring_buffer_read(r, dst + processed, candidate_step, 1);
+        }
+
+        processed += processed_here ? candidate_step : 0;
+    }
+}
+
+void ring_buffer_sync_init(struct ring_buffer* r) {
+    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
+}
+
+bool ring_buffer_producer_acquire(struct ring_buffer* r) {
+    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
+    bool success = __atomic_compare_exchange_n(
+        &r->state,
+        &expected_idle,
+        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
+        false /* strong */,
+        __ATOMIC_SEQ_CST,
+        __ATOMIC_SEQ_CST);
+    return success;
+}
+
+bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r) {
+    uint32_t expected_hangup = RING_BUFFER_SYNC_CONSUMER_HUNG_UP;
+    bool success = __atomic_compare_exchange_n(
+        &r->state,
+        &expected_hangup,
+        RING_BUFFER_SYNC_PRODUCER_ACTIVE,
+        false /* strong */,
+        __ATOMIC_SEQ_CST,
+        __ATOMIC_SEQ_CST);
+    return success;
+}
+
+void ring_buffer_producer_wait_hangup(struct ring_buffer* r) {
+    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
+           RING_BUFFER_SYNC_CONSUMER_HUNG_UP) {
+        ring_buffer_yield();
+    }
+}
+
+void ring_buffer_producer_idle(struct ring_buffer* r) {
+    __atomic_store_n(&r->state, RING_BUFFER_SYNC_PRODUCER_IDLE, __ATOMIC_SEQ_CST);
+}
+
+bool ring_buffer_consumer_hangup(struct ring_buffer* r) {
+    uint32_t expected_idle = RING_BUFFER_SYNC_PRODUCER_IDLE;
+    bool success = __atomic_compare_exchange_n(
+        &r->state,
+        &expected_idle,
+        RING_BUFFER_SYNC_CONSUMER_HANGING_UP,
+        false /* strong */,
+        __ATOMIC_SEQ_CST,
+        __ATOMIC_SEQ_CST);
+    return success;
+}
+
+void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r) {
+    while (__atomic_load_n(&r->state, __ATOMIC_SEQ_CST) !=
+           RING_BUFFER_SYNC_PRODUCER_IDLE) {
+        ring_buffer_yield();
+    }
+}
+
+void ring_buffer_consumer_hung_up(struct ring_buffer* r) {
+    __atomic_store_n(&r->state, RING_BUFFER_SYNC_CONSUMER_HUNG_UP, __ATOMIC_SEQ_CST);
+}
diff --git a/android-emu/android/base/ring_buffer.h b/android-emu/android/base/ring_buffer.h
new file mode 100644
index 0000000..34c0666
--- /dev/null
+++ b/android-emu/android/base/ring_buffer.h
@@ -0,0 +1,185 @@
+// Copyright 2018 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#pragma once
+
+#include "android/utils/compiler.h"
+
+ANDROID_BEGIN_HEADER
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#define RING_BUFFER_SHIFT 11
+#define RING_BUFFER_SIZE (1 << RING_BUFFER_SHIFT)
+#define NUM_CONFIG_FIELDS 32
+
+// Single producer/consumer ring buffer struct that can be shared
+// between host and guest as-is.
+struct ring_buffer {
+    uint32_t host_version;
+    uint32_t guest_version;
+    uint32_t write_pos; // Atomically updated by the producer, read by the consumer
+    uint32_t unused0[13]; // Separate cache line
+    uint32_t read_pos; // Atomically updated by the consumer, read by the producer
+    uint32_t read_live_count;
+    uint32_t read_yield_count;
+    uint32_t read_sleep_us_count;
+    uint32_t unused1[12]; // Separate cache line
+    uint8_t buf[RING_BUFFER_SIZE];
+    uint32_t state; // An atomically updated variable from both
+                    // producer and consumer for other forms of
+                    // coordination.
+    // Configuration fields
+    uint32_t config[NUM_CONFIG_FIELDS];
+};
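+
+// Layout note (assuming 4-byte uint32_t and 64-byte cache lines): the
+// producer-owned fields through unused0 fill one 64-byte line, and the
+// consumer-owned fields through unused1 fill the next, so write_pos and
+// read_pos never share a cache line across cores.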
+
+void ring_buffer_init(struct ring_buffer* r);
+
+// Writes or reads |steps| chunks of |step_size| bytes each. Sets
+// errno = EAGAIN when the ring is full (write) or empty (read).
+// Returns the number of complete steps written or read.
+long ring_buffer_write(
+    struct ring_buffer* r, const void* data, uint32_t step_size, uint32_t steps);
+long ring_buffer_read(
+    struct ring_buffer* r, void* data, uint32_t step_size, uint32_t steps);
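+//
+// A minimal producer-side sketch (hypothetical values; real callers
+// pair this with ring_buffer_read on the consumer side):
+//   struct ring_buffer r;
+//   ring_buffer_init(&r);
+//   uint32_t item = 42;
+//   while (ring_buffer_write(&r, &item, sizeof(item), 1) < 1) {
+//       // ring full (errno == EAGAIN); retry, yield, or drop
+//   }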
+
+// To work with dynamically allocated buffers, a separate view struct is
+// needed: the host and guest live in different address spaces, so each
+// side keeps its own copy of this struct pointing at its own mapping of
+// the shared memory.
+struct ring_buffer_view {
+    uint8_t* buf;
+    uint32_t size;
+    uint32_t mask;
+};
+
+// Calculates the largest |shift| such that (1 << shift) <= size.
+uint32_t ring_buffer_calc_shift(uint32_t size);
+
+// Initializes ring buffer |r| with view |v| backed by |buf|. If |size| is
+// not a power of two, the view assumes the greatest power of two less
+// than |size|.
+void ring_buffer_view_init(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    uint8_t* buf,
+    uint32_t size);
+
+void ring_buffer_init_view_only(
+    struct ring_buffer_view* v,
+    uint8_t* buf,
+    uint32_t size);
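+
+// For example, a caller-owned buffer can back the view (a sketch; the
+// size is arbitrary and rounds down to a power of two if needed):
+//   static uint8_t storage[4096];
+//   struct ring_buffer r;
+//   struct ring_buffer_view v;
+//   ring_buffer_view_init(&r, &v, storage, sizeof(storage));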
+
+// Read/write functions with the view.
+long ring_buffer_view_write(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data, uint32_t step_size, uint32_t steps);
+long ring_buffer_view_read(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data, uint32_t step_size, uint32_t steps);
+
+// Usage of ring_buffer as a waitable object.
+// These functions will back off if spinning too long.
+//
+// If |v| is null, the statically allocated buffer inside |r| is used.
+//
+// Returns true if the ring buffer became available, false on timeout.
+bool ring_buffer_wait_write(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes,
+    uint64_t timeout_us);
+bool ring_buffer_wait_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes,
+    uint64_t timeout_us);
+
+// Read/write fully, blocking until all |bytes| have been transferred.
+void ring_buffer_write_fully(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    const void* data,
+    uint32_t bytes);
+void ring_buffer_read_fully(
+    struct ring_buffer* r,
+    struct ring_buffer_view* v,
+    void* data,
+    uint32_t bytes);
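+
+// Transfers are internally chunked to at most half the ring (see
+// get_step_size in ring_buffer.c), so |bytes| may exceed the ring size,
+// e.g. (sketch):
+//   uint8_t payload[4 * RING_BUFFER_SIZE];
+//   ring_buffer_write_fully(&r, NULL, payload, sizeof(payload));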
+
+bool ring_buffer_can_write(
+    const struct ring_buffer* r, uint32_t bytes);
+bool ring_buffer_can_read(
+    const struct ring_buffer* r, uint32_t bytes);
+bool ring_buffer_view_can_write(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes);
+bool ring_buffer_view_can_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v,
+    uint32_t bytes);
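+// Returns the number of bytes currently available to read.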
+uint32_t ring_buffer_available_read(
+    const struct ring_buffer* r,
+    const struct ring_buffer_view* v);
+
+// Lockless synchronization where the consumer is allowed to hang up and go to
+// sleep. This can be considered a sort of asymmetric lock for two threads,
+// where the consumer can be more sleepy. It captures the pattern we usually use
+// for emulator devices; the guest asks the host for something, and some host
+// thread services the request and goes back to sleep.
+enum ring_buffer_sync_state {
+    RING_BUFFER_SYNC_PRODUCER_IDLE = 0,
+    RING_BUFFER_SYNC_PRODUCER_ACTIVE = 1,
+    RING_BUFFER_SYNC_CONSUMER_HANGING_UP = 2,
+    RING_BUFFER_SYNC_CONSUMER_HUNG_UP = 3,
+};
+
+// Sync state is RING_BUFFER_SYNC_PRODUCER_IDLE.
+void ring_buffer_sync_init(struct ring_buffer* r);
+
+// Tries to acquire the channel for sending.
+// Returns false if the consumer was in the middle of hanging up,
+// true if the producer successfully acquired the channel
+// (put it in the RING_BUFFER_SYNC_PRODUCER_ACTIVE state).
+bool ring_buffer_producer_acquire(struct ring_buffer* r);
+// Same as above, but acquires from RING_BUFFER_SYNC_CONSUMER_HUNG_UP.
+bool ring_buffer_producer_acquire_from_hangup(struct ring_buffer* r);
+// Waits until the consumer hangs up.
+void ring_buffer_producer_wait_hangup(struct ring_buffer* r);
+// Sets the state back to RING_BUFFER_SYNC_PRODUCER_IDLE.
+void ring_buffer_producer_idle(struct ring_buffer* r);
+
+// There is no symmetric consumer acquire because the consumer can consume
+// with the ring buffer in any state (albeit with long waits if the
+// producer does not send anything).
+
+// Tries to acquire the channel on the consumer side for
+// hanging up. Returns false if the producer is in the middle of sending,
+// true if the consumer successfully hung up the channel
+// (put it in the RING_BUFFER_SYNC_CONSUMER_HUNG_UP state).
+bool ring_buffer_consumer_hangup(struct ring_buffer* r);
+// Waits until the producer has set the state to
+// RING_BUFFER_SYNC_PRODUCER_IDLE.
+void ring_buffer_consumer_wait_producer_idle(struct ring_buffer* r);
+// Sets the state to hung up.
+void ring_buffer_consumer_hung_up(struct ring_buffer* r);
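+
+// A sketch of one intended use of this handshake, given a
+// struct ring_buffer* r (other orderings are possible):
+//
+//   // Producer thread:
+//   if (ring_buffer_producer_acquire(r)) {
+//       ring_buffer_write_fully(r, NULL, data, bytes);
+//       ring_buffer_producer_idle(r);
+//   } else {
+//       // Consumer is hanging up or hung up; wait, then reacquire.
+//       ring_buffer_producer_wait_hangup(r);
+//       ring_buffer_producer_acquire_from_hangup(r);
+//   }
+//
+//   // Consumer thread, before going to sleep:
+//   if (ring_buffer_consumer_hangup(r)) {
+//       // ...drain any remaining data, then:
+//       ring_buffer_consumer_hung_up(r);
+//   } else {
+//       ring_buffer_consumer_wait_producer_idle(r);
+//   }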
+
+// Convenience function to yield the current thread.
+void ring_buffer_yield(void);
+ANDROID_END_HEADER