Merge "Revert "Revert "goldfish: add script to create partitioned image"""
am: 32082aa62a

Change-Id: I2330987e0518863478e2f8d64a5cbf11eb099df8
diff --git a/audio/Android.mk b/audio/Android.mk
index d9d2f13..4343a4a 100644
--- a/audio/Android.mk
+++ b/audio/Android.mk
@@ -25,7 +25,30 @@
 
 LOCAL_SRC_FILES := audio_hw.c
 
-LOCAL_SHARED_LIBRARIES += libdl
+LOCAL_C_INCLUDES += \
+			external/tinyalsa/include \
+
+LOCAL_SHARED_LIBRARIES += \
+			libdl \
+			libtinyalsa
+
+LOCAL_CFLAGS := -Wno-unused-parameter
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := audio.primary.goldfish_legacy
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SHARED_LIBRARIES := libcutils liblog
+
+LOCAL_SRC_FILES := audio_hw_legacy.c
+
+LOCAL_SHARED_LIBRARIES += \
+			libdl
+
 LOCAL_CFLAGS := -Wno-unused-parameter
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/audio/audio_hw.c b/audio/audio_hw.c
index cab9aad..2498e51 100644
--- a/audio/audio_hw.c
+++ b/audio/audio_hw.c
@@ -15,13 +15,13 @@
  */
 
 #define LOG_TAG "audio_hw_generic"
-/*#define LOG_NDEBUG 0*/
 
 #include <errno.h>
 #include <pthread.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <sys/time.h>
+#include <dlfcn.h>
 #include <fcntl.h>
 
 #include <cutils/log.h>
@@ -30,42 +30,223 @@
 #include <hardware/hardware.h>
 #include <system/audio.h>
 #include <hardware/audio.h>
+#include <tinyalsa/asoundlib.h>
+
+#define PCM_CARD 0
+#define PCM_DEVICE 0
 
 
-#define AUDIO_DEVICE_NAME "/dev/eac"
-#define OUT_SAMPLING_RATE 44100
-#define OUT_BUFFER_SIZE 4096
-#define OUT_LATENCY_MS 20
-#define IN_SAMPLING_RATE 8000
-#define IN_BUFFER_SIZE 320
+#define OUT_PERIOD_MS 15
+#define OUT_PERIOD_COUNT 4
 
+#define IN_PERIOD_MS 15
+#define IN_PERIOD_COUNT 4
 
 struct generic_audio_device {
-    struct audio_hw_device device;
+    struct audio_hw_device device; // Constant after init
     pthread_mutex_t lock;
-    struct audio_stream_out *output;
-    struct audio_stream_in *input;
-    int fd;
-    bool mic_mute;
+    bool mic_mute;                 // Protected by this->lock
+    struct mixer* mixer;           // Protected by this->lock
 };
 
+/* If not NULL, this is a pointer to the fallback module.
+ * This really is the original goldfish audio device /dev/eac which we will use
+ * if no alsa devices are detected.
+ */
+static struct audio_module*  sFallback;
+static pthread_once_t sFallbackOnce = PTHREAD_ONCE_INIT;
+static void fallback_init(void);
+static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state);
+
+typedef struct audio_vbuffer {
+    pthread_mutex_t lock;
+    uint8_t *  data;
+    size_t     frame_size;
+    size_t     frame_count;
+    size_t     head;
+    size_t     tail;
+    size_t     live;
+} audio_vbuffer_t;
+
+static int audio_vbuffer_init (audio_vbuffer_t * audio_vbuffer, size_t frame_count,
+                              size_t frame_size) {
+    if (!audio_vbuffer) {
+        return -EINVAL;
+    }
+    audio_vbuffer->frame_size = frame_size;
+    audio_vbuffer->frame_count = frame_count;
+    size_t bytes = frame_count * frame_size;
+    audio_vbuffer->data = calloc(bytes, 1);
+    if (!audio_vbuffer->data) {
+        return -ENOMEM;
+    }
+    audio_vbuffer->head = 0;
+    audio_vbuffer->tail = 0;
+    audio_vbuffer->live = 0;
+    pthread_mutex_init (&audio_vbuffer->lock, (const pthread_mutexattr_t *) NULL);
+    return 0;
+}
+
+static int audio_vbuffer_destroy (audio_vbuffer_t * audio_vbuffer) {
+    if (!audio_vbuffer) {
+        return -EINVAL;
+    }
+    free(audio_vbuffer->data);
+    pthread_mutex_destroy(&audio_vbuffer->lock);
+    return 0;
+}
+
+static int audio_vbuffer_live (audio_vbuffer_t * audio_vbuffer) {
+    if (!audio_vbuffer) {
+        return -EINVAL;
+    }
+    pthread_mutex_lock (&audio_vbuffer->lock);
+    int live = audio_vbuffer->live;
+    pthread_mutex_unlock (&audio_vbuffer->lock);
+    return live;
+}
+
+static int audio_vbuffer_dead (audio_vbuffer_t * audio_vbuffer) {
+    if (!audio_vbuffer) {
+        return -EINVAL;
+    }
+    pthread_mutex_lock (&audio_vbuffer->lock);
+    int dead = audio_vbuffer->frame_count - audio_vbuffer->live;
+    pthread_mutex_unlock (&audio_vbuffer->lock);
+    return dead;
+}
+
+#define MIN(a,b) (((a)<(b))?(a):(b))
+static size_t audio_vbuffer_write (audio_vbuffer_t * audio_vbuffer, const void * buffer, size_t frame_count) {
+    size_t frames_written = 0;
+    pthread_mutex_lock (&audio_vbuffer->lock);
+
+    while (frame_count != 0) {
+        int frames = 0;
+        if (audio_vbuffer->live == 0 || audio_vbuffer->head > audio_vbuffer->tail) {
+            frames = MIN(frame_count, audio_vbuffer->frame_count - audio_vbuffer->head);
+        } else if (audio_vbuffer->head < audio_vbuffer->tail) {
+            frames = MIN(frame_count, audio_vbuffer->tail - (audio_vbuffer->head));
+        } else {
+            // Full
+            break;
+        }
+        memcpy(&audio_vbuffer->data[audio_vbuffer->head*audio_vbuffer->frame_size],
+               &((uint8_t*)buffer)[frames_written*audio_vbuffer->frame_size],
+               frames*audio_vbuffer->frame_size);
+        audio_vbuffer->live += frames;
+        frames_written += frames;
+        frame_count -= frames;
+        audio_vbuffer->head = (audio_vbuffer->head + frames) % audio_vbuffer->frame_count;
+    }
+
+    pthread_mutex_unlock (&audio_vbuffer->lock);
+    return frames_written;
+}
+
+static size_t audio_vbuffer_read (audio_vbuffer_t * audio_vbuffer, void * buffer, size_t frame_count) {
+    size_t frames_read = 0;
+    pthread_mutex_lock (&audio_vbuffer->lock);
+
+    while (frame_count != 0) {
+        int frames = 0;
+        if (audio_vbuffer->live == audio_vbuffer->frame_count ||
+            audio_vbuffer->tail > audio_vbuffer->head) {
+            frames = MIN(frame_count, audio_vbuffer->frame_count - audio_vbuffer->tail);
+        } else if (audio_vbuffer->tail < audio_vbuffer->head) {
+            frames = MIN(frame_count, audio_vbuffer->head - audio_vbuffer->tail);
+        } else {
+            break;
+        }
+        memcpy(&((uint8_t*)buffer)[frames_read*audio_vbuffer->frame_size],
+               &audio_vbuffer->data[audio_vbuffer->tail*audio_vbuffer->frame_size],
+               frames*audio_vbuffer->frame_size);
+        audio_vbuffer->live -= frames;
+        frames_read += frames;
+        frame_count -= frames;
+        audio_vbuffer->tail = (audio_vbuffer->tail + frames) % audio_vbuffer->frame_count;
+    }
+
+    pthread_mutex_unlock (&audio_vbuffer->lock);
+    return frames_read;
+}
 
 struct generic_stream_out {
-    struct audio_stream_out stream;
-    struct generic_audio_device *dev;
-    audio_devices_t device;
+    struct audio_stream_out stream;   // Constant after init
+    pthread_mutex_t lock;
+    struct generic_audio_device *dev; // Constant after init
+    audio_devices_t device;           // Protected by this->lock
+    struct audio_config req_config;   // Constant after init
+    struct pcm_config pcm_config;     // Constant after init
+    audio_vbuffer_t buffer;           // Constant after init
+
+    // Time & Position Keeping
+    bool standby;                      // Protected by this->lock
+    uint64_t underrun_position;        // Protected by this->lock
+    struct timespec underrun_time;     // Protected by this->lock
+    uint64_t last_write_time_us;       // Protected by this->lock
+    uint64_t frames_total_buffered;    // Protected by this->lock
+    uint64_t frames_written;           // Protected by this->lock
+    uint64_t frames_rendered;          // Protected by this->lock
+
+    // Worker
+    pthread_t worker_thread;          // Constant after init
+    pthread_cond_t worker_wake;       // Protected by this->lock
+    bool worker_standby;              // Protected by this->lock
+    bool worker_exit;                 // Protected by this->lock
 };
 
 struct generic_stream_in {
-    struct audio_stream_in stream;
-    struct generic_audio_device *dev;
-    audio_devices_t device;
+    struct audio_stream_in stream;    // Constant after init
+    pthread_mutex_t lock;
+    struct generic_audio_device *dev; // Constant after init
+    audio_devices_t device;           // Protected by this->lock
+    struct audio_config req_config;   // Constant after init
+    struct pcm *pcm;                  // Protected by this->lock
+    struct pcm_config pcm_config;     // Constant after init
+    int16_t *stereo_to_mono_buf;      // Protected by this->lock
+    size_t stereo_to_mono_buf_size;   // Protected by this->lock
+    audio_vbuffer_t buffer;           // Protected by this->lock
+
+    // Time & Position Keeping
+    bool standby;                     // Protected by this->lock
+    int64_t standby_position;         // Protected by this->lock
+    struct timespec standby_exit_time;// Protected by this->lock
+    int64_t standby_frames_read;      // Protected by this->lock
+
+    // Worker
+    pthread_t worker_thread;          // Constant after init
+    pthread_cond_t worker_wake;       // Protected by this->lock
+    bool worker_standby;              // Protected by this->lock
+    bool worker_exit;                 // Protected by this->lock
 };
 
+static struct pcm_config pcm_config_out = {
+    .channels = 2,
+    .rate = 0,
+    .period_size = 0,
+    .period_count = OUT_PERIOD_COUNT,
+    .format = PCM_FORMAT_S16_LE,
+    .start_threshold = 0,
+};
+
+static struct pcm_config pcm_config_in = {
+    .channels = 2,
+    .rate = 0,
+    .period_size = 0,
+    .period_count = IN_PERIOD_COUNT,
+    .format = PCM_FORMAT_S16_LE,
+    .start_threshold = 0,
+    .stop_threshold = INT_MAX,
+};
+
+static pthread_mutex_t adev_init_lock = PTHREAD_MUTEX_INITIALIZER;
+static unsigned int audio_device_ref_count = 0;
 
 static uint32_t out_get_sample_rate(const struct audio_stream *stream)
 {
-    return OUT_SAMPLING_RATE;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    return out->req_config.sample_rate;
 }
 
 static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
@@ -75,17 +256,23 @@
 
 static size_t out_get_buffer_size(const struct audio_stream *stream)
 {
-    return OUT_BUFFER_SIZE;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    int size = out->pcm_config.period_size *
+                audio_stream_out_frame_size(&out->stream);
+
+    return size;
 }
 
 static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
 {
-    return AUDIO_CHANNEL_OUT_STEREO;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    return out->req_config.channel_mask;
 }
 
 static audio_format_t out_get_format(const struct audio_stream *stream)
 {
-    return AUDIO_FORMAT_PCM_16_BIT;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    return out->req_config.format;
 }
 
 static int out_set_format(struct audio_stream *stream, audio_format_t format)
@@ -93,16 +280,10 @@
     return -ENOSYS;
 }
 
-static int out_standby(struct audio_stream *stream)
-{
-    // out_standby is a no op
-    return 0;
-}
-
 static int out_dump(const struct audio_stream *stream, int fd)
 {
     struct generic_stream_out *out = (struct generic_stream_out *)stream;
-
+    pthread_mutex_lock(&out->lock);
     dprintf(fd, "\tout_dump:\n"
                 "\t\tsample rate: %u\n"
                 "\t\tbuffer size: %u\n"
@@ -116,7 +297,7 @@
                 out_get_format(stream),
                 out->device,
                 out->dev);
-
+    pthread_mutex_unlock(&out->lock);
     return 0;
 }
 
@@ -129,21 +310,27 @@
     long val;
     char *end;
 
-    parms = str_parms_create_str(kvpairs);
-
-    ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
-                            value, sizeof(value));
-    if (ret >= 0) {
-        errno = 0;
-        val = strtol(value, &end, 10);
-        if (errno == 0 && (end != NULL) && (*end == '\0') && ((int)val == val)) {
-            out->device = (int)val;
-        } else {
-            ret = -EINVAL;
+    pthread_mutex_lock(&out->lock);
+    if (!out->standby) {
+        //Do not support changing params while stream running
+        ret = -ENOSYS;
+    } else {
+        parms = str_parms_create_str(kvpairs);
+        ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+                                value, sizeof(value));
+        if (ret >= 0) {
+            errno = 0;
+            val = strtol(value, &end, 10);
+            if (errno == 0 && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+                out->device = (int)val;
+                ret = 0;
+            } else {
+                ret = -EINVAL;
+            }
         }
+        str_parms_destroy(parms);
     }
-
-    str_parms_destroy(parms);
+    pthread_mutex_unlock(&out->lock);
     return ret;
 }
 
@@ -158,7 +345,9 @@
 
     ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
     if (ret >= 0) {
+        pthread_mutex_lock(&out->lock);
         str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, out->device);
+        pthread_mutex_unlock(&out->lock);
         str = strdup(str_parms_to_str(reply));
     } else {
         str = strdup(keys);
@@ -171,7 +360,8 @@
 
 static uint32_t out_get_latency(const struct audio_stream_out *stream)
 {
-    return OUT_LATENCY_MS;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    return (out->pcm_config.period_size * 1000) / out->pcm_config.rate;
 }
 
 static int out_set_volume(struct audio_stream_out *stream, float left,
@@ -180,24 +370,254 @@
     return -ENOSYS;
 }
 
-static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
+static void *out_write_worker(void * args)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)args;
+    struct pcm *pcm = NULL;
+    uint8_t *buffer = NULL;
+    int buffer_frames;
+    int buffer_size;
+    bool restart = false;
+    bool shutdown = false;
+    while (true) {
+        pthread_mutex_lock(&out->lock);
+        while (out->worker_standby || restart) {
+            restart = false;
+            if (pcm) {
+                pcm_close(pcm); // Frees pcm
+                pcm = NULL;
+                free(buffer);
+                buffer=NULL;
+            }
+            if (out->worker_exit) {
+                break;
+            }
+            pthread_cond_wait(&out->worker_wake, &out->lock);
+        }
+
+        if (out->worker_exit) {
+            if (!out->worker_standby) {
+                ALOGE("Out worker not in standby before exiting");
+            }
+            shutdown = true;
+        }
+
+        while (!shutdown && audio_vbuffer_live(&out->buffer) == 0) {
+            pthread_cond_wait(&out->worker_wake, &out->lock);
+        }
+
+        if (shutdown) {
+            pthread_mutex_unlock(&out->lock);
+            break;
+        }
+
+        if (!pcm) {
+            pcm = pcm_open(PCM_CARD, PCM_DEVICE,
+                          PCM_OUT | PCM_MONOTONIC, &out->pcm_config);
+            if (!pcm_is_ready(pcm)) {
+                ALOGE("pcm_open(out) failed: %s: channels %d format %d rate %d",
+                  pcm_get_error(pcm),
+                  out->pcm_config.channels,
+                  out->pcm_config.format,
+                  out->pcm_config.rate
+                   );
+                pthread_mutex_unlock(&out->lock);
+                break;
+            }
+            buffer_frames = out->pcm_config.period_size;
+            buffer_size = pcm_frames_to_bytes(pcm, buffer_frames);
+            buffer = malloc(buffer_size);
+            if (!buffer) {
+                ALOGE("could not allocate write buffer");
+                pthread_mutex_unlock(&out->lock);
+                break;
+            }
+        }
+        int frames = audio_vbuffer_read(&out->buffer, buffer, buffer_frames);
+        pthread_mutex_unlock(&out->lock);
+        int ret = pcm_write(pcm, buffer, pcm_frames_to_bytes(pcm, frames));
+        if (ret != 0) {
+            ALOGE("pcm_write failed %s", pcm_get_error(pcm));
+            restart = true;
+        }
+    }
+    if (buffer) {
+        free(buffer);
+    }
+
+    return NULL;
+}
+
+// Call with in->lock held
+static void get_current_output_position(struct generic_stream_out *out,
+                                       uint64_t * position,
+                                       struct timespec * timestamp) {
+    struct timespec curtime = { .tv_sec = 0, .tv_nsec = 0 };
+    clock_gettime(CLOCK_MONOTONIC, &curtime);
+    const int64_t now_us = (curtime.tv_sec * 1000000000LL + curtime.tv_nsec) / 1000;
+    if (timestamp) {
+        *timestamp = curtime;
+    }
+    int64_t position_since_underrun;
+    if (out->standby) {
+        position_since_underrun = 0;
+    } else {
+        const int64_t first_us = (out->underrun_time.tv_sec * 1000000000LL +
+                                  out->underrun_time.tv_nsec) / 1000;
+        position_since_underrun = (now_us - first_us) *
+                out_get_sample_rate(&out->stream.common) /
+                1000000;
+        if (position_since_underrun < 0) {
+            position_since_underrun = 0;
+        }
+    }
+    *position = out->underrun_position + position_since_underrun;
+
+    // The device will reuse the same output stream leading to periods of
+    // underrun.
+    if (*position > out->frames_written) {
+        ALOGW("Not supplying enough data to HAL, expected position %lld , only wrote %lld",
+              *position, out->frames_written);
+
+        *position = out->frames_written;
+        out->underrun_position = *position;
+        out->underrun_time = curtime;
+        out->frames_total_buffered = 0;
+    }
+}
+
+
+static ssize_t out_write(struct audio_stream_out *stream, const void *buffer,
                          size_t bytes)
 {
     struct generic_stream_out *out = (struct generic_stream_out *)stream;
-    struct generic_audio_device *adev = out->dev;
+    const size_t frames =  bytes / audio_stream_out_frame_size(stream);
 
-    pthread_mutex_lock(&adev->lock);
-    if (adev->fd >= 0)
-        bytes = write(adev->fd, buffer, bytes);
-    pthread_mutex_unlock(&adev->lock);
+    pthread_mutex_lock(&out->lock);
 
+    if (out->worker_standby) {
+        out->worker_standby = false;
+    }
+
+    uint64_t current_position;
+    struct timespec current_time;
+
+    get_current_output_position(out, &current_position, &current_time);
+    const uint64_t now_us = (current_time.tv_sec * 1000000000LL +
+                             current_time.tv_nsec) / 1000;
+    if (out->standby) {
+        out->standby = false;
+        out->underrun_time = current_time;
+        out->frames_rendered = 0;
+        out->frames_total_buffered = 0;
+    }
+
+    size_t frames_written = audio_vbuffer_write(&out->buffer, buffer, frames);
+    pthread_cond_signal(&out->worker_wake);
+
+    /* Implementation just consumes bytes if we start getting backed up */
+    out->frames_written += frames;
+    out->frames_rendered += frames;
+    out->frames_total_buffered += frames;
+
+    // We simulate the audio device blocking when its write buffers become
+    // full.
+
+    // At the beginning or after an underrun, try to fill up the vbuffer.
+    // This will be throttled by the PlaybackThread
+    int frames_sleep = out->frames_total_buffered < out->buffer.frame_count ? 0 : frames;
+
+    uint64_t sleep_time_us = frames_sleep * 1000000LL /
+                            out_get_sample_rate(&stream->common);
+
+    // If the write calls are delayed, subtract time off of the sleep to
+    // compensate
+    uint64_t time_since_last_write_us = now_us - out->last_write_time_us;
+    if (time_since_last_write_us < sleep_time_us) {
+        sleep_time_us -= time_since_last_write_us;
+    } else {
+        sleep_time_us = 0;
+    }
+    out->last_write_time_us = now_us + sleep_time_us;
+
+    pthread_mutex_unlock(&out->lock);
+
+    if (sleep_time_us > 0) {
+        usleep(sleep_time_us);
+    }
+
+    if (frames_written < frames) {
+        ALOGW("Hardware backing HAL too slow, could only write %d of %zu frames", frames_written, frames);
+    }
+
+    /* Always consume all bytes */
     return bytes;
 }
 
+static int out_get_presentation_position(const struct audio_stream_out *stream,
+                                   uint64_t *frames, struct timespec *timestamp)
+
+{
+    int ret = -EINVAL;
+    if (stream == NULL || frames == NULL || timestamp == NULL) {
+        return -EINVAL;
+    }
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+
+    pthread_mutex_lock(&out->lock);
+    get_current_output_position(out, frames, timestamp);
+    pthread_mutex_unlock(&out->lock);
+
+    return 0;
+}
+
 static int out_get_render_position(const struct audio_stream_out *stream,
                                    uint32_t *dsp_frames)
 {
-    return -ENOSYS;
+    if (stream == NULL || dsp_frames == NULL) {
+        return -EINVAL;
+    }
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    pthread_mutex_lock(&out->lock);
+    *dsp_frames = out->frames_rendered;
+    pthread_mutex_unlock(&out->lock);
+    return 0;
+}
+
+// Must be called with out->lock held
+static void do_out_standby(struct generic_stream_out *out)
+{
+    int frames_sleep = 0;
+    uint64_t sleep_time_us = 0;
+    if (out->standby) {
+        return;
+    }
+    while (true) {
+        get_current_output_position(out, &out->underrun_position, NULL);
+        frames_sleep = out->frames_written - out->underrun_position;
+
+        if (frames_sleep == 0) {
+            break;
+        }
+
+        sleep_time_us = frames_sleep * 1000000LL /
+                        out_get_sample_rate(&out->stream.common);
+
+        pthread_mutex_unlock(&out->lock);
+        usleep(sleep_time_us);
+        pthread_mutex_lock(&out->lock);
+    }
+    out->worker_standby = true;
+    out->standby = true;
+}
+
+static int out_standby(struct audio_stream *stream)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    pthread_mutex_lock(&out->lock);
+    do_out_standby(out);
+    pthread_mutex_unlock(&out->lock);
+    return 0;
 }
 
 static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
@@ -218,10 +638,10 @@
     return -ENOSYS;
 }
 
-/** audio_stream_in implementation **/
 static uint32_t in_get_sample_rate(const struct audio_stream *stream)
 {
-    return IN_SAMPLING_RATE;
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    return in->req_config.sample_rate;
 }
 
 static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
@@ -229,19 +649,138 @@
     return -ENOSYS;
 }
 
+static int refine_output_parameters(uint32_t *sample_rate, audio_format_t *format, audio_channel_mask_t *channel_mask)
+{
+    static const uint32_t sample_rates [] = {8000,11025,16000,22050,24000,32000,
+                                            44100,48000};
+    static const int sample_rates_count = sizeof(sample_rates)/sizeof(uint32_t);
+    bool inval = false;
+    if (*format != AUDIO_FORMAT_PCM_16_BIT) {
+        *format = AUDIO_FORMAT_PCM_16_BIT;
+        inval = true;
+    }
+
+    int channel_count = popcount(*channel_mask);
+    if (channel_count != 1 && channel_count != 2) {
+        *channel_mask = AUDIO_CHANNEL_IN_STEREO;
+        inval = true;
+    }
+
+    int i;
+    for (i = 0; i < sample_rates_count; i++) {
+        if (*sample_rate < sample_rates[i]) {
+            *sample_rate = sample_rates[i];
+            inval=true;
+            break;
+        }
+        else if (*sample_rate == sample_rates[i]) {
+            break;
+        }
+        else if (i == sample_rates_count-1) {
+            // Cap it to the highest rate we support
+            *sample_rate = sample_rates[i];
+            inval=true;
+        }
+    }
+
+    if (inval) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+static int check_output_parameters(uint32_t sample_rate, audio_format_t format,
+                                  audio_channel_mask_t channel_mask)
+{
+    return refine_output_parameters(&sample_rate, &format, &channel_mask);
+}
+
+
+static int refine_input_parameters(uint32_t *sample_rate, audio_format_t *format, audio_channel_mask_t *channel_mask)
+{
+    static const uint32_t sample_rates [] = {8000, 11025, 16000, 22050, 44100, 48000};
+    static const int sample_rates_count = sizeof(sample_rates)/sizeof(uint32_t);
+    bool inval = false;
+    // Only PCM_16_bit is supported. If this is changed, stereo to mono drop
+    // must be fixed in in_read
+    if (*format != AUDIO_FORMAT_PCM_16_BIT) {
+        *format = AUDIO_FORMAT_PCM_16_BIT;
+        inval = true;
+    }
+
+    int channel_count = popcount(*channel_mask);
+    if (channel_count != 1 && channel_count != 2) {
+        *channel_mask = AUDIO_CHANNEL_IN_STEREO;
+        inval = true;
+    }
+
+    int i;
+    for (i = 0; i < sample_rates_count; i++) {
+        if (*sample_rate < sample_rates[i]) {
+            *sample_rate = sample_rates[i];
+            inval=true;
+            break;
+        }
+        else if (*sample_rate == sample_rates[i]) {
+            break;
+        }
+        else if (i == sample_rates_count-1) {
+            // Cap it to the highest rate we support
+            *sample_rate = sample_rates[i];
+            inval=true;
+        }
+    }
+
+    if (inval) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+static int check_input_parameters(uint32_t sample_rate, audio_format_t format,
+                                  audio_channel_mask_t channel_mask)
+{
+    return refine_input_parameters(&sample_rate, &format, &channel_mask);
+}
+
+static size_t get_input_buffer_size(uint32_t sample_rate, audio_format_t format,
+                                    audio_channel_mask_t channel_mask)
+{
+    size_t size;
+    size_t device_rate;
+    int channel_count = popcount(channel_mask);
+    if (check_input_parameters(sample_rate, format, channel_mask) != 0)
+        return 0;
+
+    size = sample_rate*IN_PERIOD_MS/1000;
+    // Audioflinger expects audio buffers to be multiple of 16 frames
+    size = ((size + 15) / 16) * 16;
+    size *= sizeof(short) * channel_count;
+
+    return size;
+}
+
+
 static size_t in_get_buffer_size(const struct audio_stream *stream)
 {
-    return IN_BUFFER_SIZE;
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    int size = get_input_buffer_size(in->req_config.sample_rate,
+                                 in->req_config.format,
+                                 in->req_config.channel_mask);
+
+    return size;
 }
 
 static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
 {
-    return AUDIO_CHANNEL_IN_MONO;
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    return in->req_config.channel_mask;
 }
 
 static audio_format_t in_get_format(const struct audio_stream *stream)
 {
-    return AUDIO_FORMAT_PCM_16_BIT;
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    return in->req_config.format;
 }
 
 static int in_set_format(struct audio_stream *stream, audio_format_t format)
@@ -249,16 +788,11 @@
     return -ENOSYS;
 }
 
-static int in_standby(struct audio_stream *stream)
-{
-    // in_standby is a no op
-    return 0;
-}
-
 static int in_dump(const struct audio_stream *stream, int fd)
 {
     struct generic_stream_in *in = (struct generic_stream_in *)stream;
 
+    pthread_mutex_lock(&in->lock);
     dprintf(fd, "\tin_dump:\n"
                 "\t\tsample rate: %u\n"
                 "\t\tbuffer size: %u\n"
@@ -272,7 +806,7 @@
                 in_get_format(stream),
                 in->device,
                 in->dev);
-
+    pthread_mutex_unlock(&in->lock);
     return 0;
 }
 
@@ -285,21 +819,28 @@
     long val;
     char *end;
 
-    parms = str_parms_create_str(kvpairs);
+    pthread_mutex_lock(&in->lock);
+    if (!in->standby) {
+        ret = -ENOSYS;
+    } else {
+        parms = str_parms_create_str(kvpairs);
 
-    ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
-                            value, sizeof(value));
-    if (ret >= 0) {
-        errno = 0;
-        val = strtol(value, &end, 10);
-        if ((errno == 0) && (end != NULL) && (*end == '\0') && ((int)val == val)) {
-            in->device = (int)val;
-        } else {
-            ret = -EINVAL;
+        ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+                                value, sizeof(value));
+        if (ret >= 0) {
+            errno = 0;
+            val = strtol(value, &end, 10);
+            if ((errno == 0) && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+                in->device = (int)val;
+                ret = 0;
+            } else {
+                ret = -EINVAL;
+            }
         }
-    }
 
-    str_parms_destroy(parms);
+        str_parms_destroy(parms);
+    }
+    pthread_mutex_unlock(&in->lock);
     return ret;
 }
 
@@ -332,19 +873,221 @@
     return 0;
 }
 
+// Call with in->lock held
+static void get_current_input_position(struct generic_stream_in *in,
+                                       int64_t * position,
+                                       struct timespec * timestamp) {
+    struct timespec t = { .tv_sec = 0, .tv_nsec = 0 };
+    clock_gettime(CLOCK_MONOTONIC, &t);
+    const int64_t now_us = (t.tv_sec * 1000000000LL + t.tv_nsec) / 1000;
+    if (timestamp) {
+        *timestamp = t;
+    }
+    int64_t position_since_standby;
+    if (in->standby) {
+        position_since_standby = 0;
+    } else {
+        const int64_t first_us = (in->standby_exit_time.tv_sec * 1000000000LL +
+                                  in->standby_exit_time.tv_nsec) / 1000;
+        position_since_standby = (now_us - first_us) *
+                in_get_sample_rate(&in->stream.common) /
+                1000000;
+        if (position_since_standby < 0) {
+            position_since_standby = 0;
+        }
+    }
+    *position = in->standby_position + position_since_standby;
+}
+
+// Must be called with in->lock held
+static void do_in_standby(struct generic_stream_in *in)
+{
+    if (in->standby) {
+        return;
+    }
+    in->worker_standby = true;
+    get_current_input_position(in, &in->standby_position, NULL);
+    in->standby = true;
+}
+
+static int in_standby(struct audio_stream *stream)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    pthread_mutex_lock(&in->lock);
+    do_in_standby(in);
+    pthread_mutex_unlock(&in->lock);
+    return 0;
+}
+
+static void *in_read_worker(void * args)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)args;
+    struct pcm *pcm = NULL;
+    uint8_t *buffer = NULL;
+    size_t buffer_frames;
+    int buffer_size;
+
+    bool restart = false;
+    bool shutdown = false;
+    while (true) {
+        pthread_mutex_lock(&in->lock);
+        while (in->worker_standby || restart) {
+            restart = false;
+            if (pcm) {
+                pcm_close(pcm); // Frees pcm
+                pcm = NULL;
+                free(buffer);
+                buffer=NULL;
+            }
+            if (in->worker_exit) {
+                break;
+            }
+            pthread_cond_wait(&in->worker_wake, &in->lock);
+        }
+
+        if (in->worker_exit) {
+            if (!in->worker_standby) {
+                ALOGE("In worker not in standby before exiting");
+            }
+            shutdown = true;
+        }
+        if (shutdown) {
+            pthread_mutex_unlock(&in->lock);
+            break;
+        }
+        if (!pcm) {
+            pcm = pcm_open(PCM_CARD, PCM_DEVICE,
+                          PCM_IN | PCM_MONOTONIC, &in->pcm_config);
+            if (!pcm_is_ready(pcm)) {
+                ALOGE("pcm_open(in) failed: %s: channels %d format %d rate %d",
+                  pcm_get_error(pcm),
+                  in->pcm_config.channels,
+                  in->pcm_config.format,
+                  in->pcm_config.rate
+                   );
+                pthread_mutex_unlock(&in->lock);
+                break;
+            }
+            buffer_frames = in->pcm_config.period_size;
+            buffer_size = pcm_frames_to_bytes(pcm, buffer_frames);
+            buffer = malloc(buffer_size);
+            if (!buffer) {
+                ALOGE("could not allocate worker read buffer");
+                pthread_mutex_unlock(&in->lock);
+                break;
+            }
+        }
+        pthread_mutex_unlock(&in->lock);
+        int ret = pcm_read(pcm, buffer, pcm_frames_to_bytes(pcm, buffer_frames));
+        if (ret != 0) {
+            ALOGW("pcm_read failed %s", pcm_get_error(pcm));
+            restart = true;
+        }
+
+        pthread_mutex_lock(&in->lock);
+        size_t frames_written = audio_vbuffer_write(&in->buffer, buffer, buffer_frames);
+        pthread_mutex_unlock(&in->lock);
+
+        if (frames_written != buffer_frames) {
+            ALOGW("in_read_worker only could write %zu / %zu frames", frames_written, buffer_frames);
+        }
+    }
+    if (buffer) {
+        free(buffer);
+    }
+    return NULL;
+}
+
 static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
                        size_t bytes)
 {
     struct generic_stream_in *in = (struct generic_stream_in *)stream;
     struct generic_audio_device *adev = in->dev;
+    const size_t frames =  bytes / audio_stream_in_frame_size(stream);
+    int ret = 0;
+    bool mic_mute = false;
+    size_t read_bytes = 0;
 
-    pthread_mutex_lock(&adev->lock);
-    if (adev->fd >= 0)
-        bytes = read(adev->fd, buffer, bytes);
-    if (adev->mic_mute && (bytes > 0)) {
-        memset(buffer, 0, bytes);
+    adev_get_mic_mute(&adev->device, &mic_mute);
+    pthread_mutex_lock(&in->lock);
+
+    if (in->worker_standby) {
+        in->worker_standby = false;
     }
-    pthread_mutex_unlock(&adev->lock);
+    pthread_cond_signal(&in->worker_wake);
+
+    int64_t current_position;
+    struct timespec current_time;
+
+    get_current_input_position(in, &current_position, &current_time);
+    if (in->standby) {
+        in->standby = false;
+        in->standby_exit_time = current_time;
+        in->standby_frames_read = 0;
+    }
+
+    const int64_t frames_available = current_position - in->standby_position - in->standby_frames_read;
+
+    const size_t frames_wait = (frames_available > frames) ? 0 : frames - frames_available;
+
+    int64_t sleep_time_us  = frames_wait * 1000000LL /
+                             in_get_sample_rate(&stream->common);
+
+    pthread_mutex_unlock(&in->lock);
+
+    if (sleep_time_us > 0) {
+        usleep(sleep_time_us);
+    }
+
+    pthread_mutex_lock(&in->lock);
+    int read_frames = 0;
+    if (in->standby) {
+        ALOGW("Input put to sleep while read in progress");
+        goto exit;
+    }
+    in->standby_frames_read += frames;
+
+    if (popcount(in->req_config.channel_mask) == 1 &&
+        in->pcm_config.channels == 2) {
+        // Need to downmix the stereo capture to the requested mono format
+        if (in->stereo_to_mono_buf_size < bytes*2) {
+            in->stereo_to_mono_buf = realloc(in->stereo_to_mono_buf,
+                                             bytes*2);
+            if (!in->stereo_to_mono_buf) {
+                ALOGE("Failed to allocate stereo_to_mono_buff");
+                goto exit;
+            }
+        }
+
+        read_frames = audio_vbuffer_read(&in->buffer, in->stereo_to_mono_buf, frames);
+
+        // Currently only pcm 16 is supported.
+        uint16_t *src = (uint16_t *)in->stereo_to_mono_buf;
+        uint16_t *dst = (uint16_t *)buffer;
+        size_t i;
+        // Downmix stereo 16-bit to mono 16-bit by keeping the left channel only.
+        // The stereo stream is interleaved L-R-L-R
+        for (i = 0; i < frames; i++) {
+            *dst = *src;
+            src += 2;
+            dst += 1;
+        }
+    } else {
+        read_frames = audio_vbuffer_read(&in->buffer, buffer, frames);
+    }
+
+exit:
+    read_bytes = read_frames*audio_stream_in_frame_size(stream);
+
+    if (mic_mute) {
+        read_bytes = 0;
+    }
+
+    if (read_bytes < bytes) {
+        memset (&((uint8_t *)buffer)[read_bytes], 0, bytes-read_bytes);
+    }
+
+    pthread_mutex_unlock(&in->lock);
 
     return bytes;
 }
@@ -354,6 +1097,18 @@
     return 0;
 }
 
+static int in_get_capture_position(const struct audio_stream_in *stream,
+                                int64_t *frames, int64_t *time)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    pthread_mutex_lock(&in->lock);
+    struct timespec current_time;
+    get_current_input_position(in, frames, &current_time);
+    *time = (current_time.tv_sec * 1000000000LL + current_time.tv_nsec);
+    pthread_mutex_unlock(&in->lock);
+    return 0;
+}
+
 static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
 {
     // in_add_audio_effect is a no op
@@ -378,26 +1133,18 @@
     struct generic_stream_out *out;
     int ret = 0;
 
-    pthread_mutex_lock(&adev->lock);
-    if (adev->output != NULL) {
-        ret = -ENOSYS;
-        goto error;
-    }
-
-    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
-        (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) ||
-        (config->sample_rate != OUT_SAMPLING_RATE)) {
+    if (refine_output_parameters(&config->sample_rate, &config->format, &config->channel_mask)) {
         ALOGE("Error opening output stream format %d, channel_mask %04x, sample_rate %u",
               config->format, config->channel_mask, config->sample_rate);
-        config->format = AUDIO_FORMAT_PCM_16_BIT;
-        config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-        config->sample_rate = OUT_SAMPLING_RATE;
         ret = -EINVAL;
         goto error;
     }
 
     out = (struct generic_stream_out *)calloc(1, sizeof(struct generic_stream_out));
 
+    if (!out)
+        return -ENOMEM;
+
     out->stream.common.get_sample_rate = out_get_sample_rate;
     out->stream.common.set_sample_rate = out_set_sample_rate;
     out->stream.common.get_buffer_size = out_get_buffer_size;
@@ -414,15 +1161,41 @@
     out->stream.set_volume = out_set_volume;
     out->stream.write = out_write;
     out->stream.get_render_position = out_get_render_position;
+    out->stream.get_presentation_position = out_get_presentation_position;
     out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
 
+    pthread_mutex_init(&out->lock, (const pthread_mutexattr_t *) NULL);
     out->dev = adev;
     out->device = devices;
-    adev->output = (struct audio_stream_out *)out;
+    memcpy(&out->req_config, config, sizeof(struct audio_config));
+    memcpy(&out->pcm_config, &pcm_config_out, sizeof(struct pcm_config));
+    out->pcm_config.rate = config->sample_rate;
+    out->pcm_config.period_size = out->pcm_config.rate*OUT_PERIOD_MS/1000;
+
+    out->standby = true;
+    out->underrun_position = 0;
+    out->underrun_time.tv_sec = 0;
+    out->underrun_time.tv_nsec = 0;
+    out->last_write_time_us = 0;
+    out->frames_total_buffered = 0;
+    out->frames_written = 0;
+    out->frames_rendered = 0;
+
+    ret = audio_vbuffer_init(&out->buffer,
+                      out->pcm_config.period_size*out->pcm_config.period_count,
+                      out->pcm_config.channels *
+                      pcm_format_to_bits(out->pcm_config.format) >> 3);
+    if (ret == 0) {
+        pthread_cond_init(&out->worker_wake, NULL);
+        out->worker_standby = true;
+        out->worker_exit = false;
+        pthread_create(&out->worker_thread, NULL, out_write_worker, out);
+
+    }
     *stream_out = &out->stream;
 
+
 error:
-    pthread_mutex_unlock(&adev->lock);
 
     return ret;
 }
@@ -430,14 +1203,18 @@
 static void adev_close_output_stream(struct audio_hw_device *dev,
                                      struct audio_stream_out *stream)
 {
-    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    pthread_mutex_lock(&out->lock);
+    do_out_standby(out);
 
-    pthread_mutex_lock(&adev->lock);
-    if (stream == adev->output) {
-        free(stream);
-        adev->output = NULL;
-    }
-    pthread_mutex_unlock(&adev->lock);
+    out->worker_exit = true;
+    pthread_cond_signal(&out->worker_wake);
+    pthread_mutex_unlock(&out->lock);
+
+    pthread_join(out->worker_thread, NULL);
+    pthread_mutex_destroy(&out->lock);
+    audio_vbuffer_destroy(&out->buffer);
+    free(stream);
 }
 
 static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
@@ -453,12 +1230,7 @@
 
 static int adev_init_check(const struct audio_hw_device *dev)
 {
-    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
-
-    if (adev->fd >= 0)
-        return 0;
-
-    return -ENODEV;
+    return 0;
 }
 
 static int adev_set_voice_volume(struct audio_hw_device *dev, float volume)
@@ -496,7 +1268,6 @@
 static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
 {
     struct generic_audio_device *adev = (struct generic_audio_device *)dev;
-
     pthread_mutex_lock(&adev->lock);
     adev->mic_mute = state;
     pthread_mutex_unlock(&adev->lock);
@@ -506,20 +1277,43 @@
 static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
 {
     struct generic_audio_device *adev = (struct generic_audio_device *)dev;
-
     pthread_mutex_lock(&adev->lock);
     *state = adev->mic_mute;
     pthread_mutex_unlock(&adev->lock);
-
     return 0;
 }
 
+
 static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
                                          const struct audio_config *config)
 {
-    return IN_BUFFER_SIZE;
+    return get_input_buffer_size(config->sample_rate, config->format, config->channel_mask);
 }
 
+
+static void adev_close_input_stream(struct audio_hw_device *dev,
+                                   struct audio_stream_in *stream)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    pthread_mutex_lock(&in->lock);
+    do_in_standby(in);
+
+    in->worker_exit = true;
+    pthread_cond_signal(&in->worker_wake);
+    pthread_mutex_unlock(&in->lock);
+    pthread_join(in->worker_thread, NULL);
+
+    if (in->stereo_to_mono_buf != NULL) {
+        free(in->stereo_to_mono_buf);
+        in->stereo_to_mono_buf_size = 0;
+    }
+
+    pthread_mutex_destroy(&in->lock);
+    audio_vbuffer_destroy(&in->buffer);
+    free(stream);
+}
+
+
 static int adev_open_input_stream(struct audio_hw_device *dev,
                                   audio_io_handle_t handle,
                                   audio_devices_t devices,
@@ -532,139 +1326,144 @@
     struct generic_audio_device *adev = (struct generic_audio_device *)dev;
     struct generic_stream_in *in;
     int ret = 0;
-
-    pthread_mutex_lock(&adev->lock);
-    if (adev->input != NULL) {
-        ret = -ENOSYS;
-        goto error;
-    }
-
-    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
-        (config->channel_mask != AUDIO_CHANNEL_IN_MONO) ||
-        (config->sample_rate != IN_SAMPLING_RATE)) {
+    if (refine_input_parameters(&config->sample_rate, &config->format, &config->channel_mask)) {
         ALOGE("Error opening input stream format %d, channel_mask %04x, sample_rate %u",
               config->format, config->channel_mask, config->sample_rate);
-        config->format = AUDIO_FORMAT_PCM_16_BIT;
-        config->channel_mask = AUDIO_CHANNEL_IN_MONO;
-        config->sample_rate = IN_SAMPLING_RATE;
         ret = -EINVAL;
         goto error;
     }
 
     in = (struct generic_stream_in *)calloc(1, sizeof(struct generic_stream_in));
+    if (!in) {
+        ret = -ENOMEM;
+        goto error;
+    }
 
     in->stream.common.get_sample_rate = in_get_sample_rate;
-    in->stream.common.set_sample_rate = in_set_sample_rate;
+    in->stream.common.set_sample_rate = in_set_sample_rate;         // no op
     in->stream.common.get_buffer_size = in_get_buffer_size;
     in->stream.common.get_channels = in_get_channels;
     in->stream.common.get_format = in_get_format;
-    in->stream.common.set_format = in_set_format;
+    in->stream.common.set_format = in_set_format;                   // no op
     in->stream.common.standby = in_standby;
     in->stream.common.dump = in_dump;
     in->stream.common.set_parameters = in_set_parameters;
     in->stream.common.get_parameters = in_get_parameters;
-    in->stream.common.add_audio_effect = in_add_audio_effect;
-    in->stream.common.remove_audio_effect = in_remove_audio_effect;
-    in->stream.set_gain = in_set_gain;
+    in->stream.common.add_audio_effect = in_add_audio_effect;       // no op
+    in->stream.common.remove_audio_effect = in_remove_audio_effect; // no op
+    in->stream.set_gain = in_set_gain;                              // no op
     in->stream.read = in_read;
-    in->stream.get_input_frames_lost = in_get_input_frames_lost;
+    in->stream.get_input_frames_lost = in_get_input_frames_lost;    // no op
+    in->stream.get_capture_position = in_get_capture_position;
 
+    pthread_mutex_init(&in->lock, (const pthread_mutexattr_t *) NULL);
     in->dev = adev;
     in->device = devices;
-    adev->input = (struct audio_stream_in *)in;
+    memcpy(&in->req_config, config, sizeof(struct audio_config));
+    memcpy(&in->pcm_config, &pcm_config_in, sizeof(struct pcm_config));
+    in->pcm_config.rate = config->sample_rate;
+    in->pcm_config.period_size = in->pcm_config.rate*IN_PERIOD_MS/1000;
+
+    in->stereo_to_mono_buf = NULL;
+    in->stereo_to_mono_buf_size = 0;
+
+    in->standby = true;
+    in->standby_position = 0;
+    in->standby_exit_time.tv_sec = 0;
+    in->standby_exit_time.tv_nsec = 0;
+    in->standby_frames_read = 0;
+
+    ret = audio_vbuffer_init(&in->buffer,
+                      in->pcm_config.period_size*in->pcm_config.period_count,
+                      in->pcm_config.channels *
+                      pcm_format_to_bits(in->pcm_config.format) >> 3);
+    if (ret == 0) {
+        pthread_cond_init(&in->worker_wake, NULL);
+        in->worker_standby = true;
+        in->worker_exit = false;
+        pthread_create(&in->worker_thread, NULL, in_read_worker, in);
+    }
+
     *stream_in = &in->stream;
 
 error:
-    pthread_mutex_unlock(&adev->lock);
-
     return ret;
 }
 
-static void adev_close_input_stream(struct audio_hw_device *dev,
-                                   struct audio_stream_in *stream)
-{
-    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
-
-    pthread_mutex_lock(&adev->lock);
-    if (stream == adev->input) {
-        free(stream);
-        adev->input = NULL;
-    }
-    pthread_mutex_unlock(&adev->lock);
-}
 
 static int adev_dump(const audio_hw_device_t *dev, int fd)
 {
-    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
-
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-
-    dprintf(fd, "\nadev_dump:\n"
-                "\tfd: %d\n"
-                "\tmic_mute: %s\n"
-                "\toutput: %p\n"
-                "\tinput: %p\n\n",
-                adev->fd,
-                adev->mic_mute ? "true": "false",
-                adev->output,
-                adev->input);
-
-    if (adev->output != NULL)
-        out_dump((const struct audio_stream *)adev->output, fd);
-    if (adev->input != NULL)
-        in_dump((const struct audio_stream *)adev->input, fd);
-
     return 0;
 }
 
 static int adev_close(hw_device_t *dev)
 {
     struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    int ret = 0;
+    if (!adev)
+        return 0;
 
-    adev_close_output_stream((struct audio_hw_device *)dev, adev->output);
-    adev_close_input_stream((struct audio_hw_device *)dev, adev->input);
+    pthread_mutex_lock(&adev_init_lock);
 
-    if (adev->fd >= 0)
-        close(adev->fd);
+    if (audio_device_ref_count == 0) {
+        ALOGE("adev_close called when ref_count 0");
+        ret = -EINVAL;
+        goto error;
+    }
 
-    free(dev);
-    return 0;
+    if ((--audio_device_ref_count) == 0) {
+        if (adev->mixer) {
+            mixer_close(adev->mixer);
+        }
+        free(adev);
+    }
+
+error:
+    pthread_mutex_unlock(&adev_init_lock);
+    return ret;
 }
 
 static int adev_open(const hw_module_t* module, const char* name,
                      hw_device_t** device)
 {
-    struct generic_audio_device *adev;
-    int fd;
+    static struct generic_audio_device *adev;
 
     if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
         return -EINVAL;
 
-    fd = open(AUDIO_DEVICE_NAME, O_RDWR);
-    if (fd < 0)
-        return -ENOSYS;
+    pthread_once(&sFallbackOnce, fallback_init);
+    if (sFallback != NULL) {
+        return sFallback->common.methods->open(&sFallback->common, name, device);
+    }
 
+    pthread_mutex_lock(&adev_init_lock);
+    if (audio_device_ref_count != 0) {
+        *device = &adev->device.common;
+        audio_device_ref_count++;
+        ALOGV("%s: returning existing instance of adev", __func__);
+        ALOGV("%s: exit", __func__);
+        goto unlock;
+    }
     adev = calloc(1, sizeof(struct generic_audio_device));
 
-    adev->fd = fd;
+    pthread_mutex_init(&adev->lock, (const pthread_mutexattr_t *) NULL);
 
     adev->device.common.tag = HARDWARE_DEVICE_TAG;
     adev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
     adev->device.common.module = (struct hw_module_t *) module;
     adev->device.common.close = adev_close;
 
-    adev->device.init_check = adev_init_check;
-    adev->device.set_voice_volume = adev_set_voice_volume;
-    adev->device.set_master_volume = adev_set_master_volume;
-    adev->device.get_master_volume = adev_get_master_volume;
-    adev->device.set_master_mute = adev_set_master_mute;
-    adev->device.get_master_mute = adev_get_master_mute;
-    adev->device.set_mode = adev_set_mode;
+    adev->device.init_check = adev_init_check;               // no op
+    adev->device.set_voice_volume = adev_set_voice_volume;   // no op
+    adev->device.set_master_volume = adev_set_master_volume; // no op
+    adev->device.get_master_volume = adev_get_master_volume; // no op
+    adev->device.set_master_mute = adev_set_master_mute;     // no op
+    adev->device.get_master_mute = adev_get_master_mute;     // no op
+    adev->device.set_mode = adev_set_mode;                   // no op
     adev->device.set_mic_mute = adev_set_mic_mute;
     adev->device.get_mic_mute = adev_get_mic_mute;
-    adev->device.set_parameters = adev_set_parameters;
-    adev->device.get_parameters = adev_get_parameters;
+    adev->device.set_parameters = adev_set_parameters;       // no op
+    adev->device.get_parameters = adev_get_parameters;       // no op
     adev->device.get_input_buffer_size = adev_get_input_buffer_size;
     adev->device.open_output_stream = adev_open_output_stream;
     adev->device.close_output_stream = adev_close_output_stream;
@@ -674,6 +1473,36 @@
 
     *device = &adev->device.common;
 
+    adev->mixer = mixer_open(PCM_CARD);
+    struct mixer_ctl *ctl;
+
+    // Set default mixer ctls
+    // Enable channels and set volume
+    for (int i = 0; i < (int)mixer_get_num_ctls(adev->mixer); i++) {
+        ctl = mixer_get_ctl(adev->mixer, i);
+        ALOGD("mixer %d name %s", i, mixer_ctl_get_name(ctl));
+        if (!strcmp(mixer_ctl_get_name(ctl), "Master Playback Volume") ||
+            !strcmp(mixer_ctl_get_name(ctl), "Capture Volume")) {
+            for (int z = 0; z < (int)mixer_ctl_get_num_values(ctl); z++) {
+                ALOGD("set ctl %d to %d", z, 100);
+                mixer_ctl_set_percent(ctl, z, 100);
+            }
+            continue;
+        }
+        if (!strcmp(mixer_ctl_get_name(ctl), "Master Playback Switch") ||
+            !strcmp(mixer_ctl_get_name(ctl), "Capture Switch")) {
+            for (int z = 0; z < (int)mixer_ctl_get_num_values(ctl); z++) {
+                ALOGD("set ctl %d to %d", z, 1);
+                mixer_ctl_set_value(ctl, z, 1);
+            }
+            continue;
+        }
+    }
+
+    audio_device_ref_count++;
+
+unlock:
+    pthread_mutex_unlock(&adev_init_lock);
     return 0;
 }
 
@@ -692,3 +1521,41 @@
         .methods = &hal_module_methods,
     },
 };
+
+/* This function detects whether or not we should be using an alsa audio device
+ * or fall back to the legacy goldfish_audio driver.
+ */
+static void
+fallback_init(void)
+{
+    void* module;
+
+    FILE *fptr = fopen ("/proc/asound/pcm", "r");
+    if (fptr != NULL) {
+      // asound/pcm is empty if there are no devices
+      int c = fgetc(fptr);
+      fclose(fptr);
+      if (c != EOF) {
+          ALOGD("Emulator host-side ALSA audio emulation detected.");
+          return;
+      }
+    }
+
+    ALOGD("Emulator without host-side ALSA audio emulation detected.");
+#if __LP64__
+    module = dlopen("/system/lib64/hw/audio.primary.goldfish_legacy.so",
+                    RTLD_LAZY|RTLD_LOCAL);
+#else
+    module = dlopen("/system/lib/hw/audio.primary.goldfish_legacy.so",
+                    RTLD_LAZY|RTLD_LOCAL);
+#endif
+    if (module != NULL) {
+        sFallback = (struct audio_module *)(dlsym(module, HAL_MODULE_INFO_SYM_AS_STR));
+        if (sFallback == NULL) {
+            dlclose(module);
+        }
+    }
+    if (sFallback == NULL) {
+        ALOGE("Could not find legacy fallback module!?");
+    }
+}
diff --git a/audio/audio_hw_legacy.c b/audio/audio_hw_legacy.c
new file mode 100644
index 0000000..14d82da
--- /dev/null
+++ b/audio/audio_hw_legacy.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "audio_hw_generic"
+/*#define LOG_NDEBUG 0*/
+
+#include <errno.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <fcntl.h>
+
+#include <cutils/log.h>
+#include <cutils/str_parms.h>
+
+#include <hardware/hardware.h>
+#include <system/audio.h>
+#include <hardware/audio.h>
+
+
+#define AUDIO_DEVICE_NAME "/dev/eac"
+#define OUT_BUFFER_SIZE 4096
+#define OUT_LATENCY_MS 20
+#define IN_SAMPLING_RATE 8000
+#define IN_BUFFER_SIZE 320
+
+
+struct generic_audio_device {
+    struct audio_hw_device device;
+    pthread_mutex_t lock;
+    struct audio_stream_out *output;
+    struct audio_stream_in *input;
+    int fd;
+    bool mic_mute;
+};
+
+
+struct generic_stream_out {
+    struct audio_stream_out stream;
+    struct generic_audio_device *dev;
+    audio_devices_t device;
+    uint32_t sample_rate;
+};
+
+struct generic_stream_in {
+    struct audio_stream_in stream;
+    struct generic_audio_device *dev;
+    audio_devices_t device;
+};
+
+
+static uint32_t out_get_sample_rate(const struct audio_stream *stream)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    return out->sample_rate;
+}
+
+static int out_set_sample_rate(struct audio_stream *stream, uint32_t rate)
+{
+    return -ENOSYS;
+}
+
+static size_t out_get_buffer_size(const struct audio_stream *stream)
+{
+    return OUT_BUFFER_SIZE;
+}
+
+static audio_channel_mask_t out_get_channels(const struct audio_stream *stream)
+{
+    return AUDIO_CHANNEL_OUT_STEREO;
+}
+
+static audio_format_t out_get_format(const struct audio_stream *stream)
+{
+    return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int out_set_format(struct audio_stream *stream, audio_format_t format)
+{
+    return -ENOSYS;
+}
+
+static int out_standby(struct audio_stream *stream)
+{
+    // out_standby is a no op
+    return 0;
+}
+
+static int out_dump(const struct audio_stream *stream, int fd)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+
+    dprintf(fd, "\tout_dump:\n"
+                "\t\tsample rate: %u\n"
+                "\t\tbuffer size: %u\n"
+                "\t\tchannel mask: %08x\n"
+                "\t\tformat: %d\n"
+                "\t\tdevice: %08x\n"
+                "\t\taudio dev: %p\n\n",
+                out_get_sample_rate(stream),
+                out_get_buffer_size(stream),
+                out_get_channels(stream),
+                out_get_format(stream),
+                out->device,
+                out->dev);
+
+    return 0;
+}
+
+static int out_set_parameters(struct audio_stream *stream, const char *kvpairs)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    struct str_parms *parms;
+    char value[32];
+    int ret;
+    long val;
+    char *end;
+
+    parms = str_parms_create_str(kvpairs);
+
+    ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+                            value, sizeof(value));
+    if (ret >= 0) {
+        errno = 0;
+        val = strtol(value, &end, 10);
+        if (errno == 0 && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+            out->device = (int)val;
+        } else {
+            ret = -EINVAL;
+        }
+    }
+
+    str_parms_destroy(parms);
+    return ret;
+}
+
+static char * out_get_parameters(const struct audio_stream *stream, const char *keys)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    struct str_parms *query = str_parms_create_str(keys);
+    char *str;
+    char value[256];
+    struct str_parms *reply = str_parms_create();
+    int ret;
+
+    ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
+    if (ret >= 0) {
+        str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, out->device);
+        str = strdup(str_parms_to_str(reply));
+    } else {
+        str = strdup(keys);
+    }
+
+    str_parms_destroy(query);
+    str_parms_destroy(reply);
+    return str;
+}
+
+static uint32_t out_get_latency(const struct audio_stream_out *stream)
+{
+    return OUT_LATENCY_MS;
+}
+
+static int out_set_volume(struct audio_stream_out *stream, float left,
+                          float right)
+{
+    return -ENOSYS;
+}
+
+static ssize_t out_write(struct audio_stream_out *stream, const void* buffer,
+                         size_t bytes)
+{
+    struct generic_stream_out *out = (struct generic_stream_out *)stream;
+    struct generic_audio_device *adev = out->dev;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->fd >= 0)
+        bytes = write(adev->fd, buffer, bytes);
+    pthread_mutex_unlock(&adev->lock);
+
+    return bytes;
+}
+
+static int out_get_render_position(const struct audio_stream_out *stream,
+                                   uint32_t *dsp_frames)
+{
+    return -ENOSYS;
+}
+
+static int out_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+    // out_add_audio_effect is a no op
+    return 0;
+}
+
+static int out_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+    // out_remove_audio_effect is a no op
+    return 0;
+}
+
+static int out_get_next_write_timestamp(const struct audio_stream_out *stream,
+                                        int64_t *timestamp)
+{
+    return -ENOSYS;
+}
+
+/** audio_stream_in implementation **/
+static uint32_t in_get_sample_rate(const struct audio_stream *stream)
+{
+    return IN_SAMPLING_RATE;
+}
+
+static int in_set_sample_rate(struct audio_stream *stream, uint32_t rate)
+{
+    return -ENOSYS;
+}
+
+static size_t in_get_buffer_size(const struct audio_stream *stream)
+{
+    return IN_BUFFER_SIZE;
+}
+
+static audio_channel_mask_t in_get_channels(const struct audio_stream *stream)
+{
+    return AUDIO_CHANNEL_IN_MONO;
+}
+
+static audio_format_t in_get_format(const struct audio_stream *stream)
+{
+    return AUDIO_FORMAT_PCM_16_BIT;
+}
+
+static int in_set_format(struct audio_stream *stream, audio_format_t format)
+{
+    return -ENOSYS;
+}
+
+static int in_standby(struct audio_stream *stream)
+{
+    // in_standby is a no op
+    return 0;
+}
+
+/* Dumps the input stream's configuration to |fd| (e.g. for dumpsys).
+ * Always returns 0. */
+static int in_dump(const struct audio_stream *stream, int fd)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+
+    // in_get_buffer_size returns size_t, so use %zu (the old %u was a
+    // format-specifier mismatch on LP64 targets).
+    dprintf(fd, "\tin_dump:\n"
+                "\t\tsample rate: %u\n"
+                "\t\tbuffer size: %zu\n"
+                "\t\tchannel mask: %08x\n"
+                "\t\tformat: %d\n"
+                "\t\tdevice: %08x\n"
+                "\t\taudio dev: %p\n\n",
+                in_get_sample_rate(stream),
+                in_get_buffer_size(stream),
+                in_get_channels(stream),
+                in_get_format(stream),
+                in->device,
+                in->dev);
+
+    return 0;
+}
+
+/* Handles AUDIO_PARAMETER_STREAM_ROUTING ("routing") by storing the requested
+ * device on the stream. Returns the (negative) str_parms_get_str result when
+ * the key is absent, -EINVAL when the value is not a valid int, and the
+ * non-negative str_parms_get_str result on success. */
+static int in_set_parameters(struct audio_stream *stream, const char *kvpairs)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    struct str_parms *parms;
+    char value[32];
+    int ret;
+    long val;
+    char *end;
+
+    parms = str_parms_create_str(kvpairs);
+
+    ret = str_parms_get_str(parms, AUDIO_PARAMETER_STREAM_ROUTING,
+                            value, sizeof(value));
+    if (ret >= 0) {
+        errno = 0;
+        val = strtol(value, &end, 10);
+        // Accept only a fully-consumed decimal string that also fits in int.
+        if ((errno == 0) && (end != NULL) && (*end == '\0') && ((int)val == val)) {
+            in->device = (int)val;
+        } else {
+            ret = -EINVAL;
+        }
+    }
+
+    str_parms_destroy(parms);
+    return ret;
+}
+
+/* Answers a "routing" query with the stream's current device; any other keys
+ * are echoed back unchanged. The caller owns the returned heap string. */
+static char * in_get_parameters(const struct audio_stream *stream,
+                                const char *keys)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    struct str_parms *query = str_parms_create_str(keys);
+    char *str;
+    char value[256];
+    struct str_parms *reply = str_parms_create();
+    int ret;
+
+    ret = str_parms_get_str(query, AUDIO_PARAMETER_STREAM_ROUTING, value, sizeof(value));
+    if (ret >= 0) {
+        str_parms_add_int(reply, AUDIO_PARAMETER_STREAM_ROUTING, in->device);
+        // str_parms_to_str() returns a heap-allocated string; free it after
+        // copying to avoid leaking it on every query (the old code leaked).
+        char *reply_str = str_parms_to_str(reply);
+        str = strdup(reply_str);
+        free(reply_str);
+    } else {
+        str = strdup(keys);
+    }
+
+    str_parms_destroy(query);
+    str_parms_destroy(reply);
+    return str;
+}
+
+/* Input gain is not adjustable; the request is silently accepted. */
+static int in_set_gain(struct audio_stream_in *stream, float gain)
+{
+    // in_set_gain is a no op
+    return 0;
+}
+
+/* Reads up to |bytes| of captured audio from the emulator device. When the
+ * mic is muted the captured data is replaced with silence. Returns the byte
+ * count read, the read(2) error (negative), or |bytes| unchanged when no
+ * device fd is open. */
+static ssize_t in_read(struct audio_stream_in *stream, void* buffer,
+                       size_t bytes)
+{
+    struct generic_stream_in *in = (struct generic_stream_in *)stream;
+    struct generic_audio_device *adev = in->dev;
+    ssize_t result = (ssize_t)bytes;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->fd >= 0)
+        result = read(adev->fd, buffer, bytes);
+    // Keep the result signed: the old code stored read()'s -1 into the
+    // unsigned |bytes|, making the > 0 check pass and memset() zero
+    // SIZE_MAX bytes (a guaranteed crash on read failure).
+    if (adev->mic_mute && (result > 0)) {
+        memset(buffer, 0, (size_t)result);
+    }
+    pthread_mutex_unlock(&adev->lock);
+
+    return result;
+}
+
+/* Dropped frames are not tracked; always reports zero. */
+static uint32_t in_get_input_frames_lost(struct audio_stream_in *stream)
+{
+    return 0;
+}
+
+static int in_add_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+    // in_add_audio_effect is a no op
+    return 0;
+}
+
+static int in_remove_audio_effect(const struct audio_stream *stream, effect_handle_t effect)
+{
+    // in_remove_audio_effect is a no op
+    return 0;
+}
+
+/* Opens the single output stream supported by this device. Only PCM 16-bit
+ * stereo at 44100 or 48000 Hz is accepted; on a mismatch |config| is
+ * rewritten with supported values and -EINVAL is returned so the framework
+ * can retry. A second concurrent output stream is refused with -ENOSYS. */
+static int adev_open_output_stream(struct audio_hw_device *dev,
+                                   audio_io_handle_t handle,
+                                   audio_devices_t devices,
+                                   audio_output_flags_t flags,
+                                   struct audio_config *config,
+                                   struct audio_stream_out **stream_out,
+                                   const char *address __unused)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    struct generic_stream_out *out;
+    static const uint32_t sample_rates [] = { 44100, 48000 };
+    static const int sample_rates_count = sizeof(sample_rates)/sizeof(sample_rates[0]);
+    int ret = 0;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->output != NULL) {
+        ret = -ENOSYS;
+        goto error;
+    }
+
+    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
+        (config->channel_mask != AUDIO_CHANNEL_OUT_STEREO) ) {
+        ALOGE("Error opening output stream, format %d, channel_mask %04x",
+              config->format, config->channel_mask);
+        config->format = AUDIO_FORMAT_PCM_16_BIT;
+        config->channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+        ret = -EINVAL;
+    }
+
+    // Snap the requested rate to the nearest supported rate (rounding up,
+    // capping at the maximum) and report -EINVAL when it had to change.
+    for (int idx = 0; idx < sample_rates_count; idx++) {
+        if (config->sample_rate < sample_rates[idx]) {
+            config->sample_rate = sample_rates[idx];
+            ALOGE("Error opening output stream, sample_rate %u", config->sample_rate);
+            ret = -EINVAL;
+            break;
+        } else if (config->sample_rate == sample_rates[idx]) {
+            break;
+        } else if (idx == sample_rates_count-1) {
+            // Cap it to the highest rate we support
+            config->sample_rate = sample_rates[idx];
+            ALOGE("Error opening output stream, sample_rate %u", config->sample_rate);
+            ret = -EINVAL;
+        }
+    }
+
+    if (ret != 0) goto error;
+
+    out = (struct generic_stream_out *)calloc(1, sizeof(struct generic_stream_out));
+    if (out == NULL) {
+        // The old code dereferenced the result unchecked; a failed
+        // allocation would have crashed the audio server.
+        ret = -ENOMEM;
+        goto error;
+    }
+
+    out->stream.common.get_sample_rate = out_get_sample_rate;
+    out->stream.common.set_sample_rate = out_set_sample_rate;
+    out->stream.common.get_buffer_size = out_get_buffer_size;
+    out->stream.common.get_channels = out_get_channels;
+    out->stream.common.get_format = out_get_format;
+    out->stream.common.set_format = out_set_format;
+    out->stream.common.standby = out_standby;
+    out->stream.common.dump = out_dump;
+    out->stream.common.set_parameters = out_set_parameters;
+    out->stream.common.get_parameters = out_get_parameters;
+    out->stream.common.add_audio_effect = out_add_audio_effect;
+    out->stream.common.remove_audio_effect = out_remove_audio_effect;
+    out->stream.get_latency = out_get_latency;
+    out->stream.set_volume = out_set_volume;
+    out->stream.write = out_write;
+    out->stream.get_render_position = out_get_render_position;
+    out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
+    out->sample_rate = config->sample_rate;
+
+    out->dev = adev;
+    out->device = devices;
+    adev->output = (struct audio_stream_out *)out;
+    *stream_out = &out->stream;
+
+error:
+    pthread_mutex_unlock(&adev->lock);
+
+    return ret;
+}
+
+/* Frees the output stream, but only if it is the one this device is
+ * currently tracking (guards against stale/foreign stream pointers). */
+static void adev_close_output_stream(struct audio_hw_device *dev,
+                                     struct audio_stream_out *stream)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    if (stream == adev->output) {
+        free(stream);
+        adev->output = NULL;
+    }
+    pthread_mutex_unlock(&adev->lock);
+}
+
+/* Device-level parameters are accepted but ignored. */
+static int adev_set_parameters(struct audio_hw_device *dev, const char *kvpairs)
+{
+    return 0;
+}
+
+/* No device-level parameters are exposed; caller frees the returned string. */
+static char * adev_get_parameters(const struct audio_hw_device *dev,
+                                  const char *keys)
+{
+    return strdup("");
+}
+
+/* Reports whether the device opened successfully: the emulator audio fd
+ * must be valid, otherwise -ENODEV. */
+static int adev_init_check(const struct audio_hw_device *dev)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    if (adev->fd >= 0)
+        return 0;
+
+    return -ENODEV;
+}
+
+static int adev_set_voice_volume(struct audio_hw_device *dev, float volume)
+{
+    // adev_set_voice_volume is a no op (simulates phones)
+    return 0;
+}
+
+/* Master volume/mute control is delegated to software mixing; the four
+ * functions below advertise that by returning -ENOSYS. */
+static int adev_set_master_volume(struct audio_hw_device *dev, float volume)
+{
+    return -ENOSYS;
+}
+
+static int adev_get_master_volume(struct audio_hw_device *dev, float *volume)
+{
+    return -ENOSYS;
+}
+
+static int adev_set_master_mute(struct audio_hw_device *dev, bool muted)
+{
+    return -ENOSYS;
+}
+
+static int adev_get_master_mute(struct audio_hw_device *dev, bool *muted)
+{
+    return -ENOSYS;
+}
+
+static int adev_set_mode(struct audio_hw_device *dev, audio_mode_t mode)
+{
+    // adev_set_mode is a no op (simulates phones)
+    return 0;
+}
+
+/* Records the mic-mute flag; in_read() consults it to zero captured audio. */
+static int adev_set_mic_mute(struct audio_hw_device *dev, bool state)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    adev->mic_mute = state;
+    pthread_mutex_unlock(&adev->lock);
+    return 0;
+}
+
+static int adev_get_mic_mute(const struct audio_hw_device *dev, bool *state)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    *state = adev->mic_mute;
+    pthread_mutex_unlock(&adev->lock);
+
+    return 0;
+}
+
+/* Capture buffer size is fixed regardless of the requested |config|. */
+static size_t adev_get_input_buffer_size(const struct audio_hw_device *dev,
+                                         const struct audio_config *config)
+{
+    return IN_BUFFER_SIZE;
+}
+
+/* Opens the single input stream supported by this device. Only PCM 16-bit
+ * mono at IN_SAMPLING_RATE is accepted; on a mismatch |config| is rewritten
+ * with supported values and -EINVAL is returned so the framework can retry.
+ * A second concurrent input stream is refused with -ENOSYS. */
+static int adev_open_input_stream(struct audio_hw_device *dev,
+                                  audio_io_handle_t handle,
+                                  audio_devices_t devices,
+                                  struct audio_config *config,
+                                  struct audio_stream_in **stream_in,
+                                  audio_input_flags_t flags __unused,
+                                  const char *address __unused,
+                                  audio_source_t source __unused)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+    struct generic_stream_in *in;
+    int ret = 0;
+
+    pthread_mutex_lock(&adev->lock);
+    if (adev->input != NULL) {
+        ret = -ENOSYS;
+        goto error;
+    }
+
+    if ((config->format != AUDIO_FORMAT_PCM_16_BIT) ||
+        (config->channel_mask != AUDIO_CHANNEL_IN_MONO) ||
+        (config->sample_rate != IN_SAMPLING_RATE)) {
+        ALOGE("Error opening input stream format %d, channel_mask %04x, sample_rate %u",
+              config->format, config->channel_mask, config->sample_rate);
+        config->format = AUDIO_FORMAT_PCM_16_BIT;
+        config->channel_mask = AUDIO_CHANNEL_IN_MONO;
+        config->sample_rate = IN_SAMPLING_RATE;
+        ret = -EINVAL;
+        goto error;
+    }
+
+    in = (struct generic_stream_in *)calloc(1, sizeof(struct generic_stream_in));
+    if (in == NULL) {
+        // The old code dereferenced the result unchecked; a failed
+        // allocation would have crashed the audio server.
+        ret = -ENOMEM;
+        goto error;
+    }
+
+    in->stream.common.get_sample_rate = in_get_sample_rate;
+    in->stream.common.set_sample_rate = in_set_sample_rate;
+    in->stream.common.get_buffer_size = in_get_buffer_size;
+    in->stream.common.get_channels = in_get_channels;
+    in->stream.common.get_format = in_get_format;
+    in->stream.common.set_format = in_set_format;
+    in->stream.common.standby = in_standby;
+    in->stream.common.dump = in_dump;
+    in->stream.common.set_parameters = in_set_parameters;
+    in->stream.common.get_parameters = in_get_parameters;
+    in->stream.common.add_audio_effect = in_add_audio_effect;
+    in->stream.common.remove_audio_effect = in_remove_audio_effect;
+    in->stream.set_gain = in_set_gain;
+    in->stream.read = in_read;
+    in->stream.get_input_frames_lost = in_get_input_frames_lost;
+
+    in->dev = adev;
+    in->device = devices;
+    adev->input = (struct audio_stream_in *)in;
+    *stream_in = &in->stream;
+
+error:
+    pthread_mutex_unlock(&adev->lock);
+
+    return ret;
+}
+
+/* Frees the input stream, but only if it is the one this device is
+ * currently tracking (guards against stale/foreign stream pointers). */
+static void adev_close_input_stream(struct audio_hw_device *dev,
+                                   struct audio_stream_in *stream)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    pthread_mutex_lock(&adev->lock);
+    if (stream == adev->input) {
+        free(stream);
+        adev->input = NULL;
+    }
+    pthread_mutex_unlock(&adev->lock);
+}
+
+/* Dumps device state (and any open streams) to |fd| for debugging.
+ * NOTE(review): fields are read without holding adev->lock; acceptable for
+ * a best-effort dump, but values may be momentarily inconsistent. */
+static int adev_dump(const audio_hw_device_t *dev, int fd)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    dprintf(fd, "\nadev_dump:\n"
+                "\tfd: %d\n"
+                "\tmic_mute: %s\n"
+                "\toutput: %p\n"
+                "\tinput: %p\n\n",
+                adev->fd,
+                adev->mic_mute ? "true": "false",
+                adev->output,
+                adev->input);
+
+    if (adev->output != NULL)
+        out_dump((const struct audio_stream *)adev->output, fd);
+    if (adev->input != NULL)
+        in_dump((const struct audio_stream *)adev->input, fd);
+
+    return 0;
+}
+
+/* Tears down the device: releases any remaining streams (the close helpers
+ * tolerate NULL), closes the emulator fd, and frees the device struct. */
+static int adev_close(hw_device_t *dev)
+{
+    struct generic_audio_device *adev = (struct generic_audio_device *)dev;
+
+    adev_close_output_stream((struct audio_hw_device *)dev, adev->output);
+    adev_close_input_stream((struct audio_hw_device *)dev, adev->input);
+
+    if (adev->fd >= 0)
+        close(adev->fd);
+
+    free(dev);
+    return 0;
+}
+
+/* HAL entry point: opens the audio device backed by the emulator's device
+ * node (AUDIO_DEVICE_NAME). Returns -EINVAL for an unknown interface name,
+ * -ENOSYS when the device node cannot be opened, -ENOMEM on allocation
+ * failure, and 0 on success with |*device| pointing at the new device. */
+static int adev_open(const hw_module_t* module, const char* name,
+                     hw_device_t** device)
+{
+    struct generic_audio_device *adev;
+    int fd;
+
+    if (strcmp(name, AUDIO_HARDWARE_INTERFACE) != 0)
+        return -EINVAL;
+
+    fd = open(AUDIO_DEVICE_NAME, O_RDWR);
+    if (fd < 0)
+        return -ENOSYS;
+
+    adev = calloc(1, sizeof(struct generic_audio_device));
+    if (adev == NULL) {
+        // Don't leak the device fd when allocation fails; the old code
+        // also dereferenced the unchecked NULL pointer right below.
+        close(fd);
+        return -ENOMEM;
+    }
+
+    adev->fd = fd;
+
+    adev->device.common.tag = HARDWARE_DEVICE_TAG;
+    adev->device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
+    adev->device.common.module = (struct hw_module_t *) module;
+    adev->device.common.close = adev_close;
+
+    adev->device.init_check = adev_init_check;
+    adev->device.set_voice_volume = adev_set_voice_volume;
+    adev->device.set_master_volume = adev_set_master_volume;
+    adev->device.get_master_volume = adev_get_master_volume;
+    adev->device.set_master_mute = adev_set_master_mute;
+    adev->device.get_master_mute = adev_get_master_mute;
+    adev->device.set_mode = adev_set_mode;
+    adev->device.set_mic_mute = adev_set_mic_mute;
+    adev->device.get_mic_mute = adev_get_mic_mute;
+    adev->device.set_parameters = adev_set_parameters;
+    adev->device.get_parameters = adev_get_parameters;
+    adev->device.get_input_buffer_size = adev_get_input_buffer_size;
+    adev->device.open_output_stream = adev_open_output_stream;
+    adev->device.close_output_stream = adev_close_output_stream;
+    adev->device.open_input_stream = adev_open_input_stream;
+    adev->device.close_input_stream = adev_close_input_stream;
+    adev->device.dump = adev_dump;
+
+    *device = &adev->device.common;
+
+    return 0;
+}
+
+static struct hw_module_methods_t hal_module_methods = {
+    .open = adev_open,
+};
+
+/* Module descriptor looked up by the HAL loader via dlsym(HAL_MODULE_INFO_SYM). */
+struct audio_module HAL_MODULE_INFO_SYM = {
+    .common = {
+        .tag = HARDWARE_MODULE_TAG,
+        .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
+        .hal_api_version = HARDWARE_HAL_API_VERSION,
+        .id = AUDIO_HARDWARE_MODULE_ID,
+        .name = "Generic audio HW HAL",
+        .author = "The Android Open Source Project",
+        .methods = &hal_module_methods,
+    },
+};
diff --git a/audio_policy.conf b/audio_policy.conf
new file mode 100644
index 0000000..0945c25
--- /dev/null
+++ b/audio_policy.conf
@@ -0,0 +1,64 @@
+#
+# Audio policy configuration for generic device builds (goldfish audio HAL - emulator)
+#
+
+# Global configuration section: lists input and output devices always present on the device
+# as well as the output device selected by default.
+# Devices are designated by a string that corresponds to the enum in audio.h
+
+global_configuration {
+  attached_output_devices AUDIO_DEVICE_OUT_SPEAKER
+  default_output_device AUDIO_DEVICE_OUT_SPEAKER
+  attached_input_devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_REMOTE_SUBMIX
+}
+
+# audio hardware module section: contains descriptors for all audio hw modules present on the
+# device. Each hw module node is named after the corresponding hw module library base name.
+# For instance, "primary" corresponds to audio.primary.<device>.so.
+# The "primary" module is mandatory and must include at least one output with
+# AUDIO_OUTPUT_FLAG_PRIMARY flag.
+# Each module descriptor contains one or more output profile descriptors and zero or more
+# input profile descriptors. Each profile lists all the parameters supported by a given output
+# or input stream category.
+# The "channel_masks", "formats", "devices" and "flags" are specified using strings corresponding
+# to enums in audio.h and audio_policy.h. They are concatenated by use of "|" without space or "\n".
+
+audio_hw_modules {
+  primary {
+    outputs {
+      primary {
+        sampling_rates 8000|11025|16000|22050|24000|44100|48000
+        channel_masks AUDIO_CHANNEL_OUT_MONO|AUDIO_CHANNEL_OUT_STEREO
+        formats AUDIO_FORMAT_PCM_16_BIT
+        devices AUDIO_DEVICE_OUT_SPEAKER|AUDIO_DEVICE_OUT_WIRED_HEADPHONE|AUDIO_DEVICE_OUT_WIRED_HEADSET
+        flags AUDIO_OUTPUT_FLAG_PRIMARY
+      }
+    }
+    inputs {
+      primary {
+        sampling_rates 8000|11025|16000|22050|44100|48000
+        channel_masks AUDIO_CHANNEL_IN_MONO|AUDIO_CHANNEL_IN_STEREO
+        formats AUDIO_FORMAT_PCM_16_BIT
+        devices AUDIO_DEVICE_IN_BUILTIN_MIC|AUDIO_DEVICE_IN_WIRED_HEADSET
+      }
+    }
+  }
+  r_submix {
+    outputs {
+      submix {
+        sampling_rates 48000
+        channel_masks AUDIO_CHANNEL_OUT_STEREO
+        formats AUDIO_FORMAT_PCM_16_BIT
+        devices AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+      }
+    }
+    inputs {
+      submix {
+        sampling_rates 48000
+        channel_masks AUDIO_CHANNEL_IN_STEREO
+        formats AUDIO_FORMAT_PCM_16_BIT
+        devices AUDIO_DEVICE_IN_REMOTE_SUBMIX
+      }
+    }
+  }
+}
diff --git a/camera/Alignment.h b/camera/Alignment.h
new file mode 100644
index 0000000..4619d5e
--- /dev/null
+++ b/camera/Alignment.h
@@ -0,0 +1,14 @@
+#ifndef HW_EMULATOR_CAMERA_ALIGNMENT_H
+#define HW_EMULATOR_CAMERA_ALIGNMENT_H
+
+namespace android {
+
+// Round |value| up to the nearest multiple of |alignment|; |value| is
+// returned unchanged when it is already a multiple. |alignment| has to be a
+// power of 2.
+inline int align(int value, int alignment) {
+    return (value + alignment - 1) & (~(alignment - 1));
+}
+
+}  // namespace android
+
+#endif  // HW_EMULATOR_CAMERA_ALIGNMENT_H
diff --git a/camera/Android.mk b/camera/Android.mk
index 80a4509..bd33131 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -25,6 +25,7 @@
 emulator_camera_clang_flags := -Wno-c++11-narrowing
 emulator_camera_shared_libraries := \
     libbinder \
+    libexif \
     liblog \
     libutils \
     libcutils \
@@ -34,8 +35,14 @@
 	libjpeg \
 	libcamera_metadata
 
-emulator_camera_c_includes := external/jpeg \
+emulator_camera_static_libraries := \
+	libyuv_static
+
+emulator_camera_c_includes := external/libjpeg-turbo \
+	external/libexif \
+	external/libyuv/files/include \
 	frameworks/native/include/media/hardware \
+	$(LOCAL_PATH)/../include \
 	$(LOCAL_PATH)/../../goldfish-opengl/system/OpenglSystemCommon \
 	$(call include-path-for, camera)
 
@@ -62,7 +69,11 @@
 		fake-pipeline2/Sensor.cpp \
 		fake-pipeline2/JpegCompressor.cpp \
 	EmulatedCamera3.cpp \
-		EmulatedFakeCamera3.cpp
+		EmulatedFakeCamera3.cpp \
+	Exif.cpp \
+	Thumbnail.cpp \
+	WorkerThread.cpp \
+
 
 # Emulated camera - goldfish / vbox_x86 build###################################
 
@@ -71,6 +82,7 @@
 LOCAL_CLANG_CFLAGS += ${emulator_camera_clang_flags}
 
 LOCAL_SHARED_LIBRARIES := ${emulator_camera_shared_libraries}
+LOCAL_STATIC_LIBRARIES := ${emulator_camera_static_libraries}
 LOCAL_C_INCLUDES += ${emulator_camera_c_includes}
 LOCAL_SRC_FILES := ${emulator_camera_src}
 
@@ -93,6 +105,7 @@
 LOCAL_CLANG_CFLAGS += ${emulator_camera_clang_flags}
 
 LOCAL_SHARED_LIBRARIES := ${emulator_camera_shared_libraries}
+LOCAL_STATIC_LIBRARIES := ${emulator_camera_static_libraries}
 LOCAL_C_INCLUDES += ${emulator_camera_c_includes}
 LOCAL_SRC_FILES := ${emulator_camera_src}
 
@@ -100,55 +113,6 @@
 
 include $(BUILD_SHARED_LIBRARY)
 
-# JPEG stub#####################################################################
+# Build all subdirectories #####################################################
+include $(call all-makefiles-under,$(LOCAL_PATH))
 
-ifneq ($(TARGET_BUILD_PDK),true)
-
-include $(CLEAR_VARS)
-
-jpeg_module_relative_path := hw
-jpeg_cflags := -fno-short-enums -DQEMU_HARDWARE
-jpeg_cflags += -Wno-unused-parameter
-jpeg_clang_flags += -Wno-c++11-narrowing
-jpeg_shared_libraries := \
-    libcutils \
-    liblog \
-    libskia \
-    libandroid_runtime
-jpeg_c_includes := external/libjpeg-turbo \
-                   external/skia/include/core/ \
-                   frameworks/base/core/jni/android/graphics \
-                   frameworks/native/include
-jpeg_src := JpegStub.cpp
-
-# JPEG stub - goldfish build####################################################
-
-LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
-LOCAL_CFLAGS += ${jpeg_cflags}
-LOCAL_CLANG_CFLAGS += ${jpeg_clangflags}
-
-LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
-LOCAL_C_INCLUDES += ${jpeg_c_includes}
-LOCAL_SRC_FILES := ${jpeg_src}
-
-LOCAL_MODULE := camera.goldfish.jpeg
-
-include $(BUILD_SHARED_LIBRARY)
-
-# JPEG stub - ranchu build######################################################
-
-include ${CLEAR_VARS}
-
-LOCAL_MODULE := camera.ranchu.jpeg
-
-LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
-LOCAL_CFLAGS += ${jpeg_cflags}
-LOCAL_CLANG_CFLAGS += ${jpeg_clangflags}
-
-LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
-LOCAL_C_INCLUDES += ${jpeg_c_includes}
-LOCAL_SRC_FILES := ${jpeg_src}
-
-include $(BUILD_SHARED_LIBRARY)
-
-endif # !PDK
diff --git a/camera/CallbackNotifier.cpp b/camera/CallbackNotifier.cpp
index 0dbd50d..18c5eec 100755
--- a/camera/CallbackNotifier.cpp
+++ b/camera/CallbackNotifier.cpp
@@ -24,8 +24,12 @@
 #include <cutils/log.h>
 #include <MetadataBufferType.h>
 #include "EmulatedCameraDevice.h"
+#undef min
+#undef max
 #include "CallbackNotifier.h"
+#include "Exif.h"
 #include "JpegCompressor.h"
+#include "Thumbnail.h"
 
 namespace android {
 
@@ -180,6 +184,13 @@
     }
 }
 
+void CallbackNotifier::autoFocusComplete() {
+    // Even though we don't support auto-focus we are expected to send a fake
+    // success message according to the documentation.
+    // https://developer.android.com/reference/android/hardware/Camera.AutoFocusCallback.html
+    mNotifyCB(CAMERA_MSG_FOCUS, true, 0, mCBOpaque);
+}
+
 status_t CallbackNotifier::storeMetaDataInBuffers(bool enable)
 {
     // Return error if metadata is request, otherwise silently agree.
@@ -206,19 +217,33 @@
     mTakingPicture = false;
 }
 
-void CallbackNotifier::onNextFrameAvailable(const void* frame,
-                                            nsecs_t timestamp,
+void CallbackNotifier::onNextFrameAvailable(nsecs_t timestamp,
                                             EmulatedCameraDevice* camera_dev)
 {
     if (isMessageEnabled(CAMERA_MSG_VIDEO_FRAME) && isVideoRecordingEnabled() &&
             isNewVideoFrameTime(timestamp)) {
-        camera_memory_t* cam_buff =
-            mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
+        // This is the path for video frames, the format used here is not
+        // exposed to external users so it can be whatever the camera and the
+        // encoder can agree upon. The emulator system images use software
+        // encoders that expect a YUV420 format but the camera parameter
+        // constants cannot represent this. The closest we have is YV12 which is
+        // YVU420. So we produce YV12 frames so that we can serve those through
+        // the preview callback below and then we convert from YV12 to YUV420
+        // here. This is a pretty cheap conversion in most cases since we have
+        // to copy the frame here anyway. In the best (and most common) cases
+        // the conversion is just copying the U and V parts of the frame in
+        // different order. A slightly more expensive case is when the YV12
+        // frame has padding to ensure that rows are aligned to 16-byte
+        // boundaries. The YUV420 format expected by the encoders do not have
+        // this alignment so it has to be removed. This way the encoder gets the
+        // format it expects and the preview callback (or data callback) below
+        // gets the format that is configured in camera parameters.
+        const size_t frameSize = camera_dev->getVideoFrameBufferSize();
+        camera_memory_t* cam_buff = mGetMemoryCB(-1, frameSize, 1, NULL);
         if (NULL != cam_buff && NULL != cam_buff->data) {
-            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+            camera_dev->getCurrentFrame(cam_buff->data, V4L2_PIX_FMT_YUV420);
             mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME,
                                cam_buff, 0, mCBOpaque);
-
             mCameraMemoryTs.push_back( cam_buff );
         } else {
             ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
@@ -229,7 +254,8 @@
         camera_memory_t* cam_buff =
             mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
         if (NULL != cam_buff && NULL != cam_buff->data) {
-            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+            camera_dev->getCurrentFrame(cam_buff->data,
+                                        camera_dev->getOriginalPixelFormat());
             mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
             cam_buff->release(cam_buff);
         } else {
@@ -252,13 +278,39 @@
             mNotifyCB(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCBOpaque);
         }
         if (isMessageEnabled(CAMERA_MSG_COMPRESSED_IMAGE)) {
+            // Create EXIF data from the camera parameters, this includes things
+            // like EXIF default fields, a timestamp and GPS information.
+            ExifData* exifData = createExifData(mCameraParameters);
+
+            // Hold the frame lock while accessing the current frame to prevent
+            // concurrent modifications. Then create our JPEG from that frame.
+            EmulatedCameraDevice::FrameLock lock(*camera_dev);
+            const void* frame = camera_dev->getCurrentFrame();
+
+            // Create a thumbnail and place the pointer and size in the EXIF
+            // data structure. This transfers ownership to the EXIF data and
+            // the memory will be deallocated in the freeExifData call below.
+            int width = camera_dev->getFrameWidth();
+            int height = camera_dev->getFrameHeight();
+            int thumbWidth = mCameraParameters.getInt(
+                    CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH);
+            int thumbHeight = mCameraParameters.getInt(
+                    CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT);
+            if (thumbWidth > 0 && thumbHeight > 0) {
+                if (!createThumbnail(static_cast<const unsigned char*>(frame),
+                                     width, height, thumbWidth, thumbHeight,
+                                     mJpegQuality, exifData)) {
+                    // Not really a fatal error, we'll just keep going
+                    ALOGE("%s: Failed to create thumbnail for image",
+                          __FUNCTION__);
+                }
+            }
+
             /* Compress the frame to JPEG. Note that when taking pictures, we
              * have requested camera device to provide us with NV21 frames. */
             NV21JpegCompressor compressor;
-            status_t res =
-                compressor.compressRawImage(frame, camera_dev->getFrameWidth(),
-                                            camera_dev->getFrameHeight(),
-                                            mJpegQuality);
+            status_t res = compressor.compressRawImage(frame, width, height,
+                                                       mJpegQuality, exifData);
             if (res == NO_ERROR) {
                 camera_memory_t* jpeg_buff =
                     mGetMemoryCB(-1, compressor.getCompressedSize(), 1, NULL);
@@ -272,6 +324,8 @@
             } else {
                 ALOGE("%s: Compression failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
             }
+            // The EXIF data has been consumed, free it
+            freeExifData(exifData);
         }
     }
 }
diff --git a/camera/CallbackNotifier.h b/camera/CallbackNotifier.h
index 24784b5..f61ac59 100755
--- a/camera/CallbackNotifier.h
+++ b/camera/CallbackNotifier.h
@@ -23,10 +23,12 @@
  */
 
 #include <utils/List.h>
+#include <camera/CameraParameters.h>
 
 namespace android {
 
 class EmulatedCameraDevice;
+class FrameProducer;
 
 /* Manages callbacks set via set_callbacks, enable_msg_type, and disable_msg_type
  * camera HAL API.
@@ -105,6 +107,13 @@
      */
     void releaseRecordingFrame(const void* opaque);
 
+    /* Send a message to the notify callback that auto-focus has completed.
+     * This method is called from the containing emulated camera object when it
+     * has received confirmation from the camera device that auto-focusing is
+     * completed.
+     */
+    void autoFocusComplete();
+
     /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
      * This method is called by the containing emulated camera object when it is
      * handing the camera_device_ops_t::msg_type_enabled callback.
@@ -129,7 +138,7 @@
      * Return:
      *  true if video recording is enabled, or false if it is disabled.
      */
-    inline bool isVideoRecordingEnabled()
+    inline bool isVideoRecordingEnabled() const
     {
         return mVideoRecEnabled;
     }
@@ -144,21 +153,15 @@
 
     /* Next frame is available in the camera device.
      * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
+     * a new frame is available. The captured frame is available through the
+     * |camera_dev| object.
      * Note that most likely this method is called in context of a worker thread
      * that camera device has created for frame capturing.
      * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
      * timestamp - Frame's timestamp.
      * camera_dev - Camera device instance that delivered the frame.
      */
-    void onNextFrameAvailable(const void* frame,
-                              nsecs_t timestamp,
+    void onNextFrameAvailable(nsecs_t timestamp,
                               EmulatedCameraDevice* camera_dev);
 
     /* Entry point for notifications that occur in camera device.
@@ -182,6 +185,14 @@
         mJpegQuality = jpeg_quality;
     }
 
+    /* Sets the camera parameters that will be used to populate exif data in the
+     * picture.
+     */
+    void setCameraParameters(CameraParameters cameraParameters)
+    {
+        mCameraParameters = cameraParameters;
+    }
+
     /****************************************************************************
      * Private API
      ***************************************************************************/
@@ -226,6 +237,9 @@
     /* JPEG quality used to compress frame during picture taking. */
     int                             mJpegQuality;
 
+    /* Camera parameters used for EXIF data in picture */
+    CameraParameters                mCameraParameters;
+
     /* Video recording status. */
     bool                            mVideoRecEnabled;
 
diff --git a/camera/Converters.cpp b/camera/Converters.cpp
index f63f67f..4765bf2 100755
--- a/camera/Converters.cpp
+++ b/camera/Converters.cpp
@@ -23,6 +23,8 @@
 #include <cutils/log.h>
 #include "Converters.h"
 
+#include "Alignment.h"
+
 namespace android {
 
 static void _YUV420SToRGB565(const uint8_t* Y,
@@ -31,12 +33,18 @@
                              int dUV,
                              uint16_t* rgb,
                              int width,
-                             int height)
+                             int height,
+                             int y_stride,
+                             int uv_stride)
 {
+    const uint8_t* Y_pos = Y;
     const uint8_t* U_pos = U;
     const uint8_t* V_pos = V;
 
     for (int y = 0; y < height; y++) {
+        Y = Y_pos + y_stride * y;
+        U = U_pos + uv_stride * (y / 2);
+        V = V_pos + uv_stride * (y / 2);
         for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
             const uint8_t nU = *U;
             const uint8_t nV = *V;
@@ -45,13 +53,6 @@
             *rgb = YUVToRGB565(*Y, nU, nV);
             Y++; rgb++;
         }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
     }
 }
 
@@ -61,12 +62,18 @@
                             int dUV,
                             uint32_t* rgb,
                             int width,
-                            int height)
+                            int height,
+                            int y_stride,
+                            int uv_stride)
 {
+    const uint8_t* Y_pos = Y;
     const uint8_t* U_pos = U;
     const uint8_t* V_pos = V;
 
     for (int y = 0; y < height; y++) {
+        Y = Y_pos + y_stride * y;
+        U = U_pos + uv_stride * (y / 2);
+        V = V_pos + uv_stride * (y / 2);
         for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
             const uint8_t nU = *U;
             const uint8_t nV = *V;
@@ -75,41 +82,51 @@
             *rgb = YUVToRGB32(*Y, nU, nV);
             Y++; rgb++;
         }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
     }
 }
 
+/* The YV12 and YU12 formats require that the row strides are aligned to 16 byte
+ * boundaries as per the format specification at:
+ * https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12
+ *
+ * This means that we can't just use the width or assume that pixels are
+ * tightly packed, we have to calculate aligned strides and use them to find the
+ * next row.
+ */
 void YV12ToRGB565(const void* yv12, void* rgb, int width, int height)
 {
-    const int pix_total = width * height;
+    // See note above about alignment
+    const int y_stride = align(width, 16);
+    const int uv_stride = align(y_stride / 2, 16);
     const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
-    const uint8_t* U = Y + pix_total;
-    const uint8_t* V = U + pix_total / 4;
-    _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb), width, height);
+    const uint8_t* U = Y + y_stride * height;
+    const uint8_t* V = U + uv_stride * (height / 2);
+    _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb),
+                     width, height, y_stride, uv_stride);
 }
 
 void YV12ToRGB32(const void* yv12, void* rgb, int width, int height)
 {
-    const int pix_total = width * height;
+    // See note above about alignment
+    const int y_stride = align(width, 16);
+    const int uv_stride = align(y_stride / 2, 16);
     const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
-    const uint8_t* V = Y + pix_total;
-    const uint8_t* U = V + pix_total / 4;
-    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+    const uint8_t* V = Y + y_stride * height;
+    const uint8_t* U = V + uv_stride * (height / 2);
+    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height,
+                    y_stride, uv_stride);
 }
 
 void YU12ToRGB32(const void* yu12, void* rgb, int width, int height)
 {
-    const int pix_total = width * height;
+    // See note above about alignment
+    const int y_stride = align(width, 16);
+    const int uv_stride = align(y_stride / 2, 16);
     const uint8_t* Y = reinterpret_cast<const uint8_t*>(yu12);
-    const uint8_t* U = Y + pix_total;
-    const uint8_t* V = U + pix_total / 4;
-    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+    const uint8_t* U = Y + y_stride * height;
+    const uint8_t* V = U + uv_stride * (height / 2);
+    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height,
+                    y_stride, uv_stride);
 }
 
 /* Common converter for YUV 4:2:0 interleaved to RGB565.
@@ -122,7 +139,13 @@
                           int width,
                           int height)
 {
-    _YUV420SToRGB565(Y, U, V, 2, rgb, width, height);
+    // The UV stride for NV21 and NV12 is the same as the width because the
+    // U and V values are interleaved, making each row twice as wide even though
+    // each value covers a two pixel wide area. These formats do not require any
+    // kind of alignment.
+    int y_stride = width;
+    int uv_stride = width;
+    _YUV420SToRGB565(Y, U, V, 2, rgb, width, height, y_stride, uv_stride);
 }
 
 /* Common converter for YUV 4:2:0 interleaved to RGB32.
@@ -135,7 +158,13 @@
                          int width,
                          int height)
 {
-    _YUV420SToRGB32(Y, U, V, 2, rgb, width, height);
+    // The UV stride for NV21 and NV12 is the same as the width because the
+    // U and V values are interleaved, making each row twice as wide even though
+    // each value covers a two pixel wide area. These formats do not require any
+    // kind of alignment.
+    int y_stride = width;
+    int uv_stride = width;
+    _YUV420SToRGB32(Y, U, V, 2, rgb, width, height, y_stride, uv_stride);
 }
 
 void NV12ToRGB565(const void* nv12, void* rgb, int width, int height)
diff --git a/camera/EmulatedCamera.cpp b/camera/EmulatedCamera.cpp
index 096c5b2..bdab833 100755
--- a/camera/EmulatedCamera.cpp
+++ b/camera/EmulatedCamera.cpp
@@ -36,6 +36,16 @@
 
 namespace android {
 
+static const char* kValidFocusModes[] = {
+    CameraParameters::FOCUS_MODE_AUTO,
+    CameraParameters::FOCUS_MODE_INFINITY,
+    CameraParameters::FOCUS_MODE_MACRO,
+    CameraParameters::FOCUS_MODE_FIXED,
+    CameraParameters::FOCUS_MODE_EDOF,
+    CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO,
+    CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE,
+};
+
 #if DEBUG_PARAM
 /* Calculates and logs parameter changes.
  * Param:
@@ -58,6 +68,46 @@
  */
 static char* AddValue(const char* param, const char* val);
 
+/*
+ * Check if a given string |value| equals at least one of the strings in |list|
+ */
+template<size_t N>
+static bool IsValueInList(const char* value, const char* const (&list)[N])
+{
+    for (size_t i = 0; i < N; ++i) {
+        if (strcmp(value, list[i]) == 0) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool StringsEqual(const char* str1, const char* str2) {
+    if (str1 == nullptr && str2 == nullptr) {
+        return true;
+    }
+    if (str1 == nullptr || str2 == nullptr) {
+        return false;
+    }
+    return strcmp(str1, str2) == 0;
+}
+
+static bool GetFourCcFormatFromCameraParam(const char* fmt_str,
+                                           uint32_t* fmt_val) {
+    if (strcmp(fmt_str, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+        // Despite the name above this is a YVU format, specifically YV12
+        *fmt_val = V4L2_PIX_FMT_YVU420;
+        return true;
+    } else if (strcmp(fmt_str, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+        *fmt_val = V4L2_PIX_FMT_RGB32;
+        return true;
+    } else if (strcmp(fmt_str, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+        *fmt_val = V4L2_PIX_FMT_NV21;
+        return true;
+    }
+    return false;
+}
+
 EmulatedCamera::EmulatedCamera(int cameraId,
                                struct hw_module_t* module)
         : EmulatedBaseCamera(cameraId,
@@ -94,14 +144,23 @@
      * Fake required parameters.
      */
 
+    mParameters.set(CameraParameters::KEY_RECORDING_HINT,
+                    CameraParameters::FALSE);
     mParameters.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, "320x240,0x0");
 
-    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "512");
-    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "384");
+    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "320");
+    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "240");
     mParameters.set(CameraParameters::KEY_JPEG_QUALITY, "90");
-    mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.31");
-    mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "54.8");
-    mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "42.5");
+    // Camera values for a Logitech B910 HD Webcam
+    //     Focal length: 4.90 mm (from specs)
+    //     Horizontal view angle: 61 degrees for 4:3 sizes,
+    //         70 degrees for 16:9 sizes (empirical)
+    //     Vertical view angle: 45.8 degrees (= 61 * 3 / 4)
+    // (The Mac has only "4:3" image sizes; the correct angle
+    //  is 51.0 degrees. [MacBook Pro (Retina, 15-inch, Mid 2014)])
+    mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.90");
+    mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "61.0");
+    mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "45.8");
     mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, "90");
 
     /* Preview format settings used here are related to panoramic view only. It's
@@ -112,13 +171,13 @@
                     preview_formats);
     mParameters.setPreviewFormat(CameraParameters::PIXEL_FORMAT_YUV420SP);
 
-    /* We don't relay on the actual frame rates supported by the camera device,
+    /* We don't rely on the actual frame rates supported by the camera device,
      * since we will emulate them through timeouts in the emulated camera device
      * worker thread. */
     mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
                     "30,24,20,15,10,5");
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, "(5,30)");
-    mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, "5,30");
+    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE, "(24000,24000)");
+    mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, "24000,24000");
     mParameters.setPreviewFrameRate(24);
 
     /* Only PIXEL_FORMAT_YUV420P is accepted by video framework in emulator! */
@@ -156,6 +215,24 @@
             CameraParameters::WHITE_BALANCE_TWILIGHT, 0.92f, 1.22f);
     getCameraDevice()->setWhiteBalanceMode(CameraParameters::WHITE_BALANCE_AUTO);
 
+    /* Set supported antibanding values */
+    mParameters.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                    CameraParameters::ANTIBANDING_AUTO);
+    mParameters.set(CameraParameters::KEY_ANTIBANDING,
+                    CameraParameters::ANTIBANDING_AUTO);
+
+    /* Set control effect mode
+     * Bug: 30862244
+     * */
+    mParameters.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+                    CameraParameters::EFFECT_NONE);
+    mParameters.set(CameraParameters::KEY_EFFECT,
+                    CameraParameters::EFFECT_NONE);
+
+    /* Set focus distances for "near,optimal,far" */
+    mParameters.set(CameraParameters::KEY_FOCUS_DISTANCES,
+                    "Infinity,Infinity,Infinity");
+
     /* Not supported features
      */
     mParameters.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
@@ -166,15 +243,14 @@
     return NO_ERROR;
 }
 
-void EmulatedCamera::onNextFrameAvailable(const void* frame,
-                                          nsecs_t timestamp,
+void EmulatedCamera::onNextFrameAvailable(nsecs_t timestamp,
                                           EmulatedCameraDevice* camera_dev)
 {
     /* Notify the preview window first. */
-    mPreviewWindow.onNextFrameAvailable(frame, timestamp, camera_dev);
+    mPreviewWindow.onNextFrameAvailable(timestamp, camera_dev);
 
     /* Notify callback notifier next. */
-    mCallbackNotifier.onNextFrameAvailable(frame, timestamp, camera_dev);
+    mCallbackNotifier.onNextFrameAvailable(timestamp, camera_dev);
 }
 
 void EmulatedCamera::onCameraDeviceError(int err)
@@ -183,6 +259,9 @@
     mCallbackNotifier.onCameraDeviceError(err);
 }
 
+void EmulatedCamera::setTakingPicture(bool takingPicture) {
+    mCallbackNotifier.setTakingPicture(takingPicture);
+}
 /****************************************************************************
  * Camera API implementation.
  ***************************************************************************/
@@ -241,6 +320,10 @@
     return EmulatedBaseCamera::getCameraInfo(info);
 }
 
+void EmulatedCamera::autoFocusComplete() {
+    mCallbackNotifier.autoFocusComplete();
+}
+
 status_t EmulatedCamera::setPreviewWindow(struct preview_stream_ops* window)
 {
     /* Callback should return a negative errno. */
@@ -281,6 +364,17 @@
 
 void EmulatedCamera::stopPreview()
 {
+    /* The camera client will not pass on calls to set the preview window to
+     * NULL if the preview is not enabled. If preview is not enabled the camera
+     * client will instead simply destroy the preview window without notifying
+     * the HAL. Later on when preview is enabled again that means the HAL will
+     * attempt to use the old, destroyed window which will cause a crash.
+     * Instead we need to clear the preview window here, the client will set
+     * a preview window when needed. The preview window is cleared here instead
+     * of inside doStopPreview to prevent the window from being cleared when
+     * restarting the preview because of a parameter change. */
+    mPreviewWindow.setPreviewWindow(nullptr, 0);
+
     doStopPreview();
 }
 
@@ -297,8 +391,55 @@
 
 status_t EmulatedCamera::startRecording()
 {
-    /* Callback should return a negative errno. */
-    return -mCallbackNotifier.enableVideoRecording(mParameters.getPreviewFrameRate());
+    /* This callback should return a negative errno, hence all the negations */
+    int frameRate = mParameters.getPreviewFrameRate();
+    status_t res = mCallbackNotifier.enableVideoRecording(frameRate);
+    if (res != NO_ERROR) {
+        ALOGE("%s: CallbackNotifier failed to enable video recording",
+              __FUNCTION__);
+        stopRecording();
+        return -res;
+    }
+    EmulatedCameraDevice* const camera_dev = getCameraDevice();
+    if (camera_dev == nullptr || !camera_dev->isStarted()) {
+        // No need for restarts, the next preview start will use correct params
+        return NO_ERROR;
+    }
+
+    // If the camera is running we might have to restart it to accommodate
+    // whatever pixel format and frame size the caller wants.
+    uint32_t conf_fmt = 0;
+    res = getConfiguredPixelFormat(&conf_fmt);
+    if (res != NO_ERROR) {
+        stopRecording();
+        return -res;
+    }
+    uint32_t cur_fmt = camera_dev->getOriginalPixelFormat();
+    int conf_width = -1, conf_height = -1;
+    res = getConfiguredFrameSize(&conf_width, &conf_height);
+    if (res != NO_ERROR) {
+        stopRecording();
+        return -res;
+    }
+    int cur_width = camera_dev->getFrameWidth();
+    int cur_height = camera_dev->getFrameHeight();
+
+    if (cur_fmt != conf_fmt ||
+            cur_width != conf_width ||
+            cur_height != conf_height) {
+        // We need to perform a restart to use the new format or size and it
+        // has to be an asynchronous restart or this might block if the camera
+        // thread is currently delivering a frame.
+        if (!camera_dev->requestRestart(conf_width, conf_height, conf_fmt,
+                                        false /* takingPicture */,
+                                        false /* oneBurst */)) {
+            ALOGE("%s: Could not restart preview with new pixel format",
+                  __FUNCTION__);
+            stopRecording();
+            return -EINVAL;
+        }
+    }
+    return NO_ERROR;
 }
 
 void EmulatedCamera::stopRecording()
@@ -318,18 +459,30 @@
 
 status_t EmulatedCamera::setAutoFocus()
 {
-    ALOGV("%s", __FUNCTION__);
-
-    /* TODO: Future enhancements. */
-    return NO_ERROR;
+    // Make sure to check that a preview is in progress. Otherwise this will
+    // silently fail because no callback will be called until the preview starts
+    // which might be never.
+    if (!isPreviewEnabled()) {
+        return EINVAL;
+    }
+    EmulatedCameraDevice* const camera_dev = getCameraDevice();
+    if (camera_dev && camera_dev->isStarted()) {
+        return camera_dev->setAutoFocus();
+    }
+    return EINVAL;
 }
 
 status_t EmulatedCamera::cancelAutoFocus()
 {
-    ALOGV("%s", __FUNCTION__);
-
-    /* TODO: Future enhancements. */
-    return NO_ERROR;
+    // In this case we don't check if a preview is in progress or not. Unlike
+    // setAutoFocus this call will not silently fail without the check. If an
+    // auto-focus request is somehow pending without having preview enabled this
+    // will correctly cancel that pending auto-focus which seems reasonable.
+    EmulatedCameraDevice* const camera_dev = getCameraDevice();
+    if (camera_dev && camera_dev->isStarted()) {
+        return camera_dev->cancelAutoFocus();
+    }
+    return EINVAL;
 }
 
 status_t EmulatedCamera::takePicture()
@@ -343,19 +496,18 @@
     /* Collect frame info for the picture. */
     mParameters.getPictureSize(&width, &height);
     const char* pix_fmt = mParameters.getPictureFormat();
-    if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
-        org_fmt = V4L2_PIX_FMT_YUV420;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
-        org_fmt = V4L2_PIX_FMT_RGB32;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
-        /* We only have JPEG converted for NV21 format. */
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else {
-        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
-        return EINVAL;
+    if (!GetFourCcFormatFromCameraParam(pix_fmt, &org_fmt)) {
+        // Also check for JPEG here, the function above does not do this since
+        // this is very specific to this use case.
+        if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
+            /* We only have JPEG converted for NV21 format. */
+            org_fmt = V4L2_PIX_FMT_NV21;
+        } else {
+            ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+            return EINVAL;
+        }
     }
+
     /* Get JPEG quality. */
     int jpeg_quality = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
     if (jpeg_quality <= 0) {
@@ -367,46 +519,40 @@
      * picture.
      */
 
-    const bool preview_on = mPreviewWindow.isPreviewEnabled();
-    if (preview_on) {
-        doStopPreview();
-    }
-
-    /* Camera device should have been stopped when the shutter message has been
-     * enabled. */
     EmulatedCameraDevice* const camera_dev = getCameraDevice();
-    if (camera_dev->isStarted()) {
-        ALOGW("%s: Camera device is started", __FUNCTION__);
-        camera_dev->stopDeliveringFrames();
-        camera_dev->stopDevice();
-    }
+    mCallbackNotifier.setJpegQuality(jpeg_quality);
+    mCallbackNotifier.setCameraParameters(mParameters);
 
-    /*
-     * Take the picture now.
-     */
-
-    /* Start camera device for the picture frame. */
     ALOGD("Starting camera for picture: %.4s(%s)[%dx%d]",
-         reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
-    res = camera_dev->startDevice(width, height, org_fmt);
-    if (res != NO_ERROR) {
-        if (preview_on) {
-            doStartPreview();
+          reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
+    if (mPreviewWindow.isPreviewEnabled()) {
+        mPreviewWindow.stopPreview();
+        /* If the camera preview is enabled we need to perform an asynchronous
+         * restart. A blocking restart could deadlock this thread as it's
+         * currently holding the camera client lock and the frame delivery could
+         * be stuck on waiting for that lock. If this was synchronous then this
+         * thread would in turn get stuck on waiting for the delivery thread. */
+        if (!camera_dev->requestRestart(width, height, org_fmt,
+                                        true /* takingPicture */,
+                                        true /* oneBurst */)) {
+            return UNKNOWN_ERROR;
+        }
+        return NO_ERROR;
+    } else {
+        /* Start camera device for the picture frame. */
+        res = camera_dev->startDevice(width, height, org_fmt);
+        if (res != NO_ERROR) {
+            return res;
+        }
+
+        /* Deliver one frame only. */
+        mCallbackNotifier.setTakingPicture(true);
+        res = camera_dev->startDeliveringFrames(true);
+        if (res != NO_ERROR) {
+            mCallbackNotifier.setTakingPicture(false);
         }
         return res;
     }
-
-    /* Deliver one frame only. */
-    mCallbackNotifier.setJpegQuality(jpeg_quality);
-    mCallbackNotifier.setTakingPicture(true);
-    res = camera_dev->startDeliveringFrames(true);
-    if (res != NO_ERROR) {
-        mCallbackNotifier.setTakingPicture(false);
-        if (preview_on) {
-            doStartPreview();
-        }
-    }
-    return res;
 }
 
 status_t EmulatedCamera::cancelPicture()
@@ -424,6 +570,7 @@
     CameraParameters new_param;
     String8 str8_param(parms);
     new_param.unflatten(str8_param);
+    bool restartPreview = false;
 
     /*
      * Check for new exposure compensation parameter.
@@ -472,9 +619,82 @@
             getCameraDevice()->setWhiteBalanceMode(new_white_balance);
         }
     }
+    int old_frame_rate = mParameters.getPreviewFrameRate();
+    int new_frame_rate = new_param.getPreviewFrameRate();
+    if (old_frame_rate != new_frame_rate) {
+        getCameraDevice()->setPreviewFrameRate(new_frame_rate);
+    }
+
+    // Validate focus mode
+    const char* focus_mode = new_param.get(CameraParameters::KEY_FOCUS_MODE);
+    if (focus_mode && !IsValueInList(focus_mode, kValidFocusModes)) {
+        return BAD_VALUE;
+    }
+
+    // Validate preview size, if there is no preview size the initial values of
+    // the integers below will be preserved thus intentionally failing the test
+    int new_preview_width = -1, new_preview_height = -1;
+    new_param.getPreviewSize(&new_preview_width, &new_preview_height);
+    if (new_preview_width < 0 || new_preview_height < 0) {
+        return BAD_VALUE;
+    }
+    // If the preview size has changed we have to restart the preview to make
+    // sure we provide frames of the correct size. The receiver assumes the
+    // frame size is correct and will copy all data provided into a buffer whose
+    // size is determined by the preview size without checks, potentially
+    // causing buffer overruns or underruns if there is a size mismatch.
+    int old_preview_width = -1, old_preview_height = -1;
+    mParameters.getPreviewSize(&old_preview_width, &old_preview_height);
+    if (old_preview_width != new_preview_width ||
+            old_preview_height != new_preview_height) {
+        restartPreview = true;
+    }
+
+    // For the same reasons as with the preview size we have to look for changes
+    // in video size and restart the preview if the size has changed.
+    int old_video_width = -1, old_video_height = -1;
+    int new_video_width = -1, new_video_height = -1;
+    mParameters.getVideoSize(&old_video_width, &old_video_height);
+    new_param.getVideoSize(&new_video_width, &new_video_height);
+    if (old_video_width != new_video_width ||
+        old_video_height != new_video_height) {
+        restartPreview = true;
+    }
+    // Restart the preview if the pixel format changes to make sure we serve
+    // the selected encoding to the client.
+    const char* old_format = mParameters.getPreviewFormat();
+    const char* new_format = new_param.getPreviewFormat();
+    if (!StringsEqual(old_format, new_format)) {
+        restartPreview = true;
+    }
+
+    const char* old_hint =
+        mParameters.get(CameraParameters::KEY_RECORDING_HINT);
+    const char* new_hint = new_param.get(CameraParameters::KEY_RECORDING_HINT);
+    if (!StringsEqual(old_hint, new_hint)) {
+        // The recording hint changed, this indicates we transitioned from
+        // recording to non-recording or the other way around. We need to look
+        // at a new pixel format for this and that requires a restart.
+        restartPreview = true;
+    }
 
     mParameters = new_param;
 
+    // Now that the parameters have been assigned check if the preview needs to
+    // be restarted. If necessary this will then use the new parameters to set
+    // up the preview as requested by the caller.
+    if (restartPreview && isPreviewEnabled()) {
+        status_t status = doStopPreview();
+        if (status != NO_ERROR) {
+            ALOGE("%s: Stopping preview failed: %d", __FUNCTION__, status);
+            return status;
+        }
+        status = doStartPreview();
+        if (status != NO_ERROR) {
+            ALOGE("%s: Starting preview failed: %d", __FUNCTION__, status);
+            return status;
+        }
+    }
     return NO_ERROR;
 }
 
@@ -483,6 +703,20 @@
 static char lNoParam = '\0';
 char* EmulatedCamera::getParameters()
 {
+    // Read the image size and set the camera's Field of View.
+    // These values are valid for a Logitech B910 HD Webcam.
+    int width=0, height=0;
+    mParameters.getPictureSize(&width, &height);
+    if (height > 0) {
+        if (((double)width / height) < 1.55) {
+            // Closer to 4:3 (1.33), set the FOV to 61.0 degrees
+            mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "61.0");
+        } else {
+            // Closer to 16:9 (1.77), set the FOV to 70.0 degrees
+            mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "70.0");
+        }
+    }
+
     String8 params(mParameters.flatten());
     char* ret_str =
         reinterpret_cast<char*>(malloc(sizeof(char) * (params.length()+1)));
@@ -509,6 +743,13 @@
 {
     ALOGV("%s: cmd = %d, arg1 = %d, arg2 = %d", __FUNCTION__, cmd, arg1, arg2);
 
+    switch (cmd) {
+        case CAMERA_CMD_START_FACE_DETECTION:
+        case CAMERA_CMD_STOP_FACE_DETECTION:
+            // We do not support hardware face detection so we need to indicate
+            // that any attempt to start/stop face detection is invalid
+            return BAD_VALUE;
+    }
     /* TODO: Future enhancements. */
     return 0;
 }
@@ -528,6 +769,50 @@
     return -EINVAL;
 }
 
+status_t EmulatedCamera::getConfiguredPixelFormat(uint32_t* pixelFormat) const {
+    const char* pix_fmt = nullptr;
+    const char* recordingHint =
+        mParameters.get(CameraParameters::KEY_RECORDING_HINT);
+    bool recordingHintOn = recordingHint && strcmp(recordingHint,
+                                                   CameraParameters::TRUE) == 0;
+    bool recordingEnabled = mCallbackNotifier.isVideoRecordingEnabled();
+    if (recordingHintOn || recordingEnabled) {
+        // We're recording a video, use the video pixel format
+        pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
+    }
+    if (pix_fmt == nullptr) {
+        pix_fmt = mParameters.getPreviewFormat();
+    }
+    if (pix_fmt == nullptr) {
+        ALOGE("%s: Unable to obtain configured pixel format", __FUNCTION__);
+        return EINVAL;
+    }
+    /* Convert framework's pixel format to the FOURCC one. */
+    if (!GetFourCcFormatFromCameraParam(pix_fmt, pixelFormat)) {
+        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+        return EINVAL;
+    }
+    return NO_ERROR;
+}
+
+status_t EmulatedCamera::getConfiguredFrameSize(int* outWidth,
+                                                int* outHeight) const {
+    int width = -1, height = -1;
+    if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != nullptr) {
+        mParameters.getVideoSize(&width, &height);
+    } else {
+        mParameters.getPreviewSize(&width, &height);
+    }
+    if (width < 0 || height < 0) {
+        ALOGE("%s: No frame size configured for camera", __FUNCTION__);
+        return EINVAL;
+    }
+    // Only modify the out parameters once we know we succeeded
+    *outWidth = width;
+    *outHeight = height;
+    return NO_ERROR;
+}
+
 /****************************************************************************
  * Preview management.
  ***************************************************************************/
@@ -556,55 +841,24 @@
         }
     }
 
-    int width, height;
     /* Lets see what should we use for frame width, and height. */
-    if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != NULL) {
-        mParameters.getVideoSize(&width, &height);
-    } else {
-        mParameters.getPreviewSize(&width, &height);
-    }
-    /* Lets see what should we use for the frame pixel format. Note that there
-     * are two parameters that define pixel formats for frames sent to the
-     * application via notification callbacks:
-     * - KEY_VIDEO_FRAME_FORMAT, that is used when recording video, and
-     * - KEY_PREVIEW_FORMAT, that is used for preview frame notification.
-     * We choose one or the other, depending on "recording-hint" property set by
-     * the framework that indicating intention: video, or preview. */
-    const char* pix_fmt = NULL;
-    const char* is_video = mParameters.get(EmulatedCamera::RECORDING_HINT_KEY);
-    if (is_video == NULL) {
-        is_video = CameraParameters::FALSE;
-    }
-    if (strcmp(is_video, CameraParameters::TRUE) == 0) {
-        /* Video recording is requested. Lets see if video frame format is set. */
-        pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
-    }
-    /* If this was not video recording, or video frame format is not set, lets
-     * use preview pixel format for the main framebuffer. */
-    if (pix_fmt == NULL) {
-        pix_fmt = mParameters.getPreviewFormat();
-    }
-    if (pix_fmt == NULL) {
-        ALOGE("%s: Unable to obtain video format", __FUNCTION__);
+    int width, height;
+    res = getConfiguredFrameSize(&width, &height);
+    if (res != NO_ERROR) {
         mPreviewWindow.stopPreview();
-        return EINVAL;
+        return res;
     }
 
-    /* Convert framework's pixel format to the FOURCC one. */
-    uint32_t org_fmt;
-    if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
-        org_fmt = V4L2_PIX_FMT_YUV420;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
-        org_fmt = V4L2_PIX_FMT_RGB32;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else {
-        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+    uint32_t org_fmt = 0;
+    res = getConfiguredPixelFormat(&org_fmt);
+    if (res != NO_ERROR) {
         mPreviewWindow.stopPreview();
-        return EINVAL;
+        return res;
     }
-    ALOGD("Starting camera: %dx%d -> %.4s(%s)",
-         width, height, reinterpret_cast<const char*>(&org_fmt), pix_fmt);
+
+    camera_dev->setPreviewFrameRate(mParameters.getPreviewFrameRate());
+    ALOGD("Starting camera: %dx%d -> %.4s",
+         width, height, reinterpret_cast<const char*>(&org_fmt));
     res = camera_dev->startDevice(width, height, org_fmt);
     if (res != NO_ERROR) {
         mPreviewWindow.stopPreview();
@@ -675,6 +929,9 @@
 
     mCallbackNotifier.cleanupCBNotifier();
 
+    /* Re-init the camera settings in case settings were changed */
+    Initialize();
+
     return NO_ERROR;
 }
 
diff --git a/camera/EmulatedCamera.h b/camera/EmulatedCamera.h
index 9825d5d..70791e9 100755
--- a/camera/EmulatedCamera.h
+++ b/camera/EmulatedCamera.h
@@ -76,21 +76,17 @@
 
     /* Next frame is available in the camera device.
      * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
+     * a new frame is available. The captured frame is available through
+     * the |camera_dev| object. Remember to create a
+     * EmulatedCameraDevice::FrameLock instance to lock the frame before
+     * accessing it.
      * Note that most likely this method is called in context of a worker thread
      * that camera device has created for frame capturing.
      * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
      * timestamp - Frame's timestamp.
      * camera_dev - Camera device instance that delivered the frame.
      */
-    virtual void onNextFrameAvailable(const void* frame,
-                                      nsecs_t timestamp,
+    virtual void onNextFrameAvailable(nsecs_t timestamp,
                                       EmulatedCameraDevice* camera_dev);
 
     /* Entry point for notifications that occur in camera device.
@@ -99,6 +95,9 @@
      */
     virtual void onCameraDeviceError(int err);
 
+    /* Signal to the callback notifier that a pictuer is being taken. */
+    void setTakingPicture(bool takingPicture);
+
     /****************************************************************************
      * Camera API implementation
      ***************************************************************************/
@@ -118,6 +117,11 @@
      * These methods are called from the camera API callback routines.
      ***************************************************************************/
 
+public:
+    /* Signal that a requested auto-focus has completed. This will be called
+     * from the camera device's worker thread. */
+    void autoFocusComplete();
+
 protected:
     /* Actual handler for camera_device_ops_t::set_preview_window callback.
      * NOTE: When this method is called the object is locked.
@@ -290,6 +294,10 @@
     /* Cleans up camera when released. */
     virtual status_t cleanupCamera();
 
+private:
+    status_t getConfiguredPixelFormat(uint32_t* pixelFormat) const;
+    status_t getConfiguredFrameSize(int* width, int* height) const;
+
     /****************************************************************************
      * Camera API callbacks as defined by camera_device_ops structure.
      * See hardware/libhardware/include/hardware/camera.h for information on
diff --git a/camera/EmulatedCameraDevice.cpp b/camera/EmulatedCameraDevice.cpp
index c8e5640..e2d9412 100755
--- a/camera/EmulatedCameraDevice.cpp
+++ b/camera/EmulatedCameraDevice.cpp
@@ -28,29 +28,31 @@
 #include <cutils/log.h>
 #include <sys/select.h>
 #include <cmath>
+#include "Alignment.h"
+#include "EmulatedCamera.h"
 #include "EmulatedCameraDevice.h"
 
+#undef min
+#undef max
+#include <algorithm>
+
 namespace android {
 
 const float GAMMA_CORRECTION = 2.2f;
 EmulatedCameraDevice::EmulatedCameraDevice(EmulatedCamera* camera_hal)
     : mObjectLock(),
-      mCurFrameTimestamp(0),
       mCameraHAL(camera_hal),
-      mCurrentFrame(NULL),
       mExposureCompensation(1.0f),
       mWhiteBalanceScale(NULL),
       mSupportedWhiteBalanceScale(),
-      mState(ECDS_CONSTRUCTED)
+      mState(ECDS_CONSTRUCTED),
+      mTriggerAutoFocus(false)
 {
 }
 
 EmulatedCameraDevice::~EmulatedCameraDevice()
 {
     ALOGV("EmulatedCameraDevice destructor");
-    if (mCurrentFrame != NULL) {
-        delete[] mCurrentFrame;
-    }
     for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
         if (mSupportedWhiteBalanceScale.valueAt(i) != NULL) {
             delete[] mSupportedWhiteBalanceScale.valueAt(i);
@@ -70,13 +72,6 @@
         return NO_ERROR;
     }
 
-    /* Instantiate worker thread object. */
-    mWorkerThread = new WorkerThread(this);
-    if (getWorkerThread() == NULL) {
-        ALOGE("%s: Unable to instantiate worker thread object", __FUNCTION__);
-        return ENOMEM;
-    }
-
     mState = ECDS_INITIALIZED;
 
     return NO_ERROR;
@@ -107,10 +102,18 @@
     }
 
     const status_t res = stopWorkerThread();
-    ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
+    ALOGE_IF(res != NO_ERROR, "%s: stopWorkerThread failed", __FUNCTION__);
     return res;
 }
 
+status_t EmulatedCameraDevice::setPreviewFrameRate(int framesPerSecond) {
+    if (framesPerSecond <= 0) {
+        return EINVAL;
+    }
+    mFramesPerSecond = framesPerSecond;
+    return NO_ERROR;
+}
+
 void EmulatedCameraDevice::setExposureCompensation(const float ev) {
     ALOGV("%s", __FUNCTION__);
 
@@ -155,13 +158,105 @@
     v = RGB2V(r, g, b);
 }
 
+void EmulatedCameraDevice::checkAutoFocusTrigger() {
+    // The expected value is a reference so we need it to be a variable
+    bool expectedTrigger = true;
+    if (mTriggerAutoFocus.compare_exchange_strong(expectedTrigger, false)) {
+        // If the compare exchange returns true then the value was the expected
+        // 'true' and was successfully set to 'false'. So that means it's time
+        // to trigger an auto-focus event and that we have disabled that trigger
+        // so it won't happen until another request is received.
+        mCameraHAL->autoFocusComplete();
+    }
+}
+
+status_t EmulatedCameraDevice::getCurrentFrameImpl(const uint8_t* source,
+                                                   uint8_t* dest,
+                                                   uint32_t pixelFormat) const {
+    if (pixelFormat == mPixelFormat) {
+        memcpy(dest, source, mFrameBufferSize);
+        return NO_ERROR;
+    } else if (pixelFormat == V4L2_PIX_FMT_YUV420 &&
+               mPixelFormat == V4L2_PIX_FMT_YVU420) {
+        // Convert from YV12 to YUV420 without alignment
+        const int ySize = mYStride * mFrameHeight;
+        const int uvSize = mUVStride * (mFrameHeight / 2);
+        if (mYStride == mFrameWidth) {
+            // Copy Y straight up
+            memcpy(dest, source, ySize);
+        } else {
+            // Strip alignment
+            for (int y = 0; y < mFrameHeight; ++y) {
+                memcpy(dest + y * mFrameWidth,
+                       source + y * mYStride,
+                       mFrameWidth);
+            }
+        }
+
+        if (mUVStride == mFrameWidth / 2) {
+            // Swap U and V
+            memcpy(dest + ySize, source + ySize + uvSize, uvSize);
+            memcpy(dest + ySize + uvSize, source + ySize, uvSize);
+        } else {
+            // Strip alignment
+            uint8_t* uvDest = dest + mFrameWidth * mFrameHeight;
+            const uint8_t* uvSource = source + ySize + uvSize;
+
+            for (int i = 0; i < 2; ++i) {
+                for (int y = 0; y < mFrameHeight / 2; ++y) {
+                    memcpy(uvDest + y * (mFrameWidth / 2),
+                           uvSource + y * mUVStride,
+                           mFrameWidth / 2);
+                }
+                uvDest += (mFrameHeight / 2) * (mFrameWidth / 2);
+                uvSource -= uvSize;
+            }
+        }
+        return NO_ERROR;
+    }
+    ALOGE("%s: Invalid pixel format conversion [%.4s to %.4s] requested",
+          __FUNCTION__, reinterpret_cast<const char*>(&mPixelFormat),
+          reinterpret_cast<const char*>(&pixelFormat));
+    return EINVAL;
+}
+
+status_t EmulatedCameraDevice::getCurrentFrame(void* buffer,
+                                               uint32_t pixelFormat)
+{
+    if (!isStarted()) {
+        ALOGE("%s: Device is not started", __FUNCTION__);
+        return EINVAL;
+    }
+    if (buffer == nullptr) {
+        ALOGE("%s: Invalid buffer provided", __FUNCTION__);
+        return EINVAL;
+    }
+
+    FrameLock lock(*this);
+    const void* source = mCameraThread->getPrimaryBuffer();
+    if (source == nullptr) {
+        ALOGE("%s: No framebuffer", __FUNCTION__);
+        return EINVAL;
+    }
+    return getCurrentFrameImpl(reinterpret_cast<const uint8_t*>(source),
+                               reinterpret_cast<uint8_t*>(buffer),
+                               pixelFormat);
+}
+
 status_t EmulatedCameraDevice::getCurrentPreviewFrame(void* buffer)
 {
     if (!isStarted()) {
         ALOGE("%s: Device is not started", __FUNCTION__);
         return EINVAL;
     }
-    if (mCurrentFrame == NULL || buffer == NULL) {
+    if (buffer == nullptr) {
+        ALOGE("%s: Invalid buffer provided", __FUNCTION__);
+        return EINVAL;
+    }
+
+    FrameLock lock(*this);
+    const void* currentFrame = mCameraThread->getPrimaryBuffer();
+    if (currentFrame == nullptr) {
         ALOGE("%s: No framebuffer", __FUNCTION__);
         return EINVAL;
     }
@@ -169,16 +264,16 @@
     /* In emulation the framebuffer is never RGB. */
     switch (mPixelFormat) {
         case V4L2_PIX_FMT_YVU420:
-            YV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            YV12ToRGB32(currentFrame, buffer, mFrameWidth, mFrameHeight);
             return NO_ERROR;
         case V4L2_PIX_FMT_YUV420:
-            YU12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            YU12ToRGB32(currentFrame, buffer, mFrameWidth, mFrameHeight);
             return NO_ERROR;
         case V4L2_PIX_FMT_NV21:
-            NV21ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            NV21ToRGB32(currentFrame, buffer, mFrameWidth, mFrameHeight);
             return NO_ERROR;
         case V4L2_PIX_FMT_NV12:
-            NV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+            NV12ToRGB32(currentFrame, buffer, mFrameWidth, mFrameHeight);
             return NO_ERROR;
 
         default:
@@ -188,6 +283,45 @@
     }
 }
 
+const void* EmulatedCameraDevice::getCurrentFrame() {
+    if (mCameraThread.get()) {
+        return mCameraThread->getPrimaryBuffer();
+    }
+    return nullptr;
+}
+
+EmulatedCameraDevice::FrameLock::FrameLock(EmulatedCameraDevice& cameraDevice)
+    : mCameraDevice(cameraDevice) {
+        mCameraDevice.lockCurrentFrame();
+}
+
+EmulatedCameraDevice::FrameLock::~FrameLock() {
+    mCameraDevice.unlockCurrentFrame();
+}
+
+status_t EmulatedCameraDevice::setAutoFocus() {
+    mTriggerAutoFocus = true;
+    return NO_ERROR;
+}
+
+status_t EmulatedCameraDevice::cancelAutoFocus() {
+    mTriggerAutoFocus = false;
+    return NO_ERROR;
+}
+
+bool EmulatedCameraDevice::requestRestart(int width, int height,
+                                          uint32_t pixelFormat,
+                                          bool takingPicture, bool oneBurst) {
+    if (mCameraThread.get() == nullptr) {
+        ALOGE("%s: No thread alive to perform the restart, is preview on?",
+              __FUNCTION__);
+        return false;
+    }
+    mCameraThread->requestRestart(width, height, pixelFormat,
+                                  takingPicture, oneBurst);
+    return true;
+}
+
 /****************************************************************************
  * Emulated camera device private API
  ***************************************************************************/
@@ -200,11 +334,24 @@
     switch (pix_fmt) {
         case V4L2_PIX_FMT_YVU420:
         case V4L2_PIX_FMT_YUV420:
+            // For these pixel formats the strides have to be aligned to 16 byte
+            // boundaries as per the format specification
+            // https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12
+            mYStride = align(width, 16);
+            mUVStride = align(mYStride / 2, 16);
+            // The second term should use half the height but since there are
+            // two planes the multiplication with two cancels that out
+            mFrameBufferSize = mYStride * height + mUVStride * height;
+            break;
         case V4L2_PIX_FMT_NV21:
         case V4L2_PIX_FMT_NV12:
-            mFrameBufferSize = (width * height * 12) / 8;
+            mYStride = width;
+            // Because of interleaving the UV stride is the same as the Y stride
+            // since it covers two pixels, one U and one V.
+            mUVStride = mYStride;
+            // Since the U/V stride covers both U and V we don't multiply by two
+            mFrameBufferSize = mYStride * height + mUVStride * (height / 2);
             break;
-
         default:
             ALOGE("%s: Unknown pixel format %.4s",
                  __FUNCTION__, reinterpret_cast<const char*>(&pix_fmt));
@@ -218,13 +365,10 @@
     mTotalPixels = width * height;
 
     /* Allocate framebuffer. */
-    mCurrentFrame = new uint8_t[mFrameBufferSize];
-    if (mCurrentFrame == NULL) {
-        ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
-        return ENOMEM;
-    }
-    ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
-         __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
+    mFrameBuffers[0].resize(mFrameBufferSize);
+    mFrameBuffers[1].resize(mFrameBufferSize);
+    ALOGV("%s: Allocated %zu bytes for %d pixels in %.4s[%dx%d] frame",
+         __FUNCTION__, mFrameBufferSize, mTotalPixels,
          reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth, mFrameHeight);
     return NO_ERROR;
 }
@@ -234,10 +378,11 @@
     mFrameWidth = mFrameHeight = mTotalPixels = 0;
     mPixelFormat = 0;
 
-    if (mCurrentFrame != NULL) {
-        delete[] mCurrentFrame;
-        mCurrentFrame = NULL;
-    }
+    mFrameBuffers[0].clear();
+    mFrameBuffers[1].clear();
+    // No need to keep all that memory allocated if the camera isn't running
+    mFrameBuffers[0].shrink_to_fit();
+    mFrameBuffers[1].shrink_to_fit();
 }
 
 /****************************************************************************
@@ -253,8 +398,18 @@
         return EINVAL;
     }
 
-    const status_t res = getWorkerThread()->startThread(one_burst);
-    ALOGE_IF(res != NO_ERROR, "%s: Unable to start worker thread", __FUNCTION__);
+    mCameraThread = new CameraThread(this, staticProduceFrame, this);
+    if (mCameraThread == NULL) {
+        ALOGE("%s: Unable to instantiate CameraThread object", __FUNCTION__);
+        return ENOMEM;
+    }
+    status_t res = mCameraThread->startThread(one_burst);
+    if (res != NO_ERROR) {
+        ALOGE("%s: Unable to start CameraThread: %s",
+              __FUNCTION__, strerror(res));
+        return res;
+    }
+
     return res;
 }
 
@@ -267,147 +422,255 @@
         return EINVAL;
     }
 
-    const status_t res = getWorkerThread()->stopThread();
-    ALOGE_IF(res != NO_ERROR, "%s: Unable to stop worker thread", __FUNCTION__);
+    status_t res = mCameraThread->stopThread();
+    if (res != NO_ERROR) {
+        ALOGE("%s: Unable to stop CameraThread", __FUNCTION__);
+        return res;
+    }
+    res = mCameraThread->joinThread();
+    if (res != NO_ERROR) {
+        ALOGE("%s: Unable to join CameraThread", __FUNCTION__);
+        return res;
+    }
+
+    // Destroy the thread as well
+    mCameraThread.clear();
     return res;
 }
 
-bool EmulatedCameraDevice::inWorkerThread()
-{
-    /* This will end the thread loop, and will terminate the thread. Derived
-     * classes must override this method. */
-    return false;
+EmulatedCameraDevice::CameraThread::CameraThread(EmulatedCameraDevice* dev,
+                                                 ProduceFrameFunc producer,
+                                                 void* producerOpaque)
+    : WorkerThread("Camera_CameraThread", dev, dev->mCameraHAL),
+      mCurFrameTimestamp(0),
+      mProducerFunc(producer),
+      mProducerOpaque(producerOpaque),
+      mRestartWidth(0),
+      mRestartHeight(0),
+      mRestartPixelFormat(0),
+      mRestartOneBurst(false),
+      mRestartTakingPicture(false),
+      mRestartRequested(false) {
+
 }
 
-/****************************************************************************
- * Worker thread implementation.
- ***************************************************************************/
+const void* EmulatedCameraDevice::CameraThread::getPrimaryBuffer() const {
+    if (mFrameProducer.get()) {
+        return mFrameProducer->getPrimaryBuffer();
+    }
+    return nullptr;
+}
 
-status_t EmulatedCameraDevice::WorkerThread::readyToRun()
-{
-    ALOGV("Starting emulated camera device worker thread...");
+void EmulatedCameraDevice::CameraThread::lockPrimaryBuffer() {
+    mFrameProducer->lockPrimaryBuffer();
+}
 
-    ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
-            "%s: Thread control FDs are opened", __FUNCTION__);
-    /* Create a pair of FDs that would be used to control the thread. */
-    int thread_fds[2];
-    status_t ret;
-    Mutex::Autolock lock(mCameraDevice->mObjectLock);
-    if (pipe(thread_fds) == 0) {
-        mThreadControl = thread_fds[1];
-        mControlFD = thread_fds[0];
-        ALOGV("Emulated device's worker thread has been started.");
-        ret = NO_ERROR;
-    } else {
-        ALOGE("%s: Unable to create thread control FDs: %d -> %s",
-             __FUNCTION__, errno, strerror(errno));
-        ret = errno;
+void EmulatedCameraDevice::CameraThread::unlockPrimaryBuffer() {
+    mFrameProducer->unlockPrimaryBuffer();
+}
+
+bool
+EmulatedCameraDevice::CameraThread::waitForFrameOrTimeout(nsecs_t timeout) {
+    // Keep waiting until the frame producer indicates that a frame is available
+    // This does introduce some unnecessary latency to the first frame delivery
+    // but avoids a lot of thread synchronization.
+    do {
+        // We don't have any specific fd we want to select so we pass in -1
+        // timeout is in nanoseconds but Select expects microseconds
+        Mutex::Autolock lock(mRunningMutex);
+        mRunningCondition.waitRelative(mRunningMutex, timeout);
+        if (!mRunning) {
+            ALOGV("%s: CameraThread has been terminated.", __FUNCTION__);
+            return false;
+        }
+        // Set a short timeout in case there is no frame available and we are
+        // going to loop. This way we ensure a sleep but keep a decent latency
+        timeout = milliseconds(5);
+    } while (!mFrameProducer->hasFrame());
+
+    return true;
+}
+
+bool EmulatedCameraDevice::CameraThread::inWorkerThread() {
+    /* Wait till FPS timeout expires, or thread exit message is received. */
+    nsecs_t wakeAt =
+        mCurFrameTimestamp + 1000000000.0 / mCameraDevice->mFramesPerSecond;
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    nsecs_t timeout = std::max<nsecs_t>(0, wakeAt - now);
+
+    if (!waitForFrameOrTimeout(timeout)) {
+        return false;
     }
 
-    mSetup.signal();
-    return ret;
+    /* Check if a restart and potentially apply the requested changes */
+    if (!checkRestartRequest()) {
+        return false;
+    }
+
+    /* Check if an auto-focus event needs to be triggered */
+    mCameraDevice->checkAutoFocusTrigger();
+
+    mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+    mCameraHAL->onNextFrameAvailable(mCurFrameTimestamp, mCameraDevice);
+
+    return true;
 }
 
-status_t EmulatedCameraDevice::WorkerThread::stopThread()
-{
-    ALOGV("Stopping emulated camera device's worker thread...");
+status_t EmulatedCameraDevice::CameraThread::onThreadStart() {
+    void* primaryBuffer = mCameraDevice->getPrimaryBuffer();
+    void* secondaryBuffer = mCameraDevice->getSecondaryBuffer();
+    mFrameProducer = new FrameProducer(mCameraDevice,
+                                       mProducerFunc, mProducerOpaque,
+                                       primaryBuffer, secondaryBuffer);
+    if (mFrameProducer.get() == nullptr) {
+        ALOGE("%s: Could not instantiate FrameProducer object", __FUNCTION__);
+        return ENOMEM;
+    }
+    return mFrameProducer->startThread(mOneBurst);
+}
 
-    status_t res = EINVAL;
+void EmulatedCameraDevice::CameraThread::onThreadExit() {
+    if (mFrameProducer.get()) {
+        if (mFrameProducer->stopThread() == NO_ERROR) {
+            mFrameProducer->joinThread();
+            mFrameProducer.clear();
+        }
+    }
+}
 
-    // Limit the scope of the Autolock
+EmulatedCameraDevice::CameraThread::FrameProducer::FrameProducer(
+        EmulatedCameraDevice* dev,
+        ProduceFrameFunc producer,
+        void* opaque,
+        void* primaryBuffer,
+        void* secondaryBuffer)
+    : WorkerThread("Camera_FrameProducer", dev, dev->mCameraHAL),
+      mProducer(producer),
+      mOpaque(opaque),
+      mPrimaryBuffer(primaryBuffer),
+      mSecondaryBuffer(secondaryBuffer),
+      mLastFrame(0),
+      mHasFrame(false) {
+
+}
+
+const void*
+EmulatedCameraDevice::CameraThread::FrameProducer::getPrimaryBuffer() const {
+    return mPrimaryBuffer;
+}
+
+void EmulatedCameraDevice::CameraThread::FrameProducer::lockPrimaryBuffer() {
+    mBufferMutex.lock();
+}
+void EmulatedCameraDevice::CameraThread::FrameProducer::unlockPrimaryBuffer() {
+    mBufferMutex.unlock();
+}
+
+void EmulatedCameraDevice::CameraThread::requestRestart(int width,
+                                                        int height,
+                                                        uint32_t pixelFormat,
+                                                        bool takingPicture,
+                                                        bool oneBurst) {
+    Mutex::Autolock lock(mRequestMutex);
+    mRestartWidth = width;
+    mRestartHeight = height;
+    mRestartPixelFormat = pixelFormat;
+    mRestartTakingPicture = takingPicture;
+    mRestartOneBurst = oneBurst;
+    mRestartRequested = true;
+}
+
+bool EmulatedCameraDevice::CameraThread::FrameProducer::hasFrame() const {
+    return mHasFrame;
+}
+
+bool EmulatedCameraDevice::CameraThread::checkRestartRequest() {
+    Mutex::Autolock lock(mRequestMutex);
+    if (mRestartRequested) {
+        mRestartRequested = false;
+        status_t res = mFrameProducer->stopThread();
+        if (res != NO_ERROR) {
+            ALOGE("%s: Could not stop frame producer thread", __FUNCTION__);
+            mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+            return false;
+        }
+        res = mFrameProducer->joinThread();
+        if (res != NO_ERROR) {
+            ALOGE("%s: Could not join frame producer thread", __FUNCTION__);
+            mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+            return false;
+        }
+        mFrameProducer.clear();
+        res = mCameraDevice->stopDevice();
+        if (res != NO_ERROR) {
+            ALOGE("%s: Could not stop device", __FUNCTION__);
+            mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+            return false;
+        }
+        res = mCameraDevice->startDevice(mRestartWidth,
+                                         mRestartHeight,
+                                         mRestartPixelFormat);
+        if (res != NO_ERROR) {
+            ALOGE("%s: Could not start device", __FUNCTION__);
+            mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+            return false;
+        }
+        if (mRestartTakingPicture) {
+            mCameraHAL->setTakingPicture(true);
+        }
+        mOneBurst = mRestartOneBurst;
+
+        // Pretend like this a thread start, performs the remaining setup
+        if (onThreadStart() != NO_ERROR) {
+            mCameraDevice->stopDevice();
+            mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+            return false;
+        }
+
+        // Now wait for the frame producer to start producing before we proceed
+        return waitForFrameOrTimeout(0);
+    }
+    return true;
+}
+
+bool EmulatedCameraDevice::CameraThread::FrameProducer::inWorkerThread() {
+    nsecs_t nextFrame =
+        mLastFrame + 1000000000 / mCameraDevice->mFramesPerSecond;
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    nsecs_t timeout = std::max<nsecs_t>(0, nextFrame - now);
+
     {
-      // If thread is running and readyToRun() has not finished running,
-      //    then wait until it is done.
-      Mutex::Autolock lock(mCameraDevice->mObjectLock);
-      if (isRunning() && (mThreadControl < 0 || mControlFD < 0)) {
-          mSetup.wait(mCameraDevice->mObjectLock);
-      }
-    }
-
-    if (mThreadControl >= 0) {
-        /* Send "stop" message to the thread loop. */
-        const ControlMessage msg = THREAD_STOP;
-        const int wres =
-            TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
-        if (wres == sizeof(msg)) {
-            /* Stop the thread, and wait till it's terminated. */
-            res = requestExitAndWait();
-            if (res == NO_ERROR) {
-                /* Close control FDs. */
-                if (mThreadControl >= 0) {
-                    close(mThreadControl);
-                    mThreadControl = -1;
-                }
-                if (mControlFD >= 0) {
-                    close(mControlFD);
-                    mControlFD = -1;
-                }
-                ALOGV("Emulated camera device's worker thread has been stopped.");
-            } else {
-                ALOGE("%s: requestExitAndWait failed: %d -> %s",
-                     __FUNCTION__, res, strerror(-res));
-            }
-        } else {
-            ALOGE("%s: Unable to send THREAD_STOP message: %d -> %s",
-                 __FUNCTION__, errno, strerror(errno));
-            res = errno ? errno : EINVAL;
+        Mutex::Autolock lock(mRunningMutex);
+        mRunningCondition.waitRelative(mRunningMutex, timeout);
+        if (!mRunning) {
+            ALOGV("%s: FrameProducer has been terminated.", __FUNCTION__);
+            return false;
         }
-    } else {
-        ALOGE("%s: Thread control FDs are not opened", __FUNCTION__);
     }
 
-    return res;
+    // Produce one frame and place it in the secondary buffer
+    mLastFrame = systemTime(SYSTEM_TIME_MONOTONIC);
+    if (!mProducer(mOpaque, mSecondaryBuffer)) {
+        ALOGE("FrameProducer could not produce frame, exiting thread");
+        mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+        return false;
+    }
+
+    {
+        // Switch buffers now that the secondary buffer is ready
+        Mutex::Autolock lock(mBufferMutex);
+        std::swap(mPrimaryBuffer, mSecondaryBuffer);
+    }
+    mHasFrame = true;
+    return true;
 }
 
-EmulatedCameraDevice::WorkerThread::SelectRes
-EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout)
-{
-    fd_set fds[1];
-    struct timeval tv, *tvp = NULL;
+void EmulatedCameraDevice::lockCurrentFrame() {
+    mCameraThread->lockPrimaryBuffer();
+}
 
-    const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 :
-                                   mControlFD + 1;
-    FD_ZERO(fds);
-    FD_SET(mControlFD, fds);
-    if (fd >= 0) {
-        FD_SET(fd, fds);
-    }
-    if (timeout) {
-        tv.tv_sec = timeout / 1000000;
-        tv.tv_usec = timeout % 1000000;
-        tvp = &tv;
-    }
-    int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
-    if (res < 0) {
-        ALOGE("%s: select returned %d and failed: %d -> %s",
-             __FUNCTION__, res, errno, strerror(errno));
-        return ERROR;
-    } else if (res == 0) {
-        /* Timeout. */
-        return TIMEOUT;
-    } else if (FD_ISSET(mControlFD, fds)) {
-        /* A control event. Lets read the message. */
-        ControlMessage msg;
-        res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
-        if (res != sizeof(msg)) {
-            ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
-                 __FUNCTION__, res, errno, strerror(errno));
-            return ERROR;
-        }
-        /* THREAD_STOP is the only message expected here. */
-        if (msg == THREAD_STOP) {
-            ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
-            return EXIT_THREAD;
-        } else {
-            ALOGE("Unknown worker thread message %d", msg);
-            return ERROR;
-        }
-    } else {
-        /* Must be an FD. */
-        ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
-                __FUNCTION__);
-        return READY;
-    }
+void EmulatedCameraDevice::unlockCurrentFrame() {
+    mCameraThread->unlockPrimaryBuffer();
 }
 
 };  /* namespace android */
diff --git a/camera/EmulatedCameraDevice.h b/camera/EmulatedCameraDevice.h
index fff11fa..85422eb 100755
--- a/camera/EmulatedCameraDevice.h
+++ b/camera/EmulatedCameraDevice.h
@@ -31,6 +31,11 @@
 #include <utils/String8.h>
 #include "EmulatedCameraCommon.h"
 #include "Converters.h"
+#include "WorkerThread.h"
+
+#undef min
+#undef max
+#include <vector>
 
 namespace android {
 
@@ -161,6 +166,11 @@
      */
     virtual status_t stopDeliveringFrames();
 
+    /* Set the preview frame rate.
+     * Indicates the rate at which the camera should provide preview frames in
+     * frames per second. */
+    status_t setPreviewFrameRate(int framesPerSecond);
+
     /* Sets the exposure compensation for the camera device.
      */
     void setExposureCompensation(const float ev);
@@ -169,14 +179,37 @@
      */
     void setWhiteBalanceMode(const char* mode);
 
+    /* Gets current framebuffer in a selected format
+     * This method must be called on a connected instance of this class with a
+     * started camera device. If it is called on a disconnected instance, or
+     * camera device has not been started, this method must return a failure.
+     * Note that this method should be called only after at least one frame has
+     * been captured and delivered. Otherwise it will return garbage in the
+     * preview frame buffer. Typically, this method should be called from
+     * onNextFrameAvailable callback. The method can perform some basic pixel
+     * format conversion for the most efficient conversions. If a conversion
+     * is not supported the method will fail. Note that this does NOT require
+     * that the current frame be locked using a FrameLock object.
+     *
+     * Param:
+     *  buffer - Buffer, large enough to contain the entire frame.
+     *  pixelFormat - The pixel format to convert to, use
+     *                getOriginalPixelFormat() to get the configured pixel
+     *                format (if using this no conversion will be needed)
+     * Return:
+     *  NO_ERROR on success, or an appropriate error status.
+     */
+    virtual status_t getCurrentFrame(void* buffer, uint32_t pixelFormat);
+
     /* Gets current framebuffer, converted into preview frame format.
      * This method must be called on a connected instance of this class with a
      * started camera device. If it is called on a disconnected instance, or
      * camera device has not been started, this method must return a failure.
      * Note that this method should be called only after at least one frame has
      * been captured and delivered. Otherwise it will return garbage in the
-     * preview frame buffer. Typically, this method shuld be called from
-     * onNextFrameAvailable callback.
+     * preview frame buffer. Typically, this method should be called from
+     * onNextFrameAvailable callback. Note that this does NOT require that the
+     * current frame be locked using a FrameLock object.
      * Param:
      *  buffer - Buffer, large enough to contain the entire preview frame.
      * Return:
@@ -184,6 +217,26 @@
      */
     virtual status_t getCurrentPreviewFrame(void* buffer);
 
+    /* Gets a pointer to the current frame buffer in its raw format.
+     * This method must be called on a connected instance of this class with a
+     * started camera device. If it is called on a disconnected instance, or
+     * camera device has not been started, this method must return NULL.
+     * This method should only be called when the frame lock is held through
+     * a FrameLock object. Otherwise the contents of the frame might change
+     * unexpectedly or its memory could be deallocated leading to a crash.
+     * Return:
+     *  A pointer to the current frame buffer on success, NULL otherwise.
+     */
+    virtual const void* getCurrentFrame();
+
+    class FrameLock {
+    public:
+        FrameLock(EmulatedCameraDevice& cameraDevice);
+        ~FrameLock();
+    private:
+        EmulatedCameraDevice& mCameraDevice;
+    };
+
     /* Gets width of the frame obtained from the physical device.
      * Return:
      *  Width of the frame obtained from the physical device. Note that value
@@ -219,6 +272,23 @@
         return mFrameBufferSize;
     }
 
+    /* Get number of bytes required to store current video frame buffer. Note
+     * that this can be different from getFrameBufferSize depending on the pixel
+     * format and resolution. The video frames use a pixel format that is
+     * suitable for the encoding pipeline and this may have different alignment
+     * requirements than the pixel format used for regular frames.
+     */
+    inline size_t getVideoFrameBufferSize() const
+    {
+        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+        // Currently the video format is always YUV 420 without any kind of
+        // alignment. So each pixel uses 12 bits, and then we divide by 8 to get
+        // the size in bytes. If additional pixel formats are supported this
+        // should be updated to take the selected video format into
+        // consideration.
+        return (mFrameWidth * mFrameHeight * 12) / 8;
+    }
+
     /* Gets number of pixels in the current frame buffer.
      * Return:
      *  Number of pixels in the frame buffer. Note that value returned from this
@@ -261,9 +331,7 @@
      */
 
     inline bool isInitialized() const {
-        /* Instance is initialized when the worker thread has been successfuly
-         * created (but not necessarily started). */
-        return mWorkerThread.get() != NULL && mState != ECDS_CONSTRUCTED;
+        return mState != ECDS_CONSTRUCTED;
     }
     inline bool isConnected() const {
         /* Instance is connected when its status is either"connected", or
@@ -274,6 +342,27 @@
         return mState == ECDS_STARTED;
     }
 
+    /* Enable auto-focus for the camera, this is only possible between calls to
+     * startPreview and stopPreview, i.e. when preview frames are being
+     * delivered. This will eventually trigger a callback to the camera HAL
+     * saying auto-focus completed.
+     */
+    virtual status_t setAutoFocus();
+
+    /* Cancel auto-focus if it's enabled.
+     */
+    virtual status_t cancelAutoFocus();
+
+    /* Request an asynchronous camera restart with new image parameters. The
+     * restart will be performed on the same thread that delivers frames,
+     * ensuring that all callbacks are done from the same thread.
+     * Return
+     *  false if the thread request cannot be honored because no thread is
+     *        running or some other error occurred.
+     */
+    bool requestRestart(int width, int height, uint32_t pixelFormat,
+                        bool takingPicture, bool oneBurst);
+
     /****************************************************************************
      * Emulated camera device private API
      ***************************************************************************/
@@ -309,6 +398,20 @@
      */
     void changeWhiteBalance(uint8_t& y, uint8_t& u, uint8_t& v) const;
 
+    /* Check if there is a pending auto-focus trigger and send a notification
+     * if there is. This should be called from the worker thread loop if the
+     * camera device wishes to use the default behavior of immediately sending
+     * an auto-focus completion event on request. Otherwise the device should
+     * implement its own auto-focus behavior. */
+    void checkAutoFocusTrigger();
+
+    /* Implementation for getCurrentFrame that includes pixel format conversion
+     * if needed. This allows subclasses to easily use this method instead of
+     * having to reimplement the conversion all over.
+     */
+    status_t getCurrentFrameImpl(const uint8_t* source, uint8_t* dest,
+                                 uint32_t pixelFormat) const;
+
     /****************************************************************************
      * Worker thread management.
      * Typicaly when emulated camera device starts capturing frames from the
@@ -320,163 +423,125 @@
 
 protected:
     /* Starts the worker thread.
-     * Typically, worker thread is started from startDeliveringFrames method of
-     * this class.
+     * Typically, the worker thread is started from the startDeliveringFrames
+     * method of this class.
      * Param:
      *  one_burst - Controls how many times thread loop should run. If this
      *      parameter is 'true', thread routine will run only once If this
-     *      parameter is 'false', thread routine will run until stopWorkerThread
-     *      method is called. See startDeliveringFrames for more info.
+     *      parameter is 'false', thread routine will run until
+     *      stopWorkerThreads method is called. See startDeliveringFrames for
+     *      more info.
      * Return:
      *  NO_ERROR on success, or an appropriate error status.
      */
     virtual status_t startWorkerThread(bool one_burst);
 
-    /* Stops the worker thread.
-     * Note that this method will always wait for the worker thread to terminate.
-     * Typically, worker thread is started from stopDeliveringFrames method of
-     * this class.
+    /* Stop the worker thread.
+     * Note that this method will always wait for the worker thread to
+     * terminate. Typically, the worker thread is stopped from the
+     * stopDeliveringFrames method of this class.
      * Return:
      *  NO_ERROR on success, or an appropriate error status.
      */
     virtual status_t stopWorkerThread();
 
-    /* Implementation of the worker thread routine.
-     * In the default implementation of the worker thread routine we simply
-     * return 'false' forcing the thread loop to exit, and the thread to
-     * terminate. Derived class should override that method to provide there the
-     * actual frame delivery.
-     * Return:
-     *  true To continue thread loop (this method will be called again), or false
-     *  to exit the thread loop and to terminate the thread.
-     */
-    virtual bool inWorkerThread();
+    /* Produce a camera frame and place it in buffer. The buffer is one of
+     * the two buffers provided to mFrameProducer during construction along with
+     * a pointer to this method. The method is expected to know what size frames
+     * it provided to the producer thread. Returning false indicates an
+     * unrecoverable error that will stop the frame production thread. */
+    virtual bool produceFrame(void* buffer) = 0;
 
-    /* Encapsulates a worker thread used by the emulated camera device.
-     */
-    friend class WorkerThread;
-    class WorkerThread : public Thread {
-
-        /****************************************************************************
-         * Public API
-         ***************************************************************************/
-
-        public:
-            inline explicit WorkerThread(EmulatedCameraDevice* camera_dev)
-                : Thread(true),   // Callbacks may involve Java calls.
-                  mCameraDevice(camera_dev),
-                  mThreadControl(-1),
-                  mControlFD(-1)
-            {
-            }
-
-            inline ~WorkerThread()
-            {
-                ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
-                        "%s: Control FDs are opened in the destructor",
-                        __FUNCTION__);
-                if (mThreadControl >= 0) {
-                    close(mThreadControl);
-                }
-                if (mControlFD >= 0) {
-                    close(mControlFD);
-                }
-            }
-
-            /* Starts the thread
-             * Param:
-             *  one_burst - Controls how many times thread loop should run. If
-             *      this parameter is 'true', thread routine will run only once
-             *      If this parameter is 'false', thread routine will run until
-             *      stopThread method is called. See startWorkerThread for more
-             *      info.
-             * Return:
-             *  NO_ERROR on success, or an appropriate error status.
-             */
-            inline status_t startThread(bool one_burst)
-            {
-                mOneBurst = one_burst;
-                return run("Camera_startThread", ANDROID_PRIORITY_URGENT_DISPLAY, 0);
-            }
-
-            /* Overriden base class method.
-             * It is overriden in order to provide one-time initialization just
-             * prior to starting the thread routine.
-             */
-            status_t readyToRun();
-
-            /* Stops the thread. */
-            status_t stopThread();
-
-            /* Values returned from the Select method of this class. */
-            enum SelectRes {
-                /* A timeout has occurred. */
-                TIMEOUT,
-                /* Data are available for read on the provided FD. */
-                READY,
-                /* Thread exit request has been received. */
-                EXIT_THREAD,
-                /* An error has occurred. */
-                ERROR
-            };
-
-            /* Select on an FD event, keeping in mind thread exit message.
-             * Param:
-             *  fd - File descriptor on which to wait for an event. This
-             *      parameter may be negative. If it is negative this method will
-             *      only wait on a control message to the thread.
-             *  timeout - Timeout in microseconds. 0 indicates no timeout (wait
-             *      forever).
-             * Return:
-             *  See SelectRes enum comments.
-             */
-            SelectRes Select(int fd, int timeout);
-
-        /****************************************************************************
-         * Private API
-         ***************************************************************************/
-
-        private:
-            /* Implements abstract method of the base Thread class. */
-            bool threadLoop()
-            {
-                /* Simply dispatch the call to the containing camera device. */
-                if (mCameraDevice->inWorkerThread()) {
-                    /* Respect "one burst" parameter (see startThread). */
-                    return !mOneBurst;
-                } else {
-                    return false;
-                }
-            }
-
-            /* Containing camera device object. */
-            EmulatedCameraDevice*   mCameraDevice;
-
-            /* FD that is used to send control messages into the thread. */
-            int                     mThreadControl;
-
-            /* FD that thread uses to receive control messages. */
-            int                     mControlFD;
-
-            /* Controls number of times the thread loop runs.
-             * See startThread for more information. */
-            bool                    mOneBurst;
-
-            /* Enumerates control messages that can be sent into the thread. */
-            enum ControlMessage {
-                /* Stop the thread. */
-                THREAD_STOP
-            };
-
-            Condition mSetup;
-    };
-
-    /* Worker thread accessor. */
-    inline WorkerThread* getWorkerThread() const
-    {
-        return mWorkerThread.get();
+    /* Get the primary buffer to use when constructing the FrameProducer. */
+    virtual void* getPrimaryBuffer() {
+        return mFrameBuffers[0].data();
     }
 
+    /* Get the secondary buffer to use when constructing the FrameProducer. */
+    virtual void* getSecondaryBuffer() {
+        return mFrameBuffers[1].data();
+    }
+
+    /* A class that encapsulates the asynchronous behavior of a camera. This
+     * includes asynchronous production (through another thread), frame delivery
+     * as well as asynchronous state changes that have to be synchronized with
+     * frame production and delivery but can't be blocking the camera HAL. */
+    class CameraThread : public WorkerThread {
+    public:
+        typedef bool (*ProduceFrameFunc)(void* opaque, void* destinationBuffer);
+        CameraThread(EmulatedCameraDevice* cameraDevice,
+                     ProduceFrameFunc producer,
+                     void* producerOpaque);
+
+        /* Access the primary buffer of the frame producer, this is the frame
+         * that is currently not being written to. The buffer will only have
+         * valid contents if hasFrame() returns true. Note that accessing this
+         * without first having created a Lock can lead to contents changing
+         * without notice. */
+        const void* getPrimaryBuffer() const;
+
+        /* Lock and unlock the primary buffer */
+        void lockPrimaryBuffer();
+        void unlockPrimaryBuffer();
+
+        void requestRestart(int width, int height, uint32_t pixelFormat,
+                            bool takingPicture, bool oneBurst);
+
+    private:
+        bool checkRestartRequest();
+        bool waitForFrameOrTimeout(nsecs_t timeout);
+        bool inWorkerThread() override;
+
+        status_t onThreadStart() override;
+        void onThreadExit() override;
+
+        /* A class with a thread that will call a function at a specified
+         * interval to produce frames. This is done in a double-buffered fashion
+         * to make sure that one of the frames can be delivered without risk of
+         * overwriting its contents. Access to the primary buffer, the one NOT
+         * being drawn to, should be protected with the lock methods provided or
+         * the guarantee of not overwriting the contents does not hold.
+         */
+        class FrameProducer : public WorkerThread {
+        public:
+            FrameProducer(EmulatedCameraDevice* cameraDevice,
+                          ProduceFrameFunc producer, void* opaque,
+                          void* primaryBuffer, void* secondaryBuffer);
+
+            /* Indicates if the producer has produced at least one frame. */
+            bool hasFrame() const;
+
+            const void* getPrimaryBuffer() const;
+
+            void lockPrimaryBuffer();
+            void unlockPrimaryBuffer();
+
+        protected:
+            bool inWorkerThread() override;
+
+            ProduceFrameFunc mProducer;
+            void* mOpaque;
+            void* mPrimaryBuffer;
+            void* mSecondaryBuffer;
+            nsecs_t mLastFrame;
+            mutable Mutex mBufferMutex;
+            std::atomic<bool> mHasFrame;
+        };
+
+        nsecs_t mCurFrameTimestamp;
+        /* Worker thread that will produce frames for the camera thread */
+        sp<FrameProducer> mFrameProducer;
+        ProduceFrameFunc mProducerFunc;
+        void* mProducerOpaque;
+        Mutex mRequestMutex;
+        int mRestartWidth;
+        int mRestartHeight;
+        uint32_t mRestartPixelFormat;
+        bool mRestartOneBurst;
+        bool mRestartTakingPicture;
+        bool mRestartRequested;
+    };
+
     /****************************************************************************
      * Data members
      ***************************************************************************/
@@ -485,17 +550,23 @@
     /* Locks this instance for parameters, state, etc. change. */
     Mutex                       mObjectLock;
 
-    /* Worker thread that is used in frame capturing. */
-    sp<WorkerThread>            mWorkerThread;
-
-    /* Timestamp of the current frame. */
-    nsecs_t                     mCurFrameTimestamp;
+    /* A camera thread that is used in frame production, delivery and handling
+     * of asynchronous restarts. Internally the process of generating and
+     * delivering frames is split up into two threads. This way frames can
+     * always be delivered on time even if they cannot be produced fast enough
+     * to keep up with the expected frame rate. It also increases performance on
+     * multi-core systems. If the producer cannot keep up the last frame will
+     * simply be delivered again. */
+    sp<CameraThread>          mCameraThread;
 
     /* Emulated camera object containing this instance. */
     EmulatedCamera*             mCameraHAL;
 
-    /* Framebuffer containing the current frame. */
-    uint8_t*                    mCurrentFrame;
+    /* Framebuffers containing the frame being drawn to and the frame being
+     * delivered. This is used by the double buffering producer thread and
+     * the consumer thread will copy frames from one of these buffers to
+     * mCurrentFrame to avoid being stalled by frame production. */
+    std::vector<uint8_t>        mFrameBuffers[2];
 
     /*
      * Framebuffer properties.
@@ -514,6 +585,19 @@
     /* Frame height */
     int                         mFrameHeight;
 
+    /* The number of frames per second that the camera should deliver */
+    int                         mFramesPerSecond;
+
+    /* Defines byte distance between the start of each Y row */
+    int                         mYStride;
+
+    /* Defines byte distance between the start of each U/V row. For formats with
+     * separate U and V planes this is the distance between rows in each plane.
+     * For formats with interleaved U and V components this is the distance
+     * between rows in the interleaved plane, meaning that it's the stride over
+     * the combined U and V components. */
+    int                         mUVStride;
+
     /* Total number of pixels */
     int                         mTotalPixels;
 
@@ -539,6 +623,32 @@
 
     /* Object state. */
     EmulatedCameraDeviceState   mState;
+
+private:
+    /* Lock the current frame so that it can safely be accessed using
+     * getCurrentFrame. Prefer using a FrameLock object on the stack instead
+     * to ensure that the lock is always unlocked properly.
+     */
+    void lockCurrentFrame();
+    /* Unlock the current frame after locking it. Prefer using a FrameLock
+     * object instead.
+     */
+    void unlockCurrentFrame();
+
+    static bool staticProduceFrame(void* opaque, void* buffer) {
+        auto cameraDevice = reinterpret_cast<EmulatedCameraDevice*>(opaque);
+        return cameraDevice->produceFrame(buffer);
+    }
+
+    /* A flag indicating if an auto-focus completion event should be sent the
+     * next time the worker thread runs. This implies that auto-focus completion
+     * event can only be delivered while preview frames are being delivered.
+     * This is also a requirement specified in the documentation where a request
+     * to perform auto-focusing is only valid between calls to startPreview and
+     * stopPreview.
+     * https://developer.android.com/reference/android/hardware/Camera.html#autoFocus(android.hardware.Camera.AutoFocusCallback)
+     */
+    std::atomic<bool> mTriggerAutoFocus;
 };
 
 }; /* namespace android */
diff --git a/camera/EmulatedCameraFactory.cpp b/camera/EmulatedCameraFactory.cpp
index 137d8ab..cf8440a 100755
--- a/camera/EmulatedCameraFactory.cpp
+++ b/camera/EmulatedCameraFactory.cpp
@@ -55,6 +55,8 @@
         createQemuCameras();
     }
 
+    waitForQemuSfFakeCameraPropertyAvailable();
+
     if (isBackFakeCameraEmulationOn()) {
         /* Camera ID. */
         const int camera_id = mEmulatedCameraNum;
@@ -169,7 +171,7 @@
         }
     }
 
-    ALOGV("%d cameras are being emulated. %d of them are fake cameras.",
+    ALOGE("%d cameras are being emulated. %d of them are fake cameras.",
           mEmulatedCameraNum, mFakeCameraNum);
 
     /* Create hotplug thread */
@@ -437,6 +439,26 @@
     mEmulatedCameraNum = index;
 }
 
+void EmulatedCameraFactory::waitForQemuSfFakeCameraPropertyAvailable() {
+    // Camera service may start running before qemu-props sets qemu.sf.fake_camera to
+    // any of the following four values: "none,front,back,both"; so we need to wait.
+    // android/camera/camera-service.c
+    // bug: 30768229
+    int numAttempts = 100;
+    char prop[PROPERTY_VALUE_MAX];
+    bool timeout = true;
+    for (int i = 0; i < numAttempts; ++i) {
+        if (property_get("qemu.sf.fake_camera", prop, NULL) != 0 ) {
+            timeout = false;
+            break;
+        }
+        usleep(5000);
+    }
+    if (timeout) {
+        ALOGE("timeout (%dms) waiting for property qemu.sf.fake_camera to be set\n", 5 * numAttempts);
+    }
+}
+
 bool EmulatedCameraFactory::isBackFakeCameraEmulationOn()
 {
     /* Defined by 'qemu.sf.fake_camera' boot property: if property exist, and
diff --git a/camera/EmulatedCameraFactory.h b/camera/EmulatedCameraFactory.h
index 3f19be1..879f925 100755
--- a/camera/EmulatedCameraFactory.h
+++ b/camera/EmulatedCameraFactory.h
@@ -127,7 +127,7 @@
     /* Gets qemu camera orientation. */
     int getQemuCameraOrientation() {
         /* TODO: Have a boot property that controls that. */
-        return 270;
+        return 90;
     }
 
     /* Gets number of emulated cameras.
@@ -156,6 +156,9 @@
      */
     void createQemuCameras();
 
+    /* Waits till qemu-props has done setup, timeout after 500ms */
+    void waitForQemuSfFakeCameraPropertyAvailable();
+
     /* Checks if fake camera emulation is on for the camera facing back. */
     bool isBackFakeCameraEmulationOn();
 
diff --git a/camera/EmulatedFakeCamera.cpp b/camera/EmulatedFakeCamera.cpp
index 457850d..3db1a80 100755
--- a/camera/EmulatedFakeCamera.cpp
+++ b/camera/EmulatedFakeCamera.cpp
@@ -70,12 +70,17 @@
      * Parameters provided by the camera device.
      */
 
-    /* 352x288 and 320x240 frame dimensions are required by the framework for
-     * video mode preview and video recording. */
+    /* 352x288, 320x240 and 176x144 frame dimensions are required by
+     * the framework for video mode preview and video recording. */
     mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
                     "640x480,352x288,320x240");
     mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
-                    "640x480,352x288,320x240");
+                    "640x480,352x288,320x240,176x144");
+    mParameters.set(CameraParameters::KEY_SUPPORTED_VIDEO_SIZES,
+                    "640x480,352x288,320x240,176x144");
+    mParameters.set(CameraParameters::KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO,
+                    "640x480");
+
     mParameters.setPreviewSize(640, 480);
     mParameters.setPictureSize(640, 480);
 
diff --git a/camera/EmulatedFakeCamera2.cpp b/camera/EmulatedFakeCamera2.cpp
index 186ae8c..7df6f52 100644
--- a/camera/EmulatedFakeCamera2.cpp
+++ b/camera/EmulatedFakeCamera2.cpp
@@ -1118,7 +1118,7 @@
 }
 
 EmulatedFakeCamera2::ReadoutThread::~ReadoutThread() {
-    delete mInFlightQueue;
+    delete[] mInFlightQueue;
 }
 
 status_t EmulatedFakeCamera2::ReadoutThread::readyToRun() {
diff --git a/camera/EmulatedFakeCamera3.cpp b/camera/EmulatedFakeCamera3.cpp
index a8d509c..0e305f9 100644
--- a/camera/EmulatedFakeCamera3.cpp
+++ b/camera/EmulatedFakeCamera3.cpp
@@ -925,6 +925,8 @@
                 GraphicBufferMapper::get().unlock(
                         *(request->output_buffers[i].buffer));
             }
+            delete sensorBuffers;
+            delete buffers;
             return NO_INIT;
         }
 
diff --git a/camera/EmulatedFakeCameraDevice.cpp b/camera/EmulatedFakeCameraDevice.cpp
index 4afadc1..d3cdd78 100755
--- a/camera/EmulatedFakeCameraDevice.cpp
+++ b/camera/EmulatedFakeCameraDevice.cpp
@@ -25,8 +25,20 @@
 #include "EmulatedFakeCamera.h"
 #include "EmulatedFakeCameraDevice.h"
 
+#undef min
+#undef max
+#include <algorithm>
+
 namespace android {
 
+static const double kCheckXSpeed = 0.00000000096;
+static const double kCheckYSpeed = 0.00000000032;
+
+static const double kSquareXSpeed = 0.000000000096;
+static const double kSquareYSpeed = 0.000000000160;
+
+static const nsecs_t kSquareColorChangeIntervalNs = seconds(5);
+
 EmulatedFakeCameraDevice::EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal)
     : EmulatedCameraDevice(camera_hal),
       mBlackYUV(kBlack32),
@@ -34,10 +46,15 @@
       mRedYUV(kRed8),
       mGreenYUV(kGreen8),
       mBlueYUV(kBlue8),
+      mSquareColor(&mRedYUV),
       mLastRedrawn(0),
+      mLastColorChange(0),
       mCheckX(0),
       mCheckY(0),
-      mCcounter(0)
+      mSquareX(0),
+      mSquareY(0),
+      mSquareXSpeed(kSquareXSpeed),
+      mSquareYSpeed(kSquareYSpeed)
 #if EFCD_ROTATE_FRAME
       , mLastRotatedAt(0),
         mCurrentFrameType(0),
@@ -124,33 +141,29 @@
         /* Calculate U/V panes inside the framebuffer. */
         switch (mPixelFormat) {
             case V4L2_PIX_FMT_YVU420:
-                mFrameV = mCurrentFrame + mTotalPixels;
-                mFrameU = mFrameU + mTotalPixels / 4;
+                mFrameVOffset = mYStride * mFrameHeight;
+                mFrameUOffset = mFrameVOffset + mUVStride * (mFrameHeight / 2);
                 mUVStep = 1;
-                mUVTotalNum = mTotalPixels / 4;
                 break;
 
             case V4L2_PIX_FMT_YUV420:
-                mFrameU = mCurrentFrame + mTotalPixels;
-                mFrameV = mFrameU + mTotalPixels / 4;
+                mFrameUOffset = mYStride * mFrameHeight;
+                mFrameVOffset = mFrameUOffset + mUVStride * (mFrameHeight / 2);
                 mUVStep = 1;
-                mUVTotalNum = mTotalPixels / 4;
                 break;
 
             case V4L2_PIX_FMT_NV21:
                 /* Interleaved UV pane, V first. */
-                mFrameV = mCurrentFrame + mTotalPixels;
-                mFrameU = mFrameV + 1;
+                mFrameVOffset = mYStride * mFrameHeight;
+                mFrameUOffset = mFrameVOffset + 1;
                 mUVStep = 2;
-                mUVTotalNum = mTotalPixels / 4;
                 break;
 
             case V4L2_PIX_FMT_NV12:
                 /* Interleaved UV pane, U first. */
-                mFrameU = mCurrentFrame + mTotalPixels;
-                mFrameV = mFrameU + 1;
+                mFrameUOffset = mYStride * mFrameHeight;
+                mFrameVOffset = mFrameUOffset + 1;
                 mUVStep = 2;
-                mUVTotalNum = mTotalPixels / 4;
                 break;
 
             default:
@@ -158,6 +171,8 @@
                      reinterpret_cast<const char*>(&mPixelFormat));
                 return EINVAL;
         }
+        mLastRedrawn = systemTime(SYSTEM_TIME_MONOTONIC);
+        mLastColorChange = mLastRedrawn;
         /* Number of items in a single row inside U/V panes. */
         mUVInRow = (width / 2) * mUVStep;
         mState = ECDS_STARTED;
@@ -178,7 +193,6 @@
         return NO_ERROR;
     }
 
-    mFrameU = mFrameV = NULL;
     EmulatedCameraDevice::commonStopDevice();
     mState = ECDS_CONNECTED;
 
@@ -189,48 +203,24 @@
  * Worker thread management overrides.
  ***************************************************************************/
 
-bool EmulatedFakeCameraDevice::inWorkerThread()
+bool EmulatedFakeCameraDevice::produceFrame(void* buffer)
 {
-    /* Wait till FPS timeout expires, or thread exit message is received. */
-    WorkerThread::SelectRes res =
-        getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
-    if (res == WorkerThread::EXIT_THREAD) {
-        ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
-        return false;
-    }
-
-    /* Lets see if we need to generate a new frame. */
-    if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRedrawn) >= mRedrawAfter) {
-        /*
-         * Time to generate a new frame.
-         */
-
 #if EFCD_ROTATE_FRAME
-        const int frame_type = rotateFrame();
-        switch (frame_type) {
-            case 0:
-                drawCheckerboard();
-                break;
-            case 1:
-                drawStripes();
-                break;
-            case 2:
-                drawSolid(mCurrentColor);
-                break;
-        }
-#else
-        /* Draw the checker board. */
-        drawCheckerboard();
-
-#endif  // EFCD_ROTATE_FRAME
-
-        mLastRedrawn = systemTime(SYSTEM_TIME_MONOTONIC);
+    const int frame_type = rotateFrame();
+    switch (frame_type) {
+        case 0:
+            drawCheckerboard(buffer);
+            break;
+        case 1:
+            drawStripes(buffer);
+            break;
+        case 2:
+            drawSolid(buffer, mCurrentColor);
+            break;
     }
-
-    /* Timestamp the current frame, and notify the camera HAL about new frame. */
-    mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
-    mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
-
+#else
+    drawCheckerboard(buffer);
+#endif  // EFCD_ROTATE_FRAME
     return true;
 }
 
@@ -238,9 +228,15 @@
  * Fake camera device private API
  ***************************************************************************/
 
-void EmulatedFakeCameraDevice::drawCheckerboard()
+void EmulatedFakeCameraDevice::drawCheckerboard(void* buffer)
 {
-    const int size = mFrameWidth / 10;
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    nsecs_t elapsed = now - mLastRedrawn;
+    uint8_t* currentFrame = reinterpret_cast<uint8_t*>(buffer);
+    uint8_t* frameU = currentFrame + mFrameUOffset;
+    uint8_t* frameV = currentFrame + mFrameVOffset;
+
+    const int size = std::min(mFrameWidth, mFrameHeight) / 10;
     bool black = true;
 
     if (size == 0) {
@@ -249,33 +245,47 @@
         return;
     }
 
+    mCheckX += kCheckXSpeed * elapsed;
+    mCheckY += kCheckYSpeed * elapsed;
 
-    if((mCheckX / size) & 1)
+    // Allow the X and Y values to transition across two checkerboard boxes
+    // before resetting it back. This allows for the gray to black transition.
+    // Note that this is in screen size independent coordinates so that frames
+    // will look similar regardless of resolution
+    if (mCheckX > 2.0) {
+        mCheckX -= 2.0;
+    }
+    if (mCheckY > 2.0) {
+        mCheckY -= 2.0;
+    }
+
+    // Are we in the gray or black zone?
+    if (mCheckX >= 1.0)
         black = false;
-    if((mCheckY / size) & 1)
+    if (mCheckY >= 1.0)
         black = !black;
 
-    int county = mCheckY % size;
-    int checkxremainder = mCheckX % size;
-    uint8_t* Y = mCurrentFrame;
-    uint8_t* U_pos = mFrameU;
-    uint8_t* V_pos = mFrameV;
-    uint8_t* U = U_pos;
-    uint8_t* V = V_pos;
+    int county = static_cast<int>(mCheckY * size) % size;
+    int checkxremainder = static_cast<int>(mCheckX * size) % size;
 
     YUVPixel adjustedWhite = YUVPixel(mWhiteYUV);
     changeWhiteBalance(adjustedWhite.Y, adjustedWhite.U, adjustedWhite.V);
+    adjustedWhite.Y = changeExposure(adjustedWhite.Y);
+    YUVPixel adjustedBlack = YUVPixel(mBlackYUV);
+    adjustedBlack.Y = changeExposure(adjustedBlack.Y);
 
     for(int y = 0; y < mFrameHeight; y++) {
         int countx = checkxremainder;
         bool current = black;
+        uint8_t* Y = currentFrame + mYStride * y;
+        uint8_t* U = frameU + mUVStride * (y / 2);
+        uint8_t* V = frameV + mUVStride * (y / 2);
         for(int x = 0; x < mFrameWidth; x += 2) {
             if (current) {
-                mBlackYUV.get(Y, U, V);
+                adjustedBlack.get(Y, U, V);
             } else {
                 adjustedWhite.get(Y, U, V);
             }
-            *Y = changeExposure(*Y);
             Y[1] = *Y;
             Y += 2; U += mUVStep; V += mUVStep;
             countx += 2;
@@ -284,49 +294,70 @@
                 current = !current;
             }
         }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
         if(county++ >= size) {
             county = 0;
             black = !black;
         }
     }
-    mCheckX += 3;
-    mCheckY++;
 
     /* Run the square. */
-    int sqx = ((mCcounter * 3) & 255);
-    if(sqx > 128) sqx = 255 - sqx;
-    int sqy = ((mCcounter * 5) & 255);
-    if(sqy > 128) sqy = 255 - sqy;
-    const int sqsize = mFrameWidth / 10;
-    drawSquare(sqx * sqsize / 32, sqy * sqsize / 32, (sqsize * 5) >> 1,
-               (mCcounter & 0x100) ? &mRedYUV : &mGreenYUV);
-    mCcounter++;
+    const int squareSize = std::min(mFrameWidth, mFrameHeight) / 4;
+    mSquareX += mSquareXSpeed * elapsed;
+    mSquareY += mSquareYSpeed * elapsed;
+    int squareX = mSquareX * mFrameWidth;
+    int squareY = mSquareY * mFrameHeight;
+    if (squareX + squareSize > mFrameWidth) {
+        mSquareXSpeed = -mSquareXSpeed;
+        double relativeWidth = static_cast<double>(squareSize) / mFrameWidth;
+        mSquareX -= 2.0 * (mSquareX + relativeWidth - 1.0);
+        squareX = mSquareX * mFrameWidth;
+    } else if (squareX < 0) {
+        mSquareXSpeed = -mSquareXSpeed;
+        mSquareX = -mSquareX;
+        squareX = mSquareX * mFrameWidth;
+    }
+    if (squareY + squareSize > mFrameHeight) {
+        mSquareYSpeed = -mSquareYSpeed;
+        double relativeHeight = static_cast<double>(squareSize) / mFrameHeight;
+        mSquareY -= 2.0 * (mSquareY + relativeHeight - 1.0);
+        squareY = mSquareY * mFrameHeight;
+    } else if (squareY < 0) {
+        mSquareYSpeed = -mSquareYSpeed;
+        mSquareY = -mSquareY;
+        squareY = mSquareY * mFrameHeight;
+    }
+
+    if (now - mLastColorChange > kSquareColorChangeIntervalNs) {
+        mLastColorChange = now;
+        mSquareColor = mSquareColor == &mRedYUV ? &mGreenYUV : &mRedYUV;
+    }
+
+    drawSquare(buffer, squareX, squareY, squareSize, mSquareColor);
+    mLastRedrawn = now;
 }
 
-void EmulatedFakeCameraDevice::drawSquare(int x,
+void EmulatedFakeCameraDevice::drawSquare(void* buffer,
+                                          int x,
                                           int y,
                                           int size,
                                           const YUVPixel* color)
 {
-    const int square_xstop = min(mFrameWidth, x + size);
-    const int square_ystop = min(mFrameHeight, y + size);
-    uint8_t* Y_pos = mCurrentFrame + y * mFrameWidth + x;
+    uint8_t* currentFrame = reinterpret_cast<uint8_t*>(buffer);
+    uint8_t* frameU = currentFrame + mFrameUOffset;
+    uint8_t* frameV = currentFrame + mFrameVOffset;
+
+    const int square_xstop = std::min(mFrameWidth, x + size);
+    const int square_ystop = std::min(mFrameHeight, y + size);
+    uint8_t* Y_pos = currentFrame + y * mYStride + x;
 
     YUVPixel adjustedColor = *color;
     changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
 
     // Draw the square.
     for (; y < square_ystop; y++) {
-        const int iUV = (y / 2) * mUVInRow + (x / 2) * mUVStep;
-        uint8_t* sqU = mFrameU + iUV;
-        uint8_t* sqV = mFrameV + iUV;
+        const int iUV = (y / 2) * mUVStride + (x / 2) * mUVStep;
+        uint8_t* sqU = frameU + iUV;
+        uint8_t* sqV = frameV + iUV;
         uint8_t* sqY = Y_pos;
         for (int i = x; i < square_xstop; i += 2) {
             adjustedColor.get(sqY, sqU, sqV);
@@ -334,36 +365,40 @@
             sqY[1] = *sqY;
             sqY += 2; sqU += mUVStep; sqV += mUVStep;
         }
-        Y_pos += mFrameWidth;
+        Y_pos += mYStride;
     }
 }
 
 #if EFCD_ROTATE_FRAME
 
-void EmulatedFakeCameraDevice::drawSolid(YUVPixel* color)
+void EmulatedFakeCameraDevice::drawSolid(void* buffer, YUVPixel* color)
 {
     YUVPixel adjustedColor = *color;
     changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
 
-    /* All Ys are the same. */
-    memset(mCurrentFrame, changeExposure(adjustedColor.Y), mTotalPixels);
+    /* All Ys are the same, will fill any alignment padding but that's OK */
+    memset(mCurrentFrame, changeExposure(adjustedColor.Y),
+           mFrameHeight * mYStride);
 
     /* Fill U, and V panes. */
-    uint8_t* U = mFrameU;
-    uint8_t* V = mFrameV;
-    for (int k = 0; k < mUVTotalNum; k++, U += mUVStep, V += mUVStep) {
-        *U = color->U;
-        *V = color->V;
+    for (int y = 0; y < mFrameHeight / 2; ++y) {
+        uint8_t* U = mFrameU + y * mUVStride;
+        uint8_t* V = mFrameV + y * mUVStride;
+
+        for (int x = 0; x < mFrameWidth / 2; ++x, U += mUVStep, V += mUVStep) {
+            *U = color->U;
+            *V = color->V;
+        }
     }
 }
 
-void EmulatedFakeCameraDevice::drawStripes()
+void EmulatedFakeCameraDevice::drawStripes(void* buffer)
 {
     /* Divide frame into 4 stripes. */
     const int change_color_at = mFrameHeight / 4;
     const int each_in_row = mUVInRow / mUVStep;
     uint8_t* pY = mCurrentFrame;
-    for (int y = 0; y < mFrameHeight; y++, pY += mFrameWidth) {
+    for (int y = 0; y < mFrameHeight; y++, pY += mYStride) {
         /* Select the color. */
         YUVPixel* color;
         const int color_index = y / change_color_at;
@@ -386,7 +421,7 @@
         memset(pY, changeExposure(color->Y), mFrameWidth);
 
         /* Offset of the current row inside U/V panes. */
-        const int uv_off = (y / 2) * mUVInRow;
+        const int uv_off = (y / 2) * mUVStride;
         /* Fill U, and V panes. */
         uint8_t* U = mFrameU + uv_off;
         uint8_t* V = mFrameV + uv_off;
diff --git a/camera/EmulatedFakeCameraDevice.h b/camera/EmulatedFakeCameraDevice.h
index f66f076..a3e9201 100755
--- a/camera/EmulatedFakeCameraDevice.h
+++ b/camera/EmulatedFakeCameraDevice.h
@@ -77,9 +77,6 @@
     /* Stops the camera device. */
     status_t stopDevice();
 
-    /* Gets current preview fame into provided buffer. */
-    status_t getPreviewFrame(void* buffer);
-
     /***************************************************************************
      * Worker thread management overrides.
      * See declarations of these methods in EmulatedCameraDevice class for
@@ -87,12 +84,8 @@
      **************************************************************************/
 
 protected:
-    /* Implementation of the worker thread routine.
-     * This method simply sleeps for a period of time defined by the FPS property
-     * of the fake camera (simulating frame frequency), and then calls emulated
-     * camera's onNextFrameAvailable method.
-     */
-    bool inWorkerThread();
+    /* Implementation of the frame production routine. */
+    bool produceFrame(void* buffer) override;
 
     /****************************************************************************
      * Fake camera device private API
@@ -100,8 +93,9 @@
 
 private:
 
-    /* Draws a black and white checker board in the current frame buffer. */
-    void drawCheckerboard();
+    /* Draws a black and white checker board in |buffer| with the assumption
+     * that the size of buffer matches the current frame buffer size. */
+    void drawCheckerboard(void* buffer);
 
     /* Draws a square of the given color in the current frame buffer.
      * Param:
@@ -109,11 +103,11 @@
      *  size - Size of the square's side.
      *  color - Square's color.
      */
-    void drawSquare(int x, int y, int size, const YUVPixel* color);
+    void drawSquare(void* buffer, int x, int y, int size, const YUVPixel* color);
 
 #if EFCD_ROTATE_FRAME
-    void drawSolid(YUVPixel* color);
-    void drawStripes();
+    void drawSolid(void* buffer, YUVPixel* color);
+    void drawStripes(void* buffer);
     int rotateFrame();
 #endif  // EFCD_ROTATE_FRAME
 
@@ -131,6 +125,7 @@
     YUVPixel    mRedYUV;
     YUVPixel    mGreenYUV;
     YUVPixel    mBlueYUV;
+    YUVPixel*   mSquareColor;
 
     /* Last time the frame has been redrawn. */
     nsecs_t     mLastRedrawn;
@@ -140,10 +135,10 @@
      */
 
     /* U pane inside the framebuffer. */
-    uint8_t*    mFrameU;
+    ptrdiff_t   mFrameUOffset;
 
     /* V pane inside the framebuffer. */
-    uint8_t*    mFrameV;
+    ptrdiff_t   mFrameVOffset;
 
     /* Defines byte distance between adjacent U, and V values. */
     int         mUVStep;
@@ -153,24 +148,17 @@
      * number of both, Us and Vs in a single row in the interleaved UV pane. */
     int         mUVInRow;
 
-    /* Total number of each, U, and V elements in the framebuffer. */
-    int         mUVTotalNum;
-
     /*
      * Checkerboard drawing related stuff
      */
+    nsecs_t     mLastColorChange;
 
-    int         mCheckX;
-    int         mCheckY;
-    int         mCcounter;
-
-    /* Emulated FPS (frames per second).
-     * We will emulate 50 FPS. */
-    static const int        mEmulatedFPS = 50;
-
-    /* Defines time (in nanoseconds) between redrawing the checker board.
-     * We will redraw the checker board every 15 milliseconds. */
-    static const nsecs_t    mRedrawAfter = 15000000LL;
+    double      mCheckX;
+    double      mCheckY;
+    double      mSquareX;
+    double      mSquareY;
+    double      mSquareXSpeed;
+    double      mSquareYSpeed;
 
 #if EFCD_ROTATE_FRAME
     /* Frame rotation frequency in nanosec (currently - 3 sec) */
diff --git a/camera/EmulatedQemuCamera.cpp b/camera/EmulatedQemuCamera.cpp
index af1e324..ce47f07 100755
--- a/camera/EmulatedQemuCamera.cpp
+++ b/camera/EmulatedQemuCamera.cpp
@@ -25,6 +25,12 @@
 #include "EmulatedQemuCamera.h"
 #include "EmulatedCameraFactory.h"
 
+#undef min
+#undef max
+#include <sstream>
+#include <string>
+#include <vector>
+
 namespace android {
 
 EmulatedQemuCamera::EmulatedQemuCamera(int cameraId, struct hw_module_t* module)
@@ -65,43 +71,84 @@
     /*
      * Set customizable parameters.
      */
+    using Size = std::pair<int, int>;
+    std::vector<Size> resolutions;
+    std::stringstream ss(frame_dims);
+    std::string input;
+    while (std::getline(ss, input, ',')) {
+        int width = 0;
+        int height = 0;
+        char none = 0;
+        /* Expect only two results because that means there was nothing after
+         * the height, we don't want any trailing characters. Otherwise we just
+         * ignore this entry. */
+        if (sscanf(input.c_str(), "%dx%d%c", &width, &height, &none) == 2) {
+            resolutions.push_back(Size(width, height));
+            ALOGE("%s: %dx%d", __FUNCTION__, width, height);
+        }
+    }
+
+    /* The Android framework contains a wrapper around the v1 Camera API so that
+     * it can be used with API v2. This wrapper attempts to figure out the
+     * sensor resolution of the camera by looking at the resolution with the
+     * largest area and infer that the dimensions of that resolution must also
+     * be the size of the camera sensor. Any resolution with a dimension that
+     * exceeds the sensor size will be rejected so Camera API calls will start
+     * failing. To work around this we remove any resolutions with at least one
+     * dimension exceeding that of the max area resolution. */
+
+    /* First find the resolution with the maximum area, the "sensor size" */
+    int maxArea = 0;
+    int maxAreaWidth = 0;
+    int maxAreaHeight = 0;
+    for (const auto& res : resolutions) {
+        int area = res.first * res.second;
+        if (area > maxArea) {
+            maxArea = area;
+            maxAreaWidth = res.first;
+            maxAreaHeight = res.second;
+        }
+    }
+
+    /* Next remove any resolution with a dimension exceeding the sensor size. */
+    for (auto res = resolutions.begin(); res != resolutions.end(); ) {
+        if (res->first > maxAreaWidth || res->second > maxAreaHeight) {
+            /* Width and/or height larger than sensor, remove it */
+            res = resolutions.erase(res);
+        } else {
+            ++res;
+        }
+    }
+
+    if (resolutions.empty()) {
+        ALOGE("%s: Qemu camera has no valid resolutions", __FUNCTION__);
+        return EINVAL;
+    }
+
+    /* Next rebuild the frame size string for the camera parameters */
+    std::stringstream sizesStream;
+    for (size_t i = 0; i < resolutions.size(); ++i) {
+        if (i != 0) {
+            sizesStream << ',';
+        }
+        sizesStream << resolutions[i].first << 'x' << resolutions[i].second;
+    }
+    std::string sizes = sizesStream.str();
 
     mParameters.set(EmulatedCamera::FACING_KEY, facing_dir);
     mParameters.set(EmulatedCamera::ORIENTATION_KEY,
                     gEmulatedCameraFactory.getQemuCameraOrientation());
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, frame_dims);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, frame_dims);
+    mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
+                    sizes.c_str());
+    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
+                    sizes.c_str());
 
     /*
      * Use first dimension reported by the device to set current preview and
      * picture sizes.
      */
-
-    char first_dim[128];
-    /* Dimensions are separated with ',' */
-    const char* c = strchr(frame_dims, ',');
-    if (c == NULL) {
-        strncpy(first_dim, frame_dims, sizeof(first_dim));
-        first_dim[sizeof(first_dim)-1] = '\0';
-    } else if (static_cast<size_t>(c - frame_dims) < sizeof(first_dim)) {
-        memcpy(first_dim, frame_dims, c - frame_dims);
-        first_dim[c - frame_dims] = '\0';
-    } else {
-        memcpy(first_dim, frame_dims, sizeof(first_dim));
-        first_dim[sizeof(first_dim)-1] = '\0';
-    }
-
-    /* Width and height are separated with 'x' */
-    char* sep = strchr(first_dim, 'x');
-    if (sep == NULL) {
-        ALOGE("%s: Invalid first dimension format in %s",
-             __FUNCTION__, frame_dims);
-        return EINVAL;
-    }
-
-    *sep = '\0';
-    const int x = atoi(first_dim);
-    const int y = atoi(sep + 1);
+    int x = resolutions[0].first;
+    int y = resolutions[0].second;
     mParameters.setPreviewSize(x, y);
     mParameters.setPictureSize(x, y);
 
diff --git a/camera/EmulatedQemuCameraDevice.cpp b/camera/EmulatedQemuCameraDevice.cpp
index 07837af..6105d0d 100755
--- a/camera/EmulatedQemuCameraDevice.cpp
+++ b/camera/EmulatedQemuCameraDevice.cpp
@@ -29,16 +29,12 @@
 
 EmulatedQemuCameraDevice::EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal)
     : EmulatedCameraDevice(camera_hal),
-      mQemuClient(),
-      mPreviewFrame(NULL)
+      mQemuClient()
 {
 }
 
 EmulatedQemuCameraDevice::~EmulatedQemuCameraDevice()
 {
-    if (mPreviewFrame != NULL) {
-        delete[] mPreviewFrame;
-    }
 }
 
 /****************************************************************************
@@ -158,12 +154,14 @@
     /* Allocate preview frame buffer. */
     /* TODO: Watch out for preview format changes! At this point we implement
      * RGB32 only.*/
-    mPreviewFrame = new uint32_t[mTotalPixels];
-    if (mPreviewFrame == NULL) {
-        ALOGE("%s: Unable to allocate %d bytes for preview frame",
-             __FUNCTION__, mTotalPixels);
-        return ENOMEM;
-    }
+    mPreviewFrames[0].resize(mTotalPixels);
+    mPreviewFrames[1].resize(mTotalPixels);
+
+    mFrameBufferPairs[0].first = mFrameBuffers[0].data();
+    mFrameBufferPairs[0].second = mPreviewFrames[0].data();
+
+    mFrameBufferPairs[1].first = mFrameBuffers[1].data();
+    mFrameBufferPairs[1].second = mPreviewFrames[1].data();
 
     /* Start the actual camera device. */
     res = mQemuClient.queryStart(mPixelFormat, mFrameWidth, mFrameHeight);
@@ -196,10 +194,12 @@
     /* Stop the actual camera device. */
     status_t res = mQemuClient.queryStop();
     if (res == NO_ERROR) {
-        if (mPreviewFrame == NULL) {
-            delete[] mPreviewFrame;
-            mPreviewFrame = NULL;
-        }
+        mPreviewFrames[0].clear();
+        mPreviewFrames[1].clear();
+        // No need to keep all that memory around as capacity, shrink it
+        mPreviewFrames[0].shrink_to_fit();
+        mPreviewFrames[1].shrink_to_fit();
+
         EmulatedCameraDevice::commonStopDevice();
         mState = ECDS_CONNECTED;
         ALOGV("%s: Qemu camera device '%s' is stopped",
@@ -216,50 +216,96 @@
  * EmulatedCameraDevice virtual overrides
  ***************************************************************************/
 
-status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer)
-{
-    ALOGW_IF(mPreviewFrame == NULL, "%s: No preview frame", __FUNCTION__);
-    if (mPreviewFrame != NULL) {
-        memcpy(buffer, mPreviewFrame, mTotalPixels * 4);
-        return 0;
-    } else {
-        return EmulatedCameraDevice::getCurrentPreviewFrame(buffer);
+status_t EmulatedQemuCameraDevice::getCurrentFrame(void* buffer,
+                                                   uint32_t pixelFormat) {
+    if (!isStarted()) {
+        ALOGE("%s: Device is not started", __FUNCTION__);
+        return EINVAL;
     }
+    if (buffer == nullptr) {
+        ALOGE("%s: Invalid buffer provided", __FUNCTION__);
+        return EINVAL;
+    }
+
+    FrameLock lock(*this);
+    const void* primary = mCameraThread->getPrimaryBuffer();
+    auto frameBufferPair = reinterpret_cast<const FrameBufferPair*>(primary);
+    uint8_t* frame = frameBufferPair->first;
+
+    if (frame == nullptr) {
+        ALOGE("%s: No frame", __FUNCTION__);
+        return EINVAL;
+    }
+    return getCurrentFrameImpl(reinterpret_cast<const uint8_t*>(frame),
+                               reinterpret_cast<uint8_t*>(buffer),
+                               pixelFormat);
+}
+
+status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer) {
+    if (!isStarted()) {
+        ALOGE("%s: Device is not started", __FUNCTION__);
+        return EINVAL;
+    }
+    if (buffer == nullptr) {
+        ALOGE("%s: Invalid buffer provided", __FUNCTION__);
+        return EINVAL;
+    }
+
+    FrameLock lock(*this);
+    const void* primary = mCameraThread->getPrimaryBuffer();
+    auto frameBufferPair = reinterpret_cast<const FrameBufferPair*>(primary);
+    uint32_t* previewFrame = frameBufferPair->second;
+
+    if (previewFrame == nullptr) {
+        ALOGE("%s: No frame", __FUNCTION__);
+        return EINVAL;
+    }
+    memcpy(buffer, previewFrame, mTotalPixels * 4);
+    return NO_ERROR;
+}
+
+const void* EmulatedQemuCameraDevice::getCurrentFrame() {
+    if (mCameraThread.get() == nullptr) {
+        return nullptr;
+    }
+
+    const void* primary = mCameraThread->getPrimaryBuffer();
+    auto frameBufferPair = reinterpret_cast<const FrameBufferPair*>(primary);
+    uint8_t* frame = frameBufferPair->first;
+
+    return frame;
 }
 
 /****************************************************************************
  * Worker thread management overrides.
  ***************************************************************************/
 
-bool EmulatedQemuCameraDevice::inWorkerThread()
+bool EmulatedQemuCameraDevice::produceFrame(void* buffer)
 {
-    /* Wait till FPS timeout expires, or thread exit message is received. */
-    WorkerThread::SelectRes res =
-        getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
-    if (res == WorkerThread::EXIT_THREAD) {
-        ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
-        return false;
-    }
+    auto frameBufferPair = reinterpret_cast<FrameBufferPair*>(buffer);
+    uint8_t* rawFrame = frameBufferPair->first;
+    uint32_t* previewFrame = frameBufferPair->second;
 
-    /* Query frames from the service. */
-    status_t query_res = mQemuClient.queryFrame(mCurrentFrame, mPreviewFrame,
+    status_t query_res = mQemuClient.queryFrame(rawFrame, previewFrame,
                                                  mFrameBufferSize,
                                                  mTotalPixels * 4,
                                                  mWhiteBalanceScale[0],
                                                  mWhiteBalanceScale[1],
                                                  mWhiteBalanceScale[2],
                                                  mExposureCompensation);
-    if (query_res == NO_ERROR) {
-        /* Timestamp the current frame, and notify the camera HAL. */
-        mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
-        mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
-        return true;
-    } else {
+    if (query_res != NO_ERROR) {
         ALOGE("%s: Unable to get current video frame: %s",
              __FUNCTION__, strerror(query_res));
-        mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
         return false;
     }
+    return true;
+}
+
+void* EmulatedQemuCameraDevice::getPrimaryBuffer() {
+    return &mFrameBufferPairs[0];
+}
+void* EmulatedQemuCameraDevice::getSecondaryBuffer() {
+    return &mFrameBufferPairs[1];
 }
 
 }; /* namespace android */
diff --git a/camera/EmulatedQemuCameraDevice.h b/camera/EmulatedQemuCameraDevice.h
index 8ef562b..ed19f6c 100755
--- a/camera/EmulatedQemuCameraDevice.h
+++ b/camera/EmulatedQemuCameraDevice.h
@@ -80,11 +80,16 @@
      **************************************************************************/
 
 public:
-    /* Gets current preview fame into provided buffer.
-     * We override this method in order to provide preview frames cached in this
-     * object.
-     */
-    status_t getCurrentPreviewFrame(void* buffer);
+
+    /* Copy the current frame to |buffer| */
+    status_t getCurrentFrame(void* buffer, uint32_t pixelFormat) override;
+
+    /* Copy the current preview frame to |buffer| */
+    status_t getCurrentPreviewFrame(void* buffer) override;
+
+    /* Get a pointer to the current frame, lock it first using FrameLock in
+     * EmulatedCameraDevice class */
+    const void* getCurrentFrame() override;
 
     /***************************************************************************
      * Worker thread management overrides.
@@ -93,8 +98,11 @@
      **************************************************************************/
 
 protected:
-    /* Implementation of the worker thread routine. */
-    bool inWorkerThread();
+    /* Implementation of the frame production routine. */
+    bool produceFrame(void* buffer) override;
+
+    void* getPrimaryBuffer() override;
+    void* getSecondaryBuffer() override;
 
     /***************************************************************************
      * Qemu camera device data members
@@ -109,11 +117,20 @@
     String8             mDeviceName;
 
     /* Current preview framebuffer. */
-    uint32_t*           mPreviewFrame;
+    std::vector<uint32_t> mPreviewFrames[2];
 
-    /* Emulated FPS (frames per second).
-     * We will emulate 50 FPS. */
-    static const int    mEmulatedFPS = 50;
+    /* Since the Qemu camera needs to keep track of two buffers per frame we
+     * use a pair here. One frame is the camera frame and the other is the
+     * preview frame. These are in different formats and instead of converting
+     * them in the guest it's more efficient to have the host provide the same
+     * frame in two different formats. The first buffer in the pair is the raw
+     * frame and the second buffer is the RGB encoded frame. The downside of
+     * this is that we need to override the getCurrentFrame and
+     * getCurrentPreviewFrame methods to extract the correct buffer from this
+     * pair. */
+    using FrameBufferPair = std::pair<uint8_t*, uint32_t*>;
+    FrameBufferPair     mFrameBufferPairs[2];
+
 };
 
 }; /* namespace android */
diff --git a/camera/Exif.cpp b/camera/Exif.cpp
new file mode 100644
index 0000000..aa58f4b
--- /dev/null
+++ b/camera/Exif.cpp
@@ -0,0 +1,372 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Exif.h"
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Exif"
+#include <cutils/log.h>
+
+#include <inttypes.h>
+#include <math.h>
+#include <stdint.h>
+
+#include <camera/CameraParameters.h>
+#include <libexif/exif-data.h>
+#include <libexif/exif-entry.h>
+#include <libexif/exif-ifd.h>
+#include <libexif/exif-tag.h>
+
+#include <string>
+#include <vector>
+
+// For GPS timestamping we want to ensure we use a 64-bit time_t, 32-bit
+// platforms have time64_t but 64-bit platforms do not.
+#if defined(__LP64__)
+#include <time.h>
+using Timestamp = time_t;
+#define TIMESTAMP_TO_TM(timestamp, tm) gmtime_r(timestamp, tm)
+#else
+#include <time64.h>
+using Timestamp = time64_t;
+#define TIMESTAMP_TO_TM(timestamp, tm) gmtime64_r(timestamp, tm)
+#endif
+
+namespace android {
+
+// A prefix that is used for tags with the "undefined" format to indicate that
+// the contents are ASCII encoded. See the user comment section of the EXIF spec
+// for more details http://www.exif.org/Exif2-2.PDF
+static const unsigned char kAsciiPrefix[] = {
+    0x41, 0x53, 0x43, 0x49, 0x49, 0x00, 0x00, 0x00 // "ASCII\0\0\0"
+};
+
+// Remove an existing EXIF entry from |exifData| if it exists. This is useful
+// when replacing existing data, it's easier to just remove the data and
+// re-allocate it than to adjust the amount of allocated data.
+static void removeExistingEntry(ExifData* exifData, ExifIfd ifd, int tag) {
+    ExifEntry* entry = exif_content_get_entry(exifData->ifd[ifd],
+                                              static_cast<ExifTag>(tag));
+    if (entry) {
+        exif_content_remove_entry(exifData->ifd[ifd], entry);
+    }
+}
+
+static ExifEntry* allocateEntry(int tag,
+                                ExifFormat format,
+                                unsigned int numComponents) {
+    ExifMem* mem = exif_mem_new_default();
+    ExifEntry* entry = exif_entry_new_mem(mem);
+
+    unsigned int size = numComponents * exif_format_get_size(format);
+    entry->data = reinterpret_cast<unsigned char*>(exif_mem_alloc(mem, size));
+    entry->size = size;
+    entry->tag = static_cast<ExifTag>(tag);
+    entry->components = numComponents;
+    entry->format = format;
+
+    exif_mem_unref(mem);
+    return entry;
+}
+
+// Create an entry and place it in |exifData|, the entry is initialized with an
+// array of floats from |values|
+template<size_t N>
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        const float (&values)[N],
+                        float denominator = 1000.0) {
+    removeExistingEntry(exifData, ifd, tag);
+    ExifByteOrder byteOrder = exif_data_get_byte_order(exifData);
+    ExifEntry* entry = allocateEntry(tag, EXIF_FORMAT_RATIONAL, N);
+    exif_content_add_entry(exifData->ifd[ifd], entry);
+    unsigned int rationalSize = exif_format_get_size(EXIF_FORMAT_RATIONAL);
+    for (size_t i = 0; i < N; ++i) {
+        ExifRational rational = {
+            static_cast<uint32_t>(values[i] * denominator),
+            static_cast<uint32_t>(denominator)
+        };
+
+        exif_set_rational(&entry->data[i * rationalSize], byteOrder, rational);
+    }
+
+    // Unref entry after changing owner to the ExifData struct
+    exif_entry_unref(entry);
+    return true;
+}
+
+// Create an entry with a single float |value| in it and place it in |exifData|
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        const float value,
+                        float denominator = 1000.0) {
+    float values[1] = { value };
+    // Recycling functions is good for the environment
+    return createEntry(exifData, ifd, tag, values, denominator);
+}
+
+// Create an entry and place it in |exifData|, the entry contains the raw data
+// pointed to by |data| of length |size|.
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        const unsigned char* data,
+                        size_t size,
+                        ExifFormat format = EXIF_FORMAT_UNDEFINED) {
+    removeExistingEntry(exifData, ifd, tag);
+    ExifEntry* entry = allocateEntry(tag, format, size);
+    memcpy(entry->data, data, size);
+    exif_content_add_entry(exifData->ifd[ifd], entry);
+    // Unref entry after changing owner to the ExifData struct
+    exif_entry_unref(entry);
+    return true;
+}
+
+// Create an entry and place it in |exifData|, the entry is initialized with
+// the string provided in |value|
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        const char* value) {
+    unsigned int length = strlen(value) + 1;
+    const unsigned char* data = reinterpret_cast<const unsigned char*>(value);
+    return createEntry(exifData, ifd, tag, data, length, EXIF_FORMAT_ASCII);
+}
+
+// Create an entry and place it in |exifData|, the entry is initialized with a
+// single byte in |value|
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        uint8_t value) {
+    return createEntry(exifData, ifd, tag, &value, 1, EXIF_FORMAT_BYTE);
+}
+
+// Create an entry and place it in |exifData|, the entry is default initialized
+// by the exif library based on |tag|
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag) {
+    removeExistingEntry(exifData, ifd, tag);
+    ExifEntry* entry = exif_entry_new();
+    exif_content_add_entry(exifData->ifd[ifd], entry);
+    exif_entry_initialize(entry, static_cast<ExifTag>(tag));
+    // Unref entry after changing owner to the ExifData struct
+    exif_entry_unref(entry);
+    return true;
+}
+
+// Create an entry with a single EXIF LONG (32-bit value) and place it in
+// |exifData|.
+static bool createEntry(ExifData* exifData,
+                        ExifIfd ifd,
+                        int tag,
+                        int value) {
+    removeExistingEntry(exifData, ifd, tag);
+    ExifByteOrder byteOrder = exif_data_get_byte_order(exifData);
+    ExifEntry* entry = allocateEntry(tag, EXIF_FORMAT_LONG, 1);
+    exif_content_add_entry(exifData->ifd[ifd], entry);
+    exif_set_long(entry->data, byteOrder, value);
+
+    // Unref entry after changing owner to the ExifData struct
+    exif_entry_unref(entry);
+    return true;
+}
+
+static bool getCameraParam(const CameraParameters& parameters,
+                           const char* parameterKey,
+                           const char** outValue) {
+    const char* value = parameters.get(parameterKey);
+    if (value) {
+        *outValue = value;
+        return true;
+    }
+    return false;
+}
+
+static bool getCameraParam(const CameraParameters& parameters,
+                           const char* parameterKey,
+                           float* outValue) {
+    const char* value = parameters.get(parameterKey);
+    if (value) {
+        *outValue = parameters.getFloat(parameterKey);
+        return true;
+    }
+    return false;
+}
+
+static bool getCameraParam(const CameraParameters& parameters,
+                           const char* parameterKey,
+                           int64_t* outValue) {
+    const char* value = parameters.get(parameterKey);
+    if (value) {
+        char dummy = 0;
+        // Attempt to scan an extra character and then make sure it was not
+        // scanned by checking that the return value indicates only one item.
+        // This way we fail on any trailing characters
+        if (sscanf(value, "%" SCNd64 "%c", outValue, &dummy) == 1) {
+            return true;
+        }
+    }
+    return false;
+}
+
+// Convert a GPS coordinate represented as a decimal degree value to sexagesimal
+// GPS coordinates comprised of <degrees> <minutes>' <seconds>"
+static void convertGpsCoordinate(float degrees, float (*result)[3]) {
+    float absDegrees = fabs(degrees);
+    // First value is degrees without any decimal digits
+    (*result)[0] = floor(absDegrees);
+
+    // Subtract degrees so we only have the fraction left, then multiply by
+    // 60 to get the minutes
+    float minutes = (absDegrees - (*result)[0]) * 60.0f;
+    (*result)[1] = floor(minutes);
+
+    // Same thing for seconds but here we store seconds with the fraction
+    float seconds = (minutes - (*result)[1]) * 60.0f;
+    (*result)[2] = seconds;
+}
+
+// Convert a UNIX epoch timestamp to a timestamp comprised of three floats for
+// hour, minute and second, and a date part that is represented as a string.
+static bool convertTimestampToTimeAndDate(int64_t timestamp,
+                                          float (*timeValues)[3],
+                                          std::string* date) {
+    Timestamp time = timestamp;
+    struct tm utcTime;
+    if (TIMESTAMP_TO_TM(&time, &utcTime) == nullptr) {
+        ALOGE("Could not decompose timestamp into components");
+        return false;
+    }
+    (*timeValues)[0] = utcTime.tm_hour;
+    (*timeValues)[1] = utcTime.tm_min;
+    (*timeValues)[2] = utcTime.tm_sec;
+
+    char buffer[64] = {};
+    if (strftime(buffer, sizeof(buffer), "%Y:%m:%d", &utcTime) == 0) {
+        ALOGE("Could not construct date string from timestamp");
+        return false;
+    }
+    *date = buffer;
+    return true;
+}
+
+ExifData* createExifData(const CameraParameters& params) {
+    ExifData* exifData = exif_data_new();
+
+    exif_data_set_option(exifData, EXIF_DATA_OPTION_FOLLOW_SPECIFICATION);
+    exif_data_set_data_type(exifData, EXIF_DATA_TYPE_COMPRESSED);
+    exif_data_set_byte_order(exifData, EXIF_BYTE_ORDER_INTEL);
+
+    // Create mandatory exif fields and set their default values
+    exif_data_fix(exifData);
+
+    float triplet[3];
+    float floatValue = 0.0f;
+    const char* stringValue;
+
+    // Datetime, creating and initializing a datetime tag will automatically
+    // set the current date and time in the tag so just do that.
+    createEntry(exifData, EXIF_IFD_0, EXIF_TAG_DATE_TIME);
+
+    // Picture size
+    int width = -1, height = -1;
+    params.getPictureSize(&width, &height);
+    if (width >= 0 && height >= 0) {
+        createEntry(exifData, EXIF_IFD_EXIF,
+                    EXIF_TAG_PIXEL_X_DIMENSION, width);
+        createEntry(exifData, EXIF_IFD_EXIF,
+                    EXIF_TAG_PIXEL_Y_DIMENSION, height);
+    }
+    // Focal length
+    if (getCameraParam(params,
+                       CameraParameters::KEY_FOCAL_LENGTH,
+                       &floatValue)) {
+        createEntry(exifData, EXIF_IFD_EXIF, EXIF_TAG_FOCAL_LENGTH, floatValue);
+    }
+    // GPS latitude and reference, reference indicates sign, store unsigned
+    if (getCameraParam(params,
+                       CameraParameters::KEY_GPS_LATITUDE,
+                       &floatValue)) {
+        convertGpsCoordinate(floatValue, &triplet);
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_LATITUDE, triplet);
+
+        const char* ref = floatValue < 0.0f ? "S" : "N";
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_LATITUDE_REF, ref);
+    }
+    // GPS longitude and reference, reference indicates sign, store unsigned
+    if (getCameraParam(params,
+                       CameraParameters::KEY_GPS_LONGITUDE,
+                       &floatValue)) {
+        convertGpsCoordinate(floatValue, &triplet);
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_LONGITUDE, triplet);
+
+        const char* ref = floatValue < 0.0f ? "W" : "E";
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_LONGITUDE_REF, ref);
+    }
+    // GPS altitude and reference, reference indicates sign, store unsigned
+    if (getCameraParam(params,
+                       CameraParameters::KEY_GPS_ALTITUDE,
+                       &floatValue)) {
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_ALTITUDE,
+                    static_cast<float>(fabs(floatValue)));
+
+        // 1 indicated below sea level, 0 indicates above sea level
+        uint8_t ref = floatValue < 0.0f ? 1 : 0;
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_ALTITUDE_REF, ref);
+    }
+    // GPS timestamp and datestamp
+    int64_t timestamp = 0;
+    if (getCameraParam(params,
+                       CameraParameters::KEY_GPS_TIMESTAMP,
+                       &timestamp)) {
+        std::string date;
+        if (convertTimestampToTimeAndDate(timestamp, &triplet, &date)) {
+            createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_TIME_STAMP,
+                        triplet, 1.0f);
+            createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_DATE_STAMP,
+                        date.c_str());
+        }
+    }
+
+    // GPS processing method
+    if (getCameraParam(params,
+                       CameraParameters::KEY_GPS_PROCESSING_METHOD,
+                       &stringValue)) {
+        std::vector<unsigned char> data;
+        // Because this is a tag with an undefined format it has to be prefixed
+        // with the encoding type. Insert an ASCII prefix first, then the
+        // actual string. Undefined tags do not have to be null terminated.
+        data.insert(data.end(),
+                    std::begin(kAsciiPrefix),
+                    std::end(kAsciiPrefix));
+        data.insert(data.end(), stringValue, stringValue + strlen(stringValue));
+        createEntry(exifData, EXIF_IFD_GPS, EXIF_TAG_GPS_PROCESSING_METHOD,
+                    &data[0], data.size());
+    }
+
+    return exifData;
+}
+
+void freeExifData(ExifData* exifData) {
+    exif_data_free(exifData);
+}
+
+}  // namespace android
+
diff --git a/camera/Exif.h b/camera/Exif.h
new file mode 100644
index 0000000..dad588d
--- /dev/null
+++ b/camera/Exif.h
@@ -0,0 +1,39 @@
+
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GOLDFISH_CAMERA_EXIF_H
+#define GOLDFISH_CAMERA_EXIF_H
+
+struct _ExifData;
+typedef struct _ExifData ExifData;
+
+namespace android {
+
+class CameraParameters;
+
+/* Create an EXIF data structure based on camera parameters. This includes
+ * things like GPS information that has been set by the camera client.
+ */
+ExifData* createExifData(const CameraParameters& parameters);
+
+/* Free EXIF data created in the createExifData call */
+void freeExifData(ExifData* exifData);
+
+}  // namespace android
+
+#endif  // GOLDFISH_CAMERA_EXIF_H
+
diff --git a/camera/JpegCompressor.cpp b/camera/JpegCompressor.cpp
index 8014ccf..9694ca3 100644
--- a/camera/JpegCompressor.cpp
+++ b/camera/JpegCompressor.cpp
@@ -37,10 +37,10 @@
     return res;
 }
 
-typedef void (*InitFunc)(JpegStub* stub, int* strides);
+typedef void (*InitFunc)(JpegStub* stub);
 typedef void (*CleanupFunc)(JpegStub* stub);
 typedef int (*CompressFunc)(JpegStub* stub, const void* image,
-        int width, int height, int quality);
+        int width, int height, int quality, ExifData* exifData);
 typedef void (*GetCompressedImageFunc)(JpegStub* stub, void* buff);
 typedef size_t (*GetCompressedSizeFunc)(JpegStub* stub);
 
@@ -53,7 +53,7 @@
     assert(mDl != NULL);
 
     InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init");
-    (*f)(&mStub, mStrides);
+    (*f)(&mStub);
 }
 
 NV21JpegCompressor::~NV21JpegCompressor()
@@ -69,12 +69,11 @@
 status_t NV21JpegCompressor::compressRawImage(const void* image,
                                               int width,
                                               int height,
-                                              int quality)
+                                              int quality,
+                                              ExifData* exifData)
 {
-    mStrides[0] = width;
-    mStrides[1] = width;
     CompressFunc f = (CompressFunc)getSymbol(mDl, "JpegStub_compress");
-    return (status_t)(*f)(&mStub, image, width, height, quality);
+    return (status_t)(*f)(&mStub, image, width, height, quality, exifData);
 }
 
 
diff --git a/camera/JpegCompressor.h b/camera/JpegCompressor.h
index a6454d2..4ea595e 100644
--- a/camera/JpegCompressor.h
+++ b/camera/JpegCompressor.h
@@ -22,7 +22,7 @@
  * converter between YV21, and JPEG formats.
  */
 
-#include "JpegStub.h"
+#include "jpeg-stub/JpegStub.h"
 #include <utils/threads.h>
 
 namespace android {
@@ -50,6 +50,7 @@
      *  image - Raw NV21 image.
      *  width, height - Image dimensions.
      *  quality - JPEG quality.
+     *  exifData - an EXIF data structure to attach to the image, may be null
      * Return:
      *  NO_ERROR on success, or an appropriate error status.
      *
@@ -57,7 +58,8 @@
     status_t compressRawImage(const void* image,
                               int width,
                               int height,
-                              int quality);
+                              int quality,
+                              ExifData* exifData);
 
     /* Get size of the compressed JPEG buffer.
      * This method must be called only after a successful completion of
@@ -80,10 +82,6 @@
      * Class data
      ***************************************************************************/
 
-protected:
-    /* Strides for Y (the first element), and UV (the second one) panes. */
-    int                     mStrides[2];
-
 private:
     // library handle to dlopen
     static void* mDl;
diff --git a/camera/JpegStub.cpp b/camera/JpegStub.cpp
deleted file mode 100644
index 084f5fc..0000000
--- a/camera/JpegStub.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_NDEBUG 0
-#define LOG_TAG "EmulatedCamera_JPEGStub"
-#include <errno.h>
-#include <cutils/log.h>
-#include <YuvToJpegEncoder.h>
-
-#include "JpegStub.h"
-
-
-extern "C" void JpegStub_init(JpegStub* stub, int* strides) {
-    stub->mInternalEncoder = (void*) new Yuv420SpToJpegEncoder(strides);
-    stub->mInternalStream = (void*)new SkDynamicMemoryWStream();
-}
-
-extern "C" void JpegStub_cleanup(JpegStub* stub) {
-    delete((Yuv420SpToJpegEncoder*)stub->mInternalEncoder);
-    delete((SkDynamicMemoryWStream*)stub->mInternalStream);
-}
-
-extern "C" int JpegStub_compress(JpegStub* stub, const void* image,
-        int width, int height, int quality)
-{
-    void* pY = const_cast<void*>(image);
-    int offsets[2];
-    offsets[0] = 0;
-    offsets[1] = width * height;
-
-    Yuv420SpToJpegEncoder* encoder =
-        (Yuv420SpToJpegEncoder*)stub->mInternalEncoder;
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    if (encoder->encode(stream, pY, width, height, offsets, quality)) {
-        ALOGV("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes",
-              __FUNCTION__, (width * height * 12) / 8,
-              width, height, stream->getOffset());
-        return 0;
-    } else {
-        ALOGE("%s: JPEG compression failed", __FUNCTION__);
-        return errno ? errno: EINVAL;
-    }
-}
-
-extern "C" void JpegStub_getCompressedImage(JpegStub* stub, void* buff) {
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    stream->copyTo(buff);
-}
-
-extern "C" size_t JpegStub_getCompressedSize(JpegStub* stub) {
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    return stream->getOffset();
-}
diff --git a/camera/PreviewWindow.cpp b/camera/PreviewWindow.cpp
index 4101ed9..68d3d5d 100755
--- a/camera/PreviewWindow.cpp
+++ b/camera/PreviewWindow.cpp
@@ -31,7 +31,6 @@
 
 PreviewWindow::PreviewWindow()
     : mPreviewWindow(NULL),
-      mLastPreviewed(0),
       mPreviewFrameWidth(0),
       mPreviewFrameHeight(0),
       mPreviewEnabled(false)
@@ -56,18 +55,13 @@
 
     /* Reset preview info. */
     mPreviewFrameWidth = mPreviewFrameHeight = 0;
-    mPreviewAfter = 0;
-    mLastPreviewed = 0;
 
     if (window != NULL) {
         /* The CPU will write each frame to the preview window buffer.
          * Note that we delay setting preview window buffer geometry until
          * frames start to come in. */
         res = window->set_usage(window, GRALLOC_USAGE_SW_WRITE_OFTEN);
-        if (res == NO_ERROR) {
-            /* Set preview frequency. */
-            mPreviewAfter = 1000000 / preview_fps;
-        } else {
+        if (res != NO_ERROR) {
             window = NULL;
             res = -res; // set_usage returns a negative errno.
             ALOGE("%s: Error setting preview window usage %d -> %s",
@@ -101,14 +95,13 @@
  * Public API
  ***************************************************************************/
 
-void PreviewWindow::onNextFrameAvailable(const void* frame,
-                                         nsecs_t timestamp,
+void PreviewWindow::onNextFrameAvailable(nsecs_t timestamp,
                                          EmulatedCameraDevice* camera_dev)
 {
     int res;
     Mutex::Autolock locker(&mObjectLock);
 
-    if (!isPreviewEnabled() || mPreviewWindow == NULL || !isPreviewTime()) {
+    if (!isPreviewEnabled() || mPreviewWindow == NULL) {
         return;
     }
 
@@ -201,16 +194,4 @@
     return true;
 }
 
-bool PreviewWindow::isPreviewTime()
-{
-    timeval cur_time;
-    gettimeofday(&cur_time, NULL);
-    const uint64_t cur_mks = cur_time.tv_sec * 1000000LL + cur_time.tv_usec;
-    if ((cur_mks - mLastPreviewed) >= mPreviewAfter) {
-        mLastPreviewed = cur_mks;
-        return true;
-    }
-    return false;
-}
-
 }; /* namespace android */
diff --git a/camera/PreviewWindow.h b/camera/PreviewWindow.h
index d037c95..ef988e5 100755
--- a/camera/PreviewWindow.h
+++ b/camera/PreviewWindow.h
@@ -86,21 +86,16 @@
 public:
     /* Next frame is available in the camera device.
      * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
+     * a new frame is available. The frame is available through the |camera_dev|
+     * object. Remember to use an EmulatedCameraDevice::FrameLock object to
+     * protect access to the frame while using it.
      * Note that most likely this method is called in context of a worker thread
      * that camera device has created for frame capturing.
      * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
      * timestamp - Frame's timestamp.
      * camera_dev - Camera device instance that delivered the frame.
      */
-    void onNextFrameAvailable(const void* frame,
-                              nsecs_t timestamp,
+    void onNextFrameAvailable(nsecs_t timestamp,
                               EmulatedCameraDevice* camera_dev);
 
     /***************************************************************************
@@ -127,10 +122,6 @@
      */
     bool adjustPreviewDimensions(EmulatedCameraDevice* camera_dev);
 
-    /* Checks if it's the time to push new frame to the preview window.
-     * Note that this method must be called while object is locked. */
-    bool isPreviewTime();
-
     /***************************************************************************
      * Data members
      **************************************************************************/
@@ -142,13 +133,6 @@
     /* Preview window instance. */
     preview_stream_ops*             mPreviewWindow;
 
-    /* Timestamp (abs. microseconds) when last frame has been pushed to the
-     * preview window. */
-    uint64_t                        mLastPreviewed;
-
-    /* Preview frequency in microseconds. */
-    uint32_t                        mPreviewAfter;
-
     /*
      * Cached preview window frame dimensions.
      */
diff --git a/camera/QemuClient.cpp b/camera/QemuClient.cpp
index 15c9d00..c769b29 100755
--- a/camera/QemuClient.cpp
+++ b/camera/QemuClient.cpp
@@ -34,7 +34,7 @@
 #endif  // LOG_QUERIES
 
 #define QEMU_PIPE_DEBUG  LOGQ
-#include <system/qemu_pipe.h>
+#include "qemu_pipe.h"
 
 namespace android {
 
@@ -228,7 +228,7 @@
     if (param == NULL || *param == '\0') {
         /* No parameters: connect to the factory service. */
         char pipe_name[512];
-        snprintf(pipe_name, sizeof(pipe_name), "pipe:qemud:%s",
+        snprintf(pipe_name, sizeof(pipe_name), "qemud:%s",
                  mCameraServiceName);
         mPipeFD = qemu_pipe_open(pipe_name);
     } else {
@@ -236,7 +236,7 @@
          * characters for 'pipe:qemud:'. This is required by pipe protocol. */
         char* connection_str = new char[strlen(mCameraServiceName) +
                                         strlen(param) + 8];
-        sprintf(connection_str, "pipe:qemud:%s:%s", mCameraServiceName, param);
+        sprintf(connection_str, "qemud:%s:%s", mCameraServiceName, param);
 
         mPipeFD = qemu_pipe_open(connection_str);
         delete[] connection_str;
diff --git a/camera/Thumbnail.cpp b/camera/Thumbnail.cpp
new file mode 100644
index 0000000..4d66f10
--- /dev/null
+++ b/camera/Thumbnail.cpp
@@ -0,0 +1,170 @@
+/*
+* Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Thumbnail.h"
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_Thumbnail"
+#include <cutils/log.h>
+#include <libexif/exif-data.h>
+#include <libyuv.h>
+
+#include "JpegCompressor.h"
+
+#include <vector>
+
+/*
+ * The NV21 format is a YUV format with an 8-bit Y-component and the U and V
+ * components are stored as 8 bits each but they are shared between a block of
+ * 2x2 pixels. So when calculating bits per pixel the 16 bits of U and V are
+ * shared between 4 pixels leading to 4 bits of U and V per pixel. Together
+ * with the 8 bits of Y this gives us 12 bits per pixel..
+ *
+ * The components are not grouped by pixels but separated into one Y-plane and
+ * one interleaved U and V-plane. The first half of the byte sequence is all of
+ * the Y data laid out in a linear fashion. After that the interleaved U and V-
+ * plane starts with one byte of V followed by one byte of U followed by one
+ * byte of V and so on. Each byte of U or V is associated with a 2x2 pixel block
+ * in a linear fashion.
+ *
+ * For an 8 by 4 pixel image the layout would be:
+ *
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | Y0  | Y1  | Y2  | Y3  | Y4  | Y5  | Y6  | Y7  |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | Y8  | Y9  | Y10 | Y11 | Y12 | Y13 | Y14 | Y15 |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | Y16 | Y17 | Y18 | Y19 | Y20 | Y21 | Y22 | Y23 |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | Y24 | Y25 | Y26 | Y27 | Y28 | Y29 | Y30 | Y31 |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | V0  | U0  | V1  | U1  | V2  | U2  | V3  | U3  |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ * | V4  | U4  | V5  | U5  | V6  | U6  | V7  | U7  |
+ * +-----+-----+-----+-----+-----+-----+-----+-----+
+ *
+ * In this image V0 and U0 are the V and U components for the 2x2 block of
+ * pixels whose Y components are Y0, Y1, Y8 and Y9. V1 and U1 are matched with
+ * the Y components Y2, Y3, Y10, Y11, and so on for that row. For the next row
+ * of V and U the V4 and U4 components would be paired with Y16, Y17, Y24 and
+ * Y25.
+ */
+
+namespace android {
+
+static bool createRawThumbnail(const unsigned char* sourceImage,
+                               int sourceWidth, int sourceHeight,
+                               int thumbnailWidth, int thumbnailHeight,
+                               std::vector<unsigned char>* thumbnail) {
+    // Deinterleave the U and V planes into separate planes, this is because
+    // libyuv requires the planes to be separate when scaling
+    const size_t sourceUVPlaneSize = (sourceWidth * sourceHeight) / 4;
+    // Put both U and V planes in one buffer, one after the other, to reduce
+    // memory fragmentation and number of allocations
+    std::vector<unsigned char> sourcePlanes(sourceUVPlaneSize * 2);
+    const unsigned char* ySourcePlane = sourceImage;
+    unsigned char* uSourcePlane = &sourcePlanes[0];
+    unsigned char* vSourcePlane = &sourcePlanes[sourceUVPlaneSize];
+
+    for (size_t i = 0; i < sourceUVPlaneSize; ++i) {
+        vSourcePlane[i] = sourceImage[sourceWidth * sourceHeight + i * 2 + 0];
+        uSourcePlane[i] = sourceImage[sourceWidth * sourceHeight + i * 2 + 1];
+    }
+
+    // Create enough space in the output vector for the result
+    thumbnail->resize((thumbnailWidth * thumbnailHeight * 12) / 8);
+
+    // The downscaled U and V planes will also be linear instead of interleaved,
+    // allocate space for them here
+    const size_t destUVPlaneSize = (thumbnailWidth * thumbnailHeight) / 4;
+    std::vector<unsigned char> destPlanes(destUVPlaneSize * 2);
+    unsigned char* yDestPlane = &(*thumbnail)[0];
+    unsigned char* uDestPlane = &destPlanes[0];
+    unsigned char* vDestPlane = &destPlanes[destUVPlaneSize];
+
+    // The strides for the U and V planes are half the width because the U and V
+    // components are common to 2x2 pixel blocks
+    int result = libyuv::I420Scale(ySourcePlane, sourceWidth,
+                                   uSourcePlane, sourceWidth / 2,
+                                   vSourcePlane, sourceWidth / 2,
+                                   sourceWidth, sourceHeight,
+                                   yDestPlane, thumbnailWidth,
+                                   uDestPlane, thumbnailWidth / 2,
+                                   vDestPlane, thumbnailWidth / 2,
+                                   thumbnailWidth, thumbnailHeight,
+                                   libyuv::kFilterBilinear);
+    if (result != 0) {
+        ALOGE("Unable to create thumbnail, downscaling failed with error: %d",
+              result);
+        return false;
+    }
+
+    // Now we need to interleave the downscaled U and V planes into the
+    // output buffer to make it NV21 encoded
+    const size_t uvPlanesOffset = thumbnailWidth * thumbnailHeight;
+    for (size_t i = 0; i < destUVPlaneSize; ++i) {
+        (*thumbnail)[uvPlanesOffset + i * 2 + 0] = vDestPlane[i];
+        (*thumbnail)[uvPlanesOffset + i * 2 + 1] = uDestPlane[i];
+    }
+
+    return true;
+}
+
+bool createThumbnail(const unsigned char* sourceImage,
+                     int sourceWidth, int sourceHeight,
+                     int thumbWidth, int thumbHeight, int quality,
+                     ExifData* exifData) {
+    if (thumbWidth <= 0 || thumbHeight <= 0) {
+        ALOGE("%s: Invalid thumbnail width=%d or height=%d, must be > 0",
+              __FUNCTION__, thumbWidth, thumbHeight);
+        return false;
+    }
+
+    // First downscale the source image into a thumbnail-sized raw image
+    std::vector<unsigned char> rawThumbnail;
+    if (!createRawThumbnail(sourceImage, sourceWidth, sourceHeight,
+                            thumbWidth, thumbHeight, &rawThumbnail)) {
+        // The thumbnail function will log an appropriate error if needed
+        return false;
+    }
+
+    // And then compress it into JPEG format without any EXIF data
+    NV21JpegCompressor compressor;
+    status_t result = compressor.compressRawImage(&rawThumbnail[0],
+                                                  thumbWidth, thumbHeight,
+                                                  quality, nullptr /* EXIF */);
+    if (result != NO_ERROR) {
+        ALOGE("%s: Unable to compress thumbnail", __FUNCTION__);
+        return false;
+    }
+
+    // And finally put it in the EXIF data. This transfers ownership of the
+    // malloc'd memory to the EXIF data structure. As long as the EXIF data
+    // structure is free'd using the EXIF library this memory will be free'd.
+    exifData->size = compressor.getCompressedSize();
+    exifData->data = reinterpret_cast<unsigned char*>(malloc(exifData->size));
+    if (exifData->data == nullptr) {
+        ALOGE("%s: Unable to allocate %u bytes of memory for thumbnail",
+              __FUNCTION__, exifData->size);
+        exifData->size = 0;
+        return false;
+    }
+    compressor.getCompressedImage(exifData->data);
+    return true;
+}
+
+}  // namespace android
+
diff --git a/camera/Thumbnail.h b/camera/Thumbnail.h
new file mode 100644
index 0000000..b27636c
--- /dev/null
+++ b/camera/Thumbnail.h
@@ -0,0 +1,37 @@
+/*
+* Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GOLDFISH_CAMERA_THUMBNAIL_H
+#define GOLDFISH_CAMERA_THUMBNAIL_H
+
+struct _ExifData;
+typedef struct _ExifData ExifData;
+
+namespace android {
+
+/* Create a thumbnail from NV21 source data in |sourceImage| with the given
+ * dimensions. The resulting thumbnail is JPEG compressed and a pointer and size
+ * is placed in |exifData| which takes ownership of the allocated memory.
+ */
+bool createThumbnail(const unsigned char* sourceImage,
+                     int sourceWidth, int sourceHeight,
+                     int thumbnailWidth, int thumbnailHeight, int quality,
+                     ExifData* exifData);
+
+}  // namespace android
+
+#endif  // GOLDFISH_CAMERA_THUMBNAIL_H
+
diff --git a/camera/WorkerThread.cpp b/camera/WorkerThread.cpp
new file mode 100644
index 0000000..2b5fe92
--- /dev/null
+++ b/camera/WorkerThread.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "WorkerThread.h"
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_WorkerThread"
+#include <cutils/log.h>
+
+#include <algorithm>
+
+namespace android {
+
+WorkerThread::WorkerThread(const char* threadName,
+                           EmulatedCameraDevice* cameraDevice,
+                           EmulatedCamera* cameraHAL)
+    : Thread(true),   // Callbacks may involve Java calls.
+      mCameraDevice(cameraDevice),
+      mCameraHAL(cameraHAL),
+      mRunning(false),
+      mThreadName(threadName) {
+}
+
+status_t WorkerThread::startThread(bool oneBurst) {
+    ALOGV("Starting worker thread, oneBurst=%s", oneBurst ? "true" : "false");
+    mOneBurst = oneBurst;
+    {
+        Mutex::Autolock lock(mRunningMutex);
+        mRunning = true;
+    }
+    return run(mThreadName, ANDROID_PRIORITY_URGENT_DISPLAY, 0);
+}
+
+status_t WorkerThread::stopThread() {
+    ALOGV("%s: Stopping worker thread...", __FUNCTION__);
+
+    Mutex::Autolock lock(mRunningMutex);
+    mRunning = false;
+    mRunningCondition.signal();
+    return NO_ERROR;
+}
+
+status_t WorkerThread::wakeThread() {
+    ALOGV("%s: Waking emulated camera device's worker thread...", __FUNCTION__);
+
+    mRunningCondition.signal();
+    return NO_ERROR;
+}
+
+status_t WorkerThread::joinThread() {
+    return join();
+}
+
+status_t WorkerThread::readyToRun()
+{
+    status_t res = onThreadStart();
+    if (res != NO_ERROR) {
+        return res;
+    }
+    return NO_ERROR;
+}
+
+bool WorkerThread::threadLoop() {
+    if (inWorkerThread() && !mOneBurst) {
+        /* Only return true if we're running. If mRunning has been set to false
+         * we fall through to ensure that onThreadExit is called. */
+        Mutex::Autolock lock(mRunningMutex);
+        if (mRunning) {
+            return true;
+        }
+    }
+    onThreadExit();
+    ALOGV("%s: Exiting thread, mOneBurst=%s",
+          __FUNCTION__, mOneBurst ? "true" : "false");
+    return false;
+}
+
+}  // namespace android
+
diff --git a/camera/WorkerThread.h b/camera/WorkerThread.h
new file mode 100644
index 0000000..fc40e67
--- /dev/null
+++ b/camera/WorkerThread.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HW_EMULATOR_CAMERA_WORKER_THREAD_H
+#define HW_EMULATOR_CAMERA_WORKER_THREAD_H
+
+#include <utils/Thread.h>
+
+namespace android {
+
+class EmulatedCamera;
+class EmulatedCameraDevice;
+
+class WorkerThread : public Thread {
+public:
+    WorkerThread(const char* threadName,
+                 EmulatedCameraDevice* camera_dev,
+                 EmulatedCamera* cameraHAL);
+    virtual ~WorkerThread() {}
+
+    /* Starts the thread
+     * Param:
+     *  oneBurst - Controls how many times thread loop should run. If
+     *      this parameter is 'true', thread routine will run only once
+     *      If this parameter is 'false', thread routine will run until
+     *      stopThread method is called. See startWorkerThread for more
+     *      info.
+     * Return:
+     *  NO_ERROR on success, or an appropriate error status.
+     */
+    status_t startThread(bool oneBurst);
+
+    /* Stops the thread, this only requests that the thread exits. The method
+     * will return right after the request has been made. Use joinThread to
+     * wait for the thread to exit. */
+    status_t stopThread();
+
+    /* Wake a thread that's currently waiting to timeout or to be awoken */
+    status_t wakeThread();
+
+    /* Join the thread, waits until the thread exits before returning. */
+    status_t joinThread();
+
+protected:
+    /* Perform whatever work should be done in the worker thread. A subclass
+     * needs to implement this.
+     * Return:
+     *  true To continue thread loop, or false to exit the thread loop and
+     *  terminate the thread.
+     */
+    virtual bool inWorkerThread() = 0;
+
+    /* This provides an opportunity for a subclass to perform some operation
+     * when the thread starts. This is run on the newly started thread. If this
+     * returns an error the thread will exit and inWorkerThread will never be
+     * called.
+     */
+    virtual status_t onThreadStart() { return NO_ERROR; }
+
+    /* This provides an opportunity for a subclass to perform some operation
+     * when the thread exits. This is run on the worker thread. By default this
+     * does nothing.
+     */
+    virtual void onThreadExit() { }
+
+    /* Containing camera device object. */
+    EmulatedCameraDevice* mCameraDevice;
+    /* The camera HAL from the camera device object */
+    EmulatedCamera* mCameraHAL;
+
+    /* Controls number of times the thread loop runs.
+     * See startThread for more information. */
+    bool mOneBurst;
+
+    /* Running Condition and mutex, use these to sleep the thread, the
+     * supporting functions will use these to signal wakes and exits. */
+    Condition mRunningCondition;
+    Mutex mRunningMutex;
+    bool mRunning;
+private:
+    /* Overriden base class method.
+     * It is overriden in order to provide one-time initialization just
+     * prior to starting the thread routine.
+     */
+    status_t readyToRun() final;
+
+    /* Implements abstract method of the base Thread class. */
+    bool threadLoop() final;
+
+    const char* mThreadName;
+};
+
+}  // namespace android
+
+#endif  // HW_EMULATOR_CAMERA_WORKER_THREAD_H
diff --git a/camera/jpeg-stub/Android.mk b/camera/jpeg-stub/Android.mk
new file mode 100644
index 0000000..eea0ced
--- /dev/null
+++ b/camera/jpeg-stub/Android.mk
@@ -0,0 +1,74 @@
+# Copyright (C) 2016 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# JPEG stub#####################################################################
+
+ifneq ($(TARGET_BUILD_PDK),true)
+
+include $(CLEAR_VARS)
+
+jpeg_module_relative_path := hw
+jpeg_cflags := -fno-short-enums -DQEMU_HARDWARE
+jpeg_cflags += -Wno-unused-parameter
+jpeg_clang_flags += -Wno-c++11-narrowing
+jpeg_shared_libraries := \
+    libcutils \
+    libexif \
+    libjpeg \
+    liblog \
+    libandroid_runtime
+jpeg_c_includes := external/libjpeg-turbo \
+                   external/libexif \
+                   frameworks/native/include
+jpeg_src := \
+    Compressor.cpp \
+    JpegStub.cpp \
+
+
+# goldfish build ###############################################################
+
+LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
+LOCAL_CFLAGS += ${jpeg_cflags}
+LOCAL_CLANG_CFLAGS += ${jpeg_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
+LOCAL_C_INCLUDES += ${jpeg_c_includes}
+LOCAL_SRC_FILES := ${jpeg_src}
+
+LOCAL_MODULE := camera.goldfish.jpeg
+
+include $(BUILD_SHARED_LIBRARY)
+
+# ranchu build #################################################################
+
+include ${CLEAR_VARS}
+
+LOCAL_MODULE := camera.ranchu.jpeg
+
+LOCAL_MODULE_RELATIVE_PATH := ${jpeg_module_relative_path}
+LOCAL_CFLAGS += ${jpeg_cflags}
+LOCAL_CLANG_CFLAGS += ${jpeg_clang_flags}
+
+LOCAL_SHARED_LIBRARIES := ${jpeg_shared_libraries}
+LOCAL_C_INCLUDES += ${jpeg_c_includes}
+LOCAL_SRC_FILES := ${jpeg_src}
+
+include $(BUILD_SHARED_LIBRARY)
+
+endif # !PDK
diff --git a/camera/jpeg-stub/Compressor.cpp b/camera/jpeg-stub/Compressor.cpp
new file mode 100644
index 0000000..76c4a24
--- /dev/null
+++ b/camera/jpeg-stub/Compressor.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Compressor.h"
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_JPEGStub_Compressor"
+#include <cutils/log.h>
+#include <libexif/exif-data.h>
+
+Compressor::Compressor() {
+
+}
+
+bool Compressor::compress(const unsigned char* data,
+                          int width, int height, int quality,
+                          ExifData* exifData) {
+    if (!configureCompressor(width, height, quality)) {
+        // The method will have logged a more detailed error message than we can
+        // provide here so just return.
+        return false;
+    }
+
+    return compressData(data, exifData);
+}
+
+const std::vector<unsigned char>& Compressor::getCompressedData() const {
+    return mDestManager.mBuffer;
+}
+
+bool Compressor::configureCompressor(int width, int height, int quality) {
+    mCompressInfo.err = jpeg_std_error(&mErrorManager);
+    // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+    // The compiler will not generate code to destroy them during the return
+    // below so they will leak. Additionally, do not place any calls to libjpeg
+    // that can fail above this line or any error will cause undefined behavior.
+    if (setjmp(mErrorManager.mJumpBuffer)) {
+        // This is where the error handler will jump in case setup fails
+        // The error manager will ALOG an appropriate error message
+        return false;
+    }
+
+    jpeg_create_compress(&mCompressInfo);
+
+    mCompressInfo.image_width = width;
+    mCompressInfo.image_height = height;
+    mCompressInfo.input_components = 3;
+    mCompressInfo.in_color_space = JCS_YCbCr;
+    jpeg_set_defaults(&mCompressInfo);
+
+    jpeg_set_quality(&mCompressInfo, quality, TRUE);
+    // It may seem weird to set color space here again but this will also set
+    // other fields. These fields might be overwritten by jpeg_set_defaults
+    jpeg_set_colorspace(&mCompressInfo, JCS_YCbCr);
+    mCompressInfo.raw_data_in = TRUE;
+    mCompressInfo.dct_method = JDCT_IFAST;
+    // Set sampling factors
+    mCompressInfo.comp_info[0].h_samp_factor = 2;
+    mCompressInfo.comp_info[0].v_samp_factor = 2;
+    mCompressInfo.comp_info[1].h_samp_factor = 1;
+    mCompressInfo.comp_info[1].v_samp_factor = 1;
+    mCompressInfo.comp_info[2].h_samp_factor = 1;
+    mCompressInfo.comp_info[2].v_samp_factor = 1;
+
+    mCompressInfo.dest = &mDestManager;
+
+    return true;
+}
+
+static void deinterleave(const uint8_t* vuPlanar, std::vector<uint8_t>& uRows,
+                         std::vector<uint8_t>& vRows, int rowIndex, int width,
+                         int height, int stride) {
+    int numRows = (height - rowIndex) / 2;
+    if (numRows > 8) numRows = 8;
+    for (int row = 0; row < numRows; ++row) {
+        int offset = ((rowIndex >> 1) + row) * stride;
+        const uint8_t* vu = vuPlanar + offset;
+        for (int i = 0; i < (width >> 1); ++i) {
+            int index = row * (width >> 1) + i;
+            uRows[index] = vu[1];
+            vRows[index] = vu[0];
+            vu += 2;
+        }
+    }
+}
+
+
+bool Compressor::compressData(const unsigned char* data, ExifData* exifData) {
+    const uint8_t* y[16];
+    const uint8_t* cb[8];
+    const uint8_t* cr[8];
+    const uint8_t** planes[3] = { y, cb, cr };
+
+    int i, offset;
+    int width = mCompressInfo.image_width;
+    int height = mCompressInfo.image_height;
+    const uint8_t* yPlanar = data;
+    const uint8_t* vuPlanar = data + (width * height);
+    std::vector<uint8_t> uRows(8 * (width >> 1));
+    std::vector<uint8_t> vRows(8 * (width >> 1));
+
+    // NOTE! DANGER! Do not construct any non-trivial objects below setjmp!
+    // The compiler will not generate code to destroy them during the return
+    // below so they will leak. Additionally, do not place any calls to libjpeg
+    // that can fail above this line or any error will cause undefined behavior.
+    if (setjmp(mErrorManager.mJumpBuffer)) {
+        // This is where the error handler will jump in case compression fails
+        // The error manager will ALOG an appropriate error message
+        return false;
+    }
+
+    jpeg_start_compress(&mCompressInfo, TRUE);
+
+    attachExifData(exifData);
+
+    // process 16 lines of Y and 8 lines of U/V each time.
+    while (mCompressInfo.next_scanline < mCompressInfo.image_height) {
+        //deinterleave u and v
+        deinterleave(vuPlanar, uRows, vRows, mCompressInfo.next_scanline,
+                     width, height, width);
+
+        // Jpeg library ignores the rows whose indices are greater than height.
+        for (i = 0; i < 16; i++) {
+            // y row
+            y[i] = yPlanar + (mCompressInfo.next_scanline + i) * width;
+
+            // construct u row and v row
+            if ((i & 1) == 0) {
+                // height and width are both halved because of downsampling
+                offset = (i >> 1) * (width >> 1);
+                cb[i/2] = &uRows[offset];
+                cr[i/2] = &vRows[offset];
+            }
+          }
+        jpeg_write_raw_data(&mCompressInfo, const_cast<JSAMPIMAGE>(planes), 16);
+    }
+
+    jpeg_finish_compress(&mCompressInfo);
+    jpeg_destroy_compress(&mCompressInfo);
+
+    return true;
+}
+
+bool Compressor::attachExifData(ExifData* exifData) {
+    if (exifData == nullptr) {
+        // This is not an error, we don't require EXIF data
+        return true;
+    }
+
+    // Save the EXIF data to memory
+    unsigned char* rawData = nullptr;
+    unsigned int size = 0;
+    exif_data_save_data(exifData, &rawData, &size);
+    if (rawData == nullptr) {
+        ALOGE("Failed to create EXIF data block");
+        return false;
+    }
+
+    jpeg_write_marker(&mCompressInfo, JPEG_APP0 + 1, rawData, size);
+    free(rawData);
+    return true;
+}
+
+Compressor::ErrorManager::ErrorManager() {
+    error_exit = &onJpegError;
+}
+
+void Compressor::ErrorManager::onJpegError(j_common_ptr cinfo) {
+    // NOTE! Do not construct any non-trivial objects in this method at the top
+    // scope. Their destructors will not be called. If you do need such an
+    // object create a local scope that does not include the longjmp call,
+    // that ensures the object is destroyed before longjmp is called.
+    ErrorManager* errorManager = reinterpret_cast<ErrorManager*>(cinfo->err);
+
+    // Format and log error message
+    char errorMessage[JMSG_LENGTH_MAX];
+    (*errorManager->format_message)(cinfo, errorMessage);
+    errorMessage[sizeof(errorMessage) - 1] = '\0';
+    ALOGE("JPEG compression error: %s", errorMessage);
+    jpeg_destroy(cinfo);
+
+    // And through the looking glass we go
+    longjmp(errorManager->mJumpBuffer, 1);
+}
+
+Compressor::DestinationManager::DestinationManager() {
+    init_destination = &initDestination;
+    empty_output_buffer = &emptyOutputBuffer;
+    term_destination = &termDestination;
+}
+
+void Compressor::DestinationManager::initDestination(j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Start out with some arbitrary but not too large buffer size
+    manager->mBuffer.resize(16 * 1024);
+    manager->next_output_byte = &manager->mBuffer[0];
+    manager->free_in_buffer = manager->mBuffer.size();
+}
+
+boolean Compressor::DestinationManager::emptyOutputBuffer(
+        j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Keep doubling the size of the buffer for a very low, amortized
+    // performance cost of the allocations
+    size_t oldSize = manager->mBuffer.size();
+    manager->mBuffer.resize(oldSize * 2);
+    manager->next_output_byte = &manager->mBuffer[oldSize];
+    manager->free_in_buffer = manager->mBuffer.size() - oldSize;
+    return manager->free_in_buffer != 0;
+}
+
+void Compressor::DestinationManager::termDestination(j_compress_ptr cinfo) {
+    auto manager = reinterpret_cast<DestinationManager*>(cinfo->dest);
+
+    // Resize down to the exact size of the output, that is remove as many
+    // bytes as there are left in the buffer
+    manager->mBuffer.resize(manager->mBuffer.size() - manager->free_in_buffer);
+}
+
diff --git a/camera/jpeg-stub/Compressor.h b/camera/jpeg-stub/Compressor.h
new file mode 100644
index 0000000..67a6e88
--- /dev/null
+++ b/camera/jpeg-stub/Compressor.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GOLDFISH_CAMERA_JPEG_STUB_COMPRESSOR_H
+#define GOLDFISH_CAMERA_JPEG_STUB_COMPRESSOR_H
+
+#include <setjmp.h>
+#include <stdlib.h>
+extern "C" {
+#include <jpeglib.h>
+#include <jerror.h>
+}
+
+#include <vector>
+
+struct _ExifData;
+typedef _ExifData ExifData;
+
+class Compressor {
+public:
+    Compressor();
+
+    /* Compress |data| which represents raw NV21 encoded data of dimensions
+     * |width| * |height|. |exifData| is optional EXIF data that will be
+     * attached to the compressed data if present, set to null if not needed.
+     */
+    bool compress(const unsigned char* data,
+                  int width, int height, int quality,
+                  ExifData* exifData);
+
+    /* Get a reference to the compressed data, this will return an empty vector
+     * if compress has not been called yet
+     */
+    const std::vector<unsigned char>& getCompressedData() const;
+
+private:
+    struct DestinationManager : jpeg_destination_mgr {
+        DestinationManager();
+
+        static void initDestination(j_compress_ptr cinfo);
+        static boolean emptyOutputBuffer(j_compress_ptr cinfo);
+        static void termDestination(j_compress_ptr cinfo);
+
+        std::vector<unsigned char> mBuffer;
+    };
+    struct ErrorManager : jpeg_error_mgr {
+        ErrorManager();
+
+        static void onJpegError(j_common_ptr cinfo);
+
+        jmp_buf mJumpBuffer;
+    };
+
+    jpeg_compress_struct mCompressInfo;
+    DestinationManager mDestManager;
+    ErrorManager mErrorManager;
+
+    bool configureCompressor(int width, int height, int quality);
+    bool compressData(const unsigned char* data, ExifData* exifData);
+    bool attachExifData(ExifData* exifData);
+};
+
+#endif  // GOLDFISH_CAMERA_JPEG_STUB_COMPRESSOR_H
+
diff --git a/camera/jpeg-stub/JpegStub.cpp b/camera/jpeg-stub/JpegStub.cpp
new file mode 100644
index 0000000..1eef9e2
--- /dev/null
+++ b/camera/jpeg-stub/JpegStub.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "JpegStub.h"
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "EmulatedCamera_JPEGStub"
+#include <errno.h>
+#include <cutils/log.h>
+#include <stdlib.h>
+
+#include "Compressor.h"
+
+extern "C" void JpegStub_init(JpegStub* stub) {
+    stub->mCompressor = static_cast<void*>(new Compressor());
+}
+
+extern "C" void JpegStub_cleanup(JpegStub* stub) {
+    delete reinterpret_cast<Compressor*>(stub->mCompressor);
+    stub->mCompressor = nullptr;
+}
+
+extern "C" int JpegStub_compress(JpegStub* stub,
+                                 const void* buffer,
+                                 int width,
+                                 int height,
+                                 int quality,
+                                 ExifData* exifData)
+{
+    Compressor* compressor = reinterpret_cast<Compressor*>(stub->mCompressor);
+
+    if (compressor->compress(reinterpret_cast<const unsigned char*>(buffer),
+                              width, height, quality, exifData)) {
+        ALOGV("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes",
+              __FUNCTION__, (width * height * 12) / 8,
+              width, height, compressor->getCompressedData().size());
+        return 0;
+    }
+    ALOGE("%s: JPEG compression failed", __FUNCTION__);
+    return errno ? errno : EINVAL;
+}
+
+extern "C" void JpegStub_getCompressedImage(JpegStub* stub, void* buff) {
+    Compressor* compressor = reinterpret_cast<Compressor*>(stub->mCompressor);
+    // data() is well-defined for an empty vector, unlike &data[0] (UB if empty).
+    const std::vector<unsigned char>& data = compressor->getCompressedData();
+    memcpy(buff, data.data(), data.size());
+}
+
+extern "C" size_t JpegStub_getCompressedSize(JpegStub* stub) {
+    Compressor* compressor = reinterpret_cast<Compressor*>(stub->mCompressor);
+
+    return compressor->getCompressedData().size();
+}
diff --git a/camera/JpegStub.h b/camera/jpeg-stub/JpegStub.h
similarity index 71%
rename from camera/JpegStub.h
rename to camera/jpeg-stub/JpegStub.h
index ad00a54..2e62182 100644
--- a/camera/JpegStub.h
+++ b/camera/jpeg-stub/JpegStub.h
@@ -17,17 +17,25 @@
 #ifndef JPEGSTUB_H_
 #define JPEGSTUB_H_
 
+#include <stddef.h>
+
+struct _ExifData;
+typedef _ExifData ExifData;
+
 extern "C" {
 
 struct JpegStub {
-    void* mInternalEncoder;
-    void* mInternalStream;
+    void* mCompressor;
 };
 
-void JpegStub_init(JpegStub* stub, int* strides);
+void JpegStub_init(JpegStub* stub);
 void JpegStub_cleanup(JpegStub* stub);
-int JpegStub_compress(JpegStub* stub, const void* image,
-        int width, int height, int quality);
+int JpegStub_compress(JpegStub* stub,
+                      const void* image,
+                      int width,
+                      int height,
+                      int quality,
+                      ExifData* exifData);
 void JpegStub_getCompressedImage(JpegStub* stub, void* buff);
 size_t JpegStub_getCompressedSize(JpegStub* stub);
 
diff --git a/camera/media_codecs.xml b/camera/media_codecs.xml
index 87d11f2..edead34 100644
--- a/camera/media_codecs.xml
+++ b/camera/media_codecs.xml
@@ -78,6 +78,10 @@
 -->
 
 <MediaCodecs>
+    <Settings>
+        <Setting name="max-video-encoder-input-buffers" value="12" />
+    </Settings>
+
     <Include href="media_codecs_google_audio.xml" />
     <Include href="media_codecs_google_telephony.xml" />
     <Include href="media_codecs_google_video.xml" />
diff --git a/camera/media_codecs_google_video.xml b/camera/media_codecs_google_video.xml
new file mode 100644
index 0000000..1dbd13d
--- /dev/null
+++ b/camera/media_codecs_google_video.xml
@@ -0,0 +1,106 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<Included>
+    <Decoders>
+        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es">
+            <!-- profiles and levels:  ProfileSimple : Level3 -->
+            <Limit name="size" min="2x2" max="352x288" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="12-11880" />
+            <Limit name="bitrate" range="1-384000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp">
+            <!-- profiles and levels:  ProfileBaseline : Level30, ProfileBaseline : Level45
+                    ProfileISWV2 : Level30, ProfileISWV2 : Level45 -->
+            <Limit name="size" min="2x2" max="352x288" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-384000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.decoder" type="video/avc">
+            <!-- profiles and levels:  ProfileHigh : Level41 -->
+            <Limit name="size" min="16x16" max="1920x1088" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-244800" />
+            <Limit name="bitrate" range="1-12000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc">
+            <!-- profiles and levels:  ProfileMain : MainTierLevel51 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="8x8" />
+            <Limit name="block-count" range="1-139264" />
+            <Limit name="blocks-per-second" range="1-2000000" />
+            <Limit name="bitrate" range="1-10000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8">
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-1000000" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9">
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-500000" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="adaptive-playback" />
+        </MediaCodec>
+    </Decoders>
+
+    <Encoders>
+        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp">
+            <!-- profiles and levels:  ProfileBaseline : Level45 -->
+            <Limit name="size" min="176x144" max="176x144" />
+            <Limit name="alignment" value="16x16" />
+            <Limit name="bitrate" range="1-128000" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.encoder" type="video/avc">
+            <!-- profiles and levels:  ProfileBaseline : Level41 -->
+            <Limit name="size" min="16x16" max="1920x1088" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="1-244800" />
+            <!-- Changed range from 12000000 to 20000000 for b/31648354 -->
+            <Limit name="bitrate" range="1-20000000" />
+            <Feature name="intra-refresh" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es">
+            <!-- profiles and levels:  ProfileCore : Level2 -->
+            <Limit name="size" min="16x16" max="176x144" />
+            <Limit name="alignment" value="16x16" />
+            <Limit name="block-size" value="16x16" />
+            <Limit name="blocks-per-second" range="12-1485" />
+            <Limit name="bitrate" range="1-64000" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8">
+            <!-- profiles and levels:  ProfileMain : Level_Version0-3 -->
+            <Limit name="size" min="2x2" max="2048x2048" />
+            <Limit name="alignment" value="2x2" />
+            <Limit name="bitrate" range="1-40000000" />
+            <Feature name="bitrate-modes" value="VBR,CBR" />
+        </MediaCodec>
+    </Encoders>
+</Included>
diff --git a/camera/media_codecs_performance.xml b/camera/media_codecs_performance.xml
new file mode 100644
index 0000000..3d12ceb
--- /dev/null
+++ b/camera/media_codecs_performance.xml
@@ -0,0 +1,109 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!-- Copyright 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<!--
+     This file was generated from running the following tests:
+        module CtsVideoTestCases test android.video.cts.VideoEncoderDecoderTest
+        module CtsMediaTestCases test android.media.cts.VideoDecoderPerfTest
+     System: z840
+     The results were fed through a script similar to get_achievable_rates.py:
+     https://source.android.com/devices/media/oem.html
+-->
+
+<MediaCodecs>
+    <Encoders>
+        <MediaCodec name="OMX.google.h263.encoder" type="video/3gpp" update="true">
+            <!-- 3 runs, min 849 max 1008 gmean 943 -->
+            <Limit name="measured-frame-rate-176x144" range="849-1008" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.encoder" type="video/avc" update="true">
+            <!-- 3 runs, min 496 max 629 gmean 565 -->
+            <Limit name="measured-frame-rate-320x240" range="496-629" />
+            <!-- 2 runs, min 197 max 203 gmean 201 -->
+            <Limit name="measured-frame-rate-720x480" range="197-203" />
+            <!-- 2 runs, min 93 max 97 gmean 95 -->
+            <Limit name="measured-frame-rate-1280x720" range="93-97" />
+            <!-- 2 runs, min 45 max 47 gmean 46 -->
+            <Limit name="measured-frame-rate-1920x1080" range="45-47" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.mpeg4.encoder" type="video/mp4v-es" update="true">
+            <!-- 3 runs, min 881 max 1142 gmean 994 -->
+            <Limit name="measured-frame-rate-176x144" range="881-1142" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.encoder" type="video/x-vnd.on2.vp8" update="true">
+            <!-- 3 runs, min 249 max 285 gmean 264 -->
+            <Limit name="measured-frame-rate-320x180" range="249-285" />
+            <!-- 3 runs, min 104 max 115 gmean 109 -->
+            <Limit name="measured-frame-rate-640x360" range="104-115" />
+            <!-- 3 runs, min 34 max 35 gmean 34 -->
+            <Limit name="measured-frame-rate-1280x720" range="34-35" />
+            <!-- 3 runs, min 26 max 29 gmean 27 -->
+            <Limit name="measured-frame-rate-1920x1080" range="26-29" />
+        </MediaCodec>
+    </Encoders>
+    <Decoders>
+        <MediaCodec name="OMX.google.h263.decoder" type="video/3gpp" update="true">
+            <!-- 3 runs, min 1246 max 1390 gmean 1342 -->
+            <Limit name="measured-frame-rate-176x144" range="1246-1390" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.h264.decoder" type="video/avc" update="true">
+            <!-- 5 runs, min 299 max 629 gmean 567 -->
+            <Limit name="measured-frame-rate-320x240" range="299-629" />
+            <!-- 4 runs, min 215 max 250 gmean 232 -->
+            <Limit name="measured-frame-rate-720x480" range="215-250" />
+            <!-- 4 runs, min 75 max 85 gmean 78 -->
+            <Limit name="measured-frame-rate-1280x720" range="75-85" />
+            <!-- 4 runs, min 31 max 34 gmean 33 -->
+            <Limit name="measured-frame-rate-1920x1080" range="31-34" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.hevc.decoder" type="video/hevc" update="true">
+            <!-- 4 runs, min 754 max 817 gmean 775 -->
+            <Limit name="measured-frame-rate-352x288" range="754-817" />
+            <!-- 4 runs, min 323 max 394 gmean 373 -->
+            <Limit name="measured-frame-rate-640x360" range="323-394" />
+            <!-- 4 runs, min 349 max 372 gmean 358 -->
+            <Limit name="measured-frame-rate-720x480" range="349-372" />
+            <!-- 4 runs, min 144 max 157 gmean 151 -->
+            <Limit name="measured-frame-rate-1280x720" range="144-157" />
+            <!-- 4 runs, min 74 max 85 gmean 80 -->
+            <Limit name="measured-frame-rate-1920x1080" range="74-85" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.mpeg4.decoder" type="video/mp4v-es" update="true">
+            <!-- 4 runs, min 1439 max 1625 gmean 1523 -->
+            <Limit name="measured-frame-rate-176x144" range="1439-1625" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp8.decoder" type="video/x-vnd.on2.vp8" update="true">
+            <!-- 3 runs, min 1129 max 1261 gmean 1190 -->
+            <Limit name="measured-frame-rate-320x180" range="1129-1261" />
+            <!-- 3 runs, min 471 max 525 gmean 504 -->
+            <Limit name="measured-frame-rate-640x360" range="471-525" />
+            <!-- 3 runs, min 126 max 145 gmean 132 -->
+            <Limit name="measured-frame-rate-1280x720" range="126-145" />
+            <!-- 3 runs, min 48 max 51 gmean 49 -->
+            <Limit name="measured-frame-rate-1920x1080" range="48-51" />
+        </MediaCodec>
+        <MediaCodec name="OMX.google.vp9.decoder" type="video/x-vnd.on2.vp9" update="true">
+            <!-- 2 runs, min 968 max 1101 gmean 1044 -->
+            <Limit name="measured-frame-rate-320x180" range="968-1101" />
+            <!-- 3 runs, min 291 max 338 gmean 319 -->
+            <Limit name="measured-frame-rate-640x360" range="291-338" />
+            <!-- 3 runs, min 109 max 128 gmean 118 -->
+            <Limit name="measured-frame-rate-1280x720" range="109-128" />
+            <!-- 5 runs, min 69 max 76 gmean 73 -->
+            <Limit name="measured-frame-rate-1920x1080" range="69-76" />
+        </MediaCodec>
+    </Decoders>
+</MediaCodecs>
diff --git a/camera/media_profiles.xml b/camera/media_profiles.xml
index cd99857..a5970d0 100644
--- a/camera/media_profiles.xml
+++ b/camera/media_profiles.xml
@@ -84,7 +84,7 @@
                    bitRate="128000"
                    width="320"
                    height="240"
-                   frameRate="15" />
+                   frameRate="24" />
             <Audio codec="amrnb"
                    bitRate="12200"
                    sampleRate="8000"
@@ -325,20 +325,20 @@
     -->
     <VideoEncoderCap name="h264" enabled="true"
         minBitRate="64000" maxBitRate="192000"
-        minFrameWidth="176" maxFrameWidth="320"
-        minFrameHeight="144" maxFrameHeight="240"
+        minFrameWidth="176" maxFrameWidth="640"
+        minFrameHeight="144" maxFrameHeight="480"
         minFrameRate="15" maxFrameRate="30" />
 
     <VideoEncoderCap name="h263" enabled="true"
         minBitRate="64000" maxBitRate="192000"
-        minFrameWidth="176" maxFrameWidth="320"
-        minFrameHeight="144" maxFrameHeight="240"
+        minFrameWidth="176" maxFrameWidth="640"
+        minFrameHeight="144" maxFrameHeight="480"
         minFrameRate="15" maxFrameRate="30" />
 
     <VideoEncoderCap name="m4v" enabled="true"
         minBitRate="64000" maxBitRate="192000"
-        minFrameWidth="176" maxFrameWidth="320"
-        minFrameHeight="144" maxFrameHeight="240"
+        minFrameWidth="176" maxFrameWidth="640"
+        minFrameHeight="144" maxFrameHeight="480"
         minFrameRate="15" maxFrameRate="30" />
 
     <AudioEncoderCap name="aac" enabled="true"
diff --git a/data/etc/encryptionkey.img b/data/etc/encryptionkey.img
new file mode 100644
index 0000000..9e0f96a
--- /dev/null
+++ b/data/etc/encryptionkey.img
Binary files differ
diff --git a/data/etc/handheld_core_hardware.xml b/data/etc/handheld_core_hardware.xml
index 7da8ae1..b2aaf13 100644
--- a/data/etc/handheld_core_hardware.xml
+++ b/data/etc/handheld_core_hardware.xml
@@ -22,16 +22,35 @@
      Personal Media Players (PMPs), small tablets (7" or less), and similar
      devices.
 
-     This file is identical to frameworks/native/data/etc/handheld_core_hardware.xml,
-     except the android.hardware.bluetooth
+     This file is similar to frameworks/native/data/etc/handheld_core_hardware.xml.
+     The differences are:
+         'Normal' features that are not available in goldfish:
+             android.hardware.bluetooth
+         Extra features available in goldfish:
+             android.hardware.location.gps
+             android.hardware.sensor.barometer
+             android.hardware.sensor.light
+             android.hardware.sensor.proximity
+             android.hardware.sensor.relative_humidity
+             android.hardware.telephony
+             android.hardware.telephony.gsm
+             android.software.midi
 -->
 <permissions>
     <feature name="android.hardware.audio.output" />
     <feature name="android.hardware.camera" />
     <feature name="android.hardware.location" />
+    <feature name="android.hardware.location.gps" />
     <feature name="android.hardware.location.network" />
-    <feature name="android.hardware.sensor.compass" />
     <feature name="android.hardware.sensor.accelerometer" />
+    <feature name="android.hardware.sensor.ambient_temperature" />
+    <feature name="android.hardware.sensor.compass" />
+    <feature name="android.hardware.sensor.barometer" />
+    <feature name="android.hardware.sensor.light" />
+    <feature name="android.hardware.sensor.proximity" />
+    <feature name="android.hardware.sensor.relative_humidity" />
+    <feature name="android.hardware.telephony" />
+    <feature name="android.hardware.telephony.gsm" />
     <feature name="android.hardware.touchscreen" />
     <feature name="android.hardware.microphone" />
     <feature name="android.hardware.screen.portrait" />
@@ -53,6 +72,9 @@
     <!-- Feature to specify if the device support managed users. -->
     <feature name="android.software.managed_users" />
 
+    <feature name="android.software.picture_in_picture" />
+
+    <feature name="android.software.cts" />
     <!-- devices with GPS must include android.hardware.location.gps.xml -->
     <!-- devices with an autofocus camera and/or flash must include either
          android.hardware.camera.autofocus.xml or
diff --git a/data/etc/permissions/privapp-permissions-goldfish.xml b/data/etc/permissions/privapp-permissions-goldfish.xml
new file mode 100644
index 0000000..7124fb8
--- /dev/null
+++ b/data/etc/permissions/privapp-permissions-goldfish.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  ~ Copyright (C) 2017 The Android Open Source Project
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License
+  -->
+
+<!--
+This XML file declares which signature|privileged permissions should be granted to privileged
+applications on GMS or Google-branded devices.
+It allows additional grants on top of privapp-permissions-platform.xml
+-->
+
+<permissions>
+    <privapp-permissions package="com.android.sdksetup">
+        <permission name="android.permission.BACKUP"/>
+        <permission name="android.permission.WRITE_SECURE_SETTINGS"/>
+    </privapp-permissions>
+    <privapp-permissions package="com.android.dialer">
+        <permission name="android.permission.STATUS_BAR"/>
+    </privapp-permissions>
+</permissions>
\ No newline at end of file
diff --git a/fingerprint/Android.mk b/fingerprint/Android.mk
index f4b76c9..be21c97 100644
--- a/fingerprint/Android.mk
+++ b/fingerprint/Android.mk
@@ -18,6 +18,7 @@
 
 LOCAL_MODULE := fingerprint.goldfish
 LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := fingerprint.c
 LOCAL_SHARED_LIBRARIES := liblog
 
@@ -27,6 +28,7 @@
 
 LOCAL_MODULE := fingerprint.ranchu
 LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := fingerprint.c
 LOCAL_SHARED_LIBRARIES := liblog
 
diff --git a/fingerprint/fingerprint.c b/fingerprint/fingerprint.c
index 278b431..6041005 100644
--- a/fingerprint/fingerprint.c
+++ b/fingerprint/fingerprint.c
@@ -27,22 +27,19 @@
  */
 #define LOG_TAG "FingerprintHal"
 
-#include <cutils/log.h>
-#include <hardware/hardware.h>
-#include <hardware/fingerprint.h>
-#include <system/qemu_pipe.h>
-
 #include <errno.h>
 #include <endian.h>
 #include <inttypes.h>
 #include <malloc.h>
-#include <poll.h>
-#include <stdbool.h>
-#include <stdlib.h>
 #include <string.h>
+#include <cutils/log.h>
+#include <hardware/hardware.h>
+#include <hardware/fingerprint.h>
+#include "qemud.h"
 
+#include <poll.h>
 
-#define FINGERPRINT_LISTEN_SERVICE_NAME "pipe:qemud:fingerprintlisten"
+#define FINGERPRINT_LISTEN_SERVICE_NAME "fingerprintlisten"
 #define FINGERPRINT_FILENAME "emufp.bin"
 #define AUTHENTICATOR_ID_FILENAME "emuauthid.bin"
 #define MAX_COMM_CHARS 128
@@ -420,13 +417,21 @@
     fingerprint_msg_t message = {0, {0}};
     message.type = FINGERPRINT_TEMPLATE_ENUMERATING;
     message.data.enumerated.finger.gid = qdev->group_id;
-    for (int i = 0; i < MAX_NUM_FINGERS; i++) {
-        if (qdev->listener.secureid[i] != 0 ||
-            qdev->listener.fingerid[i] != 0) {
-            template_count--;
-            message.data.enumerated.remaining_templates = template_count;
-            message.data.enumerated.finger.fid = qdev->listener.fingerid[i];
-            qdev->device.notify(&message);
+
+    if(template_count == 0) {
+        message.data.enumerated.remaining_templates = 0;
+        message.data.enumerated.finger.fid = 0;
+        qdev->device.notify(&message);
+    }
+    else {
+        for (int i = 0; i < MAX_NUM_FINGERS; i++) {
+            if (qdev->listener.secureid[i] != 0 ||
+                qdev->listener.fingerid[i] != 0) {
+                template_count--;
+                message.data.enumerated.remaining_templates = template_count;
+                message.data.enumerated.finger.fid = qdev->listener.fingerid[i];
+                qdev->device.notify(&message);
+            }
         }
     }
 
@@ -477,11 +482,11 @@
                 }
             }  // end for (idx < MAX_NUM_FINGERS)
         } while (!listIsEmpty);
+        qdev->listener.state = STATE_IDLE;
+        pthread_mutex_unlock(&qdev->lock);
         msg.type = FINGERPRINT_TEMPLATE_REMOVED;
         msg.data.removed.finger.fid = 0;
         device->notify(&msg);
-        qdev->listener.state = STATE_IDLE;
-        pthread_mutex_unlock(&qdev->lock);
     } else {
         // Delete one fingerprint
         // Look for this finger ID in our table.
@@ -661,8 +666,9 @@
     ALOGD("----------------> %s ----------------->", __FUNCTION__);
     qemu_fingerprint_device_t* qdev = (qemu_fingerprint_device_t*)data;
 
+    int fd = qemud_channel_open(FINGERPRINT_LISTEN_SERVICE_NAME);
     pthread_mutex_lock(&qdev->lock);
-    qdev->qchanfd = qemu_pipe_open(FINGERPRINT_LISTEN_SERVICE_NAME);
+    qdev->qchanfd = fd;
     if (qdev->qchanfd < 0) {
         ALOGE("listener cannot open fingerprint listener service exit");
         pthread_mutex_unlock(&qdev->lock);
@@ -671,9 +677,8 @@
     qdev->listener.state = STATE_IDLE;
     pthread_mutex_unlock(&qdev->lock);
 
-    static const char kListenCmd[] = "listen";
-    size_t kListenCmdSize = sizeof(kListenCmd) - 1U;
-    if (qemu_pipe_frame_send(qdev->qchanfd, kListenCmd, kListenCmdSize) < 0) {
+    const char* cmd = "listen";
+    if (qemud_channel_send(qdev->qchanfd, cmd, strlen(cmd)) < 0) {
         ALOGE("cannot write fingerprint 'listen' to host");
         goto done_quiet;
     }
@@ -728,8 +733,8 @@
         }
 
         // Shouldn't block since we were just notified of a POLLIN event
-        if ((size = qemu_pipe_frame_recv(qdev->qchanfd, buffer,
-                                         sizeof(buffer) - 1)) > 0) {
+        if ((size = qemud_channel_recv(qdev->qchanfd, buffer,
+                                       sizeof(buffer) - 1)) > 0) {
             buffer[size] = '\0';
             if (sscanf(buffer, "on:%d", &fid) == 1) {
                 if (fid > 0 && fid <= MAX_FID_VALUE) {
diff --git a/fstab.ranchu b/fstab.ranchu
index f155c5a..b9eb754 100644
--- a/fstab.ranchu
+++ b/fstab.ranchu
@@ -4,5 +4,5 @@
 # specify MF_CHECK, and must come before any filesystems that do specify MF_CHECK
 /dev/block/vda                                          /system             ext4      ro                                                   wait
 /dev/block/vdb                                          /cache              ext4      noatime,nosuid,nodev,nomblk_io_submit,errors=panic   wait
-/dev/block/vdc                                          /data               ext4      noatime,nosuid,nodev,nomblk_io_submit,errors=panic   wait,check
-/devices/*/block/vdd                                    auto                auto      defaults                                             voldmanaged=sdcard:auto,encryptable=userdata
+/dev/block/vdc                                          /data               ext4      noatime,nosuid,nodev,nomblk_io_submit,errors=panic   wait,check,quota,forceencrypt=/dev/block/vdd
+/devices/*/block/vde                                    auto                auto      defaults                                             voldmanaged=sdcard:auto,encryptable=userdata
diff --git a/gps/Android.mk b/gps/Android.mk
index 0abf1fb..c143981 100644
--- a/gps/Android.mk
+++ b/gps/Android.mk
@@ -27,6 +27,7 @@
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_CFLAGS += -DQEMU_HARDWARE
 LOCAL_SHARED_LIBRARIES := liblog libcutils libhardware
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := gps_qemu.c
 ifeq ($(TARGET_PRODUCT),vbox_x86)
 LOCAL_MODULE := gps.vbox_x86
@@ -41,6 +42,7 @@
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_CFLAGS += -DQEMU_HARDWARE
 LOCAL_SHARED_LIBRARIES := liblog libcutils libhardware
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := gps_qemu.c
 LOCAL_MODULE := gps.ranchu
 
diff --git a/gps/gps_qemu.c b/gps/gps_qemu.c
index 6f077d3..e623418 100644
--- a/gps/gps_qemu.c
+++ b/gps/gps_qemu.c
@@ -34,10 +34,10 @@
 #include <cutils/log.h>
 #include <cutils/sockets.h>
 #include <hardware/gps.h>
-#include <system/qemu_pipe.h>
+#include "qemu_pipe.h"
 
 /* the name of the qemu-controlled pipe */
-#define  QEMU_CHANNEL_NAME  "pipe:qemud:gps"
+#define  QEMU_CHANNEL_NAME  "qemud:gps"
 
 #define  GPS_DEBUG  0
 
diff --git a/hwcomposer/Android.mk b/hwcomposer/Android.mk
new file mode 100644
index 0000000..5f20c58
--- /dev/null
+++ b/hwcomposer/Android.mk
@@ -0,0 +1,66 @@
+#
+# Copyright 2015 The Android Open-Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+emulator_hwcomposer_shared_libraries := \
+    liblog \
+    libutils \
+    libcutils \
+    libEGL \
+    libutils \
+    libhardware \
+    libsync \
+    libui \
+
+emulator_hwcomposer_src_files := \
+    hwcomposer.cpp
+
+emulator_hwcomposer_cflags += \
+    -DLOG_TAG=\"hwcomposer\"
+
+emulator_hwcomposer_c_includes += \
+    system/core/libsync \
+    system/core/libsync/include
+
+emulator_hwcomposer_relative_path := hw
+
+# GOLDFISH BUILD
+LOCAL_SHARED_LIBRARIES := $(emulator_hwcomposer_shared_libraries)
+LOCAL_SRC_FILES := $(emulator_hwcomposer_src_files)
+LOCAL_CFLAGS := $(emulator_hwcomposer_cflags)
+LOCAL_C_INCLUDES := $(emulator_hwcomposer_c_includes)
+LOCAL_MODULE_RELATIVE_PATH := $(emulator_hwcomposer_relative_path)
+
+LOCAL_MODULE := hwcomposer.goldfish
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
+
+# RANCHU BUILD
+include $(CLEAR_VARS)
+
+LOCAL_SHARED_LIBRARIES := $(emulator_hwcomposer_shared_libraries)
+LOCAL_SRC_FILES := $(emulator_hwcomposer_src_files)
+LOCAL_CFLAGS := $(emulator_hwcomposer_cflags)
+LOCAL_C_INCLUDES := $(emulator_hwcomposer_c_includes)
+LOCAL_MODULE_RELATIVE_PATH := $(emulator_hwcomposer_relative_path)
+
+LOCAL_MODULE := hwcomposer.ranchu
+LOCAL_MODULE_TAGS := optional
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/hwcomposer/hwcomposer.cpp b/hwcomposer/hwcomposer.cpp
new file mode 100644
index 0000000..61a3c46
--- /dev/null
+++ b/hwcomposer/hwcomposer.cpp
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include <cutils/log.h>
+#include <hardware/hwcomposer.h>
+#include <sync/sync.h>
+
+struct ranchu_hwc_composer_device_1 {
+    hwc_composer_device_1_t base; // constant after init
+    const hwc_procs_t *procs;     // constant after init
+    pthread_t vsync_thread;       // constant after init
+    int32_t vsync_period_ns;      // constant after init
+    framebuffer_device_t* fbdev;  // constant after init
+
+    pthread_mutex_t vsync_lock;
+    bool vsync_callback_enabled; // protected by this->vsync_lock
+};
+
+static int hwc_prepare(hwc_composer_device_1_t* dev __unused,
+                       size_t numDisplays, hwc_display_contents_1_t** displays) {
+
+    if (!numDisplays || !displays) return 0;
+
+    hwc_display_contents_1_t* contents = displays[HWC_DISPLAY_PRIMARY];
+
+    if (!contents) return 0;
+
+    for (size_t i = 0; i < contents->numHwLayers; i++) {
+    // We do not handle any layers, so set composition type of any non
+    // HWC_FRAMEBUFFER_TARGET layer to to HWC_FRAMEBUFFER.
+        if (contents->hwLayers[i].compositionType == HWC_FRAMEBUFFER_TARGET) {
+            continue;
+        }
+        contents->hwLayers[i].compositionType = HWC_FRAMEBUFFER;
+    }
+    return 0;
+}
+
+static int hwc_set(struct hwc_composer_device_1* dev,size_t numDisplays,
+                   hwc_display_contents_1_t** displays) {
+    struct ranchu_hwc_composer_device_1* pdev = (struct ranchu_hwc_composer_device_1*)dev;
+    if (!numDisplays || !displays) {
+        return 0;
+    }
+
+    hwc_display_contents_1_t* contents = displays[HWC_DISPLAY_PRIMARY];
+
+    int retireFenceFd = -1;
+    int err = 0;
+    for (size_t layer = 0; layer < contents->numHwLayers; layer++) {
+            hwc_layer_1_t* fb_layer = &contents->hwLayers[layer];
+
+        int releaseFenceFd = -1;
+        if (fb_layer->acquireFenceFd > 0) {
+            const int kAcquireWarningMS= 3000;
+            err = sync_wait(fb_layer->acquireFenceFd, kAcquireWarningMS);
+            if (err < 0 && errno == ETIME) {
+                ALOGE("hwcomposer waited on fence %d for %d ms",
+                      fb_layer->acquireFenceFd, kAcquireWarningMS);
+            }
+            close(fb_layer->acquireFenceFd);
+
+            if (fb_layer->compositionType != HWC_FRAMEBUFFER_TARGET) {
+                ALOGE("hwcomposer found acquire fence on layer %d which is not an"
+                      "HWC_FRAMEBUFFER_TARGET layer", layer);
+            }
+
+            releaseFenceFd = dup(fb_layer->acquireFenceFd);
+            fb_layer->acquireFenceFd = -1;
+        }
+
+        if (fb_layer->compositionType != HWC_FRAMEBUFFER_TARGET) {
+            continue;
+        }
+
+        pdev->fbdev->post(pdev->fbdev, fb_layer->handle);
+        fb_layer->releaseFenceFd = releaseFenceFd;
+
+        if (releaseFenceFd > 0) {
+            if (retireFenceFd == -1) {
+                retireFenceFd = dup(releaseFenceFd);
+            } else {
+                int mergedFenceFd = sync_merge("hwc_set retireFence",
+                                               releaseFenceFd, retireFenceFd);
+                close(retireFenceFd);
+                retireFenceFd = mergedFenceFd;
+            }
+        }
+    }
+
+    contents->retireFenceFd = retireFenceFd;
+    return err;
+}
+
+static int hwc_query(struct hwc_composer_device_1* dev, int what, int* value) {
+    struct ranchu_hwc_composer_device_1* pdev =
+            (struct ranchu_hwc_composer_device_1*)dev;
+
+    switch (what) {
+        case HWC_BACKGROUND_LAYER_SUPPORTED:
+            // we do not support the background layer
+            value[0] = 0;
+            break;
+        case HWC_VSYNC_PERIOD:
+            value[0] = pdev->vsync_period_ns;
+            break;
+        default:
+            // unsupported query
+            ALOGE("%s badness unsupported query what=%d", __FUNCTION__, what);
+            return -EINVAL;
+    }
+    return 0;
+}
+
+static int hwc_event_control(struct hwc_composer_device_1* dev, int dpy __unused,
+                             int event, int enabled) {
+    struct ranchu_hwc_composer_device_1* pdev =
+            (struct ranchu_hwc_composer_device_1*)dev;
+    int ret = -EINVAL;
+
+    // enabled can only be 0 or 1
+    if (!(enabled & ~1)) {
+        if (event == HWC_EVENT_VSYNC) {
+            pthread_mutex_lock(&pdev->vsync_lock);
+            pdev->vsync_callback_enabled=enabled;
+            pthread_mutex_unlock(&pdev->vsync_lock);
+            ret = 0;
+        }
+    }
+    return ret;
+}
+
+static int hwc_blank(struct hwc_composer_device_1* dev __unused, int disp,
+                     int blank __unused) {
+    if (disp != HWC_DISPLAY_PRIMARY) {
+        return -EINVAL;
+    }
+    return 0;
+}
+
+static void hwc_dump(hwc_composer_device_1* dev __unused, char* buff __unused,
+                     int buff_len __unused) {
+    // This is run when running dumpsys.
+    // No-op for now.
+}
+
+
+static int hwc_get_display_configs(struct hwc_composer_device_1* dev __unused,
+                                   int disp, uint32_t* configs, size_t* numConfigs) {
+    if (*numConfigs == 0) {
+        return 0;
+    }
+
+    if (disp == HWC_DISPLAY_PRIMARY) {
+        configs[0] = 0;
+        *numConfigs = 1;
+        return 0;
+    }
+
+    return -EINVAL;
+}
+
+
+static int32_t hwc_attribute(struct ranchu_hwc_composer_device_1* pdev,
+                             const uint32_t attribute) {
+    switch(attribute) {
+        case HWC_DISPLAY_VSYNC_PERIOD:
+            return pdev->vsync_period_ns;
+        case HWC_DISPLAY_WIDTH:
+            return pdev->fbdev->width;
+        case HWC_DISPLAY_HEIGHT:
+            return pdev->fbdev->height;
+        case HWC_DISPLAY_DPI_X:
+            return pdev->fbdev->xdpi*1000;
+        case HWC_DISPLAY_DPI_Y:
+            return pdev->fbdev->ydpi*1000;
+        default:
+            ALOGE("unknown display attribute %u", attribute);
+            return -EINVAL;
+    }
+}
+
+static int hwc_get_display_attributes(struct hwc_composer_device_1* dev __unused,
+                                      int disp, uint32_t config __unused,
+                                      const uint32_t* attributes, int32_t* values) {
+
+    struct ranchu_hwc_composer_device_1* pdev = (struct ranchu_hwc_composer_device_1*)dev;
+    for (int i = 0; attributes[i] != HWC_DISPLAY_NO_ATTRIBUTE; i++) {
+        if (disp == HWC_DISPLAY_PRIMARY) {
+            values[i] = hwc_attribute(pdev, attributes[i]);
+        } else {
+            ALOGE("unknown display type %u", disp);
+            return -EINVAL;
+        }
+    }
+
+    return 0;
+}
+
+static int hwc_close(hw_device_t* dev) {
+    struct ranchu_hwc_composer_device_1* pdev = (struct ranchu_hwc_composer_device_1*)dev;
+    pthread_kill(pdev->vsync_thread, SIGTERM);
+    pthread_join(pdev->vsync_thread, NULL);
+    free(dev);
+    return 0;
+}
+
+static void* hwc_vsync_thread(void* data) {
+    struct ranchu_hwc_composer_device_1* pdev = (struct ranchu_hwc_composer_device_1*)data;
+    setpriority(PRIO_PROCESS, 0, HAL_PRIORITY_URGENT_DISPLAY);
+
+    struct timespec rt;
+    if (clock_gettime(CLOCK_MONOTONIC, &rt) == -1) {
+        ALOGE("%s:%d error in vsync thread clock_gettime: %s",
+              __FILE__, __LINE__, strerror(errno));
+    }
+    const int log_interval = 60;
+    int64_t last_logged = rt.tv_sec;
+    int sent = 0;
+    int last_sent = 0;
+    bool vsync_enabled = false;
+    struct timespec wait_time;
+    wait_time.tv_sec = 0;
+    wait_time.tv_nsec = pdev->vsync_period_ns;
+
+    while (true) {
+        int err = nanosleep(&wait_time, NULL);
+        if (err == -1) {
+            if (errno == EINTR) {
+                break;
+            }
+            ALOGE("error in vsync thread: %s", strerror(errno));
+        }
+
+        pthread_mutex_lock(&pdev->vsync_lock);
+        vsync_enabled = pdev->vsync_callback_enabled;
+        pthread_mutex_unlock(&pdev->vsync_lock);
+
+        if (!vsync_enabled) {
+            continue;
+        }
+
+        if (clock_gettime(CLOCK_MONOTONIC, &rt) == -1) {
+            ALOGE("%s:%d error in vsync thread clock_gettime: %s",
+                  __FILE__, __LINE__, strerror(errno));
+        }
+
+        int64_t timestamp = int64_t(rt.tv_sec) * 1e9 + rt.tv_nsec;
+        pdev->procs->vsync(pdev->procs, 0, timestamp);
+        if (rt.tv_sec - last_logged >= log_interval) {
+            ALOGD("hw_composer sent %d syncs in %ds", sent - last_sent, rt.tv_sec - last_logged);
+            last_logged = rt.tv_sec;
+            last_sent = sent;
+        }
+        ++sent;
+    }
+
+    return NULL;
+}
+
+static void hwc_register_procs(struct hwc_composer_device_1* dev,
+                               hwc_procs_t const* procs) {
+    struct ranchu_hwc_composer_device_1* pdev = (struct ranchu_hwc_composer_device_1*)dev;
+    pdev->procs = procs;
+}
+
+static int hwc_open(const struct hw_module_t* module, const char* name,
+                    struct hw_device_t** device) {
+    int ret = 0;
+
+    if (strcmp(name, HWC_HARDWARE_COMPOSER)) {
+        ALOGE("%s called with bad name %s", __FUNCTION__, name);
+        return -EINVAL;
+    }
+
+    ranchu_hwc_composer_device_1 *pdev = new ranchu_hwc_composer_device_1();
+    if (!pdev) {
+        ALOGE("%s failed to allocate dev", __FUNCTION__);
+        return -ENOMEM;
+    }
+
+    pdev->base.common.tag = HARDWARE_DEVICE_TAG;
+    pdev->base.common.version = HWC_DEVICE_API_VERSION_1_1;
+    pdev->base.common.module = const_cast<hw_module_t *>(module);
+    pdev->base.common.close = hwc_close;
+
+    pdev->base.prepare = hwc_prepare;
+    pdev->base.set = hwc_set;
+    pdev->base.eventControl = hwc_event_control;
+    pdev->base.blank = hwc_blank;
+    pdev->base.query = hwc_query;
+    pdev->base.registerProcs = hwc_register_procs;
+    pdev->base.dump = hwc_dump;
+    pdev->base.getDisplayConfigs = hwc_get_display_configs;
+    pdev->base.getDisplayAttributes = hwc_get_display_attributes;
+
+    pdev->vsync_period_ns = 1000*1000*1000/60; // vsync is 60 hz
+
+    hw_module_t const* hw_module;
+    ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &hw_module);
+    if (ret != 0) {
+        ALOGE("ranchu_hw_composer hwc_open %s module not found", GRALLOC_HARDWARE_MODULE_ID);
+        return ret;
+    }
+    ret = framebuffer_open(hw_module, &pdev->fbdev);
+    if (ret != 0) {
+        ALOGE("ranchu_hw_composer hwc_open could not open framebuffer");
+    }
+
+    pthread_mutex_init(&pdev->vsync_lock, NULL);
+    pdev->vsync_callback_enabled = false;
+
+    ret = pthread_create (&pdev->vsync_thread, NULL, hwc_vsync_thread, pdev);
+    if (ret) {
+        ALOGE("ranchu_hw_composer could not start vsync_thread\n");
+    }
+
+    *device = &pdev->base.common;
+
+    return ret;
+}
+
+
+static struct hw_module_methods_t hwc_module_methods = {
+    open: hwc_open,
+};
+
+hwc_module_t HAL_MODULE_INFO_SYM = {
+    common: {
+        tag: HARDWARE_MODULE_TAG,
+        module_api_version: HWC_MODULE_API_VERSION_0_1,
+        hal_api_version: HARDWARE_HAL_API_VERSION,
+        id: HWC_HARDWARE_MODULE_ID,
+        name: "Android Emulator hwcomposer module",
+        author: "The Android Open Source Project",
+        methods: &hwc_module_methods,
+    }
+};
diff --git a/include/qemu.h b/include/qemu.h
new file mode 100644
index 0000000..b5ba463
--- /dev/null
+++ b/include/qemu.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef _libs_hardware_qemu_h
+#define _libs_hardware_qemu_h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* returns 1 iff we're running in the emulator */
+extern int  qemu_check(void);
+
+/* a structure used to hold enough state to connect to a given
+ * QEMU communication channel, either through a qemud socket or
+ * a serial port.
+ *
+ * initialize the structure by zero-ing it out
+ */
+typedef struct {
+    char   is_inited;
+    char   is_available;
+    char   is_qemud;
+    char   is_qemud_old;
+    char   is_tty;
+    int    fd;
+    char   device[32];
+} QemuChannel;
+
+/* try to open a qemu communication channel.
+ * returns a file descriptor on success, or -1 in case of
+ * error.
+ *
+ * 'channel' must be a QemuChannel structure that is empty
+ * on the first call. You can call this function several
+ * time to re-open the channel using the same 'channel'
+ * object to speed things a bit.
+ */
+extern int  qemu_channel_open( QemuChannel*  channel,
+                               const char*   name,
+                               int           mode );
+
+/* create a command made of a 4-hexchar prefix followed
+ * by the content. the prefix contains the content's length
+ * in hexadecimal coding.
+ *
+ * 'buffer' must be at last 6 bytes
+ * returns -1 in case of overflow, or the command's total length
+ * otherwise (i.e. content length + 4)
+ */
+extern int  qemu_command_format( char*        buffer, 
+                                 int          buffer_size,
+                                 const char*  format,
+                                 ... );
+
+/* directly sends a command through the 'hw-control' channel.
+ * this will open the channel, send the formatted command, then
+ * close the channel automatically.
+ * returns 0 on success, or -1 on error.
+ */
+extern int  qemu_control_command( const char*  fmt, ... );
+
+/* sends a question to the hw-control channel, then receive an answer in
+ * a user-allocated buffer. returns the length of the answer, or -1
+ * in case of error.
+ *
+ * 'question' *must* have been formatted through qemu_command_format
+ */
+extern int  qemu_control_query( const char*  question, int  questionlen,
+                                char*        answer,   int  answersize );
+
+/* use QEMU_FALLBACK(call) to call a QEMU-specific callback  */
+/* use QEMU_FALLBACK_VOID(call) if the function returns void */
+#  define  QEMU_FALLBACK(x)  \
+    do { \
+        if (qemu_check()) \
+            return qemu_ ## x ; \
+    } while (0)
+#  define  QEMU_FALLBACK_VOID(x)  \
+    do { \
+        if (qemu_check()) { \
+            qemu_ ## x ; \
+            return; \
+        } \
+    } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _libs_hardware_qemu_h */
diff --git a/include/qemu_pipe.h b/include/qemu_pipe.h
new file mode 100644
index 0000000..d76ba3b
--- /dev/null
+++ b/include/qemu_pipe.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef ANDROID_INCLUDE_HARDWARE_QEMU_PIPE_H
+#define ANDROID_INCLUDE_HARDWARE_QEMU_PIPE_H
+
+#include <sys/cdefs.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>  /* for pthread_once() */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#ifndef D
+#  define  D(...)   do{}while(0)
+#endif
+
+static bool ReadFully(int fd, void* data, size_t byte_count) {
+  uint8_t* p = (uint8_t*)(data);
+  size_t remaining = byte_count;
+  while (remaining > 0) {
+    ssize_t n = TEMP_FAILURE_RETRY(read(fd, p, remaining));
+    if (n <= 0) return false;
+    p += n;
+    remaining -= n;
+  }
+  return true;
+}
+
+static bool WriteFully(int fd, const void* data, size_t byte_count) {
+  const uint8_t* p = (const uint8_t*)(data);
+  size_t remaining = byte_count;
+  while (remaining > 0) {
+    ssize_t n = TEMP_FAILURE_RETRY(write(fd, p, remaining));
+    if (n == -1) return false;
+    p += n;
+    remaining -= n;
+  }
+  return true;
+}
+
+/* Try to open a new Qemu fast-pipe. This function returns a file descriptor
+ * that can be used to communicate with a named service managed by the
+ * emulator.
+ *
+ * This file descriptor can be used as a standard pipe/socket descriptor.
+ *
+ * 'pipeName' is the name of the emulator service you want to connect to.
+ * E.g. 'opengles' or 'camera'.
+ *
+ * On success, return a valid file descriptor
+ * Returns -1 on error, and errno gives the error code, e.g.:
+ *
+ *    EINVAL  -> unknown/unsupported pipeName
+ *    ENOSYS  -> fast pipes not available in this system.
+ *
+ * ENOSYS should never happen, except if you're trying to run within a
+ * misconfigured emulator.
+ *
+ * You should be able to open several pipes to the same pipe service,
+ * except for a few special cases (e.g. GSM modem), where EBUSY will be
+ * returned if more than one client tries to connect to it.
+ */
+static __inline__ int
+qemu_pipe_open(const char*  pipeName)
+{
+    char  buff[256];
+    int   buffLen;
+    int   fd, ret;
+
+    if (pipeName == NULL || pipeName[0] == '\0') {
+        errno = EINVAL;
+        return -1;
+    }
+
+    snprintf(buff, sizeof buff, "pipe:%s", pipeName);
+
+    fd = TEMP_FAILURE_RETRY(open("/dev/qemu_pipe", O_RDWR));
+    if (fd < 0 && errno == ENOENT)
+        fd = TEMP_FAILURE_RETRY(open("/dev/goldfish_pipe", O_RDWR));
+    if (fd < 0) {
+        D("%s: Could not open /dev/qemu_pipe: %s", __FUNCTION__, strerror(errno));
+        //errno = ENOSYS;
+        return -1;
+    }
+
+    buffLen = strlen(buff);
+
+    if (!WriteFully(fd, buff, buffLen + 1)) {
+        D("%s: Could not connect to %s pipe service: %s", __FUNCTION__, pipeName, strerror(errno));
+        return -1;
+    }
+
+    return fd;
+}
+
+#endif /* ANDROID_INCLUDE_HARDWARE_QEMU_PIPE_H */
diff --git a/include/qemud.h b/include/qemud.h
new file mode 100644
index 0000000..71ec3bf
--- /dev/null
+++ b/include/qemud.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_INCLUDE_HARDWARE_QEMUD_H
+#define ANDROID_INCLUDE_HARDWARE_QEMUD_H
+
+#include <cutils/sockets.h>
+#include "qemu_pipe.h"
+
+/* the following is helper code that is used by the QEMU-specific
+ * hardware HAL modules to communicate with the emulator program
+ * through the 'qemud' multiplexing daemon, or through the qemud
+ * pipe.
+ *
+ * see the documentation comments for details in
+ * development/emulator/qemud/qemud.c
+ *
+ * all definitions here are built into the HAL module to avoid
+ * having to write a tiny shared library for this.
+ */
+
+/* we expect the D macro to be defined to a function macro
+ * that sends its formatted string argument(s) to the log.
+ * If not, ignore the traces.
+ */
+#  define  D(...) ((void)0)
+
+static __inline__ int
+qemud_channel_open(const char*  name)
+{
+    int  fd;
+    int  namelen = strlen(name);
+    char answer[2];
+    char pipe_name[256];
+
+    /* First, try to connect to the pipe. */
+    snprintf(pipe_name, sizeof(pipe_name), "qemud:%s", name);
+    fd = qemu_pipe_open(pipe_name);
+    D("%s: pipe name %s (name %s) fd %d", __FUNCTION__, pipe_name, name, fd);
+    if (fd < 0) {
+        D("QEMUD pipe is not available for %s: %s", name, strerror(errno));
+        /* If pipe is not available, connect to qemud control socket */
+        fd = socket_local_client( "qemud",
+                                  ANDROID_SOCKET_NAMESPACE_RESERVED,
+                                  SOCK_STREAM );
+        if (fd < 0) {
+            D("no qemud control socket: %s", strerror(errno));
+            return -1;
+        }
+
+        /* send service name to connect */
+        if (!WriteFully(fd, name, namelen)) {
+            D("can't send service name to qemud: %s",
+               strerror(errno));
+            close(fd);
+            return -1;
+        }
+
+        /* read answer from daemon */
+        if (!ReadFully(fd, answer, 2) ||
+            answer[0] != 'O' || answer[1] != 'K') {
+            D("cant' connect to %s service through qemud", name);
+            close(fd);
+            return -1;
+        }
+    }
+    return fd;
+}
+
+static __inline__ int
+qemud_channel_send(int  fd, const void*  msg, int  msglen)
+{
+    char  header[5];
+
+    if (msglen < 0)
+        msglen = strlen((const char*)msg);
+
+    if (msglen == 0)
+        return 0;
+
+    snprintf(header, sizeof header, "%04x", msglen);
+    if (!WriteFully(fd, header, 4)) {
+        D("can't write qemud frame header: %s", strerror(errno));
+        return -1;
+    }
+
+    if (!WriteFully(fd, msg, msglen)) {
+        D("can4t write qemud frame payload: %s", strerror(errno));
+        return -1;
+    }
+    return 0;
+}
+
+static __inline__ int
+qemud_channel_recv(int  fd, void*  msg, int  msgsize)
+{
+    char  header[5];
+    int   size, avail;
+
+    if (!ReadFully(fd, header, 4)) {
+        D("can't read qemud frame header: %s", strerror(errno));
+        return -1;
+    }
+    header[4] = 0;
+    if (sscanf(header, "%04x", &size) != 1) {
+        D("malformed qemud frame header: '%.*s'", 4, header);
+        return -1;
+    }
+    if (size > msgsize)
+        return -1;
+
+    if (!ReadFully(fd, msg, size)) {
+        D("can't read qemud frame payload: %s", strerror(errno));
+        return -1;
+    }
+    return size;
+}
+
+#endif /* ANDROID_INCLUDE_HARDWARE_QEMUD_H */
diff --git a/init.goldfish.rc b/init.goldfish.rc
index ae8aeaf..b1613a4 100644
--- a/init.goldfish.rc
+++ b/init.goldfish.rc
@@ -1,18 +1,19 @@
 
 on early-init
-    mount debugfs debugfs /sys/kernel/debug
+    mount debugfs debugfs /sys/kernel/debug mode=755
 
 on init
 
-    symlink /dev/goldfish_pipe /dev/android_pipe
-    symlink /dev/goldfish_pipe /dev/qemu_pipe
-
 on boot
     setprop ARGH ARGH
     setprop net.eth0.gw 10.0.2.2
     setprop net.eth0.dns1 10.0.2.3
     setprop net.dns1 10.0.2.3
     setprop net.gprs.local-ip 10.0.2.15
+    setprop persist.adb.notify 1
+    setprop persist.sys.usb.config adb
+    setprop qemu.adb.secure 0
+    setprop ro.adb.secure 1
     setprop ro.radio.use-ppp no
     setprop ro.build.product generic
     setprop ro.product.device generic
@@ -41,7 +42,7 @@
     stop akmd
 
 # start essential services
-    setprop rild.libpath libreference-ril.so
+    start qemud
     start goldfish-logcat
     start goldfish-setup
 
@@ -67,9 +68,15 @@
 on fs
         mount_all /fstab.goldfish
 
+#emulator is not much useful before boot complete
+#start it later
+on property:sys.boot_completed=1
+    setprop sys.usb.config adb
+    start adbd
+
 service goldfish-setup /system/etc/init.goldfish.sh
     user root
-    group root wakelock
+    group root
     oneshot
 
 # The qemu-props program is used to set various system
@@ -84,6 +91,10 @@
     group root
     oneshot
 
+service qemud /system/bin/qemud
+    socket qemud    stream 666
+    oneshot
+
 # -Q is a special logcat option that forces the
 # program to check wether it runs on the emulator
 # if it does, it redirects its output to the device
@@ -99,3 +110,10 @@
 service fingerprintd /system/bin/fingerprintd
     class late_start
     user system
+
+service bugreport /system/bin/dumpstate -d -p -B \
+        -o /data/data/com.android.shell/files/bugreports/bugreport
+    class main
+    disabled
+    oneshot
+    keycodes 114 115 116
diff --git a/init.goldfish.sh b/init.goldfish.sh
index 459ed3b..412cd50 100755
--- a/init.goldfish.sh
+++ b/init.goldfish.sh
@@ -4,6 +4,38 @@
 ifconfig eth0 10.0.2.15 netmask 255.255.255.0 up
 route add default gw 10.0.2.2 dev eth0
 
+# ro.kernel.android.qemud is normally set when we
+# want the RIL (radio interface layer) to talk to
+# the emulated modem through qemud.
+#
+# However, this will be undefined in two cases:
+#
+# - When we want the RIL to talk directly to a guest
+#   serial device that is connected to a host serial
+#   device by the emulator.
+#
+# - We don't want to use the RIL but the VM-based
+#   modem emulation that runs inside the guest system
+#   instead.
+#
+# The following detects the latter case and sets up the
+# system for it.
+#
+qemud=`getprop ro.kernel.android.qemud`
+case "$qemud" in
+    "")
+    radio_ril=`getprop ro.kernel.android.ril`
+    case "$radio_ril" in
+        "")
+        # no need for the radio interface daemon
+        # telephony is entirely emulated in Java
+        setprop ro.radio.noril yes
+        stop ril-daemon
+        ;;
+    esac
+    ;;
+esac
+
 # Setup additionnal DNS servers if needed
 num_dns=`getprop ro.kernel.ndns`
 case "$num_dns" in
@@ -34,3 +66,6 @@
     *) ifconfig eth1 "$my_ip" netmask 255.255.255.0 up
     ;;
 esac
+
+# take the wake lock
+echo "emulator_wake_lock" > /sys/power/wake_lock
diff --git a/init.ranchu-core.sh b/init.ranchu-core.sh
new file mode 100755
index 0000000..fd21a37
--- /dev/null
+++ b/init.ranchu-core.sh
@@ -0,0 +1,46 @@
+#!/system/bin/sh
+
+
+# ro.kernel.android.qemud is normally set when we
+# want the RIL (radio interface layer) to talk to
+# the emulated modem through qemud.
+#
+# However, this will be undefined in two cases:
+#
+# - When we want the RIL to talk directly to a guest
+#   serial device that is connected to a host serial
+#   device by the emulator.
+#
+# - We don't want to use the RIL but the VM-based
+#   modem emulation that runs inside the guest system
+#   instead.
+#
+# The following detects the latter case and sets up the
+# system for it.
+#
+qemud=`getprop ro.kernel.android.qemud`
+case "$qemud" in
+    "")
+    radio_ril=`getprop ro.kernel.android.ril`
+    case "$radio_ril" in
+        "")
+        # no need for the radio interface daemon
+        # telephony is entirely emulated in Java
+        setprop ro.radio.noril yes
+        stop ril-daemon
+        ;;
+    esac
+    ;;
+esac
+
+
+# disable boot animation for a faster boot sequence when needed
+boot_anim=`getprop ro.kernel.android.bootanim`
+case "$boot_anim" in
+    0)  setprop debug.sf.nobootanimation 1
+    ;;
+esac
+
+
+# take the wake lock
+echo "emulator_wake_lock" > /sys/power/wake_lock
diff --git a/init.ranchu-net.sh b/init.ranchu-net.sh
new file mode 100755
index 0000000..e4347d3
--- /dev/null
+++ b/init.ranchu-net.sh
@@ -0,0 +1,32 @@
+#!/system/bin/sh
+
+# Setup networking when boot starts
+ifconfig eth0 10.0.2.15 netmask 255.255.255.0 up
+route add default gw 10.0.2.2 dev eth0
+
+
+# Setup additional DNS servers if needed
+num_dns=`getprop ro.kernel.ndns`
+case "$num_dns" in
+    2) setprop net.eth0.dns2 10.0.2.4
+       ;;
+    3) setprop net.eth0.dns2 10.0.2.4
+       setprop net.eth0.dns3 10.0.2.5
+       ;;
+    4) setprop net.eth0.dns2 10.0.2.4
+       setprop net.eth0.dns3 10.0.2.5
+       setprop net.eth0.dns4 10.0.2.6
+       ;;
+esac
+
+
+# set up the second interface (for inter-emulator connections)
+# if required
+my_ip=`getprop net.shared_net_ip`
+case "$my_ip" in
+    "")
+    ;;
+    *) ifconfig eth1 "$my_ip" netmask 255.255.255.0 up
+    ;;
+esac
+
diff --git a/init.ranchu.rc b/init.ranchu.rc
index c4a77fa..c9abe1d 100644
--- a/init.ranchu.rc
+++ b/init.ranchu.rc
@@ -2,13 +2,16 @@
     mount_all /fstab.ranchu
 
 on early-init
-    mount debugfs debugfs /sys/kernel/debug
+    mount debugfs debugfs /sys/kernel/debug mode=755
 
 on init
 
     symlink /dev/goldfish_pipe /dev/android_pipe
     symlink /dev/goldfish_pipe /dev/qemu_pipe
 
+on post-fs-data
+    setprop vold.post_fs_data_done 1
+
 on boot
     setprop net.eth0.gw 10.0.2.2
     setprop net.eth0.dns1 10.0.2.3
@@ -17,6 +20,7 @@
     setprop ro.build.product generic
     setprop ro.product.device generic
     setprop ro.hardware.audio.primary goldfish
+    setprop ro.setupwizard.mode EMULATOR
 
 # fake some battery state
     setprop status.battery.state Slow
@@ -42,17 +46,39 @@
     stop akmd
 
 # start essential services
-    setprop rild.libpath libreference-ril.so
-    # These were written for the classic emulator, but are applicable to ranchu
+# These were written for the classic emulator, but are applicable to ranchu
     start goldfish-logcat
-    start goldfish-setup
+#    start goldfish-setup
 
 
 # enable Google-specific location features,
 # like NetworkLocationProvider and LocationCollector
     setprop ro.com.google.locationfeatures 1
 
-service goldfish-setup /system/etc/init.goldfish.sh
+#emulator is not much useful before boot complete
+#start it later
+on property:sys.boot_completed=1
+    setprop sys.usb.config adb
+    start adbd
+    start goldfish-logcat
+
+on property:qemu.adbd=start
+    setprop sys.usb.config adb
+    start adbd
+    start goldfish-logcat
+
+# allow goldfish-setup to take wake lock
+    chown root system /sys/power/wake_lock
+    chown root system /sys/power/wake_unlock
+
+service ranchu-setup /system/bin/init.ranchu-core.sh
+    class core
+    user root
+    group root
+    oneshot
+
+service ranchu-net /system/bin/init.ranchu-net.sh
+    class late_start
     user root
     group root wakelock
     oneshot
@@ -69,6 +95,9 @@
     group root
     oneshot
 
+on property:qemu.logcat=start
+    start goldfish-logcat
+
 # -Q is a special logcat option that forces the
 # program to check wether it runs on the emulator
 # if it does, it redirects its output to the device
@@ -84,3 +113,10 @@
 service fingerprintd /system/bin/fingerprintd
     class late_start
     user system
+
+service bugreport /system/bin/dumpstate -d -p -B \
+        -o /data/data/com.android.shell/files/bugreports/bugreport
+    class main
+    disabled
+    oneshot
+    keycodes 114 115 116
diff --git a/libqemu/test_util.h b/libqemu/test_util.h
index 28e5115..60caf88 100644
--- a/libqemu/test_util.h
+++ b/libqemu/test_util.h
@@ -17,7 +17,7 @@
 #define TEST_UTIL_H
 
 #include <stddef.h>
-#include <hardware/qemu_pipe.h>
+#include "qemu_pipe.h"
 
 
 double now_secs(void);
diff --git a/lights/Android.mk b/lights/Android.mk
index fa783a9..dadcabc 100644
--- a/lights/Android.mk
+++ b/lights/Android.mk
@@ -19,6 +19,8 @@
 include $(CLEAR_VARS)
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INC := lights_qemu.c
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := lights_qemu.c
 LOCAL_MODULE := lights.goldfish
 LOCAL_CFLAGS += -DLIGHT_BACKLIGHT
diff --git a/lights/lights_qemu.c b/lights/lights_qemu.c
index 3628588..1c408c1 100644
--- a/lights/lights_qemu.c
+++ b/lights/lights_qemu.c
@@ -26,6 +26,21 @@
 #define LOG_TAG "Lights"
 #endif
 
+/* we connect with the emulator through the "hw-control" qemud service */
+#define  LIGHTS_SERVICE_NAME "hw-control"
+
+#include <cutils/log.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <hardware/lights.h>
+#include "qemud.h"
+
 /* Set to 1 to enable debug messages to the log */
 #define DEBUG 0
 #if DEBUG
@@ -36,26 +51,6 @@
 
 #define  E(...)  ALOGE(__VA_ARGS__)
 
-/* we connect with the emulator through the "hw-control" qemud service */
-#define  LIGHTS_SERVICE_NAME "pipe:qemud:hw-control"
-
-#include <cutils/log.h>
-
-#define DEBUG_QEMU_PIPE D
-#include <system/qemu_pipe.h>
-
-#include <hardware/lights.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <stdlib.h>
-
 /* Get brightness(0~255) from state. */
 static int
 rgb_to_brightness( struct light_state_t const* state )
@@ -67,13 +62,13 @@
 
 /* set backlight brightness by LIGHTS_SERVICE_NAME service. */
 static int
-set_light_backlight( struct light_device_t* __unused dev, struct light_state_t const* state )
+set_light_backlight( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* Get Lights service. */
-    int  fd = qemu_pipe_open(LIGHTS_SERVICE_NAME);
+    int  fd = qemud_channel_open( LIGHTS_SERVICE_NAME );
 
     if (fd < 0) {
-        E( "%s: no qemu pipe connection", __FUNCTION__ );
+        E( "%s: no qemud connection", __FUNCTION__ );
         return -1;
     }
 
@@ -92,7 +87,7 @@
     D( "%s: lcd_backlight command: %s", __FUNCTION__, buffer );
 
     /* send backlight command to perform the backlight setting. */
-    if (qemu_pipe_frame_send(fd, buffer, strlen(buffer)) < 0) {
+    if (qemud_channel_send( fd, buffer, -1 ) < 0) {
         E( "%s: could not query lcd_backlight: %s", __FUNCTION__, strerror(errno) );
         close( fd );
         return -1;
@@ -103,8 +98,7 @@
 }
 
 static int
-set_light_buttons( struct light_device_t* __unused dev,
-                   struct light_state_t const* __unused state )
+set_light_buttons( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* @Waiting for later implementation. */
     D( "%s: Not implemented.", __FUNCTION__ );
@@ -113,8 +107,7 @@
 }
 
 static int
-set_light_battery( struct light_device_t* __unused dev,
-                   struct light_state_t const* __unused state )
+set_light_battery( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* @Waiting for later implementation. */
     D( "%s: Not implemented.", __FUNCTION__ );
@@ -123,8 +116,7 @@
 }
 
 static int
-set_light_keyboard( struct light_device_t* __unused dev,
-                    struct light_state_t const* __unused state )
+set_light_keyboard( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* @Waiting for later implementation. */
     D( "%s: Not implemented.", __FUNCTION__ );
@@ -133,8 +125,7 @@
 }
 
 static int
-set_light_notifications( struct light_device_t* __unused dev,
-                         struct light_state_t const* __unused state )
+set_light_notifications( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* @Waiting for later implementation. */
     D( "%s: Not implemented.", __FUNCTION__ );
@@ -143,8 +134,7 @@
 }
 
 static int
-set_light_attention( struct light_device_t* __unused dev,
-                     struct light_state_t const* __unused state )
+set_light_attention( struct light_device_t* dev, struct light_state_t const* state )
 {
     /* @Waiting for later implementation. */
     D( "%s: Not implemented.", __FUNCTION__ );
diff --git a/manifest.xml b/manifest.xml
new file mode 100644
index 0000000..7d359a0
--- /dev/null
+++ b/manifest.xml
@@ -0,0 +1,35 @@
+<manifest version="1.0" type="device">
+    <hal format="hidl">
+        <name>android.hardware.drm</name>
+        <transport arch="32">passthrough</transport>
+        <impl level="generic"></impl>
+        <version>1.0</version>
+        <!-- TODO(b/36371166): change to default -->
+        <interface>
+            <name>ICryptoFactory</name>
+            <instance>crypto</instance>
+        </interface>
+        <interface>
+            <name>IDrmFactory</name>
+            <instance>drm</instance>
+        </interface>
+    </hal>
+    <hal format="hidl">
+        <name>android.hardware.graphics.allocator</name>
+        <transport>hwbinder</transport>
+        <version>2.0</version>
+        <interface>
+            <name>IAllocator</name>
+            <instance>default</instance>
+        </interface>
+    </hal>
+    <hal format="hidl">
+        <name>android.hardware.power</name>
+        <transport>hwbinder</transport>
+        <version>1.0</version>
+        <interface>
+            <name>IPower</name>
+            <instance>default</instance>
+        </interface>
+    </hal>
+</manifest>
diff --git a/overlay/frameworks/base/core/res/res/values/config.xml b/overlay/frameworks/base/core/res/res/values/config.xml
new file mode 100644
index 0000000..fb7c56f
--- /dev/null
+++ b/overlay/frameworks/base/core/res/res/values/config.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/*
+** Copyright 2017, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+-->
+
+<resources xmlns:xliff="urn:oasis:names:tc:xliff:document:1.2">
+    <!-- Make this 'true' to allow the Emulator to control
+         the state of the headphone/microphone jack -->
+    <bool name="config_useDevInputEventForAudioJack">true</bool>
+
+    <!--  Maximum number of supported users -->
+    <integer name="config_multiuserMaximumUsers">4</integer>
+    <!--  Whether Multiuser UI should be shown -->
+    <bool name="config_enableMultiUserUI">true</bool>
+
+    <!-- Set to true to add links to Cell Broadcast app from Settings and MMS app. -->
+    <bool name="config_cellBroadcastAppLinks">true</bool>
+
+</resources>
diff --git a/overlay/frameworks/base/core/res/res/xml/config_webview_packages.xml b/overlay/frameworks/base/core/res/res/xml/config_webview_packages.xml
new file mode 100644
index 0000000..883d660
--- /dev/null
+++ b/overlay/frameworks/base/core/res/res/xml/config_webview_packages.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2015 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<!-- WebView providers in order of preference -->
+<!-- Signature checks are ignored if the package is preinstalled or if this is a userdebug or eng
+    build. If no signature is provided, the package can provide a WebView implementation only on userdebug/eng builds or if it's preinstalled -->
+<webviewproviders>
+    <webviewprovider description="Chrome Stable" packageName="com.android.chrome" availableByDefault="true">
+        <!-- Ignore this package on user/release builds unless preinstalled. -->
+    </webviewprovider>
+    <webviewprovider description="Google WebView" packageName="com.google.android.webview" availableByDefault="true" isFallback="true">
+        <!-- Ignore this package on user/release builds unless preinstalled. -->
+    </webviewprovider>
+    <webviewprovider description="Chrome Beta" packageName="com.chrome.beta">
+        <signature>MIIDwzCCAqugAwIBAgIJAOoj9MXoVhH6MA0GCSqGSIb3DQEBBQUAMHgxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKDAtHb29nbGUgSW5jLjEQMA4GA1UECwwHQW5kcm9pZDEUMBIGA1UEAwwLY2hyb21lX2JldGEwHhcNMTYwMjI5MTUxNTIzWhcNNDMwNzE3MTUxNTIzWjB4MQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEUMBIGA1UECgwLR29vZ2xlIEluYy4xEDAOBgNVBAsMB0FuZHJvaWQxFDASBgNVBAMMC2Nocm9tZV9iZXRhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAo/wW27nRxVqGbFOyXr8jtv2pc2Ke8XMr6Sfs+3JK2licVaAljGFpLtWH4wUdb50w/QQSPALNLSSyuK/94rtp5Jjs4RSJI+whuewV/R6El+mFXBO3Ek5/op4UrOsR91IM4emvS67Ji2u8gp5EmttVgJtllFZCbtZLPmKuTaOkOB+EdWIxrYiHVEEaAcQpEHa9UgWUZ0bMfPj8j3F0w+Ak2ttmTjoFGLaZjuBAYwfdctN1b0sdLT9Lif45kMCb8QwPp0F9/ozs0rrTc+I6vnTS8kfFQfk7GIE4Hgm+cYQEHkIA6gLJxUVWvPZGdulAZw7wPt/neOkazHNZPcV4pYuNLQIDAQABo1AwTjAdBgNVHQ4EFgQU5t7dhcZfOSixRsiJ1E46JhzPlwowHwYDVR0jBBgwFoAU5t7dhcZfOSixRsiJ1E46JhzPlwowDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAZO2jB8P1d8ki3KZILvp27a2VM3DInlp8I8UgG3gh7nBQfTrnZr5M1PL8eFHqX7MEvAiGCMTcrPklEhjtcHK/c7BcdeCWq6oL56UK3JTl33RxJcjmjrz3e3VI6ehRSm1feNAkMD0Nr2RWr2LCYheAEmwTPtluLOJS+i7WhnXJzBtg5UpUFEbdFYenqUbDzya+cUVp0197k7hUTs8/Hxs0wf79o/TZXzTBq9eYQkiITonRN8+5QCBl1XmZKV0IHkzGFES1RP+fTiZpIjZT+W4tasHgs9QTTks4CCpyHBAy+uy7tApe1AxCzihgecCfUN1hWIltKwGZS6EE0bu0OXPzaQ==</signature>
+    </webviewprovider>
+    <webviewprovider description="Chrome Dev" packageName="com.chrome.dev">
+        <signature>MIIDwTCCAqmgAwIBAgIJAOSN+O0cdii5MA0GCSqGSIb3DQEBBQUAMHcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKDAtHb29nbGUgSW5jLjEQMA4GA1UECwwHQW5kcm9pZDETMBEGA1UEAwwKY2hyb21lX2RldjAeFw0xNjAyMjkxNzUwMDdaFw00MzA3MTcxNzUwMDdaMHcxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKDAtHb29nbGUgSW5jLjEQMA4GA1UECwwHQW5kcm9pZDETMBEGA1UEAwwKY2hyb21lX2RldjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOYPj6Y9rVt8xizSHDYjDEkDfFZAgSiZ9T6tevkQXsFyfaq3Gk3h2qssi29G6cTPJ2VXFKlVB71wSXv5p9/LEcDQPWQiO3Q2cLmgUXxyhJWXI3g96tPAhZQX2q6SC37ZQdiBR/raMO70DAkvCyBGtNplsvutzSE3oZ7LYfzB8vTbe7zCh3fDYSS/7xb3ZVvFqydHS40uVq1qqg1S80Pge7tW3pDGsPMZN7yA4yfmsvA1rbHm9N8t3Rc9hqzh6OxNAAgRB535YcsWL7iF+mpdFILXk3jLYT0nMvMnB83rsdgnRREjlGQYHl2mh8+6CqujsW/eICDq/LR6BYDyqHhk0ECAwEAAaNQME4wHQYDVR0OBBYEFKzsl07JglgpbeYDYGqsgqRDo+01MB8GA1UdIwQYMBaAFKzsl07JglgpbeYDYGqsgqRDo+01MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBACka6SFF6xAcj8L8O6R36++E09DTiGZEjvKT8eIycgcQQ+p1WUmPb6M2EJpN6zvvSE62ussmXdzf8rIyc0JXA8jbViZt62Y39epNENFxPTLN9QzXlT+w8AW73Ka3cnbOuL5EgoDl8fM79WVlARY3X+wB/jGNrkiGIdRm2IZIeAodWgC2mtXMiferyYBKz2/F2bhnU6DwgCbegS8trFjEWviijWdJ+lBdobn7LRc3orZCtHl8UyvRDi7cye3sK9y3BM39k0g20F21wTNHAonnvL6zbuNgpd+UEsVxDpOeWrEdBFN7Md0CI2wnu8eA8ljJD45v0WWMEoxsIi131g5piNM=</signature>
+    </webviewprovider>
+    <webviewprovider description="Chrome Canary" packageName="com.chrome.canary">
+        <signature>MIIDxzCCAq+gAwIBAgIJAML7APITsgV7MA0GCSqGSIb3DQEBBQUAMHoxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKDAtHb29nbGUgSW5jLjEQMA4GA1UECwwHQW5kcm9pZDEWMBQGA1UEAwwNY2hyb21lX2NhbmFyeTAeFw0xNjAyMjkxOTA5MDdaFw00MzA3MTcxOTA5MDdaMHoxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRYwFAYDVQQHDA1Nb3VudGFpbiBWaWV3MRQwEgYDVQQKDAtHb29nbGUgSW5jLjEQMA4GA1UECwwHQW5kcm9pZDEWMBQGA1UEAwwNY2hyb21lX2NhbmFyeTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANXfeAoZlr0ya1HBzIfAz/nLLjpPJeAPvuX5dueaxmiQgv2hNG22acriFuiiJI6TU0t8AIVJD5Ifbc4OOuA0zeFhdzWWGnmTRH6x27WI7bzOKnAqOvv21ZBmE9i8Vo++K13xWdTs3qVn1bn9oUONxFu0wKDzXYZhoj1Jom0RZGjXm16xuPlEuOzMcjiNBDoYuxPAXkMcK/G1gP4P4nAV8Rd/GGIjKRS/SUtcShhoAMOQhs4WIEkUrvEVRwhBDIbpM87oFbCVdBH38r0XS6F6CdhPJsKFhoEfq4c01HZqNmDpCPA8AAcCuSWqmXoTIqs7OqkWgduE2bInbWU7WMaTl+kCAwEAAaNQME4wHQYDVR0OBBYEFB/AsC4iPAqaLoNytNSx29qByI7+MB8GA1UdIwQYMBaAFB/AsC4iPAqaLoNytNSx29qByI7+MAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAMb2Td3ro/+MGVnCPAbwBSOZMVLUKGqt6zr8CShW9mtFHnmy29EaWSYYAj1M4+6Vpkq85NsgBEck7rnUjV8A3Q0NKdTys1KRKJqVvQRBN6SwqQenSf/abxQCa8Z+69rh+3BkIU1HLtu5lrMDZwon5H91L5mpORn6vItd20uW132lwSDeUEW2CHslTrodoFuTUcSUlRiq/URfUH3baO1QHXkxpQwrBPKL5deJfcZnxh5MAtAGSQL7gHvayEFlDppETXdDO7vgGTH2dEK2TjKWALbGiKkxSqjRyTNt4/FOj10TqNRdUamj+ydVJgzGQ8bki4Vc6NnKm/r4asusxapkVR4=</signature>
+    </webviewprovider>
+    <webviewprovider description="Chrome Debug" packageName="com.google.android.apps.chrome">
+        <!-- Ignore this package on user/release builds unless preinstalled. -->
+    </webviewprovider>
+</webviewproviders>
diff --git a/overlay/frameworks/base/packages/SettingsProvider/res/values/defaults.xml b/overlay/frameworks/base/packages/SettingsProvider/res/values/defaults.xml
index 6602627..56fbf89 100644
--- a/overlay/frameworks/base/packages/SettingsProvider/res/values/defaults.xml
+++ b/overlay/frameworks/base/packages/SettingsProvider/res/values/defaults.xml
@@ -18,7 +18,9 @@
 -->
 
 <resources>
+    <!-- cannot set this to true because O-CTS is testing the keyguard b/37465076
     <bool name="def_lockscreen_disabled">true</bool>
+    -->
 
     <!-- Allow users to use both the on-screen keyboard, as well as a real
          keyboard -->
diff --git a/overlay/frameworks/base/packages/SystemUI/res/values/config.xml b/overlay/frameworks/base/packages/SystemUI/res/values/config.xml
new file mode 100644
index 0000000..044c383
--- /dev/null
+++ b/overlay/frameworks/base/packages/SystemUI/res/values/config.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+/**
+ * Copyright (c) 2016, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<resources>
+    <!-- Should "4G" be shown instead of "LTE" when the network is NETWORK_TYPE_LTE? -->
+    <!-- No, in the Emulator, show "LTE" for LTE -->
+    <bool name="config_show4GForLTE">false</bool>
+</resources>
diff --git a/overlay/packages/apps/Launcher3/res/values/config.xml b/overlay/packages/apps/Launcher3/res/values/config.xml
new file mode 100644
index 0000000..27e4102
--- /dev/null
+++ b/overlay/packages/apps/Launcher3/res/values/config.xml
@@ -0,0 +1,4 @@
+<resources>
+    <!-- Package name of the default wallpaper picker. -->
+    <string name="wallpaper_picker_package" translatable="false">com.android.wallpaper.livepicker</string>
+</resources>
diff --git a/power/Android.mk b/power/Android.mk
index 0273f04..3839088 100644
--- a/power/Android.mk
+++ b/power/Android.mk
@@ -22,7 +22,19 @@
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_CFLAGS += -DQEMU_HARDWARE
 LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := power_qemu.c
 LOCAL_MODULE := power.goldfish
 LOCAL_MODULE_TAGS := optional
 include $(BUILD_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_CFLAGS += -DQEMU_HARDWARE
+LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
+LOCAL_SRC_FILES := power_qemu.c
+LOCAL_MODULE := power.ranchu
+LOCAL_MODULE_TAGS := optional
+include $(BUILD_SHARED_LIBRARY)
diff --git a/power/power_qemu.c b/power/power_qemu.c
index 86c3ffd..808589a 100644
--- a/power/power_qemu.c
+++ b/power/power_qemu.c
@@ -19,27 +19,27 @@
 
 #include <hardware/hardware.h>
 #include <hardware/power.h>
-#include <system/qemu_pipe.h>
+#include "qemud.h"
 #include <fcntl.h>
 #include <errno.h>
 
 static int qemud_fd;
 
-static void power_qemu_init(struct power_module * __unused module)
+static void power_qemu_init(struct power_module *module)
 {
-    qemud_fd = qemu_pipe_open("pipe:qemud:hw-control");
+    qemud_fd = qemud_channel_open("hw-control");
 
     if (qemud_fd < 0)
         ALOGE("Error connecting to qemud hw-control service\n");
 }
 
-static void power_qemu_set_interactive(struct power_module * __unused module,
-                                       int on)
+static void power_qemu_set_interactive(struct power_module *module, int on)
 {
-    const char* command = on ? "power:screen_state:wake" :
-            "power:screen_state:standby";
+    int r;
 
-    int r = qemu_pipe_frame_send(qemud_fd, command, strlen(command));
+    r = qemud_channel_send(qemud_fd, on ? "power:screen_state:wake"
+                           : "power:screen_state:standby", -1);
+
     if (r < 0)
         ALOGE("Error sending power command to qemud hw-control service\n");
 }
diff --git a/qemu-props/Android.mk b/qemu-props/Android.mk
index 78ab1d5..d2af64d 100644
--- a/qemu-props/Android.mk
+++ b/qemu-props/Android.mk
@@ -23,6 +23,7 @@
 #
 include $(CLEAR_VARS)
 LOCAL_MODULE    := qemu-props
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := qemu-props.c
 LOCAL_SHARED_LIBRARIES := libcutils liblog
 include $(BUILD_EXECUTABLE)
diff --git a/qemu-props/qemu-props.c b/qemu-props/qemu-props.c
index 0b3ea11..c80863a 100644
--- a/qemu-props/qemu-props.c
+++ b/qemu-props/qemu-props.c
@@ -34,8 +34,8 @@
 #endif
 
 #include <cutils/properties.h>
-#include <system/qemu_pipe.h>
 #include <unistd.h>
+#include "qemud.h"
 
 /* Name of the qemud service we want to connect to.
  */
@@ -52,7 +52,7 @@
         int  tries = MAX_TRIES;
 
         while (1) {
-            qemud_fd = qemu_pipe_open( "pipe:qemud:boot-properties" );
+            qemud_fd = qemud_channel_open( "boot-properties" );
             if (qemud_fd >= 0)
                 break;
 
@@ -69,7 +69,7 @@
     DD("connected to '%s' qemud service.", QEMUD_SERVICE);
 
     /* send the 'list' command to the service */
-    if (qemu_pipe_frame_send(qemud_fd, "list", 4) < 0) {
+    if (qemud_channel_send(qemud_fd, "list", -1) < 0) {
         DD("could not send command to '%s' service", QEMUD_SERVICE);
         return 1;
     }
@@ -83,7 +83,7 @@
         DD("receiving..");
         char* q;
         char  temp[BUFF_SIZE];
-        int   len = qemu_pipe_frame_recv(qemud_fd, temp, sizeof temp - 1);
+        int   len = qemud_channel_recv(qemud_fd, temp, sizeof temp - 1);
 
         /* lone NUL-byte signals end of properties */
         if (len < 0 || len > BUFF_SIZE-1 || temp[0] == '\0')
@@ -109,6 +109,20 @@
     }
 
 
+    /* HACK start adbd periodically every minute, if adbd is already running, this is a no-op */
+    for(;;) {
+        usleep(60000000);
+        char  temp[BUFF_SIZE];
+        property_get("sys.boot_completed", temp, "");
+        int is_boot_completed = (strncmp(temp, "1", 1) == 0) ? 1 : 0;
+        if (is_boot_completed) {
+            DD("start adbd ...");
+            property_set("qemu.adbd", "start");
+        } else {
+            DD("skip starting adbd ...");
+        }
+    }
+
     /* finally, close the channel and exit */
     close(qemud_fd);
     DD("exiting (%d properties set).", count);
diff --git a/qemud/Android.mk b/qemud/Android.mk
new file mode 100644
index 0000000..30ee5eb
--- /dev/null
+++ b/qemud/Android.mk
@@ -0,0 +1,20 @@
+# Copyright 2008 The Android Open Source Project
+
+# We're moving the emulator-specific platform libs to
+# development.git/tools/emulator/. The following test is to ensure
+# smooth builds even if the tree contains both versions.
+#
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	qemud.c
+
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils liblog
+
+LOCAL_MODULE:= qemud
+
+include $(BUILD_EXECUTABLE)
diff --git a/qemud/qemud.c b/qemud/qemud.c
new file mode 100644
index 0000000..3d18893
--- /dev/null
+++ b/qemud/qemud.c
@@ -0,0 +1,1720 @@
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <termios.h>
+#include <unistd.h>
+#include <cutils/sockets.h>
+
+/*
+ *  the qemud daemon program is only used within Android as a bridge
+ *  between the emulator program and the emulated system. It acts as
+ *  a simple stream multiplexer that works as follows:
+ *
+ *    - qemud is started by init following instructions in
+ *      /system/etc/init.goldfish.rc (i.e. it is never started on real devices)
+ *
+ *    - qemud communicates with the emulator program through a single serial
+ *      port, whose name is passed through a kernel boot parameter
+ *      (e.g. android.qemud=ttyS1)
+ *
+ *    - qemud binds one unix local stream socket (/dev/socket/qemud, created
+ *      by init through /system/etc/init.goldfish.rc).
+ *
+ *
+ *      emulator <==serial==> qemud <---> /dev/socket/qemud <-+--> client1
+ *                                                            |
+ *                                                            +--> client2
+ *
+ *   - the special channel index 0 is used by the emulator and qemud only.
+ *     other channel numbers correspond to clients. More specifically,
+ *     connections are created like this:
+ *
+ *     * the client connects to /dev/socket/qemud
+ *
+ *     * the client sends the service name through the socket, as
+ *            <service-name>
+ *
+ *     * qemud creates a "Client" object internally, assigns it an
+ *       internal unique channel number > 0, then sends a connection
+ *       initiation request to the emulator (i.e. through channel 0):
+ *
+ *           connect:<id>:<name>
+ *
+ *       where <name> is the service name, and <id> is a 2-hexchar
+ *       number corresponding to the channel number.
+ *
+ *     * in case of success, the emulator responds through channel 0
+ *       with:
+ *
+ *           ok:connect:<id>
+ *
+ *       after this, all messages between the client and the emulator
+ *       are passed in pass-through mode.
+ *
+ *     * if the emulator refuses the service connection, it will
+ *       send the following through channel 0:
+ *
+ *           ko:connect:<id>:reason-for-failure
+ *
+ *     * If the client closes the connection, qemud sends the following
+ *       to the emulator:
+ *
+ *           disconnect:<id>
+ *
+ *       The same message is sent in the opposite direction if the
+ *       emulator chooses to close the connection.
+ *
+ *     * any command sent through channel 0 to the emulator that is
+ *       not properly recognized will be answered by:
+ *
+ *           ko:unknown command
+ *
+ *
+ *  Internally, the daemon maintains a "Client" object for each client
+ *  connection (i.e. accepting socket connection).
+ */
+
+/* name of the single control socket used by the daemon */
+#define CONTROL_SOCKET_NAME  "qemud"
+
+#define  DEBUG     0
+#define  T_ACTIVE  0  /* set to 1 to dump traffic */
+
+#if DEBUG
+#  define LOG_TAG  "qemud"
+#  include <cutils/log.h>
+#  define  D(...)   ALOGD(__VA_ARGS__)
+#else
+#  define  D(...)  ((void)0)
+#  define  T(...)  ((void)0)
+#endif
+
+#if T_ACTIVE
+#  define  T(...)   D(__VA_ARGS__)
+#else
+#  define  T(...)   ((void)0)
+#endif
+
+/** UTILITIES
+ **/
+
+static void
+fatal( const char*  fmt, ... )
+{
+    va_list  args;
+    va_start(args, fmt);
+    fprintf(stderr, "PANIC: ");
+    vfprintf(stderr, fmt, args);
+    fprintf(stderr, "\n" );
+    va_end(args);
+    exit(1);
+}
+
+static void*
+xalloc( size_t   sz )
+{
+    void*  p;
+
+    if (sz == 0)
+        return NULL;
+
+    p = malloc(sz);
+    if (p == NULL)
+        fatal( "not enough memory" );
+
+    return p;
+}
+
+#define  xnew(p)   (p) = xalloc(sizeof(*(p)))
+
+static void*
+xalloc0( size_t  sz )
+{
+    void*  p = xalloc(sz);
+    memset( p, 0, sz );
+    return p;
+}
+
+#define  xnew0(p)   (p) = xalloc0(sizeof(*(p)))
+
+#define  xfree(p)    (free((p)), (p) = NULL)
+
+static void*
+xrealloc( void*  block, size_t  size )
+{
+    void*  p = realloc( block, size );
+
+    if (p == NULL && size > 0)
+        fatal( "not enough memory" );
+
+    return p;
+}
+
+#define  xrenew(p,count)  (p) = xrealloc((p),sizeof(*(p))*(count))
+
+static int
+hex2int( const uint8_t*  data, int  len )
+{
+    int  result = 0;
+    while (len > 0) {
+        int       c = *data++;
+        unsigned  d;
+
+        result <<= 4;
+        do {
+            d = (unsigned)(c - '0');
+            if (d < 10)
+                break;
+
+            d = (unsigned)(c - 'a');
+            if (d < 6) {
+                d += 10;
+                break;
+            }
+
+            d = (unsigned)(c - 'A');
+            if (d < 6) {
+                d += 10;
+                break;
+            }
+
+            return -1;
+        }
+        while (0);
+
+        result |= d;
+        len    -= 1;
+    }
+    return  result;
+}
+
+
+static void
+int2hex( int  value, uint8_t*  to, int  width )
+{
+    int  nn = 0;
+    static const char hexchars[16] = "0123456789abcdef";
+
+    for ( --width; width >= 0; width--, nn++ ) {
+        to[nn] = hexchars[(value >> (width*4)) & 15];
+    }
+}
+
+static int
+fd_read(int  fd, void*  to, int  len)
+{
+    int  ret;
+
+    do {
+        ret = read(fd, to, len);
+    } while (ret < 0 && errno == EINTR);
+
+    return ret;
+}
+
+static int
+fd_write(int  fd, const void*  from, int  len)
+{
+    int  ret;
+
+    do {
+        ret = write(fd, from, len);
+    } while (ret < 0 && errno == EINTR);
+
+    return ret;
+}
+
+static void
+fd_setnonblock(int  fd)
+{
+    int  ret, flags;
+
+    do {
+        flags = fcntl(fd, F_GETFL);  /* status flags, not F_GETFD (fd flags) */
+    } while (flags < 0 && errno == EINTR);
+
+    if (flags < 0) {
+        fatal( "%s: could not get flags for fd %d: %s",
+               __FUNCTION__, fd, strerror(errno) );
+    }
+
+    do {
+        ret = fcntl(fd, F_SETFL, flags | O_NONBLOCK);  /* O_NONBLOCK is a status flag */
+    } while (ret < 0 && errno == EINTR);
+
+    if (ret < 0) {
+        fatal( "%s: could not set fd %d to non-blocking: %s",
+               __FUNCTION__, fd, strerror(errno) );
+    }
+}
+
+
+static int
+fd_accept(int  fd)
+{
+    struct sockaddr  from;
+    socklen_t        fromlen = sizeof(from);
+    int              ret;
+
+    do {
+        ret = accept(fd, &from, &fromlen);
+    } while (ret < 0 && errno == EINTR);
+
+    return ret;
+}
+
+/** FD EVENT LOOP
+ **/
+
+/* A Looper object is used to monitor activity on one or more
+ * file descriptors (e.g sockets).
+ *
+ * - call looper_add() to register a function that will be
+ *   called when events happen on the file descriptor.
+ *
+ * - call looper_enable() or looper_disable() to enable/disable
+ *   the set of monitored events for a given file descriptor.
+ *
+ * - call looper_del() to unregister a file descriptor.
+ *   this does *not* close the file descriptor.
+ *
+ * Note that you can only provide a single function to handle
+ * all events related to a given file descriptor.
+ *
+ * You can call looper_enable/_disable/_del within a function
+ * callback.
+ */
+
+/* the current implementation uses Linux's epoll facility
+ * the event mask we use are simply combinations of EPOLLIN
+ * EPOLLOUT, EPOLLHUP and EPOLLERR
+ */
+#include <sys/epoll.h>
+
+#define  MAX_CHANNELS  16
+#define  MAX_EVENTS    (MAX_CHANNELS+1)  /* each channel + the serial fd */
+
+/* the event handler function type, 'user' is a user-specific
+ * opaque pointer passed to looper_add().
+ */
+typedef void (*EventFunc)( void*  user, int  events );
+
+/* bit flags for the LoopHook structure.
+ *
+ * HOOK_PENDING means that an event happened on the
+ * corresponding file descriptor.
+ *
+ * HOOK_CLOSING is used to delay-close monitored
+ * file descriptors.
+ */
+enum {
+    HOOK_PENDING = (1 << 0),
+    HOOK_CLOSING = (1 << 1),
+};
+
+/* A LoopHook structure is used to monitor a given
+ * file descriptor and record its event handler.
+ */
+typedef struct {
+    int        fd;
+    int        wanted;  /* events we are monitoring */
+    int        events;  /* events that occurred */
+    int        state;   /* see HOOK_XXX constants */
+    void*      ev_user; /* user-provided handler parameter */
+    EventFunc  ev_func; /* event handler callback */
+} LoopHook;
+
+/* Looper is the main object modeling a looper object
+ */
+typedef struct {
+    int                  epoll_fd;
+    int                  num_fds;
+    int                  max_fds;
+    struct epoll_event*  events;
+    LoopHook*            hooks;
+} Looper;
+
+/* initialize a looper object */
+static void
+looper_init( Looper*  l )
+{
+    l->epoll_fd = epoll_create(4);
+    l->num_fds  = 0;
+    l->max_fds  = 0;
+    l->events   = NULL;
+    l->hooks    = NULL;
+}
+
+/* finalize a looper object */
+static void
+looper_done( Looper*  l )
+{
+    xfree(l->events);
+    xfree(l->hooks);
+    l->max_fds = 0;
+    l->num_fds = 0;
+
+    close(l->epoll_fd);
+    l->epoll_fd  = -1;
+}
+
+/* return the LoopHook corresponding to a given
+ * monitored file descriptor, or NULL if not found
+ */
+static LoopHook*
+looper_find( Looper*  l, int  fd )
+{
+    LoopHook*  hook = l->hooks;
+    LoopHook*  end  = hook + l->num_fds;
+
+    for ( ; hook < end; hook++ ) {
+        if (hook->fd == fd)
+            return hook;
+    }
+    return NULL;
+}
+
+/* grow the arrays in the looper object */
+static void
+looper_grow( Looper*  l )
+{
+    int  old_max = l->max_fds;
+    int  new_max = old_max + (old_max >> 1) + 4;
+    int  n;
+
+    xrenew( l->events, new_max );
+    xrenew( l->hooks,  new_max );
+    l->max_fds = new_max;
+
+    /* now change the handles to all events */
+    for (n = 0; n < l->num_fds; n++) {
+        struct epoll_event ev;
+        LoopHook*          hook = l->hooks + n;
+
+        ev.events   = hook->wanted;
+        ev.data.ptr = hook;
+        epoll_ctl( l->epoll_fd, EPOLL_CTL_MOD, hook->fd, &ev );
+    }
+}
+
+/* register a file descriptor and its event handler.
+ * no event mask will be enabled
+ */
+static void
+looper_add( Looper*  l, int  fd, EventFunc  func, void*  user )
+{
+    struct epoll_event  ev;
+    LoopHook*           hook;
+
+    if (l->num_fds >= l->max_fds)
+        looper_grow(l);
+
+    hook = l->hooks + l->num_fds;
+
+    hook->fd      = fd;
+    hook->ev_user = user;
+    hook->ev_func = func;
+    hook->state   = 0;
+    hook->wanted  = 0;
+    hook->events  = 0;
+
+    fd_setnonblock(fd);
+
+    ev.events   = 0;
+    ev.data.ptr = hook;
+    epoll_ctl( l->epoll_fd, EPOLL_CTL_ADD, fd, &ev );
+
+    l->num_fds += 1;
+}
+
+/* unregister a file descriptor and its event handler
+ */
+static void
+looper_del( Looper*  l, int  fd )
+{
+    LoopHook*  hook = looper_find( l, fd );
+
+    if (!hook) {
+        D( "%s: invalid fd: %d", __FUNCTION__, fd );
+        return;
+    }
+    /* don't remove the hook yet */
+    hook->state |= HOOK_CLOSING;
+
+    epoll_ctl( l->epoll_fd, EPOLL_CTL_DEL, fd, NULL );
+}
+
+/* enable monitoring of certain events for a file
+ * descriptor. This adds 'events' to the current
+ * event mask
+ */
+static void
+looper_enable( Looper*  l, int  fd, int  events )
+{
+    LoopHook*  hook = looper_find( l, fd );
+
+    if (!hook) {
+        D("%s: invalid fd: %d", __FUNCTION__, fd );
+        return;
+    }
+
+    if (events & ~hook->wanted) {
+        struct epoll_event  ev;
+
+        hook->wanted |= events;
+        ev.events   = hook->wanted;
+        ev.data.ptr = hook;
+
+        epoll_ctl( l->epoll_fd, EPOLL_CTL_MOD, fd, &ev );
+    }
+}
+
+/* disable monitoring of certain events for a file
+ * descriptor. This ignores events that are not
+ * currently enabled.
+ */
+static void
+looper_disable( Looper*  l, int  fd, int  events )
+{
+    LoopHook*  hook = looper_find( l, fd );
+
+    if (!hook) {
+        D("%s: invalid fd: %d", __FUNCTION__, fd );
+        return;
+    }
+
+    if (events & hook->wanted) {
+        struct epoll_event  ev;
+
+        hook->wanted &= ~events;
+        ev.events   = hook->wanted;
+        ev.data.ptr = hook;
+
+        epoll_ctl( l->epoll_fd, EPOLL_CTL_MOD, fd, &ev );
+    }
+}
+
+/* wait until an event occurs on one of the registered file
+ * descriptors. Only returns in case of error !!
+ */
+static void
+looper_loop( Looper*  l )
+{
+    for (;;) {
+        int  n, count;
+
+        do {
+            count = epoll_wait( l->epoll_fd, l->events, l->num_fds, -1 );
+        } while (count < 0 && errno == EINTR);
+
+        if (count < 0) {
+            D("%s: error: %s", __FUNCTION__, strerror(errno) );
+            return;
+        }
+
+        if (count == 0) {
+            D("%s: huh ? epoll returned count=0", __FUNCTION__);
+            continue;
+        }
+
+        /* mark all pending hooks */
+        for (n = 0; n < count; n++) {
+            LoopHook*  hook = l->events[n].data.ptr;
+            hook->state  = HOOK_PENDING;
+            hook->events = l->events[n].events;
+        }
+
+        /* execute hook callbacks. this may change the 'hooks'
+         * and 'events' array, as well as l->num_fds, so be careful */
+        for (n = 0; n < l->num_fds; n++) {
+            LoopHook*  hook = l->hooks + n;
+            if (hook->state & HOOK_PENDING) {
+                hook->state &= ~HOOK_PENDING;
+                hook->ev_func( hook->ev_user, hook->events );
+            }
+        }
+
+        /* now remove all the hooks that were closed by
+         * the callbacks */
+        for (n = 0; n < l->num_fds;) {
+            struct epoll_event ev;
+            LoopHook*  hook = l->hooks + n;
+
+            if (!(hook->state & HOOK_CLOSING)) {
+                n++;
+                continue;
+            }
+
+            hook[0]     = l->hooks[l->num_fds-1];
+            l->num_fds -= 1;
+            ev.events   = hook->wanted;
+            ev.data.ptr = hook;
+            epoll_ctl( l->epoll_fd, EPOLL_CTL_MOD, hook->fd, &ev );
+        }
+    }
+}
+
+#if T_ACTIVE
+char*
+quote( const void*  data, int  len )
+{
+    const char*  p   = data;
+    const char*  end = p + len;
+    int          count = 0;
+    int          phase = 0;
+    static char*  buff = NULL;
+
+    for (phase = 0; phase < 2; phase++) {
+        if (phase != 0) {
+            xfree(buff);
+            buff = xalloc(count+1);
+        }
+        count = 0;
+        for (p = data; p < end; p++) {
+            int  c = *p;
+
+            if (c == '\\') {
+                if (phase != 0) {
+                    buff[count] = buff[count+1] = '\\';
+                }
+                count += 2;
+                continue;
+            }
+
+            if (c >= 32 && c < 127) {
+                if (phase != 0)
+                    buff[count] = c;
+                count += 1;
+                continue;
+            }
+
+
+            if (c == '\t') {
+                if (phase != 0) {
+                    memcpy(buff+count, "<TAB>", 5);
+                }
+                count += 5;
+                continue;
+            }
+            if (c == '\n') {
+                if (phase != 0) {
+                    memcpy(buff+count, "<LN>", 4);
+                }
+                count += 4;
+                continue;
+            }
+            if (c == '\r') {
+                if (phase != 0) {
+                    memcpy(buff+count, "<CR>", 4);
+                }
+                count += 4;
+                continue;
+            }
+
+            if (phase != 0) {
+                buff[count+0] = '\\';
+                buff[count+1] = 'x';
+                buff[count+2] = "0123456789abcdef"[(c >> 4) & 15];
+                buff[count+3] = "0123456789abcdef"[     (c) & 15];
+            }
+            count += 4;
+        }
+    }
+    buff[count] = 0;
+    return buff;
+}
+#endif /* T_ACTIVE */
+
+/** PACKETS
+ **
+ ** We need a way to buffer data before it can be sent to the
+ ** corresponding file descriptor. We use linked list of Packet
+ ** objects to do this.
+ **/
+
+typedef struct Packet   Packet;
+
+#define  MAX_PAYLOAD  4000
+
+struct Packet {
+    Packet*   next;
+    int       len;
+    int       channel;
+    uint8_t   data[ MAX_PAYLOAD ];
+};
+
+/* we expect to alloc/free a lot of packets during
+ * operations so use a single linked list of free packets
+ * to keep things speedy and simple.
+ */
+static Packet*   _free_packets;
+
+/* Allocate a packet */
+static Packet*
+packet_alloc(void)
+{
+    Packet*  p = _free_packets;
+    if (p != NULL) {
+        _free_packets = p->next;
+    } else {
+        xnew(p);
+    }
+    p->next    = NULL;
+    p->len     = 0;
+    p->channel = -1;
+    return p;
+}
+
+/* Release a packet. This takes the address of a packet
+ * pointer that will be set to NULL on exit (avoids
+ * referencing dangling pointers in case of bugs)
+ */
+static void
+packet_free( Packet*  *ppacket )
+{
+    Packet*  p = *ppacket;
+    if (p) {
+        p->next       = _free_packets;
+        _free_packets = p;
+        *ppacket = NULL;
+    }
+}
+
+/** PACKET RECEIVER
+ **
+ ** Simple abstraction for something that can receive a packet
+ ** from a FDHandler (see below) or something else.
+ **
+ ** Send a packet to it with 'receiver_post'
+ **
+ ** Call 'receiver_close' to indicate that the corresponding
+ ** packet source was closed.
+ **/
+
+typedef void (*PostFunc) ( void*  user, Packet*  p );
+typedef void (*CloseFunc)( void*  user );
+
+typedef struct {
+    PostFunc   post;
+    CloseFunc  close;
+    void*      user;
+} Receiver;
+
+/* post a packet to a receiver. Note that this transfers
+ * ownership of the packet to the receiver.
+ */
+static __inline__ void
+receiver_post( Receiver*  r, Packet*  p )
+{
+    if (r->post)
+        r->post( r->user, p );
+    else
+        packet_free(&p);
+}
+
+/* tell a receiver the packet source was closed.
+ * this will also prevent further posting to the
+ * receiver.
+ */
+static __inline__ void
+receiver_close( Receiver*  r )
+{
+    if (r->close) {
+        r->close( r->user );
+        r->close = NULL;
+    }
+    r->post  = NULL;
+}
+
+
+/** FD HANDLERS
+ **
+ ** these are smart listeners that send incoming packets to a receiver
+ ** and can queue one or more outgoing packets and send them when
+ ** possible to the FD.
+ **
+ ** note that we support clean shutdown of file descriptors,
+ ** i.e. we try to send all outgoing packets before destroying
+ ** the FDHandler.
+ **/
+
+typedef struct FDHandler      FDHandler;
+typedef struct FDHandlerList  FDHandlerList;
+
+struct FDHandler {
+    int             fd;
+    FDHandlerList*  list;
+    char            closing;
+    Receiver        receiver[1];
+
+    /* queue of outgoing packets */
+    int             out_pos;
+    Packet*         out_first;
+    Packet**        out_ptail;
+
+    FDHandler*      next;
+    FDHandler**     pref;
+
+};
+
+struct FDHandlerList {
+    /* the looper that manages the fds */
+    Looper*      looper;
+
+    /* list of active FDHandler objects */
+    FDHandler*   active;
+
+    /* list of closing FDHandler objects.
+     * these are waiting to push their
+     * queued packets to the fd before
+     * freeing themselves.
+     */
+    FDHandler*   closing;
+
+};
+
+/* remove a FDHandler from its current list */
+static void
+fdhandler_remove( FDHandler*  f )
+{
+    f->pref[0] = f->next;
+    if (f->next)
+        f->next->pref = f->pref;
+}
+
+/* add a FDHandler to a given list */
+static void
+fdhandler_prepend( FDHandler*  f, FDHandler**  list )
+{
+    f->next = list[0];
+    f->pref = list;
+    list[0] = f;
+    if (f->next)
+        f->next->pref = &f->next;
+}
+
+/* initialize a FDHandler list */
+static void
+fdhandler_list_init( FDHandlerList*  list, Looper*  looper )
+{
+    list->looper  = looper;
+    list->active  = NULL;
+    list->closing = NULL;
+}
+
+
+/* close a FDHandler (and free it). Note that this will not
+ * perform a graceful shutdown, i.e. all packets in the
+ * outgoing queue will be immediately freed.
+ *
+ * this *will* notify the receiver that the file descriptor
+ * was closed.
+ *
+ * you should call fdhandler_shutdown() if you want to
+ * notify the FDHandler that its packet source is closed.
+ */
+static void
+fdhandler_close( FDHandler*  f )
+{
+    /* notify receiver */
+    receiver_close(f->receiver);
+
+    /* remove the handler from its list */
+    fdhandler_remove(f);
+
+    /* get rid of outgoing packet queue */
+    if (f->out_first != NULL) {
+        Packet*  p;
+        while ((p = f->out_first) != NULL) {
+            f->out_first = p->next;
+            packet_free(&p);
+        }
+    }
+
+    /* get rid of file descriptor */
+    if (f->fd >= 0) {
+        looper_del( f->list->looper, f->fd );
+        close(f->fd);
+        f->fd = -1;
+    }
+
+    f->list = NULL;
+    xfree(f);
+}
+
+/* Ask the FDHandler to cleanly shutdown the connection,
+ * i.e. send any pending outgoing packets then auto-free
+ * itself.
+ */
+static void
+fdhandler_shutdown( FDHandler*  f )
+{
+    /* prevent later fdhandler_close() to
+     * call the receiver's close.
+     */
+    f->receiver->close = NULL;
+
+    if (f->out_first != NULL && !f->closing)
+    {
+        /* move the handler to the 'closing' list */
+        f->closing = 1;
+        fdhandler_remove(f);
+        fdhandler_prepend(f, &f->list->closing);
+        return;
+    }
+
+    fdhandler_close(f);
+}
+
+/* Enqueue a new packet that the FDHandler will
+ * send through its file descriptor.
+ */
+static void
+fdhandler_enqueue( FDHandler*  f, Packet*  p )
+{
+    Packet*  first = f->out_first;
+
+    p->next         = NULL;
+    f->out_ptail[0] = p;
+    f->out_ptail    = &p->next;
+
+    if (first == NULL) {
+        f->out_pos = 0;
+        looper_enable( f->list->looper, f->fd, EPOLLOUT );
+    }
+}
+
+
+/* FDHandler file descriptor event callback for read/write ops */
+static void
+fdhandler_event( FDHandler*  f, int  events )
+{
+    /* in certain cases, it's possible to have both EPOLLIN and
+     * EPOLLHUP at the same time. This indicates that there is incoming
+     * data to read, but that the connection was nonetheless closed
+     * by the sender. Be sure to read the data before closing
+     * the receiver to avoid packet loss.
+     */
+
+    if (events & EPOLLIN) {
+        Packet*  p = packet_alloc();
+        int      len;
+
+        if ((len = fd_read(f->fd, p->data, MAX_PAYLOAD)) < 0) {
+            D("%s: can't recv: %s", __FUNCTION__, strerror(errno));
+            packet_free(&p);
+        } else if (len > 0) {
+            p->len     = len;
+            p->channel = -101;  /* special debug value, not used */
+            receiver_post( f->receiver, p );
+        } else {
+            packet_free(&p);  /* len == 0: nothing read, don't leak the packet */
+        }
+    }
+
+    if (events & (EPOLLHUP|EPOLLERR)) {
+        /* disconnection */
+        D("%s: disconnect on fd %d", __FUNCTION__, f->fd);
+        fdhandler_close(f);
+        return;
+    }
+
+    if (events & EPOLLOUT && f->out_first) {
+        Packet*  p = f->out_first;
+        int      avail, len;
+
+        avail = p->len - f->out_pos;
+        if ((len = fd_write(f->fd, p->data + f->out_pos, avail)) < 0) {
+            D("%s: can't send: %s", __FUNCTION__, strerror(errno));
+        } else {
+            f->out_pos += len;
+            if (f->out_pos >= p->len) {
+                f->out_pos   = 0;
+                f->out_first = p->next;
+                packet_free(&p);
+                if (f->out_first == NULL) {
+                    f->out_ptail = &f->out_first;
+                    looper_disable( f->list->looper, f->fd, EPOLLOUT );
+                }
+            }
+        }
+    }
+}
+
+
+/* Create a new FDHandler that monitors read/writes */
+static FDHandler*
+fdhandler_new( int             fd,
+               FDHandlerList*  list,
+               Receiver*       receiver )
+{
+    FDHandler*  f = xalloc0(sizeof(*f));
+
+    f->fd          = fd;
+    f->list        = list;
+    f->receiver[0] = receiver[0];
+    f->out_first   = NULL;
+    f->out_ptail   = &f->out_first;
+    f->out_pos     = 0;
+
+    fdhandler_prepend(f, &list->active);
+
+    looper_add( list->looper, fd, (EventFunc) fdhandler_event, f );
+    looper_enable( list->looper, fd, EPOLLIN );
+
+    return f;
+}
+
+
+/* event callback function to monitor accepts() on server sockets.
+ * the convention used here is that the receiver will receive a
+ * dummy packet with the new client socket in p->channel
+ */
+static void
+fdhandler_accept_event( FDHandler*  f, int  events )
+{
+    if (events & EPOLLIN) {
+        /* this is an accept - send a dummy packet to the receiver */
+        Packet*  p = packet_alloc();
+
+        D("%s: accepting on fd %d", __FUNCTION__, f->fd);
+        p->data[0] = 1;
+        p->len     = 1;
+        p->channel = fd_accept(f->fd);
+        if (p->channel < 0) {
+            D("%s: accept failed ?: %s", __FUNCTION__, strerror(errno));
+            packet_free(&p);
+            return;
+        }
+        receiver_post( f->receiver, p );
+    }
+
+    if (events & (EPOLLHUP|EPOLLERR)) {
+        /* disconnecting !! */
+        D("%s: closing accept fd %d", __FUNCTION__, f->fd);
+        fdhandler_close(f);
+        return;
+    }
+}
+
+
+/* Create a new FDHandler used to monitor new connections on a
+ * server socket. The receiver must expect the new connection
+ * fd in the 'channel' field of a dummy packet.
+ */
+static FDHandler*
+fdhandler_new_accept( int             fd,
+                      FDHandlerList*  list,
+                      Receiver*       receiver )
+{
+    FDHandler*  f = xalloc0(sizeof(*f));
+
+    f->fd          = fd;
+    f->list        = list;
+    f->receiver[0] = receiver[0];
+
+    fdhandler_prepend(f, &list->active);
+
+    looper_add( list->looper, fd, (EventFunc) fdhandler_accept_event, f );
+    looper_enable( list->looper, fd, EPOLLIN );
+    listen( fd, 5 );
+
+    return f;
+}
+
+/** SERIAL CONNECTION STATE
+ **
+ ** The following is used to handle the framing protocol
+ ** used on the serial port connection.
+ **/
+
+/* each packet is made of a 6 byte header followed by a payload
+ * the header looks like:
+ *
+ *   offset   size    description
+ *       0       2    a 2-byte hex string for the channel number
+ *       4       4    a 4-char hex string for the size of the payload
+ *       6       n    the payload itself
+ */
+#define  HEADER_SIZE    6
+#define  CHANNEL_OFFSET 0
+#define  LENGTH_OFFSET  2
+#define  CHANNEL_SIZE   2
+#define  LENGTH_SIZE    4
+
+#define  CHANNEL_CONTROL  0
+
+/* The Serial object receives data from the serial port,
+ * extracts the payload size and channel index, then sends
+ * the resulting messages as a packet to a generic receiver.
+ *
+ * You can also use serial_send to send a packet through
+ * the serial port.
+ */
+typedef struct Serial {
+    FDHandler*  fdhandler;   /* used to monitor serial port fd */
+    Receiver    receiver[1]; /* send payload there */
+    int         in_len;      /* current bytes in input packet */
+    int         in_datalen;  /* payload size, or 0 when reading header */
+    int         in_channel;  /* extracted channel number */
+    Packet*     in_packet;   /* used to read incoming packets */
+} Serial;
+
+
+/* a callback called when the serial port's fd is closed.
+ *
+ * The serial link is qemud's only channel to the emulator, so
+ * losing it is unrecoverable: abort the daemon immediately.
+ */
+static void
+serial_fd_close( Serial*  s )
+{
+    fatal("unexpected serial port close !!");
+}
+
+/* trace helper: log a packet's length and (quoted) content on
+ * behalf of 'funcname'. Compiled to a no-op unless T() tracing
+ * is enabled. */
+static void
+serial_dump( Packet*  p, const char*  funcname )
+{
+    T("%s: %03d bytes: '%s'",
+      funcname, p->len, quote(p->data, p->len));
+}
+
+/* a callback called when a packet arrives from the serial port's FDHandler.
+ *
+ * This will essentially parse the header, extract the channel number and
+ * the payload size and store them in 'in_datalen' and 'in_channel'.
+ *
+ * After that, the payload is sent to the receiver once completed.
+ *
+ * Takes ownership of 'p' and frees it before returning. Partial
+ * header/payload state is carried across calls in s->in_len,
+ * s->in_datalen and s->in_packet.
+ */
+static void
+serial_fd_receive( Serial*  s, Packet*  p )
+{
+    int      rpos  = 0, rcount = p->len;
+    Packet*  inp   = s->in_packet;
+    int      inpos = s->in_len;
+
+    serial_dump( p, __FUNCTION__ );
+
+    while (rpos < rcount)
+    {
+        int  avail = rcount - rpos;
+
+        /* first, try to read the header */
+        if (s->in_datalen == 0) {
+            int  wanted = HEADER_SIZE - inpos;
+            if (avail > wanted)
+                avail = wanted;
+
+            memcpy( inp->data + inpos, p->data + rpos, avail );
+            inpos += avail;
+            rpos  += avail;
+
+            if (inpos == HEADER_SIZE) {
+                s->in_datalen = hex2int( inp->data + LENGTH_OFFSET,  LENGTH_SIZE );
+                s->in_channel = hex2int( inp->data + CHANNEL_OFFSET, CHANNEL_SIZE );
+
+                if (s->in_datalen <= 0) {
+                    /* zero length means an empty packet; a negative value
+                     * means hex2int() could not parse the header.
+                     * BUGFIX: the two strings were swapped in the original
+                     * message ("empty" was printed for the malformed case). */
+                    D("ignoring %s packet from serial port",
+                      s->in_datalen ? "malformed" : "empty");
+                    s->in_datalen = 0;
+                }
+
+                //D("received %d bytes packet for channel %d", s->in_datalen, s->in_channel);
+                inpos = 0;
+            }
+        }
+        else /* then, populate the packet itself */
+        {
+            int   wanted = s->in_datalen - inpos;
+
+            if (avail > wanted)
+                avail = wanted;
+
+            /* NOTE(review): assumes s->in_datalen never exceeds the
+             * capacity of inp->data — confirm against MAX_PAYLOAD since
+             * the 4 hex digits allow lengths up to 0xffff. */
+            memcpy( inp->data + inpos, p->data + rpos, avail );
+            inpos += avail;
+            rpos  += avail;
+
+            if (inpos == s->in_datalen) {
+                if (s->in_channel < 0) {
+                    D("ignoring %d bytes addressed to channel %d",
+                       inpos, s->in_channel);
+                } else {
+                    /* hand the completed packet to the receiver and
+                     * start filling a fresh one */
+                    inp->len     = inpos;
+                    inp->channel = s->in_channel;
+                    receiver_post( s->receiver, inp );
+                    s->in_packet  = inp = packet_alloc();
+                }
+                s->in_datalen = 0;
+                inpos         = 0;
+            }
+        }
+    }
+    s->in_len = inpos;
+    packet_free(&p);
+}
+
+
+/* send a packet to the serial port.
+ * this assumes that p->len and p->channel contain the payload's
+ * size and channel and will add the appropriate header.
+ *
+ * Takes ownership of 'p'; both the header and payload packets are
+ * freed by the FDHandler once written out.
+ */
+static void
+serial_send( Serial*  s, Packet*  p )
+{
+    Packet*  h = packet_alloc();
+
+    //D("sending to serial %d bytes from channel %d: '%.*s'", p->len, p->channel, p->len, p->data);
+
+    /* insert a small header before this packet */
+    h->len = HEADER_SIZE;
+    int2hex( p->len,     h->data + LENGTH_OFFSET,  LENGTH_SIZE );
+    int2hex( p->channel, h->data + CHANNEL_OFFSET, CHANNEL_SIZE );
+
+    serial_dump( h, __FUNCTION__ );
+    serial_dump( p, __FUNCTION__ );
+
+    /* enqueue header first so it precedes the payload on the wire */
+    fdhandler_enqueue( s->fdhandler, h );
+    fdhandler_enqueue( s->fdhandler, p );
+}
+
+
+/* initialize serial reader.
+ *
+ * 'fd' is the opened serial port, 'list' the FDHandler registry, and
+ * 'receiver' (copied by value) is where completed payload packets are
+ * posted by serial_fd_receive().
+ */
+static void
+serial_init( Serial*         s,
+             int             fd,
+             FDHandlerList*  list,
+             Receiver*       receiver )
+{
+    Receiver  recv;
+
+    /* internal receiver: raw serial bytes go through the framing
+     * parser before reaching the caller's receiver */
+    recv.user  = s;
+    recv.post  = (PostFunc)  serial_fd_receive;
+    recv.close = (CloseFunc) serial_fd_close;
+
+    s->receiver[0] = receiver[0];
+
+    s->fdhandler = fdhandler_new( fd, list, &recv );
+    s->in_len     = 0;
+    s->in_datalen = 0;
+    s->in_channel = 0;
+    s->in_packet  = packet_alloc();
+}
+
+
+/** CLIENTS
+ **/
+
+typedef struct Client       Client;
+typedef struct Multiplexer  Multiplexer;
+
+/* A Client object models a single qemud client socket
+ * connection in the emulated system.
+ *
+ * the client first sends the name of the system service
+ * it wants to contact (no framing), then waits for a 2
+ * byte answer from qemud.
+ *
+ * the answer is either "OK" or "KO" to indicate
+ * success or failure.
+ *
+ * In case of success, the client can send messages
+ * to the service.
+ *
+ * In case of failure, it can disconnect or try sending
+ * the name of another service.
+ */
+struct Client {
+    Client*       next;        /* next client in intrusive list */
+    Client**      pref;        /* points to the pointer that points to us */
+    int           channel;     /* assigned channel, or -1 before registration */
+    char          registered;  /* non-zero once the emulator confirmed */
+    FDHandler*    fdhandler;   /* monitors the client socket */
+    Multiplexer*  multiplexer; /* owning multiplexer */
+};
+
+struct Multiplexer {
+    Client*        clients;      /* head of client list */
+    int            last_channel; /* last channel id handed out */
+    Serial         serial[1];    /* framed serial link to the emulator */
+    Looper         looper[1];    /* event loop */
+    FDHandlerList  fdhandlers[1];/* all active fd handlers */
+};
+
+
+static int   multiplexer_open_channel( Multiplexer*  mult, Packet*  p );
+static void  multiplexer_close_channel( Multiplexer*  mult, int  channel );
+static void  multiplexer_serial_send( Multiplexer* mult, int  channel, Packet*  p );
+
+/* trace helper: log a packet processed by client 'c' on behalf of
+ * 'funcname'. No-op unless T() tracing is enabled. */
+static void
+client_dump( Client*  c, Packet*  p, const char*  funcname )
+{
+    T("%s: client %p (%d): %3d bytes: '%s'",
+      funcname, c, c->fdhandler->fd,
+      p->len, quote(p->data, p->len));
+}
+
+/* destroy a client: unlink it from the intrusive client list,
+ * flush any queued outgoing data, and release its memory. */
+static void
+client_free( Client*  c )
+{
+    /* remove from list */
+    c->pref[0] = c->next;
+    if (c->next)
+        c->next->pref = c->pref;
+
+    c->channel    = -1;
+    c->registered = 0;
+
+    /* gently ask the FDHandler to shutdown to
+     * avoid losing queued outgoing packets */
+    if (c->fdhandler != NULL) {
+        fdhandler_shutdown(c->fdhandler);
+        c->fdhandler = NULL;
+    }
+
+    xfree(c);
+}
+
+
+/* a function called when a client socket receives data.
+ *
+ * Takes ownership of 'p'. Behavior depends on registration state:
+ *  - registered: forward the payload over the serial link;
+ *  - registration pending (channel > 0): protocol violation, drop client;
+ *  - unregistered: treat payload as a service name and start registration.
+ */
+static void
+client_fd_receive( Client*  c, Packet*  p )
+{
+    client_dump(c, p, __FUNCTION__);
+
+    if (c->registered) {
+        /* the client is registered, just send the
+         * data through the serial port
+         */
+        multiplexer_serial_send(c->multiplexer, c->channel, p);
+        return;
+    }
+
+    if (c->channel > 0) {
+        /* the client is waiting registration results.
+         * this should not happen because the client
+         * should wait for our 'ok' or 'ko'.
+         * close the connection.
+         */
+         D("%s: bad client sending data before end of registration",
+           __FUNCTION__);
+     /* note: this label lives inside the if-body; it is also reached
+      * via the goto below when channel allocation fails */
+     BAD_CLIENT:
+         packet_free(&p);
+         client_free(c);
+         return;
+    }
+
+    /* the client hasn't registered a service yet,
+     * so this must be the name of a service, call
+     * the multiplexer to start registration for
+     * it.
+     */
+    D("%s: attempting registration for service '%.*s'",
+      __FUNCTION__, p->len, p->data);
+    c->channel = multiplexer_open_channel(c->multiplexer, p);
+    if (c->channel < 0) {
+        D("%s: service name too long", __FUNCTION__);
+        goto BAD_CLIENT;
+    }
+    D("%s:    -> received channel id %d", __FUNCTION__, c->channel);
+    packet_free(&p);
+}
+
+
+/* a function called when the client socket is closed.
+ * The FDHandler is already being torn down, so only notify the
+ * emulator (if a channel was assigned) and free the client. */
+static void
+client_fd_close( Client*  c )
+{
+    T("%s: client %p (%d)", __FUNCTION__, c, c->fdhandler->fd);
+
+    /* no need to shutdown the FDHandler */
+    c->fdhandler = NULL;
+
+    /* tell the emulator we're out */
+    if (c->channel > 0)
+        multiplexer_close_channel(c->multiplexer, c->channel);
+
+    /* free the client */
+    client_free(c);
+}
+
+/* a function called when the multiplexer received a registration
+ * response from the emulator for a given client.
+ *
+ * Sends the 2-byte "OK"/"KO" answer the client protocol expects,
+ * then records the result. On failure the channel is released so the
+ * client may try registering another service name.
+ */
+static void
+client_registration( Client*  c, int  registered )
+{
+    Packet*  p = packet_alloc();
+
+    /* sends registration status to client */
+    if (!registered) {
+        D("%s: registration failed for client %d", __FUNCTION__, c->channel);
+        memcpy( p->data, "KO", 2 );
+        p->len = 2;
+    } else {
+        D("%s: registration succeeded for client %d", __FUNCTION__, c->channel);
+        memcpy( p->data, "OK", 2 );
+        p->len = 2;
+    }
+    client_dump(c, p, __FUNCTION__);
+    /* the FDHandler takes ownership of 'p' */
+    fdhandler_enqueue(c->fdhandler, p);
+
+    /* now save registration state
+     */
+    c->registered = registered;
+    if (!registered) {
+        /* allow the client to try registering another service */
+        c->channel = -1;
+    }
+}
+
+/* send data to a client. Takes ownership of 'p', which is queued on
+ * the client's FDHandler for asynchronous write. */
+static void
+client_send( Client*  c, Packet*  p )
+{
+    client_dump(c, p, __FUNCTION__);
+    fdhandler_enqueue(c->fdhandler, p);
+}
+
+
+/* Create new client socket handler.
+ *
+ * 'fd' is the freshly-accepted socket; the client starts unregistered
+ * (channel -1) and is prepended to the '*pclients' intrusive list.
+ */
+static Client*
+client_new( Multiplexer*    mult,
+            int             fd,
+            FDHandlerList*  pfdhandlers,
+            Client**        pclients )
+{
+    Client*   c;
+    Receiver  recv;
+
+    xnew(c);
+
+    c->multiplexer = mult;
+    c->next        = NULL;
+    c->pref        = &c->next;
+    c->channel     = -1;
+    c->registered  = 0;
+
+    recv.user  = c;
+    recv.post  = (PostFunc)  client_fd_receive;
+    recv.close = (CloseFunc) client_fd_close;
+
+    c->fdhandler = fdhandler_new( fd, pfdhandlers, &recv );
+
+    /* add to client list; 'pref' back-pointers let client_free()
+     * unlink in O(1) without knowing the list head */
+    c->next   = *pclients;
+    c->pref   = pclients;
+    *pclients = c;
+    if (c->next)
+        c->next->pref = &c->next;
+
+    return c;
+}
+
+/**  GLOBAL MULTIPLEXER
+ **/
+
+/* look up the client currently bound to 'channel'.
+ * Returns NULL when no client owns that channel (e.g. the socket
+ * was closed before the emulator's answer arrived). */
+static Client*
+multiplexer_find_client( Multiplexer*  mult, int  channel )
+{
+    Client*  cursor = mult->clients;
+
+    while (cursor != NULL) {
+        if (cursor->channel == channel)
+            break;
+        cursor = cursor->next;
+    }
+    return cursor;
+}
+
+/* handle control messages coming from the serial port
+ * on CONTROL_CHANNEL.
+ *
+ * Takes ownership of 'p'. Every recognized message is exactly 13
+ * bytes: an 11-byte verb ("ok:connect:", "ko:connect:", "disconnect:")
+ * followed by a 2-digit hex channel number.
+ */
+static void
+multiplexer_handle_control( Multiplexer*  mult, Packet*  p )
+{
+    /* connection registration success */
+    if (p->len == 13 && !memcmp(p->data, "ok:connect:", 11)) {
+        int      channel = hex2int(p->data+11, 2);
+        Client*  client  = multiplexer_find_client(mult, channel);
+
+        /* note that 'client' can be NULL if the corresponding
+         * socket was closed before the emulator response arrived.
+         */
+        if (client != NULL) {
+            client_registration(client, 1);
+        } else {
+            /* BUGFIX: printing p->len (13) chars from p->data+11 read
+             * 11 bytes past the payload; only 2 chars remain there. */
+            D("%s: NULL client: '%.*s'", __FUNCTION__, p->len - 11, p->data+11);
+        }
+        goto EXIT;
+    }
+
+    /* connection registration failure */
+    if (p->len == 13 && !memcmp(p->data, "ko:connect:",11)) {
+        int     channel = hex2int(p->data+11, 2);
+        Client* client  = multiplexer_find_client(mult, channel);
+
+        if (client != NULL)
+            client_registration(client, 0);
+
+        goto EXIT;
+    }
+
+    /* emulator-induced client disconnection */
+    if (p->len == 13 && !memcmp(p->data, "disconnect:",11)) {
+        int      channel = hex2int(p->data+11, 2);
+        Client*  client  = multiplexer_find_client(mult, channel);
+
+        if (client != NULL)
+            client_free(client);
+
+        goto EXIT;
+    }
+
+    /* A message that begins with "X00" is a probe sent by
+     * the emulator used to detect which version of qemud it runs
+     * against (in order to detect 1.0/1.1 system images. Just
+     * silently ignore it there instead of printing an error
+     * message.
+     */
+    if (p->len >= 3 && !memcmp(p->data,"X00",3)) {
+        goto EXIT;
+    }
+
+    D("%s: unknown control message (%d bytes): '%.*s'",
+      __FUNCTION__, p->len, p->len, p->data);
+
+EXIT:
+    packet_free(&p);
+}
+
+/* a function called when an incoming packet comes from the serial port.
+ *
+ * Takes ownership of 'p': control messages are dispatched to
+ * multiplexer_handle_control(), data packets are routed to the client
+ * bound to p->channel, and packets for unknown channels are dropped.
+ */
+static void
+multiplexer_serial_receive( Multiplexer*  mult, Packet*  p )
+{
+    Client*  client;
+
+    T("%s: channel=%d '%.*s'", __FUNCTION__, p->channel, p->len, p->data);
+
+    if (p->channel == CHANNEL_CONTROL) {
+        multiplexer_handle_control(mult, p);
+        return;
+    }
+
+    client = multiplexer_find_client(mult, p->channel);
+    if (client != NULL) {
+        client_send(client, p);
+        return;
+    }
+
+    D("%s: discarding packet for unknown channel %d", __FUNCTION__, p->channel);
+    packet_free(&p);
+}
+
+/* a function called when the serial reader closes.
+ * The serial link is qemud's lifeline; this is unrecoverable. */
+static void
+multiplexer_serial_close( Multiplexer*  mult )
+{
+    fatal("unexpected close of serial reader");
+}
+
+/* a function called to send a packet to the serial port.
+ * Stamps 'channel' onto the packet then hands ownership of 'p'
+ * to the serial writer. */
+static void
+multiplexer_serial_send( Multiplexer*  mult, int  channel, Packet*  p )
+{
+    p->channel = channel;
+    serial_send( mult->serial, p );
+}
+
+
+
+/* a function used by a client to allocate a new channel id and
+ * ask the emulator to open it. 'service' must be a packet containing
+ * the name of the service in its payload.
+ *
+ * returns -1 if the service name is too long.
+ *
+ * notice that client_registration() will be called later when
+ * the answer arrives.
+ */
+static int
+multiplexer_open_channel( Multiplexer*  mult, Packet*  service )
+{
+    Packet*   p = packet_alloc();
+    int       len, channel;
+
+    /* find a free channel number, assume we don't have many
+     * clients here. */
+    {
+        Client*  c;
+    TRY_AGAIN:
+        channel = (++mult->last_channel) & 0xff;
+
+        /* BUGFIX: the counter wraps modulo 256 and could land on 0,
+         * which is reserved for control messages — never hand it out */
+        if (channel == CHANNEL_CONTROL)
+            goto TRY_AGAIN;
+
+        for (c = mult->clients; c != NULL; c = c->next)
+            if (c->channel == channel)
+                goto TRY_AGAIN;
+    }
+
+    len = snprintf((char*)p->data, sizeof p->data, "connect:%.*s:%02x", service->len, service->data, channel);
+    if (len < 0 || len >= (int)sizeof(p->data)) {
+        /* BUGFIX: %d must be given an int, but sizeof yields size_t —
+         * mismatched printf arguments are undefined behavior on LP64 */
+        D("%s: weird, service name too long (%d > %d)", __FUNCTION__, len, (int)sizeof(p->data));
+        packet_free(&p);
+        return -1;
+    }
+    p->channel = CHANNEL_CONTROL;
+    p->len     = len;
+
+    /* serial_send() takes ownership of 'p' */
+    serial_send(mult->serial, p);
+    return channel;
+}
+
+/* used to tell the emulator a channel was closed by a client.
+ * Sends a "disconnect:NN" control message over the serial link. */
+static void
+multiplexer_close_channel( Multiplexer*  mult, int  channel )
+{
+    Packet*  p   = packet_alloc();
+    int      len = snprintf((char*)p->data, sizeof(p->data), "disconnect:%02x", channel);
+
+    /* BUGFIX: snprintf truncates when len >= sizeof (the original
+     * tested '>'), and the early return leaked 'p' */
+    if (len < 0 || len >= (int)sizeof(p->data)) {
+        /* should not happen */
+        packet_free(&p);
+        return;
+    }
+
+    p->channel = CHANNEL_CONTROL;
+    p->len     = len;
+
+    /* serial_send() takes ownership of 'p' */
+    serial_send(mult->serial, p);
+}
+
+/* this function is used when a new connection happens on the control
+ * socket.
+ */
+static void
+multiplexer_control_accept( Multiplexer*  m, Packet*  p )
+{
+    /* the file descriptor for the new socket connection is
+     * in p->channel. See fdhandler_accept_event() */
+    int      fd     = p->channel;
+    Client*  client = client_new( m, fd, m->fdhandlers, &m->clients );
+
+    D("created client %p listening on fd %d", client, fd);
+
+    /* free dummy packet */
+    packet_free(&p);
+}
+
+/* called if the control server socket's handler closes; qemud
+ * cannot accept clients without it, so abort. */
+static void
+multiplexer_control_close( Multiplexer*  m )
+{
+    fatal("unexpected multiplexer control close");
+}
+
+/* initialize the multiplexer: open the serial device 'serial_dev',
+ * wire up the framing parser, and start accepting clients on the
+ * android control socket. Fatal on any setup failure. */
+static void
+multiplexer_init( Multiplexer*  m, const char*  serial_dev )
+{
+    int       fd, control_fd;
+    Receiver  recv;
+
+    /* initialize looper and fdhandlers list */
+    looper_init( m->looper );
+    fdhandler_list_init( m->fdhandlers, m->looper );
+
+    /* open the serial port (retry on EINTR) */
+    do {
+        fd = open(serial_dev, O_RDWR);
+    } while (fd < 0 && errno == EINTR);
+
+    if (fd < 0) {
+        fatal( "%s: could not open '%s': %s", __FUNCTION__, serial_dev,
+               strerror(errno) );
+    }
+    // disable echo on serial lines
+    // NOTE(review): tcgetattr/tcsetattr return values are ignored —
+    // confirm this is acceptable for non-tty serial devices
+    if ( !memcmp( serial_dev, "/dev/tty", 8 ) ) {
+        struct termios  ios;
+        tcgetattr( fd, &ios );
+        ios.c_lflag = 0;  /* disable ECHO, ICANON, etc... */
+        tcsetattr( fd, TCSANOW, &ios );
+    }
+
+    /* initialize the serial reader/writer */
+    recv.user  = m;
+    recv.post  = (PostFunc)  multiplexer_serial_receive;
+    recv.close = (CloseFunc) multiplexer_serial_close;
+
+    serial_init( m->serial, fd, m->fdhandlers, &recv );
+
+    /* open the qemud control socket */
+    recv.user  = m;
+    recv.post  = (PostFunc)  multiplexer_control_accept;
+    recv.close = (CloseFunc) multiplexer_control_close;
+
+    fd = android_get_control_socket(CONTROL_SOCKET_NAME);
+    if (fd < 0) {
+        fatal("couldn't get fd for control socket '%s'", CONTROL_SOCKET_NAME);
+    }
+
+    fdhandler_new_accept( fd, m->fdhandlers, &recv );
+
+    /* initialize clients list */
+    m->clients = NULL;
+}
+
+/** MAIN LOOP
+ **/
+
+/* single static multiplexer instance; qemud is a singleton daemon */
+static Multiplexer  _multiplexer[1];
+
+/* entry point: locate the serial device named by the
+ * "android.qemud=" kernel boot parameter, initialize the
+ * multiplexer on it, then run the event loop forever. */
+int  main( void )
+{
+    Multiplexer*  m = _multiplexer;
+
+   /* extract the name of our serial device from the kernel
+    * boot options that are stored in /proc/cmdline
+    */
+#define  KERNEL_OPTION  "android.qemud="
+
+    {
+        char          buff[1024];
+        int           fd, len;
+        char*         p;
+        char*         q;
+
+        fd = open( "/proc/cmdline", O_RDONLY );
+        if (fd < 0) {
+            D("%s: can't open /proc/cmdline !!: %s", __FUNCTION__,
+            strerror(errno));
+            exit(1);
+        }
+
+        len = fd_read( fd, buff, sizeof(buff)-1 );
+        close(fd);
+        if (len < 0) {
+            D("%s: can't read /proc/cmdline: %s", __FUNCTION__,
+            strerror(errno));
+            exit(1);
+        }
+        buff[len] = 0;
+
+        p = strstr( buff, KERNEL_OPTION );
+        if (p == NULL) {
+            D("%s: can't find '%s' in /proc/cmdline",
+            __FUNCTION__, KERNEL_OPTION );
+            exit(1);
+        }
+
+        p += sizeof(KERNEL_OPTION)-1;  /* skip option */
+        q  = p;
+        while ( *q && *q != ' ' && *q != '\t' )
+            q += 1;
+
+        /* BUGFIX: the '%.*s' precision argument must be an int;
+         * q-p is a ptrdiff_t (64-bit on LP64), which is undefined
+         * behavior in variadic printf-style calls */
+        snprintf( buff, sizeof(buff), "/dev/%.*s", (int)(q-p), p );
+
+        multiplexer_init( m, buff );
+    }
+
+    D( "entering main loop");
+    looper_loop( m->looper );
+    D( "unexpected termination !!" );
+    return 0;
+}
diff --git a/sensors/Android.mk b/sensors/Android.mk
index 4b935a7..e3df473 100644
--- a/sensors/Android.mk
+++ b/sensors/Android.mk
@@ -21,6 +21,7 @@
 
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := sensors_qemu.c
 ifeq ($(TARGET_PRODUCT),vbox_x86)
 LOCAL_MODULE := sensors.vbox_x86
@@ -30,10 +31,12 @@
 include $(BUILD_SHARED_LIBRARY)
 
 
+
 include $(CLEAR_VARS)
 
 LOCAL_MODULE_RELATIVE_PATH := hw
 LOCAL_SHARED_LIBRARIES := liblog libcutils
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../include
 LOCAL_SRC_FILES := sensors_qemu.c
 LOCAL_MODULE := sensors.ranchu
 
diff --git a/sensors/sensors_qemu.c b/sensors/sensors_qemu.c
index 7175a44..9eee746 100644
--- a/sensors/sensors_qemu.c
+++ b/sensors/sensors_qemu.c
@@ -23,9 +23,9 @@
  */
 
 
-/* we connect with the emulator through the "sensors" qemu pipe service
+/* we connect with the emulator through the "sensors" qemud service
  */
-#define  SENSORS_SERVICE_NAME "pipe:qemud:sensors"
+#define  SENSORS_SERVICE_NAME "sensors"
 
 #define LOG_TAG "QemuSensors"
 
@@ -45,7 +45,7 @@
 
 #define  E(...)  ALOGE(__VA_ARGS__)
 
-#include <system/qemu_pipe.h>
+#include "qemud.h"
 
 /** SENSOR IDS AND NAMES
  **/
@@ -152,9 +152,9 @@
  * from different threads, and poll() is blocking.
  *
  * Note that the emulator's sensors service creates a new client for each
- * connection through qemu_pipe_open(), where each client has its own
+ * connection through qemud_channel_open(), where each client has its own
  * delay and set of activated sensors. This precludes calling
- * qemu_pipe_open() on each request, because a typical emulated system
+ * qemud_channel_open() on each request, because a typical emulated system
  * will do something like:
  *
  * 1) On a first thread, de-activate() all sensors first, then call poll(),
@@ -174,7 +174,7 @@
 static int sensor_device_get_fd_locked(SensorDevice* dev) {
     /* Create connection to service on first call */
     if (dev->fd < 0) {
-        dev->fd = qemu_pipe_open(SENSORS_SERVICE_NAME);
+        dev->fd = qemud_channel_open(SENSORS_SERVICE_NAME);
         if (dev->fd < 0) {
             int ret = -errno;
             E("%s: Could not open connection to service: %s", __FUNCTION__,
@@ -196,7 +196,7 @@
     }
 
     int ret = 0;
-    if (qemu_pipe_frame_send(fd, cmd, strlen(cmd)) < 0) {
+    if (qemud_channel_send(fd, cmd, strlen(cmd)) < 0) {
         ret = -errno;
         E("%s(fd=%d): ERROR: %s", __FUNCTION__, fd, strerror(errno));
     }
@@ -216,9 +216,20 @@
     if (mask) {
         uint32_t i = 31 - __builtin_clz(mask);
         d->pendingSensors &= ~(1U << i);
+        // Copy the structure
         *event = d->sensors[i];
-        event->sensor = i;
-        event->version = sizeof(*event);
+
+        if (d->sensors[i].type == SENSOR_TYPE_META_DATA) {
+            // sensor_device_poll_event_locked() will leave
+            // the meta-data in place until we have it.
+            // Set |type| to something other than META_DATA
+            // so sensor_device_poll_event_locked() can
+            // continue.
+            d->sensors[i].type = SENSOR_TYPE_META_DATA + 1;
+        } else {
+            event->sensor = i;
+            event->version = sizeof(*event);
+        }
 
         D("%s: %d [%f, %f, %f]", __FUNCTION__,
                 i,
@@ -229,7 +240,7 @@
     }
     E("No sensor to return!!! pendingSensors=0x%08x", d->pendingSensors);
     // we may end-up in a busy loop, slow things down, just in case.
-    usleep(100000);
+    usleep(1000);
     return -EINVAL;
 }
 
@@ -267,7 +278,7 @@
 
         /* read the next event */
         char buff[256];
-        int len = qemu_pipe_frame_recv(fd, buff, sizeof(buff) - 1U);
+        int len = qemud_channel_recv(fd, buff, sizeof(buff) - 1U);
         /* re-acquire the lock to modify the device state. */
         pthread_mutex_lock(&dev->lock);
 
@@ -290,10 +301,15 @@
 
         float params[3];
 
+        // If the existing entry for this sensor is META_DATA,
+        // do not overwrite it. We can resume saving sensor
+        // values after that meta data has been received.
+
         /* "acceleration:<x>:<y>:<z>" corresponds to an acceleration event */
         if (sscanf(buff, "acceleration:%g:%g:%g", params+0, params+1, params+2)
                 == 3) {
             new_sensors |= SENSORS_ACCELERATION;
+            if (events[ID_ACCELERATION].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_ACCELERATION].acceleration.x = params[0];
             events[ID_ACCELERATION].acceleration.y = params[1];
             events[ID_ACCELERATION].acceleration.z = params[2];
@@ -306,6 +322,7 @@
         if (sscanf(buff, "orientation:%g:%g:%g", params+0, params+1, params+2)
                 == 3) {
             new_sensors |= SENSORS_ORIENTATION;
+            if (events[ID_ORIENTATION].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_ORIENTATION].orientation.azimuth = params[0];
             events[ID_ORIENTATION].orientation.pitch   = params[1];
             events[ID_ORIENTATION].orientation.roll    = params[2];
@@ -320,6 +337,7 @@
         if (sscanf(buff, "magnetic:%g:%g:%g", params+0, params+1, params+2)
                 == 3) {
             new_sensors |= SENSORS_MAGNETIC_FIELD;
+            if (events[ID_MAGNETIC_FIELD].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_MAGNETIC_FIELD].magnetic.x = params[0];
             events[ID_MAGNETIC_FIELD].magnetic.y = params[1];
             events[ID_MAGNETIC_FIELD].magnetic.z = params[2];
@@ -332,14 +350,16 @@
         /* "temperature:<celsius>" */
         if (sscanf(buff, "temperature:%g", params+0) == 1) {
             new_sensors |= SENSORS_TEMPERATURE;
+            if (events[ID_TEMPERATURE].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_TEMPERATURE].temperature = params[0];
-            events[ID_TEMPERATURE].type = SENSOR_TYPE_TEMPERATURE;
+            events[ID_TEMPERATURE].type = SENSOR_TYPE_AMBIENT_TEMPERATURE;
             continue;
         }
- 
+
         /* "proximity:<value>" */
         if (sscanf(buff, "proximity:%g", params+0) == 1) {
             new_sensors |= SENSORS_PROXIMITY;
+            if (events[ID_PROXIMITY].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_PROXIMITY].distance = params[0];
             events[ID_PROXIMITY].type = SENSOR_TYPE_PROXIMITY;
             continue;
@@ -347,6 +367,7 @@
         /* "light:<lux>" */
         if (sscanf(buff, "light:%g", params+0) == 1) {
             new_sensors |= SENSORS_LIGHT;
+            if (events[ID_LIGHT].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_LIGHT].light = params[0];
             events[ID_LIGHT].type = SENSOR_TYPE_LIGHT;
             continue;
@@ -355,6 +376,7 @@
         /* "pressure:<hpa>" */
         if (sscanf(buff, "pressure:%g", params+0) == 1) {
             new_sensors |= SENSORS_PRESSURE;
+            if (events[ID_PRESSURE].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_PRESSURE].pressure = params[0];
             events[ID_PRESSURE].type = SENSOR_TYPE_PRESSURE;
             continue;
@@ -363,6 +385,7 @@
         /* "humidity:<percent>" */
         if (sscanf(buff, "humidity:%g", params+0) == 1) {
             new_sensors |= SENSORS_HUMIDITY;
+            if (events[ID_HUMIDITY].type == SENSOR_TYPE_META_DATA) continue;
             events[ID_HUMIDITY].relative_humidity = params[0];
             events[ID_HUMIDITY].type = SENSOR_TYPE_RELATIVE_HUMIDITY;
             continue;
@@ -387,13 +410,28 @@
         dev->pendingSensors |= new_sensors;
         int64_t t = (event_time < 0) ? 0 : event_time * 1000LL;
 
-        /* use the time at the first sync: as the base for later
-         * time values */
+        /* Use the time at the first "sync:" as the base for later
+         * time values.
+         * CTS tests require sensors to return an event timestamp (sync) that is
+         * strictly before the time of the event arrival. We don't actually have
+         * a time synchronization protocol here, and the only data point is the
+         * "sync:" timestamp - which is an emulator's timestamp of a clock that
+         * is synced with the guest clock, and it is only the timestamp after all
+         * events were sent.
+         * To make it work, let's compare the calculated timestamp with current
+         * time and take the lower value - we don't believe in events from the
+         * future anyway.
+         */
+        const int64_t now = now_ns();
+
         if (dev->timeStart == 0) {
-            dev->timeStart  = now_ns();
+            dev->timeStart  = now;
             dev->timeOffset = dev->timeStart - t;
         }
         t += dev->timeOffset;
+        if (t > now) {
+            t = now;
+        }
 
         while (new_sensors) {
             uint32_t i = 31 - __builtin_clz(new_sensors);
@@ -525,6 +563,34 @@
     return ret;
 }
 
+/* flush() implementation for the sensors HAL (API >= 1.1).
+ *
+ * Queues a synthetic META_DATA / FLUSH_COMPLETE event for 'handle' so
+ * the next poll() returns it, as the HAL contract requires. The event
+ * overwrites any pending value for that sensor slot; the poll path
+ * preserves META_DATA entries until they are delivered.
+ * Returns 0 on success, -EINVAL for an out-of-range handle.
+ */
+static int sensor_device_default_flush(
+        struct sensors_poll_device_1* dev0,
+        int handle) {
+
+    SensorDevice* dev = (void*)dev0;
+
+    D("%s: handle=%s (%d)", __FUNCTION__,
+        _sensorIdToName(handle), handle);
+
+    /* Sanity check */
+    if (!ID_CHECK(handle)) {
+        E("%s: bad handle ID", __FUNCTION__);
+        return -EINVAL;
+    }
+
+    /* build the meta-data event under the device lock */
+    pthread_mutex_lock(&dev->lock);
+    dev->sensors[handle].version = META_DATA_VERSION;
+    dev->sensors[handle].type = SENSOR_TYPE_META_DATA;
+    dev->sensors[handle].sensor = 0;
+    dev->sensors[handle].timestamp = 0;
+    dev->sensors[handle].meta_data.sensor = handle;
+    dev->sensors[handle].meta_data.what = META_DATA_FLUSH_COMPLETE;
+    dev->pendingSensors |= (1U << handle);
+    pthread_mutex_unlock(&dev->lock);
+
+    return 0;
+}
+
 static int sensor_device_set_delay(struct sensors_poll_device_t *dev0,
                                    int handle __unused,
                                    int64_t ns)
@@ -546,6 +612,15 @@
     return ret;
 }
 
+/* batch() implementation for the sensors HAL (API >= 1.1).
+ *
+ * The emulated sensors have no hardware FIFO, so batching degenerates
+ * to setting the sampling period; 'flags' and 'max_report_latency_ns'
+ * are intentionally ignored.
+ */
+static int sensor_device_default_batch(
+     struct sensors_poll_device_1* dev,
+     int sensor_handle,
+     int flags,
+     int64_t sampling_period_ns,
+     int64_t max_report_latency_ns) {
+    (void)flags;
+    (void)max_report_latency_ns;
+    /* BUGFIX: sensor_device_set_delay() takes sensors_poll_device_t*,
+     * so cast explicitly instead of passing an incompatible pointer
+     * type (sensors_poll_device_1 begins with a sensors_poll_device_t
+     * per the HAL layout — TODO confirm against hardware/sensors.h) */
+    return sensor_device_set_delay((struct sensors_poll_device_t*)dev,
+                                   sensor_handle, sampling_period_ns);
+}
+
 /** MODULE REGISTRATION SUPPORT
  **
  ** This is required so that hardware/libhardware/hardware.c
@@ -571,6 +646,13 @@
           .maxRange   = 2.8f,
           .resolution = 1.0f/4032.0f,
           .power      = 3.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         },
 
@@ -582,6 +664,13 @@
           .maxRange   = 2000.0f,
           .resolution = 1.0f,
           .power      = 6.7f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         },
 
@@ -593,6 +682,13 @@
           .maxRange   = 360.0f,
           .resolution = 1.0f,
           .power      = 9.7f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         },
 
@@ -600,10 +696,17 @@
           .vendor     = "The Android Open Source Project",
           .version    = 1,
           .handle     = ID_TEMPERATURE,
-          .type       = SENSOR_TYPE_TEMPERATURE,
+          .type       = SENSOR_TYPE_AMBIENT_TEMPERATURE,
           .maxRange   = 80.0f,
           .resolution = 1.0f,
           .power      = 0.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         },
 
@@ -615,6 +718,13 @@
           .maxRange   = 1.0f,
           .resolution = 1.0f,
           .power      = 20.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_WAKE_UP | SENSOR_FLAG_ON_CHANGE_MODE,
           .reserved   = {}
         },
 
@@ -626,6 +736,13 @@
           .maxRange   = 40000.0f,
           .resolution = 1.0f,
           .power      = 20.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_ON_CHANGE_MODE,
           .reserved   = {}
         },
 
@@ -637,6 +754,13 @@
           .maxRange   = 800.0f,
           .resolution = 1.0f,
           .power      = 20.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         },
 
@@ -648,6 +772,13 @@
           .maxRange   = 100.0f,
           .resolution = 1.0f,
           .power      = 20.0f,
+          .minDelay   = 10000,
+          .maxDelay   = 60 * 1000 * 1000,
+          .fifoReservedEventCount = 0,
+          .fifoMaxEventCount =   0,
+          .stringType =         0,
+          .requiredPermission = 0,
+          .flags = SENSOR_FLAG_CONTINUOUS_MODE,
           .reserved   = {}
         }
 };
@@ -657,23 +788,22 @@
 static int sensors__get_sensors_list(struct sensors_module_t* module __unused,
         struct sensor_t const** list)
 {
-    int  fd = qemu_pipe_open(SENSORS_SERVICE_NAME);
+    int  fd = qemud_channel_open(SENSORS_SERVICE_NAME);
     char buffer[12];
     int  mask, nn, count;
     int  ret = 0;
 
     if (fd < 0) {
-        E("%s: no qemu pipe connection", __FUNCTION__);
+        E("%s: no qemud connection", __FUNCTION__);
         goto out;
     }
-    static const char kListSensors[] = "list-sensors";
-    ret = qemu_pipe_frame_send(fd, kListSensors, sizeof(kListSensors) - 1);
+    ret = qemud_channel_send(fd, "list-sensors", -1);
     if (ret < 0) {
         E("%s: could not query sensor list: %s", __FUNCTION__,
           strerror(errno));
         goto out;
     }
-    ret = qemu_pipe_frame_recv(fd, buffer, sizeof buffer-1);
+    ret = qemud_channel_recv(fd, buffer, sizeof buffer-1);
     if (ret < 0) {
         E("%s: could not receive sensor list: %s", __FUNCTION__,
           strerror(errno));
@@ -687,7 +817,6 @@
     for (nn = 0; nn < MAX_NUM_SENSORS; nn++) {
         if (((1 << nn) & mask) == 0)
             continue;
-
         sSensorList[count++] = sSensorListInit[nn];
     }
     D("%s: returned %d sensors (mask=%d)", __FUNCTION__, count, mask);
@@ -717,13 +846,23 @@
         memset(dev, 0, sizeof(*dev));
 
         dev->device.common.tag     = HARDWARE_DEVICE_TAG;
-        dev->device.common.version = SENSORS_DEVICE_API_VERSION_1_0;
+        dev->device.common.version = SENSORS_DEVICE_API_VERSION_1_3;
         dev->device.common.module  = (struct hw_module_t*) module;
         dev->device.common.close   = sensor_device_close;
         dev->device.poll           = sensor_device_poll;
         dev->device.activate       = sensor_device_activate;
         dev->device.setDelay       = sensor_device_set_delay;
 
+        // (dev->sensors[i].type == SENSOR_TYPE_META_DATA) is
+        // sticky. Don't start off with that setting.
+        for (int idx = 0; idx < MAX_NUM_SENSORS; idx++) {
+            dev->sensors[idx].type = SENSOR_TYPE_META_DATA + 1;
+        }
+
+        // Version 1.3-specific functions
+        dev->device.batch       = sensor_device_default_batch;
+        dev->device.flush       = sensor_device_default_flush;
+
         dev->fd = -1;
         pthread_mutex_init(&dev->lock, NULL);
 
@@ -742,7 +881,7 @@
     .common = {
         .tag = HARDWARE_MODULE_TAG,
         .version_major = 1,
-        .version_minor = 0,
+        .version_minor = 3,
         .id = SENSORS_HARDWARE_MODULE_ID,
         .name = "Goldfish SENSORS Module",
         .author = "The Android Open Source Project",
diff --git a/vibrator/Android.mk b/vibrator/Android.mk
index 7253cbc..dba243f 100644
--- a/vibrator/Android.mk
+++ b/vibrator/Android.mk
@@ -21,8 +21,9 @@
 # HAL module implemenation stored in
 # hw/<VIBRATOR_HARDWARE_MODULE_ID>.goldfish.so
 LOCAL_MODULE_RELATIVE_PATH := hw
-LOCAL_SRC_FILES := vibrator_qemu.c
-LOCAL_SHARED_LIBRARIES := liblog libhardware
+LOCAL_C_INCLUDES := hardware/libhardware hardware/libhardware_legacy $(LOCAL_PATH)/../include
+LOCAL_SRC_FILES := qemu.c vibrator_qemu.c
+LOCAL_SHARED_LIBRARIES := liblog libhardware libhardware_legacy
 LOCAL_MODULE_TAGS := optional
 
-include $(BUILD_SHARED_LIBRARY)
+# include $(BUILD_SHARED_LIBRARY)
diff --git a/vibrator/qemu.c b/vibrator/qemu.c
new file mode 100644
index 0000000..8b64b57
--- /dev/null
+++ b/vibrator/qemu.c
@@ -0,0 +1,401 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* this file contains various functions used by all libhardware modules
+ * that support QEMU emulation
+ */
+#include "qemu.h"
+#define  LOG_TAG  "hardware-qemu"
+#include <cutils/log.h>
+#include <cutils/properties.h>
+#include <cutils/sockets.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <termios.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#define  QEMU_DEBUG  0
+
+#if QEMU_DEBUG
+#  define  D(...)   ALOGD(__VA_ARGS__)
+#else
+#  define  D(...)   ((void)0)
+#endif
+
+#include "qemu_pipe.h"
+
+int
+qemu_check(void)
+{
+    static int  in_qemu = -1;
+
+    if (__builtin_expect(in_qemu < 0,0)) {
+        char  propBuf[PROPERTY_VALUE_MAX];
+        property_get("ro.kernel.qemu", propBuf, "");
+        in_qemu = (propBuf[0] == '1');
+    }
+    return in_qemu;
+}
+
+static int
+qemu_fd_write( int  fd, const char*  cmd, int  len )
+{
+    int  len2;
+    do {
+        len2 = write(fd, cmd, len);
+    } while (len2 < 0 && errno == EINTR);
+    return len2;
+}
+
+static int
+qemu_fd_read( int  fd, char*  buff, int  len )
+{
+    int  len2;
+    do {
+        len2 = read(fd, buff, len);
+    } while (len2 < 0 && errno == EINTR);
+    return len2;
+}
+
+static int
+qemu_channel_open_qemud_pipe( QemuChannel*  channel,
+                              const char*   name )
+{
+    int   fd;
+    char  pipe_name[512];
+
+    snprintf(pipe_name, sizeof(pipe_name), "qemud:%s", name);
+    fd = qemu_pipe_open(pipe_name);
+    if (fd < 0) {
+        D("no qemud pipe: %s", strerror(errno));
+        return -1;
+    }
+
+    channel->is_qemud = 1;
+    channel->fd       = fd;
+    return 0;
+}
+
+static int
+qemu_channel_open_qemud( QemuChannel*  channel,
+                         const char*   name )
+{
+    int   fd, ret, namelen = strlen(name);
+    char  answer[2];
+
+    fd = socket_local_client( "qemud",
+                              ANDROID_SOCKET_NAMESPACE_RESERVED,
+                              SOCK_STREAM );
+    if (fd < 0) {
+        D("no qemud control socket: %s", strerror(errno));
+        return -1;
+    }
+
+    /* send service name to connect */
+    if (qemu_fd_write(fd, name, namelen) != namelen) {
+        D("can't send service name to qemud: %s",
+           strerror(errno));
+        close(fd);
+        return -1;
+    }
+
+    /* read answer from daemon */
+    if (qemu_fd_read(fd, answer, 2) != 2 ||
+        answer[0] != 'O' || answer[1] != 'K') {
+        D("can't connect to %s service through qemud", name);
+        close(fd);
+        return -1;
+    }
+
+    channel->is_qemud = 1;
+    channel->fd       = fd;
+    return 0;
+}
+
+
+static int
+qemu_channel_open_qemud_old( QemuChannel*  channel,
+                             const char*   name )
+{
+    int  fd;
+
+    snprintf(channel->device, sizeof channel->device,
+                "qemud_%s", name);
+
+    fd = socket_local_client( channel->device,
+                              ANDROID_SOCKET_NAMESPACE_RESERVED,
+                              SOCK_STREAM );
+    if (fd < 0) {
+        D("no '%s' control socket available: %s",
+            channel->device, strerror(errno));
+        return -1;
+    }
+
+    close(fd);
+    channel->is_qemud_old = 1;
+    return 0;
+}
+
+
+static int
+qemu_channel_open_tty( QemuChannel*  channel,
+                       const char*   name,
+                       int           mode )
+{
+    char   key[PROPERTY_KEY_MAX];
+    char   prop[PROPERTY_VALUE_MAX];
+    int    ret;
+
+    ret = snprintf(key, sizeof key, "ro.kernel.android.%s", name);
+    if (ret >= (int)sizeof key)
+        return -1;
+
+    if (property_get(key, prop, "") == 0) {
+        D("no kernel-provided %s device name", name);
+        return -1;
+    }
+
+    ret = snprintf(channel->device, sizeof channel->device,
+                    "/dev/%s", prop);
+    if (ret >= (int)sizeof channel->device) {
+        D("%s device name too long: '%s'", name, prop);
+        return -1;
+    }
+
+    channel->is_tty = !memcmp("/dev/tty", channel->device, 8);
+    return 0;
+}
+
+int
+qemu_channel_open( QemuChannel*  channel,
+                   const char*   name,
+                   int           mode )
+{
+    int  fd = -1;
+
+    /* initialize the channel if needed */
+    if (!channel->is_inited)
+    {
+        channel->is_inited = 1;
+
+        do {
+            if (qemu_channel_open_qemud_pipe(channel, name) == 0)
+                break;
+
+            if (qemu_channel_open_qemud(channel, name) == 0)
+                break;
+
+            if (qemu_channel_open_qemud_old(channel, name) == 0)
+                break;
+
+            if (qemu_channel_open_tty(channel, name, mode) == 0)
+                break;
+
+            channel->is_available = 0;
+            return -1;
+        } while (0);
+
+        channel->is_available = 1;
+    }
+
+    /* try to open the file */
+    if (!channel->is_available) {
+        errno = ENOENT;
+        return -1;
+    }
+
+    if (channel->is_qemud) {
+        return dup(channel->fd);
+    }
+
+    if (channel->is_qemud_old) {
+        do {
+            fd = socket_local_client( channel->device,
+                                      ANDROID_SOCKET_NAMESPACE_RESERVED,
+                                      SOCK_STREAM );
+        } while (fd < 0 && errno == EINTR);
+    }
+    else /* /dev/ttySn ? */
+    {
+        do {
+            fd = open(channel->device, mode);
+        } while (fd < 0 && errno == EINTR);
+
+        /* disable ECHO on serial lines */
+        if (fd >= 0 && channel->is_tty) {
+            struct termios  ios;
+            tcgetattr( fd, &ios );
+            ios.c_lflag = 0;  /* disable ECHO, ICANON, etc... */
+            tcsetattr( fd, TCSANOW, &ios );
+        }
+    }
+    return fd;
+}
+
+
+static int
+qemu_command_vformat( char*        buffer,
+                      int          buffer_size,
+                      const char*  format,
+                      va_list      args )
+{
+    char     header[5];
+    int      len;
+
+    if (buffer_size < 6)
+        return -1;
+
+    len = vsnprintf(buffer+4, buffer_size-4, format, args);
+    if (len >= buffer_size-4)
+        return -1;
+
+    snprintf(header, sizeof header, "%04x", len);
+    memcpy(buffer, header, 4);
+    return len + 4;
+}
+
+extern int
+qemu_command_format( char*        buffer,
+                     int          buffer_size,
+                     const char*  format,
+                     ... )
+{
+    va_list  args;
+    int      ret;
+
+    va_start(args, format);
+    ret = qemu_command_vformat(buffer, buffer_size, format, args);
+    va_end(args);
+    return ret;
+}
+
+
+static int
+qemu_control_fd(void)
+{
+    static QemuChannel  channel[1];
+    int                 fd;
+
+    fd = qemu_channel_open( channel, "hw-control", O_RDWR );
+    if (fd < 0) {
+        D("%s: could not open control channel: %s", __FUNCTION__,
+          strerror(errno));
+    }
+    return fd;
+}
+
+static int
+qemu_control_send(const char*  cmd, int  len)
+{
+    int  fd, len2;
+
+    if (len < 0) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    fd = qemu_control_fd();
+    if (fd < 0)
+        return -1;
+
+    len2 = qemu_fd_write(fd, cmd, len);
+    close(fd);
+    if (len2 != len) {
+        D("%s: could not send everything %d < %d",
+          __FUNCTION__, len2, len);
+        return -1;
+    }
+    return 0;
+}
+
+
+int
+qemu_control_command( const char*  fmt, ... )
+{
+    va_list  args;
+    char     command[256];
+    int      len, fd;
+
+    va_start(args, fmt);
+    len = qemu_command_vformat( command, sizeof command, fmt, args );
+    va_end(args);
+
+    if (len < 0 || len >= (int)sizeof command) {
+        if (len < 0) {
+            D("%s: could not send: %s", __FUNCTION__, strerror(errno));
+        } else {
+            D("%s: too large %d > %d", __FUNCTION__, len, (int)(sizeof command));
+        }
+        errno = EINVAL;
+        return -1;
+    }
+
+    return qemu_control_send( command, len );
+}
+
+extern int  qemu_control_query( const char*  question, int  questionlen,
+                                char*        answer,   int  answersize )
+{
+    int   ret, fd, len, result = -1;
+    char  header[5], *end;
+
+    if (questionlen <= 0) {
+        errno = EINVAL;
+        return -1;
+    }
+
+    fd = qemu_control_fd();
+    if (fd < 0)
+        return -1;
+
+    ret = qemu_fd_write( fd, question, questionlen );
+    if (ret != questionlen) {
+        D("%s: could not write all: %d < %d", __FUNCTION__,
+          ret, questionlen);
+        goto Exit;
+    }
+
+    /* read a 4-byte header giving the length of the following content */
+    ret = qemu_fd_read( fd, header, 4 );
+    if (ret != 4) {
+        D("%s: could not read header (%d != 4)",
+          __FUNCTION__, ret);
+        goto Exit;
+    }
+
+    header[4] = 0;
+    len = strtol( header, &end,  16 );
+    if ( len < 0 || end == NULL || end != header+4 || len > answersize ) {
+        D("%s: could not parse header: '%s'",
+          __FUNCTION__, header);
+        goto Exit;
+    }
+
+    /* read the answer */
+    ret = qemu_fd_read( fd, answer, len );
+    if (ret != len) {
+        D("%s: could not read all of answer %d < %d",
+          __FUNCTION__, ret, len);
+        goto Exit;
+    }
+
+    result = len;
+
+Exit:
+    close(fd);
+    return result;
+}
diff --git a/vibrator/vibrator_qemu.c b/vibrator/vibrator_qemu.c
index 59085f7..da28484 100644
--- a/vibrator/vibrator_qemu.c
+++ b/vibrator/vibrator_qemu.c
@@ -20,29 +20,23 @@
 #include <cutils/log.h>
 
 #define QEMU_HARDWARE
+#include "qemu.h"
 #include <hardware/hardware.h>
 #include <hardware/vibrator.h>
-#include <system/qemu_pipe.h>
 
 static int sendit(unsigned int timeout_ms)
 {
-    static int pipe_fd = -2;
-    if (pipe_fd < -1) {
-        pipe_fd = qemu_pipe_open("pipe:qemud:hw-control");
+    if (qemu_check()) {
+        if (qemu_control_command("vibrator:%u", timeout_ms) < 0) {
+            return -errno;
+        }
+        return 0;
     }
-    if (pipe_fd < 0) {
-        return -ENOSYS;
-    }
-    char buff[16];
-    snprintf(buff, sizeof(buff), "vibrator:%u", timeout_ms);
-    if (qemu_pipe_frame_send(pipe_fd, buff, strlen(buff)) < 0) {
-        return -errno;
-    }
-    return 0;
+
+    return -ENOSYS;
 }
 
-static int qemu_vibra_on(vibrator_device_t* vibradev __unused,
-                         unsigned int timeout_ms)
+static int qemu_vibra_on(vibrator_device_t* vibradev __unused, unsigned int timeout_ms)
 {
     return sendit(timeout_ms);
 }