Merge "DO NOT MERGE - Merge Android 10 into master"
diff --git a/alsa_utils/alsa_device_profile.c b/alsa_utils/alsa_device_profile.c
index b02d3b7..6b76bbe 100644
--- a/alsa_utils/alsa_device_profile.c
+++ b/alsa_utils/alsa_device_profile.c
@@ -136,12 +136,25 @@
 unsigned profile_get_default_sample_rate(const alsa_device_profile* profile)
 {
     /*
+     * This is probably a poor algorithm. The default sample rate should be the highest (within
+     * limits) rate that is available for both input and output. HOWEVER, the profile has only
+     * one or the other, so that will need to be done at a higher level, like in the HAL.
+     */
+    /*
     * TODO this won't be right in general. We should store a preferred rate as we are scanning.
      * But right now it will return the highest rate, which may be correct.
      */
     return profile_is_valid(profile) ? profile->sample_rates[0] : DEFAULT_SAMPLE_RATE;
 }
 
+unsigned profile_get_highest_sample_rate(const alsa_device_profile* profile) {
+    /* The highest sample rate is always stored in the first element of sample_rates.
+     * Note that profile_reset() initializes the first element of sample_rates to 0,
+     * which is what we want to return if the profile has not been read anyway.
+     */
+    return profile->sample_rates[0];
+}
+
 bool profile_is_sample_rate_valid(const alsa_device_profile* profile, unsigned rate)
 {
     if (profile_is_valid(profile)) {
@@ -154,6 +167,7 @@
 
         return false;
     } else {
+        ALOGW("**** PROFILE NOT VALID!");
         return rate == DEFAULT_SAMPLE_RATE;
     }
 }
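The comment above profile_get_default_sample_rate defers the real rate selection to a
higher level. A minimal sketch of what that could look like in the HAL, using the new
profile_get_highest_sample_rate(); the helper name and the lower-of-the-two-highest
policy are assumptions, not part of this change:

    static unsigned hal_pick_default_rate(const alsa_device_profile *in,
                                          const alsa_device_profile *out)
    {
        const unsigned in_rate = profile_get_highest_sample_rate(in);
        const unsigned out_rate = profile_get_highest_sample_rate(out);
        if (in_rate == 0) return out_rate;   /* 0 means the profile was not read */
        if (out_rate == 0) return in_rate;
        return in_rate < out_rate ? in_rate : out_rate; /* valid in both directions */
    }
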
diff --git a/alsa_utils/alsa_device_proxy.c b/alsa_utils/alsa_device_proxy.c
index e64a42e..d97ddbc 100644
--- a/alsa_utils/alsa_device_proxy.c
+++ b/alsa_utils/alsa_device_proxy.c
@@ -24,6 +24,8 @@
 #include <stdio.h>
 #include <string.h>
 
+#include <audio_utils/clock.h>
+
 #include "include/alsa_device_proxy.h"
 
 #include "include/alsa_logging.h"
@@ -231,6 +233,28 @@
     return ret;
 }
 
+int proxy_get_capture_position(const alsa_device_proxy * proxy,
+        int64_t *frames, int64_t *time)
+{
+    int ret = -ENOSYS;
+    unsigned int avail;
+    struct timespec timestamp;
+    // TODO: add logging for tinyalsa errors.
+    if (proxy->pcm != NULL
+            && pcm_get_htimestamp(proxy->pcm, &avail, &timestamp) == 0) {
+        const size_t kernel_buffer_size =
+                proxy->alsa_config.period_size * proxy->alsa_config.period_count;
+        if (avail > kernel_buffer_size) {
+            ALOGE("available frames(%u) > buffer size(%zu)", avail, kernel_buffer_size);
+        } else {
+            *frames = proxy->transferred + avail;
+            *time = audio_utils_ns_from_timespec(&timestamp);
+            ret = 0;
+        }
+    }
+    return ret;
+}
+
 /*
  * I/O
  */
@@ -243,9 +267,13 @@
     return ret;
 }
 
-int proxy_read(const alsa_device_proxy * proxy, void *data, unsigned int count)
+int proxy_read(alsa_device_proxy * proxy, void *data, unsigned int count)
 {
-    return pcm_read(proxy->pcm, data, count);
+    int ret = pcm_read(proxy->pcm, data, count);
+    if (ret == 0) {
+        proxy->transferred += count / proxy->frame_size;
+    }
+    return ret;
 }
 
 /*
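A sketch of how a capture stream could forward to the new call; the stream struct and
its 'proxy' member are assumptions:

    int64_t frames = 0, time_ns = 0;
    if (proxy_get_capture_position(&stream->proxy, &frames, &time_ns) == 0) {
        /* frames: total frames consumed via proxy_read() plus frames still
         * readable in the kernel buffer; time_ns: the matching CLOCK_MONOTONIC
         * timestamp from pcm_get_htimestamp(), in nanoseconds */
    }
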
diff --git a/alsa_utils/include/alsa_device_profile.h b/alsa_utils/include/alsa_device_profile.h
index 8f581d9..117d6fa 100644
--- a/alsa_utils/include/alsa_device_profile.h
+++ b/alsa_utils/include/alsa_device_profile.h
@@ -74,6 +74,7 @@
 
 /* Sample Rate Methods */
 unsigned profile_get_default_sample_rate(const alsa_device_profile* profile);
+unsigned profile_get_highest_sample_rate(const alsa_device_profile* profile);
 bool profile_is_sample_rate_valid(const alsa_device_profile* profile, unsigned rate);
 
 /* Format Methods */
diff --git a/alsa_utils/include/alsa_device_proxy.h b/alsa_utils/include/alsa_device_proxy.h
index 64565e1..49f7019 100644
--- a/alsa_utils/include/alsa_device_proxy.h
+++ b/alsa_utils/include/alsa_device_proxy.h
@@ -40,6 +40,8 @@
 void proxy_close(alsa_device_proxy * proxy);
 int proxy_get_presentation_position(const alsa_device_proxy * proxy,
         uint64_t *frames, struct timespec *timestamp);
+int proxy_get_capture_position(const alsa_device_proxy * proxy,
+        int64_t *frames, int64_t *time);
 
 /* Attributes */
 unsigned proxy_get_sample_rate(const alsa_device_proxy * proxy);
@@ -58,7 +60,7 @@
 
 /* I/O */
 int proxy_write(alsa_device_proxy * proxy, const void *data, unsigned int count);
-int proxy_read(const alsa_device_proxy * proxy, void *data, unsigned int count);
+int proxy_read(alsa_device_proxy * proxy, void *data, unsigned int count);
 
 /* Debugging */
 void proxy_dump(const alsa_device_proxy * proxy, int fd);
diff --git a/audio/include/system/audio-base-utils.h b/audio/include/system/audio-base-utils.h
index 213d8d6..d3f78f4 100644
--- a/audio/include/system/audio-base-utils.h
+++ b/audio/include/system/audio-base-utils.h
@@ -36,7 +36,7 @@
   /** Total number of streams. */
     AUDIO_STREAM_CNT          = AUDIO_STREAM_PATCH + 1,
 
-    AUDIO_SOURCE_MAX          = AUDIO_SOURCE_UNPROCESSED,
+    AUDIO_SOURCE_MAX          = AUDIO_SOURCE_VOICE_PERFORMANCE,
     AUDIO_SOURCE_CNT          = AUDIO_SOURCE_MAX + 1,
 
     AUDIO_MODE_MAX            = AUDIO_MODE_IN_COMMUNICATION,
@@ -66,7 +66,9 @@
                                 AUDIO_CHANNEL_OUT_TOP_BACK_CENTER |
                                 AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT |
                                 AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT |
-                                AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT,
+                                AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT |
+                                AUDIO_CHANNEL_OUT_HAPTIC_B |
+                                AUDIO_CHANNEL_OUT_HAPTIC_A,
 
     AUDIO_CHANNEL_IN_ALL      = AUDIO_CHANNEL_IN_LEFT |
                                 AUDIO_CHANNEL_IN_RIGHT |
@@ -89,6 +91,9 @@
                                 AUDIO_CHANNEL_IN_TOP_LEFT |
                                 AUDIO_CHANNEL_IN_TOP_RIGHT,
 
+    AUDIO_CHANNEL_HAPTIC_ALL  = AUDIO_CHANNEL_OUT_HAPTIC_B |
+                                AUDIO_CHANNEL_OUT_HAPTIC_A,
+
     AUDIO_DEVICE_OUT_ALL      = AUDIO_DEVICE_OUT_EARPIECE |
                                 AUDIO_DEVICE_OUT_SPEAKER |
                                 AUDIO_DEVICE_OUT_WIRED_HEADSET |
@@ -156,6 +161,8 @@
                                 AUDIO_DEVICE_IN_PROXY |
                                 AUDIO_DEVICE_IN_USB_HEADSET |
                                 AUDIO_DEVICE_IN_BLUETOOTH_BLE |
+                                AUDIO_DEVICE_IN_HDMI_ARC |
+                                AUDIO_DEVICE_IN_ECHO_REFERENCE |
                                 AUDIO_DEVICE_IN_DEFAULT,
 
     AUDIO_DEVICE_IN_ALL_SCO   = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET,
@@ -180,4 +187,10 @@
     AUDIO_CHANNEL_OUT_3POINT1               = 0xFu,     // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER | OUT_LOW_FREQUENCY
 };
 
+// Microphone Field Dimension Constants
+#define MIC_FIELD_DIMENSION_WIDE (-1.0f)
+#define MIC_FIELD_DIMENSION_NORMAL (0.0f)
+#define MIC_FIELD_DIMENSION_NARROW (1.0f)
+#define MIC_FIELD_DIMENSION_DEFAULT MIC_FIELD_DIMENSION_NORMAL
+
 #endif  // ANDROID_AUDIO_BASE_UTILS_H
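A hypothetical range check built on the new constants (not part of this header);
field dimension ("zoom") values are expected to lie within
[MIC_FIELD_DIMENSION_WIDE, MIC_FIELD_DIMENSION_NARROW]:

    static inline bool mic_field_dimension_is_valid(float zoom)
    {
        return zoom >= MIC_FIELD_DIMENSION_WIDE &&
               zoom <= MIC_FIELD_DIMENSION_NARROW;
    }
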
diff --git a/audio/include/system/audio-base.h b/audio/include/system/audio-base.h
index 3d0638d..54b4cb2 100644
--- a/audio/include/system/audio-base.h
+++ b/audio/include/system/audio-base.h
@@ -1,6 +1,6 @@
 // This file is autogenerated by hidl-gen
 // then manually edited for retrocompatibility
-// Source: android.hardware.audio.common@4.0
+// Source: android.hardware.audio.common@5.0
 // Root: android.hardware:hardware/interfaces
 
 #ifndef HIDL_GENERATED_ANDROID_HARDWARE_AUDIO_COMMON_V4_0_EXPORTED_CONSTANTS_H_
@@ -50,6 +50,8 @@
     AUDIO_SOURCE_VOICE_COMMUNICATION = 7,
     AUDIO_SOURCE_REMOTE_SUBMIX = 8,
     AUDIO_SOURCE_UNPROCESSED = 9,
+    AUDIO_SOURCE_VOICE_PERFORMANCE = 10,
+    AUDIO_SOURCE_ECHO_REFERENCE = 1997,
     AUDIO_SOURCE_FM_TUNER = 1998,
 #ifndef AUDIO_NO_SYSTEM_DECLARATIONS
     /**
@@ -107,6 +109,12 @@
     AUDIO_FORMAT_AC4                 = 0x22000000u,
     AUDIO_FORMAT_LDAC                = 0x23000000u,
     AUDIO_FORMAT_MAT                 = 0x24000000u,
+    AUDIO_FORMAT_AAC_LATM            = 0x25000000u,
+    AUDIO_FORMAT_CELT                = 0x26000000u,
+    AUDIO_FORMAT_APTX_ADAPTIVE       = 0x27000000u,
+    AUDIO_FORMAT_LHDC                = 0x28000000u,
+    AUDIO_FORMAT_LHDC_LL             = 0x29000000u,
+    AUDIO_FORMAT_APTX_TWSP           = 0x2A000000u,
     AUDIO_FORMAT_MAIN_MASK           = 0xFF000000u,
     AUDIO_FORMAT_SUB_MASK            = 0x00FFFFFFu,
 
@@ -171,6 +179,9 @@
     AUDIO_FORMAT_AAC_ADTS_HE_V2        = 0x1e000100u, // (AAC_ADTS | AAC_SUB_HE_V2)
     AUDIO_FORMAT_AAC_ADTS_ELD          = 0x1e000200u, // (AAC_ADTS | AAC_SUB_ELD)
     AUDIO_FORMAT_AAC_ADTS_XHE          = 0x1e000300u, // (AAC_ADTS | AAC_SUB_XHE)
+    AUDIO_FORMAT_AAC_LATM_LC           = 0x25000002u, // (AAC_LATM | AAC_SUB_LC)
+    AUDIO_FORMAT_AAC_LATM_HE_V1        = 0x25000010u, // (AAC_LATM | AAC_SUB_HE_V1)
+    AUDIO_FORMAT_AAC_LATM_HE_V2        = 0x25000100u, // (AAC_LATM | AAC_SUB_HE_V2)
     AUDIO_FORMAT_E_AC3_JOC             = 0xA000001u,  // (E_AC3 | E_AC3_SUB_JOC)
     AUDIO_FORMAT_MAT_1_0               = 0x24000001u, // (MAT | MAT_SUB_1_0)
     AUDIO_FORMAT_MAT_2_0               = 0x24000002u, // (MAT | MAT_SUB_2_0)
@@ -208,13 +219,15 @@
     AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
     AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
     AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    AUDIO_CHANNEL_OUT_HAPTIC_A              = 0x20000000u,
+    AUDIO_CHANNEL_OUT_HAPTIC_B              = 0x10000000u,
     AUDIO_CHANNEL_OUT_MONO                  = 0x1u,     // OUT_FRONT_LEFT
     AUDIO_CHANNEL_OUT_STEREO                = 0x3u,     // OUT_FRONT_LEFT | OUT_FRONT_RIGHT
     AUDIO_CHANNEL_OUT_2POINT1               = 0xBu,     // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_LOW_FREQUENCY
     AUDIO_CHANNEL_OUT_2POINT0POINT2         = 0xC0003u, // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT
     AUDIO_CHANNEL_OUT_2POINT1POINT2         = 0xC000Bu, // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT | OUT_LOW_FREQUENCY
-    AUDIO_CHANNEL_OUT_3POINT0POINT2         = 0xC0007u, // OUT_FRONT_LEFT | OUT_FRONT_CENTER | OUT_FRONT_RIGHT | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT
-    AUDIO_CHANNEL_OUT_3POINT1POINT2         = 0xC000Fu, // OUT_FRONT_LEFT | OUT_FRONT_CENTER | OUT_FRONT_RIGHT | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT | OUT_LOW_FREQUENCY
+    AUDIO_CHANNEL_OUT_3POINT0POINT2         = 0xC0007u, // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT
+    AUDIO_CHANNEL_OUT_3POINT1POINT2         = 0xC000Fu, // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT | OUT_LOW_FREQUENCY
     AUDIO_CHANNEL_OUT_QUAD                  = 0x33u,    // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_BACK_LEFT | OUT_BACK_RIGHT
     AUDIO_CHANNEL_OUT_QUAD_BACK             = 0x33u,    // OUT_QUAD
     AUDIO_CHANNEL_OUT_QUAD_SIDE             = 0x603u,   // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_SIDE_LEFT | OUT_SIDE_RIGHT
@@ -229,6 +242,11 @@
     AUDIO_CHANNEL_OUT_7POINT1               = 0x63Fu,   // OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_FRONT_CENTER | OUT_LOW_FREQUENCY | OUT_BACK_LEFT | OUT_BACK_RIGHT | OUT_SIDE_LEFT | OUT_SIDE_RIGHT
     AUDIO_CHANNEL_OUT_7POINT1POINT2         = 0xC063Fu, // OUT_7POINT1 | OUT_TOP_SIDE_LEFT | OUT_TOP_SIDE_RIGHT
     AUDIO_CHANNEL_OUT_7POINT1POINT4         = 0x2D63Fu, // OUT_7POINT1 | OUT_TOP_FRONT_LEFT | OUT_TOP_FRONT_RIGHT | OUT_TOP_BACK_LEFT | OUT_TOP_BACK_RIGHT
+    AUDIO_CHANNEL_OUT_MONO_HAPTIC_A         = 0x20000001u,// OUT_FRONT_LEFT | OUT_HAPTIC_A
+    AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A       = 0x20000003u,// OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_HAPTIC_A
+    AUDIO_CHANNEL_OUT_HAPTIC_AB             = 0x30000000u,// OUT_HAPTIC_A | OUT_HAPTIC_B
+    AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB        = 0x30000001u,// OUT_FRONT_LEFT | OUT_HAPTIC_A | OUT_HAPTIC_B
+    AUDIO_CHANNEL_OUT_STEREO_HAPTIC_AB      = 0x30000003u,// OUT_FRONT_LEFT | OUT_FRONT_RIGHT | OUT_HAPTIC_A | OUT_HAPTIC_B
 
     AUDIO_CHANNEL_IN_LEFT                   = 0x4u,
     AUDIO_CHANNEL_IN_RIGHT                  = 0x8u,
@@ -273,6 +291,22 @@
     AUDIO_CHANNEL_INDEX_MASK_6              = 0x8000003Fu, // INDEX_HDR | (1 << 6) - 1
     AUDIO_CHANNEL_INDEX_MASK_7              = 0x8000007Fu, // INDEX_HDR | (1 << 7) - 1
     AUDIO_CHANNEL_INDEX_MASK_8              = 0x800000FFu, // INDEX_HDR | (1 << 8) - 1
+    AUDIO_CHANNEL_INDEX_MASK_9              = 0x800001FFu, // INDEX_HDR | (1 << 9) - 1
+    AUDIO_CHANNEL_INDEX_MASK_10             = 0x800003FFu, // INDEX_HDR | (1 << 10) - 1
+    AUDIO_CHANNEL_INDEX_MASK_11             = 0x800007FFu, // INDEX_HDR | (1 << 11) - 1
+    AUDIO_CHANNEL_INDEX_MASK_12             = 0x80000FFFu, // INDEX_HDR | (1 << 12) - 1
+    AUDIO_CHANNEL_INDEX_MASK_13             = 0x80001FFFu, // INDEX_HDR | (1 << 13) - 1
+    AUDIO_CHANNEL_INDEX_MASK_14             = 0x80003FFFu, // INDEX_HDR | (1 << 14) - 1
+    AUDIO_CHANNEL_INDEX_MASK_15             = 0x80007FFFu, // INDEX_HDR | (1 << 15) - 1
+    AUDIO_CHANNEL_INDEX_MASK_16             = 0x8000FFFFu, // INDEX_HDR | (1 << 16) - 1
+    AUDIO_CHANNEL_INDEX_MASK_17             = 0x8001FFFFu, // INDEX_HDR | (1 << 17) - 1
+    AUDIO_CHANNEL_INDEX_MASK_18             = 0x8003FFFFu, // INDEX_HDR | (1 << 18) - 1
+    AUDIO_CHANNEL_INDEX_MASK_19             = 0x8007FFFFu, // INDEX_HDR | (1 << 19) - 1
+    AUDIO_CHANNEL_INDEX_MASK_20             = 0x800FFFFFu, // INDEX_HDR | (1 << 20) - 1
+    AUDIO_CHANNEL_INDEX_MASK_21             = 0x801FFFFFu, // INDEX_HDR | (1 << 21) - 1
+    AUDIO_CHANNEL_INDEX_MASK_22             = 0x803FFFFFu, // INDEX_HDR | (1 << 22) - 1
+    AUDIO_CHANNEL_INDEX_MASK_23             = 0x807FFFFFu, // INDEX_HDR | (1 << 23) - 1
+    AUDIO_CHANNEL_INDEX_MASK_24             = 0x80FFFFFFu, // INDEX_HDR | (1 << 24) - 1
 };
 
 typedef enum {
@@ -349,6 +383,8 @@
     AUDIO_DEVICE_IN_PROXY                      = 0x81000000u, // BIT_IN | 0x1000000
     AUDIO_DEVICE_IN_USB_HEADSET                = 0x82000000u, // BIT_IN | 0x2000000
     AUDIO_DEVICE_IN_BLUETOOTH_BLE              = 0x84000000u, // BIT_IN | 0x4000000
+    AUDIO_DEVICE_IN_HDMI_ARC                   = 0x88000000u, // BIT_IN | 0x8000000
+    AUDIO_DEVICE_IN_ECHO_REFERENCE             = 0x90000000u, // BIT_IN | 0x10000000
     AUDIO_DEVICE_IN_DEFAULT                    = 0xC0000000u, // BIT_IN | BIT_DEFAULT
 };
 
@@ -380,6 +416,10 @@
     AUDIO_INPUT_FLAG_MMAP_NOIRQ = 0x10,
     AUDIO_INPUT_FLAG_VOIP_TX    = 0x20,
     AUDIO_INPUT_FLAG_HW_AV_SYNC = 0x40,
+#ifndef AUDIO_NO_SYSTEM_DECLARATIONS  // TODO: Expose at HAL interface, remove FRAMEWORK_FLAGS mask
+    AUDIO_INPUT_FLAG_DIRECT     = 0x80,
+    AUDIO_INPUT_FRAMEWORK_FLAGS = AUDIO_INPUT_FLAG_DIRECT,
+#endif
 } audio_input_flags_t;
 
 typedef enum {
@@ -436,6 +476,9 @@
     AUDIO_PORT_CONFIG_CHANNEL_MASK = 0x2u,
     AUDIO_PORT_CONFIG_FORMAT       = 0x4u,
     AUDIO_PORT_CONFIG_GAIN         = 0x8u,
+#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
+    AUDIO_PORT_CONFIG_FLAGS        = 0x10u,  // Absent from AudioPortConfigMask, framework only.
+#endif
 };
 
 typedef enum {
@@ -443,6 +486,12 @@
     AUDIO_LATENCY_NORMAL = 1, // (::android::hardware::audio::common::V4_0::AudioMixLatencyClass.LOW implicitly + 1)
 } audio_mix_latency_class_t;
 
+typedef enum {
+    MIC_DIRECTION_UNSPECIFIED = 0,
+    MIC_DIRECTION_FRONT = 1,
+    MIC_DIRECTION_BACK = 2,
+    MIC_DIRECTION_EXTERNAL = 3,
+} audio_microphone_direction_t;
 #ifdef __cplusplus
 }
 #endif
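The composite masks above are plain bitwise ORs of their parts; a few illustrative
C++11 sanity checks (assumptions: compiled with this header included, not part of
this change):

    static_assert(AUDIO_CHANNEL_OUT_STEREO_HAPTIC_A ==
                  (AUDIO_CHANNEL_OUT_STEREO | AUDIO_CHANNEL_OUT_HAPTIC_A), "");
    static_assert(AUDIO_CHANNEL_OUT_MONO_HAPTIC_AB ==
                  (AUDIO_CHANNEL_OUT_MONO | AUDIO_CHANNEL_OUT_HAPTIC_A |
                   AUDIO_CHANNEL_OUT_HAPTIC_B), "");
    static_assert(AUDIO_CHANNEL_INDEX_MASK_24 ==
                  (0x80000000u | ((1u << 24) - 1)), "");
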
diff --git a/audio/include/system/audio.h b/audio/include/system/audio.h
index 550913b..acfcda2 100644
--- a/audio/include/system/audio.h
+++ b/audio/include/system/audio.h
@@ -32,14 +32,13 @@
 /*
  * Annotation to tell clang that we intend to fall through from one case to
  * another in a switch (for c++ files). Sourced from android-base/macros.h.
- * TODO: See also C++17 [[fallthough]].
  */
 #ifndef FALLTHROUGH_INTENDED
-#if defined(__clang__) && defined(__cplusplus)
-#define FALLTHROUGH_INTENDED [[clang::fallthrough]]
+#ifdef __cplusplus
+#define FALLTHROUGH_INTENDED [[fallthrough]]
 #else
 #define FALLTHROUGH_INTENDED
-#endif // __clang__ && __cplusplus
+#endif // __cplusplus
 #endif // FALLTHROUGH_INTENDED
 
 __BEGIN_DECLS
@@ -74,6 +73,9 @@
     AUDIO_FLAG_BYPASS_MUTE                = 0x80,
     AUDIO_FLAG_LOW_LATENCY                = 0x100,
     AUDIO_FLAG_DEEP_BUFFER                = 0x200,
+    AUDIO_FLAG_NO_MEDIA_PROJECTION        = 0x400,
+    AUDIO_FLAG_MUTE_HAPTIC                = 0x800,
+    AUDIO_FLAG_NO_SYSTEM_CAPTURE          = 0x1000,
 };
 
 /* Audio attributes */
@@ -86,6 +88,39 @@
     char                 tags[AUDIO_ATTRIBUTES_TAGS_MAX_SIZE]; /* UTF8 */
 } __attribute__((packed)) audio_attributes_t; // sent through Binder;
 
+static const audio_attributes_t AUDIO_ATTRIBUTES_INITIALIZER = {
+    /* .content_type = */ AUDIO_CONTENT_TYPE_UNKNOWN,
+    /* .usage = */ AUDIO_USAGE_UNKNOWN,
+    /* .source = */ AUDIO_SOURCE_DEFAULT,
+    /* .flags = */ AUDIO_FLAG_NONE,
+    /* .tags = */ ""
+};
+
+static inline audio_attributes_t attributes_initializer(audio_usage_t usage)
+{
+    audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+    attributes.usage = usage;
+    return attributes;
+}
+
+static inline void audio_flags_to_audio_output_flags(
+                                           const audio_flags_mask_t audio_flags,
+                                           audio_output_flags_t *flags)
+{
+    if ((audio_flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        *flags = (audio_output_flags_t)(*flags |
+            AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+    if ((audio_flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
+        *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_FAST);
+    }
+    // check deep buffer after flags have been modified above
+    if (*flags == AUDIO_OUTPUT_FLAG_NONE && (audio_flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
+        *flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+    }
+}
+
+
 /* a unique ID allocated by AudioFlinger for use as an audio_io_handle_t, audio_session_t,
  * effect ID (int), audio_module_handle_t, and audio_patch_handle_t.
  * Audio port IDs (audio_port_handle_t) are allocated by AudioPolicy
@@ -102,7 +137,7 @@
     AUDIO_UNIQUE_ID_USE_PATCH = 4,
     AUDIO_UNIQUE_ID_USE_OUTPUT = 5,
     AUDIO_UNIQUE_ID_USE_INPUT = 6,
-    AUDIO_UNIQUE_ID_USE_PLAYER = 7,
+    AUDIO_UNIQUE_ID_USE_CLIENT = 7,  // client-side players and recorders
     AUDIO_UNIQUE_ID_USE_MAX = 8,  // must be a power-of-two
     AUDIO_UNIQUE_ID_USE_MASK = AUDIO_UNIQUE_ID_USE_MAX - 1
 } audio_unique_id_use_t;
@@ -397,6 +432,13 @@
 /* the maximum length for the human-readable device name */
 #define AUDIO_PORT_MAX_NAME_LEN 128
 
+/* a union to store port configuration flags. Declared as a named type so it can
+   be reused in framework code */
+union audio_io_flags {
+    audio_input_flags_t  input;
+    audio_output_flags_t output;
+};
+
 /* maximum audio device address length */
 #define AUDIO_DEVICE_MAX_ADDRESS_LEN 32
 
@@ -437,6 +479,9 @@
     audio_channel_mask_t     channel_mask; /* channel mask if applicable */
     audio_format_t           format;       /* format if applicable */
     struct audio_gain_config gain;         /* gain to apply if applicable */
+#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
+    union audio_io_flags     flags;        /* framework only: HW_AV_SYNC, DIRECT, ... */
+#endif
     union {
         struct audio_port_config_device_ext  device;  /* device specific info */
         struct audio_port_config_mix_ext     mix;     /* mix specific info */
@@ -570,17 +615,21 @@
                                     is called */
 };
 
-/** Metadata of a record track for an in stream. */
+/** Metadata of a playback track for an out stream. */
 typedef struct playback_track_metadata {
     audio_usage_t usage;
     audio_content_type_t content_type;
     float gain; // Normalized linear volume. 0=silence, 1=0dbfs...
 } playback_track_metadata_t;
 
-/** Metadata of a playback track for an out stream. */
+/** Metadata of a record track for an in stream. */
 typedef struct record_track_metadata {
     audio_source_t source;
     float gain; // Normalized linear volume. 0=silence, 1=0dbfs...
+    // For record tracks originating from a software patch, the dest_device
+    // fields provide information about the downstream device.
+    audio_devices_t dest_device;
+    char dest_device_address[AUDIO_DEVICE_MAX_ADDRESS_LEN];
 } record_track_metadata_t;
 
 
@@ -814,8 +863,8 @@
     case 2:
         bits = AUDIO_CHANNEL_OUT_STEREO;
         break;
-    case 3:
-        bits = AUDIO_CHANNEL_OUT_STEREO | AUDIO_CHANNEL_OUT_FRONT_CENTER;
+    case 3: // 2.1
+        bits = AUDIO_CHANNEL_OUT_STEREO | AUDIO_CHANNEL_OUT_LOW_FREQUENCY;
         break;
     case 4: // 4.0
         bits = AUDIO_CHANNEL_OUT_QUAD;
@@ -874,6 +923,22 @@
             AUDIO_CHANNEL_REPRESENTATION_POSITION, bits);
 }
 
+/* Derive a default haptic channel mask from a channel count.
+ */
+static inline audio_channel_mask_t haptic_channel_mask_from_count(uint32_t channel_count)
+{
+    switch (channel_count) {
+    case 0:
+        return AUDIO_CHANNEL_NONE;
+    case 1:
+        return AUDIO_CHANNEL_OUT_HAPTIC_A;
+    case 2:
+        return AUDIO_CHANNEL_OUT_HAPTIC_AB;
+    default:
+        return AUDIO_CHANNEL_INVALID;
+    }
+}
+
 static inline audio_channel_mask_t audio_channel_mask_in_to_out(audio_channel_mask_t in)
 {
     switch (in) {
@@ -896,6 +961,42 @@
     }
 }
 
+static inline audio_channel_mask_t audio_channel_mask_out_to_in(audio_channel_mask_t out)
+{
+    switch (out) {
+    case AUDIO_CHANNEL_OUT_MONO:
+        return AUDIO_CHANNEL_IN_MONO;
+    case AUDIO_CHANNEL_OUT_STEREO:
+        return AUDIO_CHANNEL_IN_STEREO;
+    case AUDIO_CHANNEL_OUT_5POINT1:
+        return AUDIO_CHANNEL_IN_5POINT1;
+    case AUDIO_CHANNEL_OUT_3POINT1POINT2:
+        return AUDIO_CHANNEL_IN_3POINT1POINT2;
+    case AUDIO_CHANNEL_OUT_3POINT0POINT2:
+        return AUDIO_CHANNEL_IN_3POINT0POINT2;
+    case AUDIO_CHANNEL_OUT_2POINT1POINT2:
+        return AUDIO_CHANNEL_IN_2POINT1POINT2;
+    case AUDIO_CHANNEL_OUT_2POINT0POINT2:
+        return AUDIO_CHANNEL_IN_2POINT0POINT2;
+    default:
+        return AUDIO_CHANNEL_INVALID;
+    }
+}
+
+static inline bool audio_channel_position_mask_is_out_canonical(audio_channel_mask_t channelMask)
+{
+    if (audio_channel_mask_get_representation(channelMask)
+            != AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+        return false;
+    }
+    const uint32_t audioChannelCount = audio_channel_count_from_out_mask(
+            channelMask & ~AUDIO_CHANNEL_HAPTIC_ALL);
+    const uint32_t hapticChannelCount = audio_channel_count_from_out_mask(
+            channelMask & AUDIO_CHANNEL_HAPTIC_ALL);
+    return channelMask == (audio_channel_out_mask_from_count(audioChannelCount) |
+            haptic_channel_mask_from_count(hapticChannelCount));
+}
+
 static inline bool audio_is_valid_format(audio_format_t format)
 {
     switch (format & AUDIO_FORMAT_MAIN_MASK) {
@@ -915,40 +1016,112 @@
     case AUDIO_FORMAT_MP3:
     case AUDIO_FORMAT_AMR_NB:
     case AUDIO_FORMAT_AMR_WB:
+        return true;
     case AUDIO_FORMAT_AAC:
-    case AUDIO_FORMAT_AAC_ADTS:
+        switch (format) {
+        case AUDIO_FORMAT_AAC:
+        case AUDIO_FORMAT_AAC_MAIN:
+        case AUDIO_FORMAT_AAC_LC:
+        case AUDIO_FORMAT_AAC_SSR:
+        case AUDIO_FORMAT_AAC_LTP:
+        case AUDIO_FORMAT_AAC_HE_V1:
+        case AUDIO_FORMAT_AAC_SCALABLE:
+        case AUDIO_FORMAT_AAC_ERLC:
+        case AUDIO_FORMAT_AAC_LD:
+        case AUDIO_FORMAT_AAC_HE_V2:
+        case AUDIO_FORMAT_AAC_ELD:
+        case AUDIO_FORMAT_AAC_XHE:
+            return true;
+        default:
+            return false;
+        }
+        /* not reached */
     case AUDIO_FORMAT_HE_AAC_V1:
     case AUDIO_FORMAT_HE_AAC_V2:
-    case AUDIO_FORMAT_AAC_ELD:
-    case AUDIO_FORMAT_AAC_XHE:
     case AUDIO_FORMAT_VORBIS:
     case AUDIO_FORMAT_OPUS:
     case AUDIO_FORMAT_AC3:
+        return true;
     case AUDIO_FORMAT_E_AC3:
+        switch (format) {
+        case AUDIO_FORMAT_E_AC3:
+        case AUDIO_FORMAT_E_AC3_JOC:
+            return true;
+        default:
+            return false;
+        }
+        /* not reached */
     case AUDIO_FORMAT_DTS:
     case AUDIO_FORMAT_DTS_HD:
     case AUDIO_FORMAT_IEC61937:
     case AUDIO_FORMAT_DOLBY_TRUEHD:
-    case AUDIO_FORMAT_QCELP:
     case AUDIO_FORMAT_EVRC:
     case AUDIO_FORMAT_EVRCB:
     case AUDIO_FORMAT_EVRCWB:
+    case AUDIO_FORMAT_EVRCNW:
     case AUDIO_FORMAT_AAC_ADIF:
+    case AUDIO_FORMAT_WMA:
+    case AUDIO_FORMAT_WMA_PRO:
     case AUDIO_FORMAT_AMR_WB_PLUS:
     case AUDIO_FORMAT_MP2:
-    case AUDIO_FORMAT_EVRCNW:
+    case AUDIO_FORMAT_QCELP:
+    case AUDIO_FORMAT_DSD:
     case AUDIO_FORMAT_FLAC:
     case AUDIO_FORMAT_ALAC:
     case AUDIO_FORMAT_APE:
-    case AUDIO_FORMAT_WMA:
-    case AUDIO_FORMAT_WMA_PRO:
-    case AUDIO_FORMAT_DSD:
+        return true;
+    case AUDIO_FORMAT_AAC_ADTS:
+        switch (format) {
+        case AUDIO_FORMAT_AAC_ADTS:
+        case AUDIO_FORMAT_AAC_ADTS_MAIN:
+        case AUDIO_FORMAT_AAC_ADTS_LC:
+        case AUDIO_FORMAT_AAC_ADTS_SSR:
+        case AUDIO_FORMAT_AAC_ADTS_LTP:
+        case AUDIO_FORMAT_AAC_ADTS_HE_V1:
+        case AUDIO_FORMAT_AAC_ADTS_SCALABLE:
+        case AUDIO_FORMAT_AAC_ADTS_ERLC:
+        case AUDIO_FORMAT_AAC_ADTS_LD:
+        case AUDIO_FORMAT_AAC_ADTS_HE_V2:
+        case AUDIO_FORMAT_AAC_ADTS_ELD:
+        case AUDIO_FORMAT_AAC_ADTS_XHE:
+            return true;
+        default:
+            return false;
+        }
+        /* not reached */
+    case AUDIO_FORMAT_SBC:
+    case AUDIO_FORMAT_APTX:
+    case AUDIO_FORMAT_APTX_HD:
     case AUDIO_FORMAT_AC4:
     case AUDIO_FORMAT_LDAC:
-    case AUDIO_FORMAT_E_AC3_JOC:
-    case AUDIO_FORMAT_MAT_1_0:
-    case AUDIO_FORMAT_MAT_2_0:
-    case AUDIO_FORMAT_MAT_2_1:
+        return true;
+    case AUDIO_FORMAT_MAT:
+        switch (format) {
+        case AUDIO_FORMAT_MAT:
+        case AUDIO_FORMAT_MAT_1_0:
+        case AUDIO_FORMAT_MAT_2_0:
+        case AUDIO_FORMAT_MAT_2_1:
+            return true;
+        default:
+            return false;
+        }
+        /* not reached */
+    case AUDIO_FORMAT_AAC_LATM:
+        switch (format) {
+        case AUDIO_FORMAT_AAC_LATM:
+        case AUDIO_FORMAT_AAC_LATM_LC:
+        case AUDIO_FORMAT_AAC_LATM_HE_V1:
+        case AUDIO_FORMAT_AAC_LATM_HE_V2:
+            return true;
+        default:
+            return false;
+        }
+        /* not reached */
+    case AUDIO_FORMAT_CELT:
+    case AUDIO_FORMAT_APTX_ADAPTIVE:
+    case AUDIO_FORMAT_LHDC:
+    case AUDIO_FORMAT_LHDC_LL:
+    case AUDIO_FORMAT_APTX_TWSP:
         return true;
     default:
         return false;
@@ -1028,13 +1201,22 @@
     const size_t kSize = AUDIO_DEVICE_MAX_ADDRESS_LEN + sizeof("a2dp_sink_address=");
     char param[kSize];
 
-    if (device & AUDIO_DEVICE_OUT_ALL_A2DP)
-        snprintf(param, kSize, "%s=%s", "a2dp_sink_address", address);
-    else if (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
-        snprintf(param, kSize, "%s=%s", "mix", address);
-    else
-        snprintf(param, kSize, "%s", address);
-
+    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+        device &= ~AUDIO_DEVICE_BIT_IN;
+        if (device & AUDIO_DEVICE_IN_BLUETOOTH_A2DP)
+            snprintf(param, kSize, "%s=%s", "a2dp_source_address", address);
+        else if (device & AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+            snprintf(param, kSize, "%s=%s", "mix", address);
+        else
+            snprintf(param, kSize, "%s", address);
+    } else {
+        if (device & AUDIO_DEVICE_OUT_ALL_A2DP)
+            snprintf(param, kSize, "%s=%s", "a2dp_sink_address", address);
+        else if (device & AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+            snprintf(param, kSize, "%s=%s", "mix", address);
+        else
+            snprintf(param, kSize, "%s", address);
+    }
     return strdup(param);
 }
 
@@ -1043,6 +1225,7 @@
         // input
         return (~AUDIO_DEVICE_BIT_IN & device & (AUDIO_DEVICE_IN_ALL_USB |
                           AUDIO_DEVICE_IN_HDMI |
+                          AUDIO_DEVICE_IN_HDMI_ARC |
                           AUDIO_DEVICE_IN_SPDIF |
                           AUDIO_DEVICE_IN_IP |
                           AUDIO_DEVICE_IN_BUS)) != 0;
@@ -1057,6 +1240,128 @@
     }
 }
 
+#ifndef AUDIO_NO_SYSTEM_DECLARATIONS
+
+static inline bool audio_gain_config_are_equal(
+        const struct audio_gain_config *lhs, const struct audio_gain_config *rhs) {
+    if (lhs->mode != rhs->mode) return false;
+    switch (lhs->mode) {
+    case AUDIO_GAIN_MODE_JOINT:
+        if (lhs->values[0] != rhs->values[0]) return false;
+        break;
+    case AUDIO_GAIN_MODE_CHANNELS:
+    case AUDIO_GAIN_MODE_RAMP:
+        if (lhs->channel_mask != rhs->channel_mask) return false;
+        for (int i = 0; i < popcount(lhs->channel_mask); ++i) {
+            if (lhs->values[i] != rhs->values[i]) return false;
+        }
+        break;
+    default: return false;
+    }
+    return lhs->ramp_duration_ms == rhs->ramp_duration_ms;
+}
+
+static inline bool audio_port_config_has_input_direction(const struct audio_port_config *port_cfg) {
+    switch (port_cfg->type) {
+    case AUDIO_PORT_TYPE_DEVICE:
+        switch (port_cfg->role) {
+        case AUDIO_PORT_ROLE_SOURCE: return true;
+        case AUDIO_PORT_ROLE_SINK: return false;
+        default: return false;
+        }
+    case AUDIO_PORT_TYPE_MIX:
+        switch (port_cfg->role) {
+        case AUDIO_PORT_ROLE_SOURCE: return false;
+        case AUDIO_PORT_ROLE_SINK: return true;
+        default: return false;
+        }
+    default: return false;
+    }
+}
+
+static inline bool audio_port_configs_are_equal(
+        const struct audio_port_config *lhs, const struct audio_port_config *rhs) {
+    if (lhs->role != rhs->role || lhs->type != rhs->type) return false;
+    switch (lhs->type) {
+    case AUDIO_PORT_TYPE_NONE: break;
+    case AUDIO_PORT_TYPE_DEVICE:
+        if (lhs->ext.device.hw_module != rhs->ext.device.hw_module ||
+                lhs->ext.device.type != rhs->ext.device.type ||
+                strncmp(lhs->ext.device.address, rhs->ext.device.address,
+                        AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) {
+            return false;
+        }
+        break;
+    case AUDIO_PORT_TYPE_MIX:
+        if (lhs->ext.mix.hw_module != rhs->ext.mix.hw_module ||
+                lhs->ext.mix.handle != rhs->ext.mix.handle) return false;
+        if (lhs->role == AUDIO_PORT_ROLE_SOURCE &&
+                lhs->ext.mix.usecase.stream != rhs->ext.mix.usecase.stream) return false;
+        else if (lhs->role == AUDIO_PORT_ROLE_SINK &&
+                lhs->ext.mix.usecase.source != rhs->ext.mix.usecase.source) return false;
+        break;
+    case AUDIO_PORT_TYPE_SESSION:
+        if (lhs->ext.session.session != rhs->ext.session.session) return false;
+        break;
+    default: return false;
+    }
+    return lhs->config_mask == rhs->config_mask &&
+            ((lhs->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) == 0 ||
+                    lhs->sample_rate == rhs->sample_rate) &&
+            ((lhs->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) == 0 ||
+                    lhs->channel_mask == rhs->channel_mask) &&
+            ((lhs->config_mask & AUDIO_PORT_CONFIG_FORMAT) == 0 ||
+                    lhs->format == rhs->format) &&
+            ((lhs->config_mask & AUDIO_PORT_CONFIG_GAIN) == 0 ||
+                    audio_gain_config_are_equal(&lhs->gain, &rhs->gain)) &&
+            ((lhs->config_mask & AUDIO_PORT_CONFIG_FLAGS) == 0 ||
+                    (audio_port_config_has_input_direction(lhs) ?
+                            lhs->flags.input == rhs->flags.input :
+                            lhs->flags.output == rhs->flags.output));
+}
+
+static inline bool audio_port_config_has_hw_av_sync(const struct audio_port_config *port_cfg) {
+    if (!(port_cfg->config_mask & AUDIO_PORT_CONFIG_FLAGS)) {
+        return false;
+    }
+    return audio_port_config_has_input_direction(port_cfg) ?
+            port_cfg->flags.input & AUDIO_INPUT_FLAG_HW_AV_SYNC
+            : port_cfg->flags.output & AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+}
+
+static inline bool audio_patch_has_hw_av_sync(const struct audio_patch *patch) {
+    for (unsigned int i = 0; i < patch->num_sources; ++i) {
+        if (audio_port_config_has_hw_av_sync(&patch->sources[i])) return true;
+    }
+    for (unsigned int i = 0; i < patch->num_sinks; ++i) {
+        if (audio_port_config_has_hw_av_sync(&patch->sinks[i])) return true;
+    }
+    return false;
+}
+
+static inline bool audio_patch_is_valid(const struct audio_patch *patch) {
+    // Note that a patch can have no sinks.
+    return patch->num_sources != 0 && patch->num_sources <= AUDIO_PATCH_PORTS_MAX &&
+            patch->num_sinks <= AUDIO_PATCH_PORTS_MAX;
+}
+
+// Note that when checking for equality the order of ports must match.
+// Patches will not be considered equal if they contain the same ports permuted differently.
+static inline bool audio_patches_are_equal(
+        const struct audio_patch *lhs, const struct audio_patch *rhs) {
+    if (!audio_patch_is_valid(lhs) || !audio_patch_is_valid(rhs)) return false;
+    if (lhs->num_sources != rhs->num_sources || lhs->num_sinks != rhs->num_sinks) return false;
+    for (unsigned int i = 0; i < lhs->num_sources; ++i) {
+        if (!audio_port_configs_are_equal(&lhs->sources[i], &rhs->sources[i])) return false;
+    }
+    for (unsigned int i = 0; i < lhs->num_sinks; ++i) {
+        if (!audio_port_configs_are_equal(&lhs->sinks[i], &rhs->sinks[i])) return false;
+    }
+    return true;
+}
+
+#endif
+
 // Unique effect ID (can be generated from the following site:
 //  http://www.itu.int/ITU-T/asn1/uuid.html)
 // This struct is used for effects identification and in soundtrigger.
@@ -1164,6 +1469,7 @@
 #define AUDIO_HARDWARE_MODULE_ID_CODEC_OFFLOAD "codec_offload"
 #define AUDIO_HARDWARE_MODULE_ID_STUB "stub"
 #define AUDIO_HARDWARE_MODULE_ID_HEARING_AID "hearing_aid"
+#define AUDIO_HARDWARE_MODULE_ID_MSD "msd"
 
 /**
  * Multi-Stream Decoder (MSD) HAL service name. MSD HAL is used to mix
@@ -1204,6 +1510,10 @@
 /* Screen state */
 #define AUDIO_PARAMETER_KEY_SCREEN_STATE "screen_state"
 
+/* User's preferred audio language setting (in ISO 639-2/T three-letter string code)
+ * used to select a specific language presentation for next generation audio codecs. */
+#define AUDIO_PARAMETER_KEY_AUDIO_LANGUAGE_PREFERRED "audio_language_preferred"
+
 /**
  *  audio stream parameters
  */
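A short usage sketch of the new attribute helpers (the values are illustrative):

    audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
    attr.flags = AUDIO_FLAG_LOW_LATENCY;

    audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
    audio_flags_to_audio_output_flags(attr.flags, &flags);
    /* flags is now AUDIO_OUTPUT_FLAG_FAST; DEEP_BUFFER is only derived
     * when no other output flag was set first */
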
diff --git a/audio/include/system/audio_effect-base.h b/audio/include/system/audio_effect-base.h
index 7a6a593..f613128 100644
--- a/audio/include/system/audio_effect-base.h
+++ b/audio/include/system/audio_effect-base.h
@@ -30,6 +30,7 @@
     EFFECT_FLAG_VOLUME_MASK = 448, // (((1 << VOLUME_SIZE) - 1) << VOLUME_SHIFT)
     EFFECT_FLAG_VOLUME_CTRL = 64, // (1 << VOLUME_SHIFT)
     EFFECT_FLAG_VOLUME_IND = 128, // (2 << VOLUME_SHIFT)
+    EFFECT_FLAG_VOLUME_MONITOR = 192, // (3 << VOLUME_SHIFT)
     EFFECT_FLAG_VOLUME_NONE = 0, // (0 << VOLUME_SHIFT)
     EFFECT_FLAG_DEVICE_SHIFT = 9, // (VOLUME_SHIFT + VOLUME_SIZE)
     EFFECT_FLAG_DEVICE_SIZE = 3,
diff --git a/audio/include/system/audio_effect.h b/audio/include/system/audio_effect.h
index 4cdc773..3937543 100644
--- a/audio/include/system/audio_effect.h
+++ b/audio/include/system/audio_effect.h
@@ -96,6 +96,7 @@
    EFFECT_CMD_SET_AUDIO_SOURCE,     // set the audio source (see audio.h, audio_source_t)
    EFFECT_CMD_OFFLOAD,              // set if effect thread is an offload one,
                                     // send the ioHandle of the effect thread
+   EFFECT_CMD_DUMP,                 // dump effect current state, for debugging
    EFFECT_CMD_FIRST_PROPRIETARY = 0x10000 // first proprietary command code
 };
 
@@ -415,6 +416,20 @@
 // reply format:
 //  size: sizeof(uint32_t)
 //  data: uint32_t
+//==================================================================================================
+// command: EFFECT_CMD_DUMP
+//--------------------------------------------------------------------------------------------------
+// description:
+//  Output the current state description to the provided file descriptor for inclusion
+//  in the audio service dump
+//--------------------------------------------------------------------------------------------------
+// command format:
+//  size: sizeof(uint32_t)
+//  data: uint32_t (which is in fact a file descriptor, int)
+//--------------------------------------------------------------------------------------------------
+// reply format:
+//  size: 0
+//  data: N/A
 //--------------------------------------------------------------------------------------------------
 // command: EFFECT_CMD_FIRST_PROPRIETARY
 //--------------------------------------------------------------------------------------------------
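A sketch of issuing the new command, assuming a valid effect_handle_t 'effect' and an
open dump file descriptor 'fd':

    uint32_t fd_data = (uint32_t)fd;
    uint32_t reply_size = 0;
    int status = (*effect)->command(effect, EFFECT_CMD_DUMP,
                                    sizeof(fd_data), &fd_data,
                                    &reply_size, NULL);
    /* per the format above, no reply payload is expected */
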
diff --git a/audio/include/system/audio_effects/audio_effects_conf.h b/audio/include/system/audio_effects/audio_effects_conf.h
index 79f08a6..3fadf01 100644
--- a/audio/include/system/audio_effects/audio_effects_conf.h
+++ b/audio/include/system/audio_effects/audio_effects_conf.h
@@ -44,14 +44,15 @@
 #define STRING_TAG "string"
 
 // audio_source_t
-#define MIC_SRC_TAG "mic"                           // AUDIO_SOURCE_MIC
-#define VOICE_UL_SRC_TAG "voice_uplink"             // AUDIO_SOURCE_VOICE_UPLINK
-#define VOICE_DL_SRC_TAG "voice_downlink"           // AUDIO_SOURCE_VOICE_DOWNLINK
-#define VOICE_CALL_SRC_TAG "voice_call"             // AUDIO_SOURCE_VOICE_CALL
-#define CAMCORDER_SRC_TAG "camcorder"               // AUDIO_SOURCE_CAMCORDER
-#define VOICE_REC_SRC_TAG "voice_recognition"       // AUDIO_SOURCE_VOICE_RECOGNITION
-#define VOICE_COMM_SRC_TAG "voice_communication"    // AUDIO_SOURCE_VOICE_COMMUNICATION
-#define UNPROCESSED_SRC_TAG "unprocessed"           // AUDIO_SOURCE_UNPROCESSED
+#define MIC_SRC_TAG "mic"                             // AUDIO_SOURCE_MIC
+#define VOICE_UL_SRC_TAG "voice_uplink"               // AUDIO_SOURCE_VOICE_UPLINK
+#define VOICE_DL_SRC_TAG "voice_downlink"             // AUDIO_SOURCE_VOICE_DOWNLINK
+#define VOICE_CALL_SRC_TAG "voice_call"               // AUDIO_SOURCE_VOICE_CALL
+#define CAMCORDER_SRC_TAG "camcorder"                 // AUDIO_SOURCE_CAMCORDER
+#define VOICE_REC_SRC_TAG "voice_recognition"         // AUDIO_SOURCE_VOICE_RECOGNITION
+#define VOICE_COMM_SRC_TAG "voice_communication"      // AUDIO_SOURCE_VOICE_COMMUNICATION
+#define UNPROCESSED_SRC_TAG "unprocessed"             // AUDIO_SOURCE_UNPROCESSED
+#define VOICE_PERFORMANCE_SRC_TAG "voice_performance" // AUDIO_SOURCE_VOICE_PERFORMANCE
 
 // audio_stream_type_t
 #define AUDIO_STREAM_DEFAULT_TAG "default"
diff --git a/audio/include/system/audio_policy.h b/audio/include/system/audio_policy.h
index 450125b..9071f5b 100644
--- a/audio/include/system/audio_policy.h
+++ b/audio/include/system/audio_policy.h
@@ -93,19 +93,14 @@
     AUDIO_POLICY_TONE_MAX                  = AUDIO_POLICY_TONE_CNT - 1,
 } audio_policy_tone_t;
 
-
-static inline bool audio_is_low_visibility(audio_stream_type_t stream)
-{
-    switch (stream) {
-    case AUDIO_STREAM_SYSTEM:
-    case AUDIO_STREAM_NOTIFICATION:
-    case AUDIO_STREAM_RING:
-        return true;
-    default:
-        return false;
-    }
-}
-
+/* AudioRecord client UID state derived from ActivityManager::PROCESS_STATE_XXX
+ * and used for concurrent capture policy.
+ */
+typedef enum {
+    APP_STATE_IDLE = 0,   /* client is idle: cannot capture */
+    APP_STATE_FOREGROUND, /* client has a foreground service: can capture */
+    APP_STATE_TOP,        /* client has a visible UI: can capture and select use case */
+} app_state_t;
 
 __END_DECLS
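A sketch of the concurrent capture rule the states encode; the helper name is an
assumption, not part of this header:

    static inline bool app_state_can_capture(app_state_t state)
    {
        /* FOREGROUND and TOP may capture; IDLE may not */
        return state != APP_STATE_IDLE;
    }
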
 
diff --git a/audio/include/system/sound_trigger.h b/audio/include/system/sound_trigger.h
index 6182ff3..03e40c8 100644
--- a/audio/include/system/sound_trigger.h
+++ b/audio/include/system/sound_trigger.h
@@ -41,6 +41,9 @@
 #define RECOGNITION_STATUS_SUCCESS 0
 #define RECOGNITION_STATUS_ABORT 1
 #define RECOGNITION_STATUS_FAILURE 2
+#define RECOGNITION_STATUS_GET_STATE_RESPONSE 3  /* Indicates that the recognition event is in
+                                                    response to a state request and was not
+                                                    triggered by a real DSP recognition */
 
 #define SOUND_MODEL_STATUS_UPDATED 0
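A sketch of consuming the new status in a recognition callback, assuming a received
struct sound_trigger_recognition_event *ev:

    if (ev->status == RECOGNITION_STATUS_GET_STATE_RESPONSE) {
        /* reply to a get-state request: report the state, but do not
         * treat it as a live DSP-triggered recognition */
    }
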
 
diff --git a/audio_route/audio_route.c b/audio_route/audio_route.c
index 700af61..febca07 100644
--- a/audio_route/audio_route.c
+++ b/audio_route/audio_route.c
@@ -45,6 +45,7 @@
     union ctl_values old_value;
     union ctl_values new_value;
     union ctl_values reset_value;
+    unsigned int active_count;
 };
 
 struct mixer_setting {
@@ -599,6 +600,7 @@
 
         ar->mixer_state[i].ctl = ctl;
         ar->mixer_state[i].num_values = num_values;
+        ar->mixer_state[i].active_count = 0;
 
         /* Skip types that are not yet supported in XML */
         type = mixer_ctl_get_type(ctl);
@@ -797,7 +799,6 @@
         return -1;
     }
 
-
     for (size_t i = 0; i < path->length; ++i) {
         unsigned int ctl_index;
         enum mixer_ctl_type type;
@@ -811,23 +812,53 @@
             continue;
         }
 
+        if (reverse && ms->active_count > 0) {
+            ms->active_count--;
+        } else if (!reverse) {
+            ms->active_count++;
+        }
+
        size_t value_sz = sizeof_ctl_type(type);
         /* if any value has changed, update the mixer */
         for (j = 0; j < ms->num_values; j++) {
             if (type == MIXER_CTL_TYPE_BYTE) {
                 if (ms->old_value.bytes[j] != ms->new_value.bytes[j]) {
+                    if (reverse && ms->active_count > 0) {
+                        ALOGD("%s: skipping reset of mixer control '%s' in path '%s' "
+                            "because it is still needed by other paths", __func__,
+                            mixer_ctl_get_name(ms->ctl), name);
+                        memcpy(ms->new_value.bytes, ms->old_value.bytes,
+                            ms->num_values * value_sz);
+                        break;
+                    }
                     mixer_ctl_set_array(ms->ctl, ms->new_value.bytes, ms->num_values);
                     memcpy(ms->old_value.bytes, ms->new_value.bytes, ms->num_values * value_sz);
                     break;
                 }
             } else if (type == MIXER_CTL_TYPE_ENUM) {
                 if (ms->old_value.enumerated[j] != ms->new_value.enumerated[j]) {
+                    if (reverse && ms->active_count > 0) {
+                        ALOGD("%s: skipping reset of mixer control '%s' in path '%s' "
+                            "because it is still needed by other paths", __func__,
+                            mixer_ctl_get_name(ms->ctl), name);
+                        memcpy(ms->new_value.enumerated, ms->old_value.enumerated,
+                            ms->num_values * value_sz);
+                        break;
+                    }
                     mixer_ctl_set_value(ms->ctl, 0, ms->new_value.enumerated[0]);
                     memcpy(ms->old_value.enumerated, ms->new_value.enumerated,
                             ms->num_values * value_sz);
                     break;
                 }
             } else if (ms->old_value.integer[j] != ms->new_value.integer[j]) {
+                if (reverse && ms->active_count > 0) {
+                    ALOGD("%s: skipping reset of mixer control '%s' in path '%s' "
+                        "because it is still needed by other paths", __func__,
+                        mixer_ctl_get_name(ms->ctl), name);
+                    memcpy(ms->new_value.integer, ms->old_value.integer,
+                        ms->num_values * value_sz);
+                    break;
+                }
                 mixer_ctl_set_array(ms->ctl, ms->new_value.integer, ms->num_values);
                 memcpy(ms->old_value.integer, ms->new_value.integer, ms->num_values * value_sz);
                 break;
diff --git a/audio_utils/Android.bp b/audio_utils/Android.bp
index b699ce3..f52a422 100644
--- a/audio_utils/Android.bp
+++ b/audio_utils/Android.bp
@@ -23,6 +23,7 @@
     defaults: ["audio_utils_defaults"],
 
     srcs: [
+        "Balance.cpp",
         "channels.c",
         "ErrorLog.cpp",
         "fifo.cpp",
@@ -35,6 +36,7 @@
         "PowerLog.cpp",
         "primitives.c",
         "roundup.c",
+        "sample.c",
     ],
 
     header_libs: [
diff --git a/audio_utils/Balance.cpp b/audio_utils/Balance.cpp
new file mode 100644
index 0000000..64769e6
--- /dev/null
+++ b/audio_utils/Balance.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <audio_utils/Balance.h>
+
+namespace android::audio_utils {
+
+void Balance::setChannelMask(audio_channel_mask_t channelMask)
+{
+    channelMask &= ~AUDIO_CHANNEL_HAPTIC_ALL;
+    if (!audio_is_output_channel(channelMask) // invalid mask
+            || mChannelMask == channelMask) { // no need to do anything
+        return;
+    }
+
+    mChannelMask = channelMask;
+    mChannelCount = audio_channel_count_from_out_mask(channelMask);
+
+    // save mBalance into balance for later restoring, then reset
+    const float balance = mBalance;
+    mBalance = 0.f;
+
+    // reset mVolumes
+    mVolumes.resize(mChannelCount);
+    std::fill(mVolumes.begin(), mVolumes.end(), 1.f);
+
+    // reset ramping variables
+    mRampBalance = 0.f;
+    mRampVolumes.clear();
+
+    if (audio_channel_mask_get_representation(mChannelMask)
+            == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        mSides.clear();       // mSides unused for channel index masks.
+        setBalance(balance);  // recompute balance
+        return;
+    }
+
+    // Implementation detail (may change):
+    // For implementation speed, we precompute the side (left, right, center),
+    // which is a fixed geometrical constant for a given channel mask.
+    // This assumes that the channel mask does not change frequently.
+    //
+    // For the channel mask spec, see system/media/audio/include/system/audio-base.h.
+    //
+    // The side is: 0 = left, 1 = right, 2 = center.
+    static constexpr int sideFromChannel[] = {
+        0, // AUDIO_CHANNEL_OUT_FRONT_LEFT            = 0x1u,
+        1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT           = 0x2u,
+        2, // AUDIO_CHANNEL_OUT_FRONT_CENTER          = 0x4u,
+        2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY         = 0x8u,
+        0, // AUDIO_CHANNEL_OUT_BACK_LEFT             = 0x10u,
+        1, // AUDIO_CHANNEL_OUT_BACK_RIGHT            = 0x20u,
+        0, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER  = 0x40u,
+        1, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+        2, // AUDIO_CHANNEL_OUT_BACK_CENTER           = 0x100u,
+        0, // AUDIO_CHANNEL_OUT_SIDE_LEFT             = 0x200u,
+        1, // AUDIO_CHANNEL_OUT_SIDE_RIGHT            = 0x400u,
+        2, // AUDIO_CHANNEL_OUT_TOP_CENTER            = 0x800u,
+        0, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT        = 0x1000u,
+        2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER      = 0x2000u,
+        1, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT       = 0x4000u,
+        0, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT         = 0x8000u,
+        2, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER       = 0x10000u,
+        1, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT        = 0x20000u,
+        0, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT         = 0x40000u,
+        1, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT        = 0x80000u,
+    };
+
+    mSides.resize(mChannelCount);
+    for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
+        const int index = __builtin_ctz(channel);
+        if ((size_t)index < std::size(sideFromChannel)) {
+            mSides[i] = sideFromChannel[index];
+        } else {
+            mSides[i] = 2; // consider center
+        }
+        channel &= ~(1 << index);
+    }
+    setBalance(balance); // recompute balance
+}
+
+void Balance::process(float *buffer, size_t frames)
+{
+    if (mBalance == 0.f || mChannelCount < 2) {
+        return;
+    }
+
+    if (mRamp) {
+        if (mRampVolumes.size() != mVolumes.size()) {
+            // If mRampVolumes is empty, we do not ramp in this process() but directly
+            // apply the existing mVolumes. We save the balance and volume state here
+            // and fall through to non-ramping code below. The next process() will ramp if needed.
+            mRampBalance = mBalance;
+            mRampVolumes = mVolumes;
+        } else if (mRampBalance != mBalance) {
+            if (frames > 0) {
+                std::vector<float> deltas(mVolumes.size());
+                const float r = 1.f / frames;
+                for (size_t j = 0; j < mChannelCount; ++j) {
+                    deltas[j] = (mVolumes[j] - mRampVolumes[j]) * r;
+                }
+
+                // ramped balance
+                for (size_t i = 0; i < frames; ++i) {
+                    const float findex = i;
+                    for (size_t j = 0; j < mChannelCount; ++j) { // better precision: delta * i
+                        *buffer++ *= mRampVolumes[j] + deltas[j] * findex;
+                    }
+                }
+            }
+            mRampBalance = mBalance;
+            mRampVolumes = mVolumes;
+            return;
+        }
+        // fall through
+    }
+
+    // non-ramped balance
+    for (size_t i = 0; i < frames; ++i) {
+        for (size_t j = 0; j < mChannelCount; ++j) {
+            *buffer++ *= mVolumes[j];
+        }
+    }
+}
+
+void Balance::computeStereoBalance(float balance, float *left, float *right) const
+{
+    if (balance > 0.f) {
+        *left = mCurve(1.f - balance);
+        *right = 1.f;
+    } else if (balance < 0.f) {
+        *left = 1.f;
+        *right = mCurve(1.f + balance);
+    } else {
+        *left = 1.f;
+        *right = 1.f;
+    }
+
+    // Functionally:
+    // *left = balance > 0.f ? mCurve(1.f - balance) : 1.f;
+    // *right = balance < 0.f ? mCurve(1.f + balance) : 1.f;
+}
+
+std::string Balance::toString() const
+{
+    std::stringstream ss;
+    ss << "balance " << mBalance << " channelCount " << mChannelCount << " volumes:";
+    for (float volume : mVolumes) {
+        ss << " " << volume;
+    }
+    // we do not show mSides, which is only valid for channel position masks.
+    return ss.str();
+}
+
+void Balance::setBalance(float balance)
+{
+    if (mBalance == balance                         // no change
+        || isnan(balance) || fabs(balance) > 1.f) { // balance out of range
+        return;
+    }
+
+    mBalance = balance;
+
+    if (mChannelCount < 2) { // if channel count is 1, mVolumes[0] is already set to 1.f
+        return;              // and if channel count < 2, we don't do anything in process().
+    }
+
+    // Handle the common cases:
+    // stereo and channel index masks only affect the first two channels as left and right.
+    if (mChannelMask == AUDIO_CHANNEL_OUT_STEREO
+            || audio_channel_mask_get_representation(mChannelMask)
+                    == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+        computeStereoBalance(balance, &mVolumes[0], &mVolumes[1]);
+        return;
+    }
+
+    // For position masks with more than 2 channels, we consider which side the
+    // speaker position is on to figure the volume used.
+    float balanceVolumes[3]; // left, right, center
+    computeStereoBalance(balance, &balanceVolumes[0], &balanceVolumes[1]);
+    balanceVolumes[2] = 1.f; // center  TODO: consider center scaling.
+
+    for (size_t i = 0; i < mVolumes.size(); ++i) {
+        mVolumes[i] = balanceVolumes[mSides[i]];
+    }
+}
+
+} // namespace android::audio_utils
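A brief usage sketch of the new class; the interleaved float buffer and frame count
are assumptions:

    android::audio_utils::Balance balance;
    balance.setChannelMask(AUDIO_CHANNEL_OUT_STEREO);
    balance.setBalance(0.5f);        // toward the right: left is attenuated
    balance.process(buffer, frames); // scales frames in place, ramping if enabled
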
diff --git a/audio_utils/PowerLog.cpp b/audio_utils/PowerLog.cpp
index 15048ba..01c60c9 100644
--- a/audio_utils/PowerLog.cpp
+++ b/audio_utils/PowerLog.cpp
@@ -24,8 +24,10 @@
 #include <sstream>
 #include <stdint.h>
 #include <unistd.h>
+#include <vector>
 
 #include <audio_utils/clock.h>
+#include <audio_utils/LogPlot.h>
 #include <audio_utils/power.h>
 #include <audio_utils/PowerLog.h>
 
@@ -165,6 +167,9 @@
     if (nonzeros == 0) {
         ss << prefix << "Signal power history: (none)\n";
     } else {
+        // First value is the power, second value is whether the entry starts
+        // a new time stamp series.
+        std::vector<std::pair<float, bool>> plotEntries;
         ss << prefix << "Signal power history:\n";
 
         size_t column = 0;
@@ -179,6 +184,12 @@
             if (energy == 0.f) {
                 if (!first) {
                     ss << " ] sum(" << audio_utils_power_from_energy(cumulative) << ")";
+                    // Add an entry to denote the start of a new time stamp series.
+                    if (!plotEntries.empty()) {
+                        // The power value is copied from the previous entry so it stays
+                        // within the graph's min/max and does not affect y-axis scaling.
+                        plotEntries.emplace_back(plotEntries.back().first, true);
+                    }
                 }
                 cumulative = 0.f;
                 column = 0;
@@ -207,7 +218,11 @@
                     audio_utils_power_from_energy(energy / (mChannelCount * mFramesPerEntry));
             ss << std::setw(6) << power;
             ALOGV("state: %d %lld %f", state, (long long)time, power);
+            // Add an entry to the ASCII art power log graph.
+            // false indicates this entry does not start a new time stamp series.
+            plotEntries.emplace_back(power, false);
         }
+        ss << "\n" << audio_utils_log_plot(plotEntries.begin(), plotEntries.end());
         ss << "\n";
     }
     return ss.str();
diff --git a/audio_utils/channels.c b/audio_utils/channels.c
index 83a74b8..6747527 100644
--- a/audio_utils/channels.c
+++ b/audio_utils/channels.c
@@ -58,12 +58,20 @@
     return out;
 }
 
+/* This is written as a C macro because it operates on generic types,
+ * which in a C++ file could alternatively be achieved by a "template"
+ * or an "auto" declaration.
+ * TODO: convert this from a C file to a C++ file.
+ */
+
 /* Channel expands (adds zeroes to audio frame end) from an input buffer to an output buffer.
  * See expand_channels() function below for parameter definitions.
  *
  * Move from back to front so that the conversion can be done in-place
  * i.e. in_buff == out_buff
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
+ *
+ * Macro has a return statement.
  */
 #define EXPAND_CHANNELS(in_buff, in_buff_chans, out_buff, out_buff_chans, num_in_bytes, zero) \
 { \
@@ -94,11 +102,8 @@
  * Move from back to front so that the conversion can be done in-place
  * i.e. in_buff == out_buff
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
- */
-/* This is written as a C macro because it operates on generic types,
- * which in a C++ file could be alternatively achieved by a "template"
- * or an "auto" declaration.
- * TODO: convert this from a C file to a C++ file.
+ *
+ * Macro has a return statement.
  */
 #define EXPAND_SELECTED_CHANNELS( \
         in_buff, in_buff_chans, out_buff, out_buff_chans, num_in_bytes) \
@@ -119,6 +124,53 @@
     return num_out_samples * sizeof(*(out_buff)); \
 }
 
+/* Expand number of channels from an input buffer to an output buffer.
+ * See expand_channels_non_destructive() function below for parameter definitions.
+ *
+ * Input channels are copied to the output buffer, with extra output
+ * channels interleaved from back of input buffer.
+ *
+ * So for in_chans = 2, out_chans = 4: [1|2|1|2...|3|4|3|4] => [1|2|3|4|1|2|3|4...]
+ *
+ * NOTE: in_buff must be same size as out_buff and num_in_bytes must
+ * be a multiple of in_buff_channels * in_buff_sample_size.
+ *
+ * Uses a temporary buffer so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ *
+ * Macro has a return statement.
+ */
+#define EXPAND_CHANNELS_NON_DESTRUCTIVE(in_buff, in_buff_chans, \
+        out_buff, out_buff_chans, num_in_bytes) \
+{ \
+    size_t num_in_samples = (num_in_bytes) / sizeof(*(in_buff)); \
+    size_t num_out_samples = (num_in_samples * (out_buff_chans)) / (in_buff_chans); \
+    typeof(out_buff) dst_ptr = (out_buff); \
+    typeof(in_buff) src_ptr = (in_buff); \
+    typeof(*(out_buff)) temp_buff[num_in_samples]; \
+    typeof(out_buff) temp_ptr = temp_buff; \
+    /* if in-place, copy input channels to a temp buffer */ \
+    if ((in_buff) == (out_buff)) { \
+        memcpy(temp_buff, src_ptr, (num_in_bytes)); \
+        src_ptr += num_in_samples; \
+    } else { \
+        temp_ptr = (typeof(out_buff)) src_ptr; \
+        src_ptr += num_in_samples; \
+    } \
+    /* interleave channels from end of source buffer with those from front */ \
+    size_t src_index; \
+    for (src_index = 0; src_index < num_out_samples; src_index += (out_buff_chans)) { \
+        size_t dst_offset; \
+        for (dst_offset = 0; dst_offset < (in_buff_chans); dst_offset++) { \
+            *dst_ptr++ = *temp_ptr++; \
+        } \
+        for (; dst_offset < (out_buff_chans); dst_offset++) { \
+            *dst_ptr++ = *src_ptr++; \
+        } \
+    } \
+    /* return number of *bytes* generated */ \
+    return num_out_samples * sizeof(*(out_buff)); \
+}
 
 /* Channel expands from a MONO input buffer to a MULTICHANNEL output buffer by duplicating the
  * single input channel to the first 2 output channels and 0-filling the remaining.
@@ -129,6 +181,8 @@
  * Move from back to front so that the conversion can be done in-place
  * i.e. in_buff == out_buff
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
+ *
+ * Macro has a return statement.
  */
 #define EXPAND_MONO_TO_MULTI(in_buff, in_buff_chans, out_buff, out_buff_chans, num_in_bytes, zero) \
 { \
@@ -158,6 +212,8 @@
  * Move from front to back so that the conversion can be done in-place
  * i.e. in_buff == out_buff
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
+ *
+ * Macro has a return statement.
  */
 #define CONTRACT_CHANNELS(in_buff, in_buff_chans, out_buff, out_buff_chans, num_in_bytes) \
 { \
@@ -178,6 +234,56 @@
     return num_out_samples * sizeof(*(out_buff)); \
 }
 
+/* Contract the number of channels from an input buffer to an output buffer,
+ * storing the removed channels at the end of the buffer.
+ *
+ * See contract_channels_non_destructive() function below for parameter definitions.
+ *
+ * So for in_chans = 4, out_chans = 2: [1|2|3|4|1|2|3|4...] => [1|2|1|2...|3|4|3|4]
+ *
+ * NOTE: in_buff must be same size as out_buff and num_in_bytes must
+ * be a multiple of in_buff_channels * in_buff_sample_size.
+ *
+ * Uses a temporary buffer so that the conversion can be done in-place
+ * i.e. in_buff == out_buff
+ *
+ * Macro has a return statement.
+ */
+#define CONTRACT_CHANNELS_NON_DESTRUCTIVE(in_buff, in_buff_chans, out_buff, \
+        out_buff_chans, num_in_bytes) \
+{ \
+    size_t num_in_samples = (num_in_bytes) / sizeof(*(in_buff)); \
+    size_t num_out_samples = (num_in_samples * (out_buff_chans)) / (in_buff_chans); \
+    typeof(out_buff) dst_ptr = (out_buff); \
+    typeof(in_buff) src_ptr = (in_buff); \
+    size_t num_temp_samples = num_in_samples - num_out_samples; \
+    typeof(*(out_buff)) temp_buff[num_temp_samples]; \
+    typeof(out_buff) temp_ptr; \
+    /* if in-place, copy input channels to a temp buffer instead of out buffer */ \
+    if ((in_buff) == (out_buff)) { \
+        temp_ptr = temp_buff; \
+    } else { \
+        temp_ptr = dst_ptr + num_out_samples; \
+    } \
+    size_t src_index; \
+    for (src_index = 0; src_index < num_in_samples; src_index += (in_buff_chans)) { \
+        size_t dst_offset; \
+        for (dst_offset = 0; dst_offset < (out_buff_chans); dst_offset++) { \
+            *dst_ptr++ = *src_ptr++; \
+        } \
+        for (; dst_offset < (in_buff_chans); dst_offset++) { \
+            *temp_ptr++ = *src_ptr++; \
+        } \
+    } \
+    /* if in-place, append the removed channels from the temp buffer */ \
+    if ((in_buff) == (out_buff)) { \
+        temp_ptr = temp_buff; \
+        memcpy(dst_ptr, temp_ptr, num_temp_samples * sizeof(*(in_buff))); \
+    } \
+    /* return number of *bytes* generated */ \
+    return num_out_samples * sizeof(*(out_buff)); \
+}
+
 /* Channel contracts from a MULTICHANNEL input buffer to a MONO output buffer by mixing the
  * first two input channels into the single output channel (and skipping the rest).
  * See contract_channels() function below for parameter definitions.
@@ -189,6 +295,8 @@
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
  * NOTE: Overload of the summed channels is avoided by averaging the two input channels.
  * NOTE: Can not be used for uint8x3_t samples, see CONTRACT_TO_MONO_24() below.
+ *
+ * Macro has a return statement.
  */
 #define CONTRACT_TO_MONO(in_buff, out_buff, num_in_bytes) \
 { \
@@ -222,6 +330,8 @@
  * NOTE: num_in_bytes must be a multiple of in_buff_channels * in_buff_sample_size.
  * NOTE: Overload of the summed channels is avoided by averaging the two input channels.
  * NOTE: Can not be used for normal, scalar samples, see CONTRACT_TO_MONO() above.
+ *
+ * Macro has a return statement.
  */
 #define CONTRACT_TO_MONO_24(in_buff, out_buff, num_in_bytes) \
 { \
@@ -315,6 +425,53 @@
 
 /*
  * Convert a buffer of N-channel, interleaved samples to M-channel
+ * (where N > M).
+ *   in_buff points to the buffer of samples
+ *   in_buff_channels Specifies the number of channels in the input buffer.
+ *   out_buff points to the buffer to receive converted samples.
+ *   out_buff_channels Specifies the number of channels in the output buffer.
+ *   sample_size_in_bytes Specifies the number of bytes per sample.
+ *   num_in_bytes size of input buffer in BYTES
+ * returns
+ *   the number of BYTES of output data.
+ * NOTE
+ *   channels > M are stored at the end of the output buffer.
+ *   The output and input buffers must be the same length.
+ *   The output and input buffers must either be completely separate (non-overlapping), or
+ *   they must both start at the same address. Partially overlapping buffers are not supported.
+ */
+static size_t contract_channels_non_destructive(const void* in_buff, size_t in_buff_chans,
+                                void* out_buff, size_t out_buff_chans,
+                                unsigned sample_size_in_bytes, size_t num_in_bytes)
+{
+    switch (sample_size_in_bytes) {
+    case 1:
+        CONTRACT_CHANNELS_NON_DESTRUCTIVE((const uint8_t*)in_buff, in_buff_chans,
+                          (uint8_t*)out_buff, out_buff_chans,
+                          num_in_bytes);
+        // returns in macro
+    case 2:
+        CONTRACT_CHANNELS_NON_DESTRUCTIVE((const int16_t*)in_buff, in_buff_chans,
+                          (int16_t*)out_buff, out_buff_chans,
+                          num_in_bytes);
+        // returns in macro
+    case 3:
+        CONTRACT_CHANNELS_NON_DESTRUCTIVE((const uint8x3_t*)in_buff, in_buff_chans,
+                          (uint8x3_t*)out_buff, out_buff_chans,
+                          num_in_bytes);
+        // returns in macro
+    case 4:
+        CONTRACT_CHANNELS_NON_DESTRUCTIVE((const int32_t*)in_buff, in_buff_chans,
+                          (int32_t*)out_buff, out_buff_chans,
+                          num_in_bytes);
+        // returns in macro
+    default:
+        return 0;
+    }
+}
+
+/*
+ * Convert a buffer of N-channel, interleaved samples to M-channel
  * (where N < M).
  *   in_buff points to the buffer of samples
  *   in_buff_channels Specifies the number of channels in the input buffer.
@@ -393,23 +550,6 @@
     }
 }
 
-size_t adjust_channels(const void* in_buff, size_t in_buff_chans,
-                       void* out_buff, size_t out_buff_chans,
-                       unsigned sample_size_in_bytes, size_t num_in_bytes)
-{
-    if (out_buff_chans > in_buff_chans) {
-        return expand_channels(in_buff, in_buff_chans, out_buff,  out_buff_chans,
-                               sample_size_in_bytes, num_in_bytes);
-    } else if (out_buff_chans < in_buff_chans) {
-        return contract_channels(in_buff, in_buff_chans, out_buff,  out_buff_chans,
-                                 sample_size_in_bytes, num_in_bytes);
-    } else if (in_buff != out_buff) {
-        memcpy(out_buff, in_buff, num_in_bytes);
-    }
-
-    return num_in_bytes;
-}
-
 /*
  * Convert a buffer of N-channel, interleaved samples to M-channel
  * (where N < M).
@@ -464,6 +604,78 @@
     }
 }
 
+/*
+ * Convert a buffer of N-channel, interleaved samples to M-channel
+ * (where N < M).
+ *   in_buff points to the buffer of samples
+ *   in_buff_channels Specifies the number of channels in the input buffer.
+ *   out_buff points to the buffer to receive converted samples.
+ *   out_buff_channels Specifies the number of channels in the output buffer.
+ *   sample_size_in_bytes Specifies the number of bytes per sample.
+ *   num_in_bytes size of input buffer in BYTES
+ * returns
+ *   the number of BYTES of output data.
+ * NOTE
+ *   channels > N are interleaved with data from the end of the input buffer.
+ *   The output and input buffers must be the same length.
+ *   The output and input buffers must either be completely separate (non-overlapping), or
+ *   they must both start at the same address. Partially overlapping buffers are not supported.
+ */
+static size_t expand_channels_non_destructive(const void* in_buff, size_t in_buff_chans,
+                              void* out_buff, size_t out_buff_chans,
+                              unsigned sample_size_in_bytes, size_t num_in_bytes)
+{
+    switch (sample_size_in_bytes) {
+    case 1:
+        EXPAND_CHANNELS_NON_DESTRUCTIVE((const uint8_t*)in_buff, in_buff_chans,
+                        (uint8_t*)out_buff, out_buff_chans,
+                        num_in_bytes);
+        // returns in macro
+    case 2:
+        EXPAND_CHANNELS_NON_DESTRUCTIVE((const int16_t*)in_buff, in_buff_chans,
+                        (int16_t*)out_buff, out_buff_chans,
+                        num_in_bytes);
+        // returns in macro
+    case 3:
+        EXPAND_CHANNELS_NON_DESTRUCTIVE((const uint8x3_t*)in_buff, in_buff_chans,
+                        (uint8x3_t*)out_buff, out_buff_chans,
+                        num_in_bytes);
+        // returns in macro
+    case 4:
+        EXPAND_CHANNELS_NON_DESTRUCTIVE((const int32_t*)in_buff, in_buff_chans,
+                        (int32_t*)out_buff, out_buff_chans,
+                        num_in_bytes);
+        // returns in macro
+    default:
+        return 0;
+    }
+}
+
+size_t adjust_channels(const void* in_buff, size_t in_buff_chans,
+                       void* out_buff, size_t out_buff_chans,
+                       unsigned sample_size_in_bytes, size_t num_in_bytes)
+{
+    if (out_buff_chans > in_buff_chans) {
+        return expand_channels(in_buff, in_buff_chans, out_buff,  out_buff_chans,
+                               sample_size_in_bytes, num_in_bytes);
+    } else if (out_buff_chans < in_buff_chans) {
+        return contract_channels(in_buff, in_buff_chans, out_buff,  out_buff_chans,
+                                 sample_size_in_bytes, num_in_bytes);
+    } else if (in_buff != out_buff) {
+        memcpy(out_buff, in_buff, num_in_bytes);
+    }
+
+    return num_in_bytes;
+}
+
 size_t adjust_selected_channels(const void* in_buff, size_t in_buff_chans,
                        void* out_buff, size_t out_buff_chans,
                        unsigned sample_size_in_bytes, size_t num_in_bytes)
@@ -481,3 +693,19 @@
     return num_in_bytes;
 }
 
+size_t adjust_channels_non_destructive(const void* in_buff, size_t in_buff_chans,
+                       void* out_buff, size_t out_buff_chans,
+                       unsigned sample_size_in_bytes, size_t num_in_bytes)
+{
+    if (out_buff_chans > in_buff_chans) {
+        return expand_channels_non_destructive(in_buff, in_buff_chans, out_buff, out_buff_chans,
+                               sample_size_in_bytes, num_in_bytes);
+    } else if (out_buff_chans < in_buff_chans) {
+        return contract_channels_non_destructive(in_buff, in_buff_chans, out_buff, out_buff_chans,
+                                 sample_size_in_bytes, num_in_bytes);
+    } else if (in_buff != out_buff) {
+        memcpy(out_buff, in_buff, num_in_bytes);
+    }
+
+    return num_in_bytes;
+}
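
For illustration, a minimal usage sketch of the new non-destructive conversion
(assuming the declaration is exported through audio_utils/channels.h alongside
adjust_channels; the buffer contents and channel counts are arbitrary):

    #include <stdint.h>
    #include <audio_utils/channels.h>

    static void contract_quad_to_stereo_in_place(void)
    {
        /* [1|2|3|4|1|2|3|4] => [1|2|1|2|3|4|3|4] */
        int16_t buf[8] = { 1, 2, 3, 4, 1, 2, 3, 4 };
        const size_t out_bytes = adjust_channels_non_destructive(
                buf /* in_buff */, 4 /* in_buff_chans */,
                buf /* out_buff */, 2 /* out_buff_chans */,
                sizeof(int16_t), sizeof(buf) /* num_in_bytes */);
        /* out_bytes == 8: four stereo samples remain at the front of buf;
         * channels 3 and 4 of each frame are preserved, interleaved, at the tail.
         */
    }
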
diff --git a/audio_utils/echo_reference.c b/audio_utils/echo_reference.c
index a822519..b23627a 100644
--- a/audio_utils/echo_reference.c
+++ b/audio_utils/echo_reference.c
@@ -544,4 +544,3 @@
     }
     free(er);
 }
-
diff --git a/audio_utils/fixedfft.cpp b/audio_utils/fixedfft.cpp
index 3fcc247..fb2c78e 100644
--- a/audio_utils/fixedfft.cpp
+++ b/audio_utils/fixedfft.cpp
@@ -24,7 +24,6 @@
  * it even faster or smaller, it costs too much on one of the aspects.
  */
 
-#include <stdio.h>
 #include <stdint.h>
 
 #include <audio_utils/fixedfft.h>
diff --git a/audio_utils/format.c b/audio_utils/format.c
index 50872fc..c5575a0 100644
--- a/audio_utils/format.c
+++ b/audio_utils/format.c
@@ -19,8 +19,8 @@
 
 #include <log/log.h>
 
-#include <audio_utils/primitives.h>
 #include <audio_utils/format.h>
+#include <audio_utils/primitives.h>
 
 void memcpy_by_audio_format(void *dst, audio_format_t dst_format,
         const void *src, audio_format_t src_format, size_t count)
diff --git a/audio_utils/include/audio_utils/Balance.h b/audio_utils/include/audio_utils/Balance.h
new file mode 100644
index 0000000..a1e678d
--- /dev/null
+++ b/audio_utils/include/audio_utils/Balance.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_UTILS_BALANCE_H
+#define ANDROID_AUDIO_UTILS_BALANCE_H
+
+#include <functional>   /* std::function */
+#include <limits>       /* std::numeric_limits */
+#include <math.h>       /* expf */
+#include <sstream>
+#include <system/audio.h>
+#include <vector>
+
+namespace android::audio_utils {
+
+class Balance {
+public:
+    /**
+     * \brief Balance processing of left-right volume on audio data.
+     *
+     * Allows processing of audio data with a single balance parameter from [-1, 1].
+     * For efficiency, the class caches balance and channel mask data between calls;
+     * hence, use by multiple threads will require caller locking.
+     *
+     * \param ramp whether to ramp volume or not.
+     * \param curve a monotonic increasing function f: [0, 1] -> [a, b]
+     *        which represents the volume steps from an input domain of [0, 1] to
+     *        an output range [a, b] (ostensibly also from 0 to 1).
+     *        If [a, b] is not [0, 1], it is normalized to [0, 1].
+     *        Curve is typically a convex function, some possible examples:
+     *        [](float x) { return expf(2.f * x); }
+     *        or
+     *        [](float x) { return x * (x + 0.2f); }
+     */
+    explicit Balance(
+            bool ramp = true,
+            std::function<float(float)> curve = [](float x) { return x * (x + 0.2f); })
+        : mRamp(ramp)
+        , mCurve(normalize(std::move(curve))) { }
+
+    /**
+     * \brief Sets whether the process ramps left-right volume changes.
+     *
+     * The default value is true.
+     * The ramp will take place, if needed, on the following process()
+     * using the current balance and volume as the starting point.
+     *
+     * Toggling ramp off and then back on will reset the ramp starting point.
+     *
+     * \param ramp whether ramping is used to smooth volume changes.
+     */
+    void setRamp(bool ramp) {
+        if (ramp == mRamp) return; // no change
+        mRamp = ramp;
+        if (mRamp) { // use current volume and balance as starting point.
+            mRampVolumes = mVolumes;
+            mRampBalance = mBalance;
+        }
+    }
+
+    /**
+     * \brief Sets the channel mask for data passed in.
+     *
+     * setChannelMask() must be called before process() to set
+     * a valid output audio channel mask.
+     *
+     * \param channelMask the audio output channel mask to use.
+     *                    Invalid channel masks are ignored.
+     *
+     */
+    void setChannelMask(audio_channel_mask_t channelMask);
+
+    /**
+     * \brief Sets the left-right balance parameter.
+     *
+     * setBalance() should be called before process() to set
+     * the balance.  The initial value is 0.f (no action).
+     *
+     * \param balance   from -1.f (left) to 0.f (center) to 1.f (right).
+     *
+     */
+    void setBalance(float balance);
+
+    /**
+     * \brief Processes balance for audio data.
+     *
+     * setChannelMask() should be called at least once before calling process()
+     * to set the channel mask.  A balance of 0.f or a channel mask of
+     * less than 2 channels will return with the buffer untouched.
+     *
+     * \param buffer    pointer to the audio data to be modified in-place.
+     * \param frames    number of frames of audio data to convert.
+     *
+     */
+    void process(float *buffer, size_t frames);
+
+    /**
+     * \brief Computes the stereo gains for left and right channels.
+     *
+     * Implementation detail (may change):
+     * This is not an energy preserving balance (e.g. using sin/cos cross fade or some such).
+     * Rather balance preserves full gain on left and right when balance is 0.f,
+     * and decreases the right or left as one changes the balance parameter.
+     *
+     * \param balance   from -1.f (left) to 0.f (center) to 1.f (right).
+     * \param left      pointer to the float where the left gain will be stored.
+     * \param right     pointer to the float where the right gain will be stored.
+     */
+    void computeStereoBalance(float balance, float *left, float *right) const;
+
+    /**
+     * \brief Creates a std::string representation of Balance object for logging.
+     *
+     * \return string representation of Balance object
+     */
+    std::string toString() const;
+
+private:
+
+    /**
+     * \brief Normalizes f: [0, 1] -> [a, b] to g: [0, 1] -> [0, 1].
+     *
+     * A helper function to normalize a float volume function.
+     * g(0) is exactly zero, but g(1) may not necessarily be 1 since we
+     * use reciprocal multiplication instead of division to scale.
+     *
+     * \param f a function from [0, 1] -> [a, b]
+     * \return g a function from [0, 1] -> [0, 1] as a linear function of f.
+     */
+    template<typename T>
+    static std::function<T(T)> normalize(std::function<T(T)> f) {
+        const T f0 = f(0);
+        const T r = T(1) / (f(1) - f0); // reciprocal multiplication
+
+        if (f0 != T(0) ||  // must be exactly 0 at 0, since we promise g(0) == 0
+            fabs(r - T(1)) > std::numeric_limits<T>::epsilon() * 3) { // some fudge allowed on r.
+            return [f, f0, r](T x) { return r * (f(x) - f0); };
+        }
+        // no translation required.
+        return f;
+    }
+
+    // setBalance() changes mBalance and mVolumes based on the channel geometry information.
+    float mBalance = 0.f;              // balance: -1.f (left), 0.f (center), 1.f (right)
+    std::vector<float> mVolumes;       // per channel, the volume adjustment due to balance.
+
+    // setChannelMask() updates mChannelMask, mChannelCount, and mSides to cache the geometry
+    // and then calls setBalance() to update mVolumes.
+
+    audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_INVALID;
+    size_t mChannelCount = 0;          // from mChannelMask, 0 means no processing done.
+
+    std::vector<int> mSides;           // per channel, the side (0 = left, 1 = right, 2 = center)
+                                       // only used for channel position masks.
+
+    // Ramping variables
+    bool mRamp;                       // whether ramp is enabled.
+    float mRampBalance = 0.f;         // last (starting) balance to begin ramp.
+    std::vector<float> mRampVolumes;  // last (starting) volumes to begin ramp, clear for no ramp.
+
+    const std::function<float(float)> mCurve; // monotone volume transfer func [0, 1] -> [0, 1]
+};
+
+} // namespace android::audio_utils
+
+#endif // !ANDROID_AUDIO_UTILS_BALANCE_H
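
For illustration, a minimal sketch of driving Balance from a caller (the buffer
and frame count are arbitrary; AUDIO_CHANNEL_OUT_STEREO comes from system/audio.h):

    #include <audio_utils/Balance.h>

    void applyRightBias(float *stereoBuffer, size_t frames) {
        android::audio_utils::Balance balance;            // default ramp and curve
        balance.setChannelMask(AUDIO_CHANNEL_OUT_STEREO); // required before process()
        balance.setBalance(0.5f);                         // bias toward the right
        balance.process(stereoBuffer, frames);            // adjusts gains in-place
    }
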
diff --git a/audio_utils/include/audio_utils/FdToString.h b/audio_utils/include/audio_utils/FdToString.h
new file mode 100644
index 0000000..25bdd4f
--- /dev/null
+++ b/audio_utils/include/audio_utils/FdToString.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FD_TO_STRING_H
+#define ANDROID_AUDIO_FD_TO_STRING_H
+
+#include <fcntl.h>
+#include <future>
+#include <poll.h>
+#include <sstream>
+#include <unistd.h>
+#include <utils/Timers.h>
+
+#include "clock.h"
+
+namespace android {
+namespace audio_utils {
+
+/**
+ * FdToString
+ *
+ * Captures string data written to a file descriptor.
+ * The class will furnish a writable file descriptor by fd().
+ * The string may be read through getStringAndClose().
+ */
+
+class FdToString {
+public:
+    /**
+     * \param prefix is the prefix string prepended to each new line.
+     * \param timeoutMs is the total timeout in milliseconds to wait for data.
+     */
+    explicit FdToString(const std::string &prefix = "- ", int timeoutMs = 200)
+            : mPrefix(prefix)
+            , mTimeoutTimeNs(systemTime() + timeoutMs * NANOS_PER_MILLISECOND) {
+        const int status = pipe2(mPipeFd, O_CLOEXEC);
+        if (status == 0) {
+            mOutput = std::async(std::launch::async, reader, mPipeFd[0], mTimeoutTimeNs, mPrefix);
+        }
+        // on initialization failure fd() returns -1.
+    }
+
+    ~FdToString() {
+        for (auto &fd : mPipeFd) {
+            if (fd >= 0) {
+                close(fd);
+                fd = -1;
+            }
+        }
+    }
+
+    /**
+     * Returns the write end of the pipe as a file descriptor or -1 if invalid or already closed.
+     *
+     * Do not close this fd directly as this class should own the fd. Instead, use
+     * getStringAndClose() to close the fd and return the string.
+     */
+    int fd() const {
+        return mPipeFd[1];
+    }
+
+    /**
+     * Returns the string representation of data written to the fd.
+     *
+     * An empty string is returned on failure (or timeout).  It is acceptable to call this
+     * method multiple times to obtain the final string; the fd is closed after the first call.
+     */
+    std::string getStringAndClose() {
+        if (!mOutput.valid()) return "";
+        if (mPipeFd[1] >= 0) {
+            close(mPipeFd[1]);
+            mPipeFd[1] = -1;
+        }
+        const int waitMs = toMillisecondTimeoutDelay(systemTime(), mTimeoutTimeNs);
+        std::future_status status = mOutput.wait_for(std::chrono::milliseconds(waitMs));
+        return status == std::future_status::ready ? mOutput.get() : "";
+    }
+
+private:
+    static std::string reader(int fd, int64_t timeoutTimeNs, std::string prefix) {
+        char buf[4096];
+        int red;
+        std::stringstream ss;
+        bool requiresPrefix = true;
+
+        while (true) {
+            struct pollfd pfd = {
+                .fd = fd,
+                .events = POLLIN | POLLRDHUP,
+            };
+            const int waitMs = toMillisecondTimeoutDelay(systemTime(), timeoutTimeNs);
+            // ALOGD("waitMs: %d", waitMs);
+            if (waitMs <= 0) break;
+            const int retval = poll(&pfd, 1 /* nfds */, waitMs);
+            if (retval <= 0 || (pfd.revents & POLLIN) != POLLIN) break; // error or timeout
+            // data is available
+            if ((red = read(fd, buf, sizeof(buf))) <= 0) break;
+            char *delim, *bptr = buf;
+            while (!prefix.empty() && (delim = (char *)memchr(bptr, '\n', red)) != nullptr) {
+                if (requiresPrefix) ss << prefix;
+                const size_t line = delim - bptr + 1;
+                ss.write(bptr, line);
+                bptr += line;
+                red -= line;
+                requiresPrefix = true;
+            }
+            if (red > 0) {
+                ss << prefix;
+                ss.write(bptr, red);
+                requiresPrefix = false;
+            }
+        }
+        return ss.str();
+    }
+
+    const std::string mPrefix;
+    const int64_t mTimeoutTimeNs;
+    int mPipeFd[2] = {-1, -1};
+    std::future<std::string> mOutput;
+};
+
+} // namespace audio_utils
+} // namespace android
+
+#endif // !ANDROID_AUDIO_FD_TO_STRING_H
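
For illustration, a minimal sketch of the capture pattern (dprintf stands in
for any dump-style API that writes to a file descriptor):

    #include <stdio.h>
    #include <audio_utils/FdToString.h>

    std::string captureDump() {
        android::audio_utils::FdToString fdToString("- ");
        const int fd = fdToString.fd();        // -1 if pipe creation failed
        if (fd >= 0) {
            dprintf(fd, "hello\nworld\n");     // any writer may use the fd
        }
        return fdToString.getStringAndClose(); // "- hello\n- world\n"
    }
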
diff --git a/audio_utils/include/audio_utils/LogPlot.h b/audio_utils/include/audio_utils/LogPlot.h
new file mode 100644
index 0000000..c666a30
--- /dev/null
+++ b/audio_utils/include/audio_utils/LogPlot.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LOG_PLOT_H
+#define LOG_PLOT_H
+
+#include <algorithm>
+#include <climits>      // INT_MIN / INT_MAX used by widthOf() below
+#include <cmath>
+#include <iomanip>      // setw
+#include <iostream>
+#include <map>
+#include <sstream>
+#include <string>
+
+// TODO Make a class called LogPlot and put this functionality in it.
+// Alternatively, this file could be renamed AsciiPlot or similar.
+/**
+ * \brief Creates a std::string graph representation of equally-spaced time-series data points.
+ *
+ * \param first     RandomAccessIterator iterator to initial position of sequence.
+ *                  Iterator shall point to a pair<float, bool>, where the float is the data value
+ *                  and the bool is whether the data value is the start of a new data point in time
+ *                  (i.e. a break in time continuity).
+ * \param last      RandomAccessIterator iterator to final position of sequence.
+ * \return the std::string of the graph.
+ *
+ */
+template <class RandomAccessIterator>
+std::string audio_utils_log_plot(RandomAccessIterator first, RandomAccessIterator last)
+{
+    using T = decltype((*first).first);
+
+    constexpr int HEIGHT = 14;                    // Character height of the plot
+    // Leave 20% display space before min and after max data points
+    constexpr float RANGE_BUFFER_ROOM = 0.2f;
+    // Minimum range of lowest and highest y-axis value to display
+    constexpr int RANGE_MIN = 14;
+    constexpr unsigned int WIDTH_MAX = 200U;      // Max character width of plot
+    const size_t size = last - first;
+
+    if (size == 0) {
+        return "";
+    }
+
+    // Find min and max element in the vector.
+    const auto result = std::minmax_element(first, last);
+    const T minVal = (*result.first).first;
+    const T maxVal = (*result.second).first;
+
+    const T range = maxVal - minVal;
+    T graphMin, graphMax;
+    if (range < RANGE_MIN) {
+        T avg = (maxVal + minVal) / 2;
+        graphMin = avg - RANGE_MIN / 2;
+        graphMax = avg + RANGE_MIN / 2;
+    } else {
+        graphMin = minVal - range * RANGE_BUFFER_ROOM;
+        graphMax = maxVal + range * RANGE_BUFFER_ROOM;
+    }
+
+    // Value of one character height increase on the graph
+    const T increment = (graphMax - graphMin) / HEIGHT;
+    // Something went wrong if the increment is not positive; bail out.
+    if (increment <= 0.0f) {
+        return "";
+    }
+
+    std::stringstream ss;
+    ss << std::fixed << std::setprecision(1);
+
+    // Start storing the graph into string.
+    // TODO store everything into a preallocated string rather than use stringstream.
+    // This may make the code easier to maintain.
+    ss << "\n";
+    for (int height = HEIGHT - 1; height >= 0; height--) {
+        int spaces = 1;     // Amount of spaces before the data point
+        ss << std::setw(9) << graphMin + increment * height;
+        ss << std::setw(3) << "-|";
+        auto it = size <= WIDTH_MAX ? first : first + size - WIDTH_MAX;
+        for (; it < last; ++it) {
+            const T power = it->first;
+            const bool start = it->second;
+            // TODO explicitly do type conversion for parameter passed to round()?
+            int px = (int)round((power - graphMin) / increment);
+            // The "it != last - 1" check is a temporary workaround to prevent vertical bar
+            // separators after the last data point entry.
+            if ((start || px == height) && it != last - 1) {
+                ss << std::setw(spaces) << (start ? "|" : "*");
+                spaces = 1;
+            } else {
+                spaces++;
+            }
+        }
+        ss << "\n";
+    }
+    ss << std::setw(12) << "|";
+    ss << std::string(std::min(size - (size_t)1, (size_t)WIDTH_MAX), '_') << "\n\n";
+
+    return ss.str();
+}
+
+// determines how many character spaces an integer takes up.
+inline int widthOf(int x) {
+    int width = 0;
+    if (x < 0) {
+        ++width;
+        x = x == INT_MIN ? INT_MAX : -x;
+    }
+    // assert (x >= 0)
+    do {
+        ++width;
+        x /= 10;
+    } while (x > 0);
+    return width;
+}
+
+// computes the column width required for a specific histogram value
+inline int numberWidth(double number, int leftPadding) {
+    // Added values account for whitespaces needed around numbers, and for the
+    // dot and decimal digit not accounted for by widthOf
+    return std::max(std::max(widthOf(static_cast<int>(number)) + 3, 2), leftPadding + 1);
+}
+
+// TODO Make this templated and add comments.
+inline std::string audio_utils_plot_histogram(const std::map<double, int> &buckets,
+        const char *title = "", const char *label = "", int maxHeight = 10)
+{
+    if (buckets.empty()) {
+        return "";
+    }
+
+    auto it = buckets.begin();
+    double maxDelta = it->first;
+    int maxCount = it->second;
+    // Compute maximum values
+    while (++it != buckets.end()) {
+        if (it->first > maxDelta) {
+            maxDelta = it->first;
+        }
+        if (it->second > maxCount) {
+            maxCount = it->second;
+        }
+    }
+    int height = log2(maxCount) + 1; // maxCount > 0, safe to call log2
+    const int leftPadding = widthOf(1 << height);
+    const int bucketWidth = numberWidth(maxDelta, leftPadding);
+    int scalingFactor = 1;
+    // scale data if it exceeds maximum height
+    if (height > maxHeight) {
+        scalingFactor = (height + maxHeight) / maxHeight;
+        height /= scalingFactor;
+    }
+    std::stringstream ss;
+    ss << title << "\n " << std::setw(leftPadding) << " ";
+    // write histogram label line with bucket values
+    for (auto const &x : buckets) {
+        const int colWidth = numberWidth(x.first, leftPadding);
+        ss << std::setw(colWidth) << x.second;
+    }
+    // write histogram ascii art
+    // underscores and spaces length corresponds to maximum width of histogram
+    constexpr int kLen = 200;
+    static const std::string underscores(kLen, '_');
+    static const std::string spaces(kLen, ' ');
+    auto getTail = [](const size_t n, const std::string &s) {
+        return s.c_str() + s.size() - std::min(n, s.size());
+    };
+
+    ss << "\n ";
+    for (int row = height * scalingFactor; row >= 0; row -= scalingFactor) {
+        // TODO explain how value is derived from log2 and why it doesn't overflow.
+        const int value = 1 << row;
+        ss << getTail(leftPadding, spaces);
+        for (auto const &x : buckets) {
+            const int colWidth = numberWidth(x.first, leftPadding);
+            ss << getTail(colWidth - 1, spaces) <<
+                (x.second < value ? " " : "|");
+        }
+        ss << "\n ";
+    }
+    // print x-axis
+    const int columns = static_cast<int>(buckets.size());
+    ss << std::setw(leftPadding) << " "
+        << getTail((columns + 1) * bucketWidth, underscores) << "\n ";
+
+    // write footer with bucket labels
+    ss << std::setw(leftPadding) << " ";
+    for (auto const &x : buckets) {
+        const int colWidth = numberWidth(x.first, leftPadding);
+        ss << std::setw(colWidth) << std::fixed << std::setprecision(1) << x.first;
+    }
+    ss << getTail(bucketWidth, spaces) << label << "\n";
+
+    return ss.str();
+}
+
+#endif // !LOG_PLOT_H
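
For illustration, a minimal sketch of plotting a short power series (the sample
values are arbitrary):

    #include <string>
    #include <utility>
    #include <vector>
    #include <audio_utils/LogPlot.h>

    std::string plotExample() {
        std::vector<std::pair<float, bool>> entries = {
            {-40.f, false}, {-30.f, false}, {-25.f, false},
            {-25.f, true},   // true marks a break in time continuity
            {-35.f, false}, {-50.f, false},
        };
        // Renders a 14-row ASCII graph: '*' for data points, '|' for series breaks.
        return audio_utils_log_plot(entries.begin(), entries.end());
    }
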
diff --git a/audio_utils/include/audio_utils/SimpleLog.h b/audio_utils/include/audio_utils/SimpleLog.h
index d5fd021..420947c 100644
--- a/audio_utils/include/audio_utils/SimpleLog.h
+++ b/audio_utils/include/audio_utils/SimpleLog.h
@@ -129,15 +129,17 @@
      * \param buffer            contains a null terminated string, which may have
      *                          special characters such as % and \ that are
      *                          not interpreted.
+     *                          This could be a char * or a std::string.
      */
-    void logs(int64_t nowNs, const char *buffer)
+    template <typename U>
+    void logs(int64_t nowNs, U&& buffer)
     {
         // store in circular array
         std::lock_guard<std::mutex> guard(mLock);
         if (nowNs == -1) {
             nowNs = audio_utils_get_real_time_ns();
         }
-        mLog.emplace_back(nowNs, std::string(buffer));
+        mLog.emplace_back(nowNs, std::forward<U>(buffer));
         if (mLog.size() > mMaxLogLines) {
             mLog.pop_front();
         }
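
With the forwarding template above, a std::string can be moved into the log
without an extra copy. A minimal sketch (assuming SimpleLog's default constructor):

    #include <string>
    #include <utility>
    #include <audio_utils/SimpleLog.h>

    void logBoth(android::SimpleLog &log) {
        log.logs(-1 /* nowNs: use current time */, "a C string is copied");
        std::string s("a std::string may be moved");
        log.logs(-1, std::move(s));  // forwarded into the log, avoiding a copy
    }
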
diff --git a/audio_utils/include/audio_utils/Statistics.h b/audio_utils/include/audio_utils/Statistics.h
new file mode 100644
index 0000000..75b9335
--- /dev/null
+++ b/audio_utils/include/audio_utils/Statistics.h
@@ -0,0 +1,857 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_UTILS_STATISTICS_H
+#define ANDROID_AUDIO_UTILS_STATISTICS_H
+
+#ifdef __cplusplus
+
+#include "variadic_utils.h"
+
+// variadic_utils already contains stl headers; in addition:
+#include <deque> // for ReferenceStatistics implementation
+#include <sstream>
+
+namespace android {
+namespace audio_utils {
+
+/**
+ * Compensated summation is used to accumulate a sequence of floating point
+ * values, with "compensation" information to help preserve precision lost
+ * due to catastrophic cancellation, e.g. (BIG) + (SMALL) - (BIG) = 0.
+ *
+ * We provide two forms of compensated summation:
+ * the Kahan variant (which has better properties if the sum is generally
+ * larger than the data added) and the Neumaier variant (which is better if
+ * the sum or the added data may alternately be larger).
+ *
+ * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
+ *
+ * Alternative approaches include divide-and-conquer summation
+ * which provides increased accuracy with log n stack depth (recursion).
+ *
+ * https://en.wikipedia.org/wiki/Pairwise_summation
+ */
+
+template <typename T>
+struct KahanSum {
+    T mSum{};
+    T mCorrection{}; // negative low order bits of mSum.
+
+    constexpr KahanSum<T>() = default;
+
+    explicit constexpr KahanSum<T>(const T& value)
+        : mSum{value}
+    { }
+
+    // takes T not KahanSum<T>
+    friend constexpr KahanSum<T> operator+(KahanSum<T> lhs, const T& rhs) {
+        const T y = rhs - lhs.mCorrection;
+        const T t = lhs.mSum + y;
+
+#ifdef __FAST_MATH__
+#warning "fast math enabled, could optimize out KahanSum correction"
+#endif
+
+        lhs.mCorrection = (t - lhs.mSum) - y; // compiler, please do not optimize with /fp:fast
+        lhs.mSum = t;
+        return lhs;
+    }
+
+    constexpr KahanSum<T>& operator+=(const T& rhs) { // takes T not KahanSum<T>
+        *this = *this + rhs;
+        return *this;
+    }
+
+    constexpr operator T() const {
+        return mSum;
+    }
+
+    constexpr void reset() {
+        mSum = {};
+        mCorrection = {};
+    }
+};
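+
+// Example (illustrative sketch, values arbitrary): with float, addends below
+// the current sum's ulp are lost by a plain accumulator but are carried in
+// mCorrection here.
+//
+//   KahanSum<float> sum(1e8f);                    // ulp at 1e8f is 8.f
+//   for (int i = 0; i < 1024; ++i) sum += 0.25f;  // each add alone rounds away
+//   // (float)sum is within an ulp of 1e8f + 256.f; a bare float sum stays 1e8f.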
+
+// A more robust version of Kahan summation for input greater than sum.
+// TODO: investigate variants that reincorporate mCorrection bits into mSum if possible.
+template <typename T>
+struct NeumaierSum {
+    T mSum{};
+    T mCorrection{}; // low order bits of mSum.
+
+    constexpr NeumaierSum<T>() = default;
+
+    explicit constexpr NeumaierSum<T>(const T& value)
+        : mSum{value}
+    { }
+
+    friend constexpr NeumaierSum<T> operator+(NeumaierSum<T> lhs, const T& rhs) {
+        const T t = lhs.mSum + rhs;
+
+        if (const_abs(lhs.mSum) >= const_abs(rhs)) {
+            lhs.mCorrection += (lhs.mSum - t) + rhs;
+        } else {
+            lhs.mCorrection += (rhs - t) + lhs.mSum;
+        }
+        lhs.mSum = t;
+        return lhs;
+    }
+
+    constexpr NeumaierSum<T>& operator+=(const T& rhs) { // takes T not NeumaierSum<T>
+        *this = *this + rhs;
+        return *this;
+    }
+
+    static constexpr T const_abs(T x) {
+        return x < T{} ? -x : x;
+    }
+
+    constexpr operator T() const {
+        return mSum + mCorrection;
+    }
+
+    constexpr void reset() {
+        mSum = {};
+        mCorrection = {};
+    }
+};
+
+//-------------------------------------------------------------------
+// Constants and limits
+
+template <typename T, typename T2 = void> struct StatisticsConstants;
+
+template <typename T>
+struct StatisticsConstants<T, std::enable_if_t<std::is_arithmetic<T>::value>> {
+    // value closest to negative infinity for type T
+    static constexpr T negativeInfinity() {
+        return std::numeric_limits<T>::has_infinity ?
+                -std::numeric_limits<T>::infinity() : std::numeric_limits<T>::min();
+    }
+
+    static constexpr T mNegativeInfinity = negativeInfinity();
+
+    // value closest to positive infinity for type T
+    static constexpr T positiveInfinity() {
+        return std::numeric_limits<T>::has_infinity ?
+                std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
+    }
+
+    static constexpr T mPositiveInfinity = positiveInfinity();
+};
+
+// specialize for tuple and pair
+template <typename T>
+struct StatisticsConstants<T, std::enable_if_t<!std::is_arithmetic<T>::value>> {
+private:
+    template <std::size_t... I>
+    static constexpr auto negativeInfinity(std::index_sequence<I...>) {
+       return T{StatisticsConstants<
+               typename std::tuple_element<I, T>::type>::mNegativeInfinity...};
+    }
+    template <std::size_t... I>
+    static constexpr auto positiveInfinity(std::index_sequence<I...>) {
+       return T{StatisticsConstants<
+               typename std::tuple_element<I, T>::type>::mPositiveInfinity...};
+    }
+public:
+    static constexpr auto negativeInfinity() {
+       return negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
+    }
+    static constexpr auto mNegativeInfinity =
+        negativeInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
+    static constexpr auto positiveInfinity() {
+       return positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
+    }
+    static constexpr auto mPositiveInfinity =
+        positiveInfinity(std::make_index_sequence<std::tuple_size<T>::value>());
+};
+
+/**
+ * Statistics provides a running weighted average, variance, and standard deviation of
+ * a sample stream. It is more numerically stable for floating point computation than a
+ * naive sum of values, sum of values squared.
+ *
+ * The weighting is like an IIR filter, with the most recent sample weighted as 1, and decaying
+ * by alpha (between 0 and 1).  With alpha == 1, this is rectangular weighting, reducing to
+ * Welford's algorithm.
+ *
+ * The IIR filter weighting emphasizes more recent samples, has low overhead updates,
+ * constant storage, and constant computation (per update or variance read).
+ *
+ * This is a variant of the weighted mean and variance algorithms described here:
+ * https://en.wikipedia.org/wiki/Moving_average
+ * https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm
+ * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean
+ *
+ * weight = sum_{i=1}^n \alpha^{n-i}
+ * mean = 1/weight * sum_{i=1}^n \alpha^{n-i} x_i
+ * var = 1/weight * sum_{i=1}^n \alpha^{n-i} (x_i - mean)^2
+ *
+ * The Statistics class is safe to call from a SCHED_FIFO thread with the exception of
+ * the toString() method, which uses std::stringstream to format data for printing.
+ *
+ * Long term data accumulation and constant alpha:
+ * If the alpha weight is 1 (or not specified) then statistics objects with float
+ * summation types (D, S) should NOT add more than the mantissa-bits elements
+ * without reset to prevent variance increases due to weight precision underflow.
+ * This is 1 << 23 elements for float and 1 << 52 elements for double.
+ *
+ * Setting alpha less than 1 avoids this underflow problem.
+ * Alpha < 1 - (epsilon * 32), where epsilon is std::numeric_limits<D>::epsilon()
+ * is recommended for continuously running statistics (alpha <= 0.999996
+ * for float summation precision).
+ *
+ * Alpha may also change on-the-fly, based on the reliability of
+ * new information.  In that case, alpha may be set temporarily greater
+ * than 1.
+ *
+ * https://en.wikipedia.org/wiki/Weighted_arithmetic_mean#Reliability_weights_2
+ *
+ * Statistics may also be collected on variadic "vector" object instead of
+ * scalars, where the variance may be computed as an inner product radial squared
+ * distance from the mean; or as an outer product where the variance returned
+ * is a covariance matrix.
+ *
+ * TODO:
+ * 1) Alternative versions of Kahan/Neumaier sum that better preserve precision.
+ * 2) Add binary math ops to corrected sum classes for better precision in lieu of long double.
+ * 3) Add Cholesky decomposition to ensure positive definite covariance matrices if
+ *    the input is a variadic object.
+ */
+
+/**
+ * Mean may have catastrophic cancellation of positive and negative sample values,
+ * so we use Kahan summation in the algorithms below (or substitute "D" if not needed).
+ *
+ * https://en.wikipedia.org/wiki/Kahan_summation_algorithm
+ */
+
+template <
+    typename T,               // input data type
+    typename D = double,      // output mean data type
+    typename S = KahanSum<D>, // compensated mean summation type, if any
+    typename A = double,      // weight type
+    typename D2 = double,     // output variance "D^2" type
+    typename PRODUCT = std::multiplies<D> // how the output variance is computed
+    >
+class Statistics {
+public:
+    /** alpha is the weight (if alpha == 1. we use a rectangular window) */
+    explicit constexpr Statistics(A alpha = A(1.))
+        : mAlpha(alpha)
+    { }
+
+    template <size_t N>
+    explicit constexpr Statistics(const T (&a)[N], A alpha = A(1.))
+        : mAlpha(alpha)
+    {
+        for (const auto &data : a) {
+            add(data);
+        }
+    }
+
+    constexpr void setAlpha(A alpha) {
+        mAlpha = alpha;
+    }
+
+    constexpr void add(const T &value) {
+        // Note: the fastest implementation uses fmin/fminf but would not be constexpr
+
+        mMax = audio_utils::max(mMax, value); // order important: reject NaN
+        mMin = audio_utils::min(mMin, value); // order important: reject NaN
+        ++mN;
+        const D delta = value - mMean;
+        /* if (mAlpha == 1.) we have Welford's algorithm
+            ++mN;
+            mMean += delta / mN;
+            mM2 += delta * (value - mMean);
+
+            Note delta * (value - mMean) should be non-negative.
+        */
+        mWeight = A(1.) + mAlpha * mWeight;
+        mWeight2 = A(1.) + mAlpha * mAlpha * mWeight2;
+        D meanDelta = delta / mWeight;
+        mMean += meanDelta;
+        mM2 = mAlpha * mM2 + PRODUCT()(delta, (value - mMean));
+
+        /*
+           Alternate variant related to:
+           http://mathworld.wolfram.com/SampleVarianceComputation.html
+
+           const double sweight = mAlpha * mWeight;
+           mWeight = 1. + sweight;
+           const double dmean = delta / mWeight;
+           mMean += dmean;
+           mM2 = mAlpha * mM2 + mWeight * sweight * dmean * dmean;
+
+           The update is slightly different than Welford's algorithm
+           showing a by-construction non-negative update to M2.
+        */
+    }
+
+    constexpr int64_t getN() const {
+        return mN;
+    }
+
+    constexpr void reset() {
+        mMin = StatisticsConstants<T>::positiveInfinity();
+        mMax = StatisticsConstants<T>::negativeInfinity();
+        mN = 0;
+        mWeight = {};
+        mWeight2 = {};
+        mMean = {};
+        mM2 = {};
+    }
+
+    constexpr A getWeight() const {
+        return mWeight;
+    }
+
+    constexpr D getMean() const {
+        return mMean;
+    }
+
+    constexpr D2 getVariance() const {
+        if (mN < 2) {
+            // must have 2 samples for sample variance.
+            return {};
+        } else {
+            return mM2 / getSampleWeight();
+        }
+    }
+
+    constexpr D2 getPopVariance() const {
+        if (mN < 1) {
+            return {};
+        } else {
+            return mM2 / mWeight;
+        }
+    }
+
+    // explicitly use sqrt_constexpr if you need a constexpr version
+    D2 getStdDev() const {
+        return android::audio_utils::sqrt(getVariance());
+    }
+
+    D2 getPopStdDev() const {
+        return android::audio_utils::sqrt(getPopVariance());
+    }
+
+    constexpr T getMin() const {
+        return mMin;
+    }
+
+    constexpr T getMax() const {
+        return mMax;
+    }
+
+    std::string toString() const {
+        const int64_t N = getN();
+        if (N == 0) return "unavail";
+
+        std::stringstream ss;
+        ss << "ave=" << getMean();
+        if (N > 1) {
+            // we use the sample standard deviation (not entirely unbiased,
+            // though the sample variance is unbiased).
+            ss << " std=" << getStdDev();
+        }
+        ss << " min=" << getMin();
+        ss << " max=" << getMax();
+        return ss.str();
+    }
+
+private:
+    A mAlpha;
+    T mMin{StatisticsConstants<T>::positiveInfinity()};
+    T mMax{StatisticsConstants<T>::negativeInfinity()};
+
+    int64_t mN = 0;  // running count of samples.
+    A mWeight{};     // sum of weights.
+    A mWeight2{};    // sum of weights squared.
+    S mMean{};       // running mean.
+    D2 mM2{};         // running unnormalized variance.
+
+    // Reliability correction for unbiasing variance, since mean is estimated
+    // from same sample stream as variance.
+    // if mAlpha == 1 this is mWeight - 1;
+    //
+    // TODO: consider exposing the correction factor.
+    constexpr A getSampleWeight() const {
+        // if mAlpha is constant then the mWeight2 member variable is not
+        // needed, one can use instead:
+        // return (mWeight - D(1.)) * D(2.) / (D(1.) + mAlpha);
+
+        return mWeight - mWeight2 / mWeight;
+    }
+};
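+
+// Example (illustrative sketch, values arbitrary): rectangular-window (alpha == 1)
+// statistics over a few samples.
+//
+//   Statistics<double> stats;
+//   for (const double x : { 1., 2., 3., 4. }) stats.add(x);
+//   // stats.getMean() == 2.5, stats.getVariance() == 5./3. (sample variance),
+//   // stats.getMin() == 1., stats.getMax() == 4.
+//
+// Pass an alpha < 1, e.g. Statistics<double> iir(0.9), to emphasize recent samples.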
+
+/**
+ * ReferenceStatistics is a naive implementation of the weighted running variance,
+ * which consumes more space and is slower than Statistics.  It is provided for
+ * comparison and testing purposes.  Do not call from a SCHED_FIFO thread!
+ *
+ * Note: Common code not combined for implementation clarity.
+ *       We don't invoke Kahan summation or other tricks.
+ */
+template <
+    typename T, // input data type
+    typename D = double // output mean/variance data type
+    >
+class ReferenceStatistics {
+public:
+    /** alpha is the weight (alpha == 1. is rectangular window) */
+    explicit ReferenceStatistics(D alpha = D(1.))
+        : mAlpha(alpha)
+    { }
+
+    constexpr void setAlpha(D alpha) {
+        mAlpha = alpha;
+    }
+
+    // For independent testing, the min and max behavior here intentionally
+    // differs slightly from Statistics with respect to NaN.
+    constexpr void add(const T &value) {
+        if (getN() == 0) {
+            mMax = value;
+            mMin = value;
+        } else if (value > mMax) {
+            mMax = value;
+        } else if (value < mMin) {
+            mMin = value;
+        }
+
+        mData.push_front(value);
+        mAlphaList.push_front(mAlpha);
+    }
+
+    int64_t getN() const {
+        return mData.size();
+    }
+
+    void reset() {
+        mMin = {};
+        mMax = {};
+        mData.clear();
+        mAlphaList.clear();
+    }
+
+    D getWeight() const {
+        D weight{};
+        D alpha_i(1.);
+        for (size_t i = 0; i < mData.size(); ++i) {
+            weight += alpha_i;
+            alpha_i *= mAlphaList[i];
+        }
+        return weight;
+    }
+
+    D getWeight2() const {
+        D weight2{};
+        D alpha2_i(1.);
+        for (size_t i = 0; i < mData.size(); ++i) {
+            weight2 += alpha2_i;
+            alpha2_i *= mAlphaList[i] * mAlphaList[i];
+        }
+        return weight2;
+    }
+
+    D getMean() const {
+        D wsum{};
+        D alpha_i(1.);
+        for (size_t i = 0; i < mData.size(); ++i) {
+            wsum += alpha_i * mData[i];
+            alpha_i *= mAlphaList[i];
+        }
+        return wsum / getWeight();
+    }
+
+    // Should always return a non-negative value.
+    D getVariance() const {
+        return getUnweightedVariance() / (getWeight() - getWeight2() / getWeight());
+    }
+
+    // Should always return a non-negative value.
+    D getPopVariance() const {
+        return getUnweightedVariance() / getWeight();
+    }
+
+    D getStdDev() const {
+        return sqrt(getVariance());
+    }
+
+    D getPopStdDev() const {
+        return sqrt(getPopVariance());
+    }
+
+    T getMin() const {
+        return mMin;
+    }
+
+    T getMax() const {
+        return mMax;
+    }
+
+    std::string toString() const {
+        const auto N = getN();
+        if (N == 0) return "unavail";
+
+        std::stringstream ss;
+        ss << "ave=" << getMean();
+        if (N > 1) {
+            // we use the sample standard deviation (not entirely unbiased,
+            // though the sample variance is unbiased).
+            ss << " std=" << getStdDev();
+        }
+        ss << " min=" << getMin();
+        ss << " max=" << getMax();
+        return ss.str();
+    }
+
+private:
+    T mMin{};
+    T mMax{};
+
+    D mAlpha;                 // current alpha value
+    std::deque<T> mData;      // store all the data for exact summation, mData[0] most recent.
+    std::deque<D> mAlphaList; // alpha value for the data added.
+
+    D getUnweightedVariance() const {
+        const D mean = getMean();
+        D wsum{};
+        D alpha_i(1.);
+        for (size_t i = 0; i < mData.size(); ++i) {
+            const D diff = mData[i] - mean;
+            wsum += alpha_i * diff * diff;
+            alpha_i *= mAlphaList[i];
+        }
+        return wsum;
+    }
+};
+
+/**
+ * Least squares fitting of a 2D straight line based on the covariance matrix.
+ *
+ * See formula from:
+ * http://mathworld.wolfram.com/LeastSquaresFitting.html
+ *
+ * y = a + b*x
+ *
+ * returns a: y intercept
+ *         b: slope
+ *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
+ *
+ * For better numerical stability, it is suggested to use only the slope b,
+ * since the least squares fit line passes through the mean:
+ *
+ * (y - mean_y) = b * (x - mean_x).
+ *
+ */
+template <typename T>
+constexpr void computeYLineFromStatistics(
+        T &a, T& b, T &r2,
+        const T& mean_x,
+        const T& mean_y,
+        const T& var_x,
+        const T& cov_xy,
+        const T& var_y) {
+
+    // Dimensionally r2 is unitless.  If there is no correlation
+    // then r2 is clearly 0 as cov_xy == 0.  If x and y are identical up to a scale
+    // and shift, then r2 is 1.
+    r2 = cov_xy * cov_xy / (var_x * var_y);
+
+    // The least squares solution to the overconstrained matrix equation requires
+    // the pseudo-inverse. In 2D, the best-fit slope is the mean removed
+    // (via covariance and variance) dy/dx derived from the joint expectation
+    // (this is also dimensionally correct).
+    b = cov_xy / var_x;
+
+    // The best fit line goes through the mean, and can be used to find the y intercept.
+    a = mean_y - b * mean_x;
+}
+
+/**
+ * LinearLeastSquaresFit<> class is derived from the Statistics<> class, with a 2 element array.
+ * Arrays are preferred over tuples or pairs because copy assignment is constexpr and
+ * arrays are trivially copyable.
+ */
+template <typename T>
+class LinearLeastSquaresFit : public
+    Statistics<std::array<T, 2>, // input
+               std::array<T, 2>, // mean data output
+               std::array<T, 2>, // compensated mean sum
+               T,                // weight type
+               std::array<T, 3>, // covariance_ut
+               audio_utils::outerProduct_UT_array<std::array<T, 2>>>
+{
+public:
+    constexpr explicit LinearLeastSquaresFit(const T &alpha = T(1.))
+        : Statistics<std::array<T, 2>,
+             std::array<T, 2>,
+             std::array<T, 2>,
+             T,
+             std::array<T, 3>, // covariance_ut
+             audio_utils::outerProduct_UT_array<std::array<T, 2>>>(alpha) { }
+
+    /* Note: base class method: add(value)
+
+    constexpr void add(const std::array<T, 2>& value);
+
+       use:
+          add({1., 2.});
+       or
+          add(to_array(myTuple));
+    */
+
+    /**
+     * y = a + b*x
+     *
+     * returns a: y intercept
+     *         b: y slope (dy / dx)
+     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
+     */
+    constexpr void computeYLine(T &a, T &b, T &r2) const {
+        computeYLineFromStatistics(a, b, r2,
+                std::get<0>(this->getMean()), /* mean_x */
+                std::get<1>(this->getMean()), /* mean_y */
+                std::get<0>(this->getPopVariance()), /* var_x */
+                std::get<1>(this->getPopVariance()), /* cov_xy */
+                std::get<2>(this->getPopVariance())); /* var_y */
+    }
+
+    /**
+     * x = a + b*y
+     *
+     * returns a: x intercept
+     *         b: x slope (dx / dy)
+     *         r2: correlation coefficient (1.0 means great fit, 0.0 means no fit.)
+     */
+    constexpr void computeXLine(T &a, T &b, T &r2) const {
+        // reverse x and y for X line computation
+        computeYLineFromStatistics(a, b, r2,
+                std::get<1>(this->getMean()), /* mean_x */
+                std::get<0>(this->getMean()), /* mean_y */
+                std::get<2>(this->getPopVariance()), /* var_x */
+                std::get<1>(this->getPopVariance()), /* cov_xy */
+                std::get<0>(this->getPopVariance())); /* var_y */
+    }
+
+    /**
+     * this returns the estimate of y from a given x
+     */
+    constexpr T getYFromX(const T &x) const {
+        const T var_x = std::get<0>(this->getPopVariance());
+        const T cov_xy = std::get<1>(this->getPopVariance());
+        const T b = cov_xy / var_x;  // dy / dx
+
+        const T mean_x = std::get<0>(this->getMean());
+        const T mean_y = std::get<1>(this->getMean());
+        return /* y = */ b * (x - mean_x) + mean_y;
+    }
+
+    /**
+     * Returns the estimate of x for a given y.
+     */
+    constexpr T getXFromY(const T &y) const {
+        const T cov_xy = std::get<1>(this->getPopVariance());
+        const T var_y = std::get<2>(this->getPopVariance());
+        const T b = cov_xy / var_y;  // dx / dy
+
+        const T mean_x = std::get<0>(this->getMean());
+        const T mean_y = std::get<1>(this->getMean());
+        return /* x = */ b * (y - mean_y) + mean_x;
+    }
+
+    constexpr T getR2() const {
+        const T var_x = std::get<0>(this->getPopVariance());
+        const T cov_xy = std::get<1>(this->getPopVariance());
+        const T var_y = std::get<2>(this->getPopVariance());
+        return cov_xy * cov_xy / (var_x * var_y);
+    }
+};
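+
+/* Example usage (a minimal sketch; the values are illustrative):
+ *
+ *   LinearLeastSquaresFit<double> fit;
+ *   fit.add({0., 1.});
+ *   fit.add({1., 3.});
+ *   fit.add({2., 5.});  // the points lie on y = 1 + 2x
+ *
+ *   double a{}, b{}, r2{};
+ *   fit.computeYLine(a, b, r2);  // expect a ~ 1, b ~ 2, r2 ~ 1
+ */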
+
+/**
+ * constexpr statistics functions of the form:
+ * algorithm(forward_iterator begin, forward_iterator end)
+ *
+ * These check that the input looks like an iterator, but don't
+ * check whether it satisfies __is_forward_iterator<>.
+ *
+ * Divide-and-conquer pairwise summation forms will require
+ * __is_random_access_iterator<>.
+ */
+
+// Returns the max of the elements, or negative infinity if there are no elements.
+template <typename T,
+          std::enable_if_t<is_iterator<T>::value, int> = 0>
+constexpr auto max(T begin, T end) {
+    using S = std::remove_cv_t<std::remove_reference_t<
+            decltype(*begin)>>;
+    S maxValue = StatisticsConstants<S>::mNegativeInfinity;
+    for (auto it = begin; it != end; ++it) {
+        maxValue = std::max(maxValue, *it);
+    }
+    return maxValue;
+}
+
+// Returns the min of the elements, or positive infinity if there are no elements.
+template <typename T,
+          std::enable_if_t<is_iterator<T>::value, int> = 0>
+constexpr auto min(T begin, T end) {
+    using S = std::remove_cv_t<std::remove_reference_t<
+            decltype(*begin)>>;
+    S minValue = StatisticsConstants<S>::mPositiveInfinity;
+    for (auto it = begin; it != end; ++it) {
+        minValue = std::min(minValue, *it);
+    }
+    return minValue;
+}
+
+template <typename D = double, typename S = KahanSum<D>, typename T,
+          std::enable_if_t<is_iterator<T>::value, int> = 0>
+constexpr auto sum(T begin, T end) {
+    S sum{};
+    for (auto it = begin; it != end; ++it) {
+        sum += D(*it);
+    }
+    return sum;
+}
+
+template <typename D = double, typename S = KahanSum<D>, typename T,
+          std::enable_if_t<is_iterator<T>::value, int> = 0>
+constexpr auto sumSqDiff(T begin, T end, D x = {}) {
+    S sum{};
+    for (auto it = begin; it != end; ++it) {
+        const D diff = *it - x;
+        sum += diff * diff;
+    }
+    return sum;
+}
+
+// Form: algorithm(array[]), where array size is known to the compiler.
+template <typename T, size_t N>
+constexpr T max(const T (&a)[N]) {
+    return max(&a[0], &a[N]);
+}
+
+template <typename T, size_t N>
+constexpr T min(const T (&a)[N]) {
+    return min(&a[0], &a[N]);
+}
+
+template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
+constexpr D sum(const T (&a)[N]) {
+    return sum<D, S>(&a[0], &a[N]);
+}
+
+template <typename D = double, typename S = KahanSum<D>, typename T, size_t N>
+constexpr D sumSqDiff(const T (&a)[N], D x = {}) {
+    return sumSqDiff<D, S>(&a[0], &a[N], x);
+}
+
+// TODO: remove when std::isnan is constexpr
+template <typename T>
+constexpr T isnan(T x) {
+    return __builtin_isnan(x);
+}
+
+// constexpr sqrt computed by the Babylonian (Newton's) method.
+// Please use math libraries for non-constexpr cases.
+// TODO: remove when there is some std::sqrt which is constexpr.
+//
+// https://en.wikipedia.org/wiki/Methods_of_computing_square_roots
+
+// Watch out when using the unchecked version; prefer the checked version below.
+template <typename T>
+constexpr T sqrt_constexpr_unchecked(T x, T prev) {
+    static_assert(std::is_floating_point<T>::value, "must be floating point type");
+    const T next = T(0.5) * (prev + x / prev);
+    return next == prev ? next : sqrt_constexpr_unchecked(x, next);
+}
+
+// checked sqrt
+template <typename T>
+constexpr T sqrt_constexpr(T x) {
+    static_assert(std::is_floating_point<T>::value, "must be floating point type");
+    if (x < T{}) { // negative values return nan
+        return std::numeric_limits<T>::quiet_NaN();
+    } else if (isnan(x)
+            || x == std::numeric_limits<T>::infinity()
+            || x == T{}) {
+        return x;
+    } else { // good to go.
+        return sqrt_constexpr_unchecked(x, T(1.));
+    }
+}
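+
+// Example (a sketch): sqrt_constexpr(4.) is expected to converge to exactly 2.,
+// while sqrt_constexpr(-1.) returns NaN and sqrt_constexpr(0.) returns 0.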
+
+} // namespace audio_utils
+} // namespace android
+
+#endif // __cplusplus
+
+/** \cond */
+__BEGIN_DECLS
+/** \endcond */
+
+/** Simple stats structure for low overhead statistics gathering.
+ * Designed to be accessed by C (with no functional getters).
+ * Zero initialize with {} to clear or reset.
+ */
+typedef struct {
+    int64_t n;
+    double min;
+    double max;
+    double last;
+    double mean;
+} simple_stats_t;
+
+/** logs new value to the simple_stats_t */
+static inline void simple_stats_log(simple_stats_t *stats, double value) {
+    if (++stats->n == 1) {
+        stats->min = stats->max = stats->last = stats->mean = value;
+    } else {
+        stats->last = value;
+        if (value < stats->min) {
+            stats->min = value;
+        } else if (value > stats->max) {
+            stats->max = value;
+        }
+        // Welford's algorithm for mean
+        const double delta = value - stats->mean;
+        stats->mean += delta / stats->n;
+    }
+}
+
+/** Dumps the statistics to a string; returns the length of the string excluding null termination. */
+static inline size_t simple_stats_to_string(simple_stats_t *stats, char *buffer, size_t size) {
+    if (size == 0) {
+        return 0;
+    } else if (stats->n == 0) {
+        return snprintf(buffer, size, "none");
+    } else {
+        return snprintf(buffer, size, "(mean: %lf  min: %lf  max: %lf  last: %lf  n: %lld)",
+                stats->mean, stats->min, stats->max, stats->last, (long long)stats->n);
+    }
+}
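+
+/* Example C usage (a minimal sketch):
+ *
+ *   simple_stats_t stats = {0};     // zero initialize to reset
+ *   simple_stats_log(&stats, 1.0);
+ *   simple_stats_log(&stats, 3.0);  // mean == 2.0, min == 1.0, max == 3.0, n == 2
+ *
+ *   char buffer[256];
+ *   simple_stats_to_string(&stats, buffer, sizeof(buffer));
+ */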
+
+/** \cond */
+__END_DECLS
+/** \endcond */
+
+#endif // !ANDROID_AUDIO_UTILS_STATISTICS_H
diff --git a/audio_utils/include/audio_utils/TimestampVerifier.h b/audio_utils/include/audio_utils/TimestampVerifier.h
new file mode 100644
index 0000000..968d046
--- /dev/null
+++ b/audio_utils/include/audio_utils/TimestampVerifier.h
@@ -0,0 +1,309 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_UTILS_TIMESTAMP_VERIFIER_H
+#define ANDROID_AUDIO_UTILS_TIMESTAMP_VERIFIER_H
+
+#include <audio_utils/clock.h>
+#include <audio_utils/Statistics.h>
+
+namespace android {
+
+/** Verifies that a sequence of timestamps, each a (frame count, time) pair,
+ * is consistent with the sample rate.
+ *
+ * F is the type of the frame count (for example int64_t).
+ * T is the type of the time in nanoseconds (for example int64_t).
+ *
+ * All methods except for toString() are safe to call from a SCHED_FIFO thread.
+ */
+template <typename F /* frame count */, typename T /* time units */>
+class TimestampVerifier {
+public:
+    explicit constexpr TimestampVerifier(
+            double alphaJitter = kDefaultAlphaJitter,
+            double alphaEstimator = kDefaultAlphaEstimator)
+        : mJitterMs{alphaJitter}
+        , mTimestampEstimator{alphaEstimator}
+        , mCorrectedJitterMs{alphaJitter}
+    { }
+
+    // construct from static arrays.
+    template <size_t N>
+    constexpr TimestampVerifier(const F (&frames)[N], const T (&timeNs)[N], uint32_t sampleRate) {
+         for (size_t i = 0; i < N; ++i) {
+             add(frames[i], timeNs[i], sampleRate);
+         }
+    }
+
+    /** adds a timestamp, represented by a (frames, timeNs) pair and the
+     * sample rate to the TimestampVerifier.
+     *
+     * The frames and timeNs should be monotonically increasing
+     * (from the previous discontinuity, or the start of adding).
+     *
+     * A sample rate change triggers a discontinuity automatically.
+     */
+    constexpr void add(F frames, T timeNs, uint32_t sampleRate) {
+        // We consider negative time as "not ready".
+        // TODO: do we need to consider an explicit epoch start time?
+        if (timeNs < 0) {
+            ++mNotReady;
+            return;
+        }
+        if (mDiscontinuity || mSampleRate != sampleRate) {
+            // ALOGD("disc:%d frames:%lld timeNs:%lld",
+            //         mDiscontinuity, (long long)frames, (long long)timeNs);
+            switch (mDiscontinuityMode) {
+            case DISCONTINUITY_MODE_CONTINUOUS:
+                break;
+            case DISCONTINUITY_MODE_ZERO:
+                // frame position reset to 0 on discontinuity - detect this.
+                if (mLastTimestamp.mFrames
+                        > kDiscontinuityZeroStartThresholdMs * sampleRate / MILLIS_PER_SECOND
+                        && frames >= mLastTimestamp.mFrames) {
+                    return;
+                }
+                break;
+            default:
+                assert(false); // never here.
+                break;
+            }
+            mDiscontinuity = false;
+            mFirstTimestamp = {frames, timeNs};
+            mLastTimestamp = mFirstTimestamp;
+            mSampleRate = sampleRate;
+        } else {
+            assert(sampleRate != 0);
+            const FrameTime timestamp{frames, timeNs};
+            if (mCold && (timestamp.mTimeNs == mLastTimestamp.mTimeNs
+                    || computeRatio(timestamp, mLastTimestamp, sampleRate)
+                            < kMinimumSpeedToStartVerification)) {
+                // Cold is when the timestamp may take some time to start advancing
+                // at the normal rate.
+                ++mColds;
+                mFirstTimestamp = timestamp;
+                // ALOGD("colds:%lld frames:%lld timeNs:%lld",
+                //         (long long)mColds, (long long)frames, (long long)timeNs);
+            } else {
+                const double jitterMs = computeJitterMs(timestamp, mLastTimestamp, sampleRate);
+                mJitterMs.add(jitterMs);
+                // ALOGD("frames:%lld  timeNs:%lld jitterMs:%lf",
+                //         (long long)frames, (long long)timeNs, jitterMs);
+
+                // Handle timestamp estimation
+                if (mCold) {
+                    mCold = false;
+                    mTimestampEstimator.reset();
+                    mTimestampEstimator.add(
+                        {(double)mFirstTimestamp.mTimeNs * 1e-9, (double)mFirstTimestamp.mFrames});
+                    mFirstCorrectedTimestamp = mFirstTimestamp;
+                    mLastCorrectedTimestamp = mFirstCorrectedTimestamp;
+                }
+                mTimestampEstimator.add({(double)timeNs * 1e-9, (double)frames});
+
+                // Find the corrected timestamp, a posteriori estimate.
+                FrameTime correctedTimestamp = timestamp;
+
+                // The estimator is valid after 2 timestamps; we also require a
+                // correlation of at least kEstimatorR2Lock to signal locked.
+                if (mTimestampEstimator.getN() > 2
+                        && mTimestampEstimator.getR2() >= kEstimatorR2Lock) {
+#if 1
+                    // We choose frame correction over time correction.
+                    // TODO: analyze preference.
+
+                    // figure out frames based on time.
+                    const F newFrames = mTimestampEstimator.getYFromX((double)timeNs * 1e-9);
+                    // prevent retrograde correction.
+                    correctedTimestamp.mFrames = std::max(
+                            newFrames, mLastCorrectedTimestamp.mFrames);
+#else
+                    // figure out time based on frames
+                    const T newTimeNs = mTimestampEstimator.getXFromY((double)frames) * 1e9;
+                    // prevent retrograde correction.
+                    correctedTimestamp.mTimeNs = std::max(
+                            newTimeNs, mLastCorrectedTimestamp.mTimeNs);
+#endif
+                }
+
+                // Compute the jitter if the corrected timestamp is used.
+                const double correctedJitterMs = computeJitterMs(
+                        correctedTimestamp, mLastCorrectedTimestamp, sampleRate);
+                mCorrectedJitterMs.add(correctedJitterMs);
+                mLastCorrectedTimestamp = correctedTimestamp;
+            }
+            mLastTimestamp = timestamp;
+        }
+        ++mTimestamps;
+    }
+
+    /** registers a discontinuity.
+     *
+     * The next timestamp added does not participate in any statistics with the last
+     * timestamp, but rather anchors following timestamp sequence verification.
+     *
+     * Consecutive discontinuities are treated as one for the purposes of counting.
+     */
+    constexpr void discontinuity() {
+        if (!mDiscontinuity) {
+            // ALOGD("discontinuity");
+            mDiscontinuity = true;
+            mCold = true;
+            ++mDiscontinuities;
+        }
+    }
+
+    /** registers an error.
+     *
+     * The timestamp sequence is still assumed continuous after error. Use discontinuity()
+     * if it is not.
+     */
+    constexpr void error() {
+        ++mErrors;
+    }
+
+    // How a discontinuity affects frame position.
+    enum DiscontinuityMode : int32_t {
+        DISCONTINUITY_MODE_CONTINUOUS, // frame position is unaffected.
+        DISCONTINUITY_MODE_ZERO,       // frame position resets to zero.
+    };
+
+    constexpr void setDiscontinuityMode(DiscontinuityMode mode) {
+        assert(mode == DISCONTINUITY_MODE_CONTINUOUS
+                || mode == DISCONTINUITY_MODE_ZERO);
+        mDiscontinuityMode = mode;
+    }
+
+    constexpr DiscontinuityMode getDiscontinuityMode() const {
+        return mDiscontinuityMode;
+    }
+
+    /** returns a string with relevant statistics.
+     *
+     * Should not be called from a SCHED_FIFO thread since it uses std::string.
+     */
+    std::string toString() const {
+        std::stringstream ss;
+
+        ss << "n=" << mTimestamps;          // number of timestamps added with valid times.
+        ss << " disc=" << mDiscontinuities; // discontinuities encountered (dups ignored).
+        ss << " cold=" << mColds;           // timestamps not progressing after discontinuity.
+        ss << " nRdy=" << mNotReady;        // timestamps not ready (time negative).
+        ss << " err=" << mErrors;           // errors encountered.
+        if (mSampleRate != 0) {             // ratio of time-by-frames / time
+            ss << " rate=" << computeRatio( // (since last discontinuity).
+                    mLastTimestamp, mFirstTimestamp, mSampleRate);
+        }
+        ss << " jitterMs(" << mJitterMs.toString() << ")";  // timestamp jitter statistics.
+
+        double a, b, r2; // sample rate is the slope b.
+        estimateSampleRate(a, b, r2);
+
+        // r2 is the correlation coefficient (where 1 is best and 0 is worst),
+        // so for better printed resolution, we print 1 - r2.
+        ss << " localSR(" << b << ", " << (1. - r2) << ")";
+        ss << " correctedJitterMs(" << mCorrectedJitterMs.toString() << ")";
+        return ss.str();
+    }
+
+    // general counters
+    constexpr int64_t getN() const { return mTimestamps; }
+    constexpr int64_t getDiscontinuities() const { return mDiscontinuities; }
+    constexpr int64_t getNotReady() const { return mNotReady; }
+    constexpr int64_t getColds() const { return mColds; }
+    constexpr int64_t getErrors() const { return mErrors; }
+    constexpr const audio_utils::Statistics<double> & getJitterMs() const {
+        return mJitterMs;
+    }
+    // estimate local sample rate (dframes / dtime) which is the slope b from:
+    // y = a + bx
+    constexpr void estimateSampleRate(double &a, double &b, double &r2) const {
+        mTimestampEstimator.computeYLine(a, b, r2);
+    }
+
+    // timestamp anchor info
+    using FrameTime = struct { F mFrames; T mTimeNs; }; // a "constexpr" pair
+    constexpr FrameTime getFirstTimestamp() const { return mFirstTimestamp; }
+    constexpr FrameTime getLastTimestamp() const { return mLastTimestamp; }
+    constexpr uint32_t getSampleRate() const { return mSampleRate; }
+
+    constexpr FrameTime getLastCorrectedTimestamp() const { return mLastCorrectedTimestamp; }
+
+    // +/-Inf or NaN is possible only if sampleRate is 0 (which should not happen).
+    static constexpr double computeJitterMs(
+            const FrameTime &current, const FrameTime &last, uint32_t sampleRate) {
+        const auto diff = sub(current, last);
+        const double frameDifferenceNs = diff.first * 1e9 / sampleRate;
+        const double jitterNs = diff.second - frameDifferenceNs;  // actual - expected
+        return jitterNs * 1e-6;
+    }
+
+private:
+    // Our statistics have an exponentially weighted history;
+    // the defaults are given here.
+    static constexpr double kDefaultAlphaJitter = 0.999;
+    static constexpr double kDefaultAlphaEstimator = 0.99;
+    static constexpr double kEstimatorR2Lock = 0.95;
+
+    // general counters
+    int64_t mTimestamps = 0;
+    int64_t mDiscontinuities = 0;
+    int64_t mNotReady = 0;
+    int64_t mColds = 0;
+    int64_t mErrors = 0;
+    audio_utils::Statistics<double> mJitterMs{kDefaultAlphaJitter};
+
+    // timestamp anchor info
+    bool mDiscontinuity = true;
+    bool mCold = true;
+    FrameTime mFirstTimestamp{};
+    FrameTime mLastTimestamp{};
+    uint32_t mSampleRate = 0;
+
+    // timestamp estimation and correction
+    audio_utils::LinearLeastSquaresFit<double> mTimestampEstimator{kDefaultAlphaEstimator};
+    FrameTime mFirstCorrectedTimestamp{};
+    FrameTime mLastCorrectedTimestamp{};
+    audio_utils::Statistics<double> mCorrectedJitterMs{kDefaultAlphaJitter};
+
+    // configuration
+    DiscontinuityMode mDiscontinuityMode = DISCONTINUITY_MODE_CONTINUOUS;
+
+    static constexpr double kMinimumSpeedToStartVerification = 0.1;
+    // Number of ms so small that initial jitter is OK for DISCONTINUITY_MODE_ZERO.
+    static constexpr int64_t kDiscontinuityZeroStartThresholdMs = 5;
+
+    // sub returns the difference between left and right as a pair of signed types.
+    // This is only important if F or T are unsigned int types.
+    __attribute__((no_sanitize("integer")))
+    static constexpr auto sub(const FrameTime &left, const FrameTime &right) {
+        return std::make_pair<
+                typename std::make_signed<F>::type, typename std::make_signed<T>::type>(
+                        left.mFrames - right.mFrames, left.mTimeNs - right.mTimeNs);
+    }
+
+    // +/-Inf or NaN is possible depending on the differences between current and last.
+    static constexpr double computeRatio(
+            const FrameTime &current, const FrameTime &last, uint32_t sampleRate) {
+        const auto diff = sub(current, last);
+        const double frameDifferenceNs = diff.first * 1e9 / sampleRate;
+        return frameDifferenceNs / diff.second;
+    }
+};
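+
+// Example usage (a minimal sketch; the frame and time values are illustrative):
+//
+//   TimestampVerifier<int64_t, int64_t> tv;
+//   tv.add(/* frames */ 0, /* timeNs */ 0, /* sampleRate */ 48000);
+//   tv.add(4800, 100000000, 48000);  // 4800 frames in 100 ms at 48 kHz: zero jitter
+//   tv.discontinuity();              // the next timestamp anchors a new sequence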
+
+} // namespace android
+
+#endif // !ANDROID_AUDIO_UTILS_TIMESTAMP_VERIFIER_H
diff --git a/audio_utils/include/audio_utils/channels.h b/audio_utils/include/audio_utils/channels.h
index 10026f4..d710dca 100644
--- a/audio_utils/include/audio_utils/channels.h
+++ b/audio_utils/include/audio_utils/channels.h
@@ -32,10 +32,10 @@
  *   \param out_buff_chans       Specifies the number of channels in the output buffer.
  *   \param sample_size_in_bytes Specifies the number of bytes per sample. 1, 2, 3, 4 are
  *     currently valid.
- *   \param num_in_bytes         size of input buffer in BYTES
+ *   \param num_in_bytes         size of input buffer in bytes
  *
  * \return
- *   the number of BYTES of output data or 0 if an error occurs.
+ *   the number of bytes of output data or 0 if an error occurs.
  *
  * \note
  *   The out and sums buffers must either be completely separate (non-overlapping), or
@@ -56,10 +56,10 @@
  *   \param out_buff_chans       Specifies the number of channels in the output buffer.
  *   \param sample_size_in_bytes Specifies the number of bytes per sample. 1, 2, 3, 4 are
  *     currently valid.
- *   \param num_in_bytes         size of input buffer in BYTES
+ *   \param num_in_bytes         size of input buffer in bytes
  *
  * \return
- *   the number of BYTES of output data or 0 if an error occurs.
+ *   the number of bytes of output data or 0 if an error occurs.
  *
  * \note
  *   The out and in buffers must either be completely separate (non-overlapping), or
@@ -69,8 +69,33 @@
                        void* out_buff, size_t out_buff_chans,
                        unsigned sample_size_in_bytes, size_t num_in_bytes);
 
+/**
+ * Expands or contracts sample data from one interleaved channel format to another.
+ * Extra expanded channels are interleaved in from the end of the input buffer.
+ * Contracted channels are copied to the end of the output buffer.
+ *
+ *   \param in_buff              points to the buffer of samples.
+ *   \param in_buff_chans        Specifies the number of channels in the input buffer.
+ *   \param out_buff             points to the buffer to receive converted samples.
+ *   \param out_buff_chans       Specifies the number of channels in the output buffer.
+ *   \param sample_size_in_bytes Specifies the number of bytes per sample. 1, 2, 3, 4 are
+ *     currently valid.
+ *   \param num_in_bytes         size of input buffer in bytes.
+ *
+ * \return
+ *   the number of bytes of output data or 0 if an error occurs.
+ *
+ * \note
+ *   The out and in buffers must be the same length.
+ *   The out and in buffers must either be completely separate (non-overlapping), or
+ *   they must both start at the same address. Partially overlapping buffers are not supported.
+ */
+size_t adjust_channels_non_destructive(const void* in_buff, size_t in_buff_chans,
+                       void* out_buff, size_t out_buff_chans,
+                       unsigned sample_size_in_bytes, size_t num_in_bytes);
+
 /** \cond */
 __END_DECLS
 /** \endcond */
 
-#endif
+#endif  // !ANDROID_AUDIO_CHANNELS_H
diff --git a/audio_utils/include/audio_utils/clock.h b/audio_utils/include/audio_utils/clock.h
index b3298e7..45488b6 100644
--- a/audio_utils/include/audio_utils/clock.h
+++ b/audio_utils/include/audio_utils/clock.h
@@ -32,6 +32,17 @@
 #define NANOS_PER_MILLISECOND  1000000LL
 #define NANOS_PER_SECOND    1000000000LL
 
+#define SECONDS_PER_MINUTE  60LL
+#define MINUTES_PER_HOUR    60LL
+
+#define MICROS_PER_MINUTE   (MICROS_PER_SECOND * SECONDS_PER_MINUTE)
+#define MILLIS_PER_MINUTE   (MILLIS_PER_SECOND * SECONDS_PER_MINUTE)
+#define NANOS_PER_MINUTE    (NANOS_PER_SECOND  * SECONDS_PER_MINUTE)
+
+#define MICROS_PER_HOUR     (MICROS_PER_MINUTE * MINUTES_PER_HOUR)
+#define MILLIS_PER_HOUR     (MILLIS_PER_MINUTE * MINUTES_PER_HOUR)
+#define NANOS_PER_HOUR      (NANOS_PER_MINUTE  * MINUTES_PER_HOUR)
+
 /**
  * \brief Converts time in ns to a time string, with format similar to logcat.
  * \param ns          input time in nanoseconds to convert.
diff --git a/audio_utils/include/audio_utils/fifo.h b/audio_utils/include/audio_utils/fifo.h
index e933f9b..4926c09 100644
--- a/audio_utils/include/audio_utils/fifo.h
+++ b/audio_utils/include/audio_utils/fifo.h
@@ -273,7 +273,7 @@
      * except they convey extra information as to the cause.
      * After any error, both iovec[0] and iovec[1] will be empty.
      */
-    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count,
+    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count = SIZE_MAX,
             const struct timespec *timeout = NULL) = 0;
 
     /**
@@ -290,6 +290,7 @@
      * Determine the number of frames that could be obtained or read/written without blocking.
      * There's an inherent race condition: the value may soon be obsolete so shouldn't be trusted.
      * available() may be called after obtain(), but doesn't affect the number of releasable frames.
+     * The implementation unfortunately prevents the method from being marked 'const'.
      *
      * \return Number of available frames, if greater than or equal to zero.
      *  \retval -EIO        corrupted indices, no recovery is possible
@@ -315,6 +316,9 @@
     uint64_t totalReleased() const
             { return mTotalReleased; }
 
+    /** Return a reference to the associated FIFO. */
+    audio_utils_fifo& fifo()    { return mFifo; }
+
 protected:
     audio_utils_fifo&   mFifo;
 
@@ -381,7 +385,7 @@
     ssize_t write(const void *buffer, size_t count, const struct timespec *timeout = NULL);
 
     // Implement audio_utils_fifo_provider
-    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count,
+    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count = SIZE_MAX,
             const struct timespec *timeout = NULL);
     virtual void release(size_t count);
     virtual ssize_t available();
@@ -509,7 +513,7 @@
             size_t *lost = NULL);
 
     // Implement audio_utils_fifo_provider
-    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count,
+    virtual ssize_t obtain(audio_utils_iovec iovec[2], size_t count = SIZE_MAX,
             const struct timespec *timeout = NULL);
     virtual void release(size_t count);
     virtual ssize_t available();
@@ -531,6 +535,7 @@
      * Determine the number of frames that could be obtained or read without blocking.
      * There's an inherent race condition: the value may soon be obsolete so shouldn't be trusted.
      * available() may be called after obtain(), but doesn't affect the number of releasable frames.
+     * The implementation unfortunately prevents the method from being marked 'const'.
      *
      * \param lost    If non-NULL, set to the approximate number of frames lost before
      *                re-synchronization when -EOVERFLOW occurs, or set to zero when no frames lost.
diff --git a/audio_utils/include/audio_utils/fifo_index.h b/audio_utils/include/audio_utils/fifo_index.h
index 9d2ab51..2639f6f 100644
--- a/audio_utils/include/audio_utils/fifo_index.h
+++ b/audio_utils/include/audio_utils/fifo_index.h
@@ -112,7 +112,7 @@
     RefIndexDeferredStoreReleaseDeferredWake(audio_utils_fifo_index& index);
     ~RefIndexDeferredStoreReleaseDeferredWake();
 
-    // Place 'value' into the cache but do not store it to memory yet.
+    // Place 'value' into the cache, but do not store it to memory yet.
     void set(uint32_t value);
 
     // If there is a new value in the cache, store it now with memory order 'release'.
@@ -149,7 +149,7 @@
     ~RefIndexCachedLoadAcquireDeferredWait();
 
     // If value is already cached, return the cached value.
-    // Otherwise load now with memory order 'acquire', cache for later, and return the value.
+    // Otherwise load now with memory order 'acquire', cache for later use, and return the value.
     uint32_t    get();
 
     // If value is already cached, this is a no-op.
diff --git a/audio_utils/include/audio_utils/mono_blend.h b/audio_utils/include/audio_utils/mono_blend.h
index 12e03e0..dc97afb 100644
--- a/audio_utils/include/audio_utils/mono_blend.h
+++ b/audio_utils/include/audio_utils/mono_blend.h
@@ -14,8 +14,8 @@
  * limitations under the License.
  */
 
-#ifndef ANDROID_AUDIO_CONVERSION_H
-#define ANDROID_AUDIO_CONVERSION_H
+#ifndef ANDROID_AUDIO_MONO_BLEND_H
+#define ANDROID_AUDIO_MONO_BLEND_H
 #include <stdint.h>
 #include <sys/cdefs.h>
 #include <system/audio.h>
@@ -47,4 +47,4 @@
 __END_DECLS
 /** \endcond */
 
-#endif
+#endif // !ANDROID_AUDIO_MONO_BLEND_H
diff --git a/audio_utils/include/audio_utils/primitives.h b/audio_utils/include/audio_utils/primitives.h
index a3727d7..6b9ca3d 100644
--- a/audio_utils/include/audio_utils/primitives.h
+++ b/audio_utils/include/audio_utils/primitives.h
@@ -364,6 +364,18 @@
 void memcpy_to_float_from_q8_23(float *dst, const int32_t *src, size_t count);
 
 /**
+ * Expand and copy samples from unsigned 8-bit offset by 0x80 to signed 32-bit.
+ *
+ *  \param dst     Destination buffer
+ *  \param src     Source buffer
+ *  \param count   Number of samples to copy
+ *
+ * The destination and source buffers must either be completely separate (non-overlapping), or
+ * they must both start at the same address.  Partially overlapping buffers are not supported.
+ */
+void memcpy_to_i32_from_u8(int32_t *dst, const uint8_t *src, size_t count);
+
+/**
  * Copy samples from signed fixed point 16-bit Q0.15 to signed fixed-point 32-bit Q0.31.
  * The output data range is [0x80000000, 0x7fff0000] at intervals of 0x10000.
  *
diff --git a/audio_utils/include/audio_utils/safe_math.h b/audio_utils/include/audio_utils/safe_math.h
new file mode 100644
index 0000000..7dcab3f
--- /dev/null
+++ b/audio_utils/include/audio_utils/safe_math.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <type_traits>
+
+namespace android::audio_utils {
+
+// safe_sub_overflow is used to ensure that subtraction occurs in the same native
+// type with proper 2's complement overflow.  Without calling this function, it
+// is possible, for example, that optimizing compilers may elect to treat 32 bit
+// subtraction as 64 bit subtraction when storing into a 64 bit destination as
+// integer overflow is technically undefined.
+template <typename T, typename U,
+          typename = std::enable_if_t<
+              std::is_same<std::decay_t<T>, std::decay_t<U>>{}>>
+// ensure arguments are same type (ignoring volatile, which is used in cblk
+// variables).
+auto safe_sub_overflow(const T& a, const U& b) {
+  std::decay_t<T> result;
+  (void)__builtin_sub_overflow(a, b, &result);
+  // note if __builtin_sub_overflow returns true, an overflow occurred.
+  return result;
+}
+
+// similar to safe_sub_overflow but for add operator.
+template <typename T, typename U,
+          typename = std::enable_if_t<
+              std::is_same<std::decay_t<T>, std::decay_t<U>>{}>>
+// ensure arguments are same type (ignoring volatile, which is used in cblk
+// variables).
+auto safe_add_overflow(const T& a, const U& b) {
+  std::decay_t<T> result;
+  (void)__builtin_add_overflow(a, b, &result);
+  // note if __builtin_add_overflow returns true, an overflow occurred.
+  return result;
+}
+
+} // namespace android::audio_utils
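+
+// Example (a sketch): wrap-around with two's complement semantics instead of
+// undefined behavior.
+//
+//   int32_t a = INT32_MIN;
+//   int32_t d = android::audio_utils::safe_sub_overflow(a, 1);  // wraps to INT32_MAX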
diff --git a/audio_utils/include/audio_utils/sample.h b/audio_utils/include/audio_utils/sample.h
new file mode 100644
index 0000000..c2f3966
--- /dev/null
+++ b/audio_utils/include/audio_utils/sample.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_SAMPLE_H
+#define ANDROID_AUDIO_SAMPLE_H
+
+#include <stdint.h>
+#include <sys/cdefs.h>
+
+__BEGIN_DECLS
+
+/* A single signed sample expressed as a minifloat, in the linear range
+ *   -2.0f < x < 2.0f
+ * with normalized precision of about 4 decimal digits, and a dynamic range of
+ * approximately -104 dB to +6 dB.
+ * The representation has 13 significand bits (1 sign, 12 mantissa, 1 hidden) and 3 exponent bits.
+ * There is no signed zero because of lack of hardware support for efficient comparisons,
+ * no infinity, and no NaN.
+ */
+typedef int16_t sample_minifloat_t;
+sample_minifloat_t sample_from_float(float f);
+float float_from_sample(sample_minifloat_t sample);
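+
+/* Example (a sketch): round trip through the minifloat representation.
+ *
+ *   sample_minifloat_t s = sample_from_float(0.5f);
+ *   float v = float_from_sample(s);  // approximately 0.5f, to about 4 decimal digits
+ */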
+
+__END_DECLS
+
+#endif // ANDROID_AUDIO_SAMPLE_H
diff --git a/audio_utils/include/audio_utils/variadic_utils.h b/audio_utils/include/audio_utils/variadic_utils.h
new file mode 100644
index 0000000..a6b7470
--- /dev/null
+++ b/audio_utils/include/audio_utils/variadic_utils.h
@@ -0,0 +1,527 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_UTILS_VARIADIC_UTILS_H
+#define ANDROID_AUDIO_UTILS_VARIADIC_UTILS_H
+
+#include <array>
+#include <cmath> // for std::sqrt
+#include <ostream>
+#include <tuple>
+#include <utility>
+
+namespace android {
+namespace audio_utils {
+
+/**
+ * We provide operator overloading for variadic math and printing.
+ *
+ * An object allowed for variadic operation requires the following:
+ *   1) variadic constructor
+ *   2) support std::get<>
+ *   3) support std::tuple_size<>
+ *   4) support std::tuple_element<>
+ *
+ * Examples of common variadic classes: std::pair, std::tuple, std::array.
+ *
+ * User defined variadic classes will need to create overloaded functions for
+ * std::get, std::tuple_size, std::tuple_element.
+ *
+ * Overloads and functions always check whether the type of the argument is
+ * variadic to prevent false application, unless parameters include a variadic index sequence.
+ * This makes shorter function names safe from name collision as well.
+ */
+
+template <typename T, template <typename...> class C>
+struct is_template : std::false_type {};
+template <template <typename...> class C, typename... args>
+struct is_template<C<args...>, C> : std::true_type {};
+
+template <typename T> using is_tuple = is_template<std::decay_t<T>, std::tuple>;
+template <typename T> using is_pair = is_template<std::decay_t<T>, std::pair>;
+
+/* is_array<T>::value, different from std::is_array<T>::value */
+template <typename T>
+struct is_array_impl : std::false_type {};
+template <typename T, size_t N>
+struct is_array_impl<std::array<T, N>> : std::true_type {};
+template <typename T>
+struct is_array : is_array_impl<std::decay_t<T>> {};
+
+/* is_variadic<T>::value is true if T supports std::tuple_size<T> */
+struct is_variadic_impl {
+    // SFINAE test(0) prefers this if std::tuple_size<T>::value exists
+    template <typename T> static int test(int, int[std::tuple_size<T>::value] = nullptr);
+    template <typename T> static bool test(...);
+};
+
+template <typename T>
+struct is_variadic : std::integral_constant<bool,
+    std::is_same<decltype(is_variadic_impl::test<std::decay_t<T>>(0)), int>::value> {};
+
+/**
+ * We allow variadic OP variadic or variadic OP scalar or scalar OP variadic
+ *
+ * where OP is +, -, *, /.
+ *
+ * Deep operations are possible on nested variadics, for example:
+ *
+ * std::cout << std::make_pair(0, std::make_pair(1 , 2)) + 2;
+ * -> (2, (3, 4))
+ *
+ */
+
+#define MAKE_VARIADIC_BINARY_OPERATOR(OPERATOR, OPERATOR_NAME) \
+template <typename T1, typename T2, std::size_t... I> \
+constexpr auto OPERATOR_NAME##_VS(const T1& t1, const T2& t2, std::index_sequence<I...>); \
+template <typename T1, typename T2, std::size_t... I> \
+constexpr auto OPERATOR_NAME##_VV(const T1& t1, const T2& t2, std::index_sequence<I...>); \
+template <typename T1, typename T2, \
+         std::enable_if_t<is_variadic<T1>::value && !is_variadic<T2>::value, int> = 0> \
+constexpr auto operator OPERATOR(const T1& t1, const T2& t2) { \
+    return OPERATOR_NAME##_VS(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>{}); \
+} \
+template <typename T1, typename T2, \
+         std::enable_if_t<!is_variadic<T1>::value && is_variadic<T2>::value, int> = 0> \
+constexpr auto operator OPERATOR(const T1& t1, const T2& t2) { \
+    return OPERATOR_NAME##_VS( \
+            t2, t1, std::make_index_sequence<std::tuple_size<T2>::value>{}); \
+} \
+template <typename T1, typename T2, \
+         std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0> \
+constexpr auto operator OPERATOR(const T1& t1,  const T2& t2) { \
+    static_assert(std::tuple_size<T1>::value == std::tuple_size<T2>::value, \
+                  #OPERATOR_NAME " size must match"); \
+    return OPERATOR_NAME##_VV(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>{}); \
+} \
+template <typename T1, typename T2, \
+         std::enable_if_t<is_variadic<T1>::value && !is_variadic<T2>::value, int> = 0> \
+constexpr auto operator OPERATOR##=(T1& t1, const T2& t2) { \
+    t1 = OPERATOR_NAME##_VS(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>{}); \
+    return t1; \
+} \
+template <typename T1, typename T2, \
+         std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0> \
+constexpr auto operator OPERATOR##=(T1& t1,  const T2& t2) { \
+    static_assert(std::tuple_size<T1>::value == std::tuple_size<T2>::value, \
+                  #OPERATOR_NAME " size must match"); \
+    t1 = OPERATOR_NAME##_VV(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>{}); \
+    return t1; \
+} \
+template <typename T1, typename T2, std::size_t... I> \
+constexpr auto OPERATOR_NAME##_VS(const T1& t1, const T2& t2, std::index_sequence<I...>) { \
+    return T1{std::get<I>(t1) OPERATOR t2...}; \
+} \
+template <typename T1, typename T2, std::size_t... I> \
+constexpr auto OPERATOR_NAME##_VV(const T1& t1, const T2& t2, std::index_sequence<I...>) { \
+    return T1{std::get<I>(t1) OPERATOR std::get<I>(t2)...}; \
+} \
+
+MAKE_VARIADIC_BINARY_OPERATOR(+, plus)
+MAKE_VARIADIC_BINARY_OPERATOR(-, minus)
+MAKE_VARIADIC_BINARY_OPERATOR(*, multiplies)
+MAKE_VARIADIC_BINARY_OPERATOR(/, divides)
+
+#undef MAKE_VARIADIC_BINARY_OPERATOR
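+
+// Example (a sketch; element-wise arithmetic on variadics):
+//
+//   auto p = std::make_pair(1, 2) + std::make_pair(3, 4);  // (4, 6)
+//   auto q = std::make_pair(1, 2) * 3;                     // (3, 6)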
+
+/**
+ * We overload ostream operators for stringification or printing.
+ *
+ * Nested variadics are properly printed.
+ *
+ * std::cout << std::make_pair(1, 2) << std::make_tuple(1., 2., 3.)
+ *           << std::make_pair(0, std::make_pair(3, 4));
+ */
+
+// forward declaration of helper
+template <class charT, class traits, class T, std::size_t... I>
+auto& ostream_variadic(
+        std::basic_ostream<charT, traits>& os,
+        const T& t,
+        std::index_sequence<I...>);
+
+// operator overload
+template <class charT, class traits, class T,
+         std::enable_if_t<is_variadic<T>::value, int> = 0>
+auto& operator<<(std::basic_ostream<charT, traits>& os, const T& t) {
+    return ostream_variadic(os, t, std::make_index_sequence<std::tuple_size<T>::value>{});
+}
+
+// helper function (recursively calls <<)
+template <class charT, class traits, class T, std::size_t... I>
+auto& ostream_variadic(
+        std::basic_ostream<charT, traits>& os,
+        const T& t,
+        std::index_sequence<I...>) {
+    os << "(";
+    // ((os << (I == 0 ? "" : ", ") << std::get<I>(t)), ...); is C++17
+    int dummy[] __unused = { (os << (I == 0 ? "" : ", ") << std::get<I>(t), 0) ... };
+    return os << ")";
+}
+
+/**
+ * We have a fold operator which converts a variadic to a scalar using
+ * a binary operator.
+ *
+ * Following standard binary operator convention, it is a left-associative fold.
+ *
+ * Example:
+ *
+ * fold(std::plus<>(), std::make_pair(1, 2));
+ *
+ * This is a shallow operation - does not recurse through nested variadics.
+ */
+
+// helper
+template <size_t index, typename Op, typename T,
+          std::enable_if_t<index == 0 && is_variadic<T>::value, int> = 0>
+constexpr auto fold(Op&& op __unused, T&& t) {
+    return std::get<index>(std::forward<T>(t));
+}
+
+// helper
+template <size_t index, typename Op, typename T,
+          std::enable_if_t<(index > 0) && is_variadic<T>::value, int> = 0>
+constexpr auto fold(Op&& op, T&& t) {
+    return op(fold<index - 1>(std::forward<Op>(op), t), std::get<index>(std::forward<T>(t)));
+}
+
+// variadic
+template <typename Op, typename T,
+          std::enable_if_t<is_variadic<T>::value, int> = 0>
+constexpr auto fold(Op&& op, T&& t)  {
+    return fold<std::tuple_size<T>::value - 1>(std::forward<Op>(op), std::forward<T>(t));
+}
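+
+// Example (a sketch): fold(std::plus<>{}, std::make_tuple(1, 2, 3)) == 6.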
+
+
+/**
+ * tupleOp returns a tuple resulting from an element-wise operation on two variadics.
+ *
+ * the type of each tuple element depends on the return value of the op.
+ *
+ * This is a shallow operation - does not recurse through nested variadics.
+ */
+// helper
+template <typename Op, typename T1, typename T2, std::size_t... I,
+         std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr auto tupleOp(Op&& op, T1&& t1, T2&& t2, std::index_sequence<I...>) {
+    return std::make_tuple(
+            op(std::get<I>(std::forward<T1>(t1)), std::get<I>(std::forward<T2>(t2)))...);
+}
+
+// variadic
+template <typename Op, typename T1, typename T2,
+         std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr auto tupleOp(Op&& op, T1&& t1, T2&& t2) {
+    static_assert(std::tuple_size<std::remove_reference_t<T1>>::value
+            == std::tuple_size<std::remove_reference_t<T2>>::value,
+            "tuple size must match");
+    return tupleOp(std::forward<Op>(op),
+                   std::forward<T1>(t1),
+                   std::forward<T2>(t2),
+                   std::make_index_sequence<
+                           std::tuple_size<std::remove_reference_t<T1>>::value>());
+}
+
+/**
+ *  equivalent compares two variadics OR scalars
+ *
+ * equivalent(std::make_pair(1, 2), std::make_tuple(1, 2)) == true
+ *
+ * Does a deep compare through nested variadics.
+ *
+ * Does not do short-circuit evaluation.
+ * C++17 will allow for a better implementation of this.
+ */
+
+// scalar
+template <typename T1, typename T2,
+          std::enable_if_t<!is_variadic<T1>::value && !is_variadic<T2>::value, int> = 0>
+constexpr bool equivalent(const T1& t1, const T2& t2) {
+    return t1 == t2;
+}
+
+// variadic / scalar mismatch overload
+template <typename T1, typename T2,
+          std::enable_if_t<is_variadic<T1>::value != is_variadic<T2>::value, int> = 0>
+constexpr bool equivalent(const T1& t1 __unused, const T2& t2 __unused) {
+    return false;
+}
+
+// variadic
+template <typename T1, typename T2,
+          std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr bool equivalent(const T1& t1, const T2& t2) {
+    return std::tuple_size<T1>::value == std::tuple_size<T2>::value
+        && fold([](const bool& v1, const bool& v2) { return v1 && v2; },
+                tupleOp([](const auto &v1, const auto &v2) { return equivalent(v1, v2); },
+                          t1, t2));
+}
+
+/**
+ *  The innerProduct is the dot product of two 1D variadics.
+ *
+ * innerProduct(std::make_pair(1, 2), std::make_pair(3, 4)) == 11
+ */
+
+template <typename T1, typename T2,
+          std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr auto innerProduct(const T1& t1, const T2& t2) {
+    return fold(std::plus<>{}, t1 * t2);
+}
+
+/**
+ * The outerProduct is the tensor product of two 1D variadics.
+ *
+ * This only returns tuples, regardless of the input.
+ *
+ * outerProduct(std::make_tuple(1, 2), std::make_tuple(1, 2)) ==
+ * std::make_tuple(1, 2, 2, 4);
+ *
+ */
+
+// helper
+template <typename T1, typename T2, std::size_t... I>
+constexpr auto outerProduct(const T1& t1, const T2& t2, std::index_sequence<I...>)  {
+    return std::tuple_cat(std::get<I>(t1) * t2 ...);
+}
+
+// variadic
+template <typename T1, typename T2,
+          std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr auto outerProduct(const T1& t1, const T2& t2) {
+    return outerProduct(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>());
+}
+
+/**
+ * tail_variadic returns the tail offset by a template size_t Offset
+ * of a variadic object. It always returns a tuple.
+ */
+
+// helper
+template <size_t Offset, typename T, std::size_t... I>
+constexpr auto tail_variadic(T&& t, std::index_sequence<I...>) {
+    return std::make_tuple(std::get<I + Offset>(std::forward<T>(t))...);  // force a tuple here
+}
+
+// variadic
+template <size_t Offset, typename T,
+          std::enable_if_t<is_variadic<T>::value, int> = 0>
+constexpr auto tail_variadic(T&& t) {
+    return tail_variadic<Offset>(
+           std::forward<T>(t),
+           std::make_index_sequence<std::tuple_size<
+                   std::remove_reference_t<T>>::value - Offset>());
+}
+
+/**
+ * The outerProduct_UT is the tensor product of two identical length 1D variadics,
+ * but only the upper triangular portion, eliminating the symmetric lower triangular
+ * half.  This is useful for the outerProduct of two parallel variadics.
+ *
+ * This only returns tuples, regardless of the input.
+ *
+ * outerProduct_UT(std::make_tuple(1, 2, 3), std::make_tuple(1, 2, 3)) ==
+ * std::make_tuple(1, 2, 3, 4, 6, 9);
+ */
+
+// helper
+template <typename T1, typename T2, std::size_t... I>
+constexpr auto outerProduct_UT(const T1& t1, const T2& t2, std::index_sequence<I...>)  {
+    return std::tuple_cat(std::get<I>(t1) * tail_variadic<I>(t2) ...);
+}
+
+// variadic
+template <typename T1, typename T2,
+          std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0>
+constexpr auto outerProduct_UT(const T1& t1, const T2& t2) {
+    static_assert(std::tuple_size<T1>::value == std::tuple_size<T2>::value,
+                  "tuple size must match");
+    return outerProduct_UT(t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>());
+}
+
+/**
+ * to_array does a conversion of any variadic to a std::array whose element type is
+ * the input variadic's first tuple element and whose size is the tuple size.
+ * This is a shallow operation and does not work on nested variadics.
+ */
+
+// helper
+template <typename T, std::size_t...I>
+constexpr auto to_array(const T &t, std::index_sequence<I...>) {
+    return std::array<std::tuple_element_t<0, T>, std::tuple_size<T>::value>{std::get<I>(t)...};
+}
+
+// variadic
+template <typename T>
+constexpr auto to_array(const T &t) {
+    return to_array(t, std::make_index_sequence<std::tuple_size<T>::value>());
+}
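+
+// Example (a sketch): to_array(std::make_pair(1, 2)) yields std::array<int, 2>{{1, 2}}.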
+
+/**
+ * We create functor versions of the inner and outer products to
+ * pass in as a type argument to a template.  A tuple and an array
+ * return variant are provided.
+ *
+ * See related: std::function<>.
+ */
+
+template <typename T>
+struct innerProduct_scalar {
+    constexpr auto operator()(const T &lhs, const T &rhs) const {
+        return innerProduct(lhs, rhs);
+    }
+};
+
+template <typename T>
+struct outerProduct_tuple {
+    constexpr auto operator()(const T &lhs, const T &rhs) const {
+        return outerProduct(lhs, rhs);
+    }
+};
+
+template <typename T>
+struct outerProduct_UT_tuple {
+    constexpr auto operator()(const T &lhs, const T &rhs) const {
+        return outerProduct_UT(lhs, rhs);
+    }
+};
+
+template <typename T>
+struct outerProduct_array {
+    constexpr auto operator()(const T &lhs, const T &rhs) const {
+        return to_array(outerProduct(lhs, rhs));
+    }
+};
+
+template <typename T>
+struct outerProduct_UT_array {
+    constexpr auto operator()(const T &lhs, const T &rhs) const {
+        return to_array(outerProduct_UT(lhs, rhs));
+    }
+};
+
+/**
+ * for_each is used to apply an operation to each element of a variadic OR scalar object.
+ *
+ * auto t = std::make_tuple(1, 2);
+ * for_each([](int &x) { ++x; }, t);
+ *
+ * Related: std::for_each<>
+ * Note difference from std::apply, which forwards tuple elements as arguments to a function.
+ */
+
+// helper
+template <typename T, typename Op, std::size_t... I >
+constexpr void for_each(T& t, Op op, std::index_sequence<I...>) {
+    int dummy[] __unused = {(op(std::get<I>(t)), 0)...};
+}
+
+// variadic
+template <typename T, typename Op,
+          std::enable_if_t<is_variadic<T>::value, int> = 0>
+constexpr void for_each(T& t, Op op) {
+    for_each(t, op, std::make_index_sequence<std::tuple_size<T>::value>());
+}
+
+// scalar version applies if not a class, rather than not a variadic
+template <typename T, typename Op,
+          std::enable_if_t<!std::is_class<T>::value, int> = 0>
+constexpr void for_each(T& t, Op op) {
+    op(t);
+}
+
+/**
+ * We make variants of the unary function std::sqrt()
+ * and the binary std::min(), std::max() to work on variadics.
+ *
+ * These are shallow operations and do not work on nested variadics.
+ *
+ * TODO: update to variadic function application for C++17
+ * with built-in std::apply, std::invoke, and constexpr lambdas.
+ *
+ */
+
+#define MAKE_VARIADIC_STD_UNARY_FUNCTION(FUNCTION) \
+template <typename T, \
+          std::enable_if_t<!is_variadic<T>::value, int> = 0> \
+constexpr auto FUNCTION(const T &t) { \
+    return std::FUNCTION(t); \
+} \
+template <typename T, std::size_t... I > \
+constexpr auto FUNCTION(const T &t, std::index_sequence<I...>) { \
+    return T{audio_utils::FUNCTION(std::get<I>(t))...}; \
+} \
+template <typename T, \
+          std::enable_if_t<is_variadic<T>::value, int> = 0> \
+constexpr auto FUNCTION(const T& t) { \
+    return audio_utils::FUNCTION(t, std::make_index_sequence<std::tuple_size<T>::value>()); \
+}
+
+MAKE_VARIADIC_STD_UNARY_FUNCTION(sqrt);
+
+#undef MAKE_VARIADIC_STD_UNARY_FUNCTION
+
+#define MAKE_VARIADIC_STD_BINARY_FUNCTION(FUNCTION) \
+template <typename T1, typename T2, \
+          std::enable_if_t<!is_variadic<T1>::value && !is_variadic<T2>::value, int> = 0> \
+constexpr auto FUNCTION(const T1 &t1, const T2 &t2) { \
+    return std::FUNCTION(t1, t2); \
+} \
+template <typename T1, typename T2, std::size_t... I > \
+constexpr auto FUNCTION(const T1 &t1, const T2 &t2, std::index_sequence<I...>) { \
+    return T1{audio_utils::FUNCTION(std::get<I>(t1), std::get<I>(t2))...}; \
+} \
+template <typename T1, typename T2, \
+          std::enable_if_t<is_variadic<T1>::value && is_variadic<T2>::value, int> = 0> \
+constexpr auto FUNCTION(const T1 &t1, const T2 &t2) { \
+    static_assert(std::tuple_size<T1>::value == std::tuple_size<T2>::value, \
+                  #FUNCTION " size must match"); \
+    return audio_utils::FUNCTION( \
+            t1, t2, std::make_index_sequence<std::tuple_size<T1>::value>()); \
+}
+
+MAKE_VARIADIC_STD_BINARY_FUNCTION(min);
+MAKE_VARIADIC_STD_BINARY_FUNCTION(max);
+
+/* is_iterator<T>::value is true if T supports std::iterator_traits<T>
+
+   TODO: poor resolution on iterator type, prefer emulating hidden STL templates
+   __is_input_iterator<>
+   __is_forward_iterator<>
+   ...
+ */
+// helper
+struct is_iterator_impl {
+    // SFINAE test(0) preferred if iterator traits
+    template <typename T,
+              typename = typename std::iterator_traits<T>::difference_type,
+              typename = typename std::iterator_traits<T>::value_type,
+              typename = typename std::iterator_traits<T>::pointer,
+              typename = typename std::iterator_traits<T>::iterator_category>
+              static int test(int);
+    template <typename T> static bool test(...);
+};
+
+template <typename T>
+struct is_iterator : std::integral_constant<bool,
+    std::is_same<decltype(is_iterator_impl::test<std::decay_t<T>>(0)), int>::value> {};
+
+
+} // namespace audio_utils
+} // namespace android
+
+#endif // !ANDROID_AUDIO_UTILS_VARIADIC_UTILS_H
diff --git a/audio_utils/mono_blend.cpp b/audio_utils/mono_blend.cpp
index 0ace077..33b46c2 100644
--- a/audio_utils/mono_blend.cpp
+++ b/audio_utils/mono_blend.cpp
@@ -18,9 +18,9 @@
 #define LOG_TAG "audio_utils_mono_blend"
 
 #include <math.h>
-#include <audio_utils/mono_blend.h>
 #include <log/log.h>
 #include <audio_utils/limiter.h>
+#include <audio_utils/mono_blend.h>
 
 // TODO: Speed up for special case of 2 channels?
 void mono_blend(void *buf, audio_format_t format, size_t channelCount, size_t frames, bool limit) {
diff --git a/audio_utils/power.cpp b/audio_utils/power.cpp
index fea3112..47de41d 100644
--- a/audio_utils/power.cpp
+++ b/audio_utils/power.cpp
@@ -275,4 +275,3 @@
 {
     return isFormatSupported(format);
 }
-
diff --git a/audio_utils/primitives.c b/audio_utils/primitives.c
index 594f1c5..7b1ce75 100644
--- a/audio_utils/primitives.c
+++ b/audio_utils/primitives.c
@@ -253,6 +253,15 @@
     }
 }
 
+void memcpy_to_i32_from_u8(int32_t *dst, const uint8_t *src, size_t count)
+{
+    dst += count;
+    src += count;
+    for (; count > 0; --count) {
+        *--dst = ((int32_t)(*--src) - 0x80) << 24;
+    }
+}
+
 void memcpy_to_i32_from_i16(int32_t *dst, const int16_t *src, size_t count)
 {
     dst += count;
diff --git a/audio_utils/sample.c b/audio_utils/sample.c
new file mode 100644
index 0000000..d2a9100
--- /dev/null
+++ b/audio_utils/sample.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <math.h>
+#include <audio_utils/sample.h>
+
+#define SAMPLE_NEG_MAX  0xFFFF
+#define SAMPLE_POS_MAX  0x7FFF
+
+#define SIGN_MASK       0x8000
+#define SIGN_BITS       1
+
+#define EXPONENT_BITS   3
+#define EXPONENT_MAX    ((1 << EXPONENT_BITS) - 1)
+#define EXCESS          ((1 << EXPONENT_BITS) - 2)
+
+#define MANTISSA_BITS   12
+#define MANTISSA_MAX    ((1 << MANTISSA_BITS) - 1)
+#define HIDDEN_BIT      (1 << MANTISSA_BITS)
+#define ONE_FLOAT       ((float) (1 << (MANTISSA_BITS + 1)))
+
+
+#if SIGN_BITS + EXPONENT_BITS + MANTISSA_BITS != 16
+#error SIGN_BITS, EXPONENT_BITS and MANTISSA_BITS must sum to 16
+#endif
+
+sample_minifloat_t sample_from_float(float v)
+{
+    if (isnan(v)) {
+        return 0;
+    }
+    sample_minifloat_t sign = 0;
+    if (v < 0.0f) {
+        sign = SIGN_MASK;
+        v = -v;
+    }
+    // This check could conceivably be tighter: v < constexpr float_from_sample(1).
+    // Probably only useful to be made more accurate if this is changed to
+    // manipulate the raw IEEE single precision float bit fields.
+    if (v <= 0.0f) {
+        // originally returned sign, but now seems better not to return negative 0
+        return 0;
+    }
+    if (v >= 2.0f) {
+        return SAMPLE_POS_MAX | sign;
+    }
+    int exp;
+    float r = frexpf(v, &exp);
+    if ((exp += EXCESS) > EXPONENT_MAX) {
+        return SAMPLE_POS_MAX | sign;
+    }
+    if (-exp >= MANTISSA_BITS) {
+        // originally returned sign, but now seems better not to return negative 0
+        return 0;
+    }
+    int mantissa = (int) (r * ONE_FLOAT);
+    sample_minifloat_t ret = exp > 0 ? (exp << MANTISSA_BITS) | (mantissa & ~HIDDEN_BIT) :
+            (mantissa >> (1 - exp)) & MANTISSA_MAX;
+    // ret != 0
+    return ret | sign;
+}
+
+float float_from_sample(sample_minifloat_t a)
+{
+    int mantissa = a & MANTISSA_MAX;
+    int exponent = (a >> MANTISSA_BITS) & EXPONENT_MAX;
+    float f = ldexpf((exponent > 0 ? HIDDEN_BIT | mantissa : mantissa << 1) / ONE_FLOAT,
+            exponent - EXCESS);
+    return a & SIGN_MASK ? -f : f;
+}
diff --git a/audio_utils/tests/Android.bp b/audio_utils/tests/Android.bp
index e0403aa..e104868 100644
--- a/audio_utils/tests/Android.bp
+++ b/audio_utils/tests/Android.bp
@@ -1,6 +1,28 @@
 // Build the unit tests for audio_utils
 
 cc_test {
+    name: "fdtostring_tests",
+    host_supported: false,
+
+    shared_libs: [
+        "libcutils",
+        "liblog",
+        "libutils", //for systemTime
+    ],
+    srcs: ["fdtostring_tests.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+    target: {
+        android: {
+            shared_libs: ["libaudioutils"],
+        },
+    }
+}
+
+cc_test {
     name: "primitives_tests",
     host_supported: true,
 
@@ -247,3 +269,103 @@
         },
     }
 }
+
+cc_test {
+    name: "statistics_tests",
+    host_supported: false,
+
+    shared_libs: ["libaudioutils"],
+    srcs: ["statistics_tests.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
+
+cc_test {
+    name: "timestampverifier_tests",
+    host_supported: false,
+
+    shared_libs: ["libaudioutils"],
+    srcs: ["timestampverifier_tests.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
+
+cc_test {
+    name: "variadic_tests",
+    host_supported: false,
+
+    shared_libs: ["libaudioutils"],
+    srcs: ["variadic_tests.cpp"],
+    cflags: [
+        "-Wall",
+        "-Werror",
+        "-Wextra",
+    ],
+}
+
+cc_test {
+    name: "logplot_tests",
+    host_supported: true,
+
+    shared_libs: [
+        "liblog",
+        "libcutils",
+    ],
+    srcs: ["logplot_tests.cpp"],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    target: {
+        android: {
+            shared_libs: ["libaudioutils"],
+        },
+        host: {
+            static_libs: ["libaudioutils"],
+        },
+    }
+}
+
+cc_binary {
+    name: "statistics_benchmark",
+    host_supported: false,
+
+    srcs: ["statistics_benchmark.cpp"],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    static_libs: [
+        "libgoogle-benchmark",
+        "libaudioutils",
+    ],
+}
+
+cc_test {
+    name: "sample_tests",
+    host_supported: true,
+
+    shared_libs: [
+        "liblog",
+        "libcutils",
+    ],
+    srcs: ["sample_tests.cpp"],
+    cflags: [
+        "-Werror",
+        "-Wall",
+    ],
+    target: {
+        android: {
+            shared_libs: ["libaudioutils"],
+        },
+        host: {
+            static_libs: ["libaudioutils"],
+        },
+    }
+}
diff --git a/audio_utils/tests/Makefile b/audio_utils/tests/Makefile
new file mode 100644
index 0000000..e2ec83b
--- /dev/null
+++ b/audio_utils/tests/Makefile
@@ -0,0 +1,9 @@
+# TODO Incorporate testing into audio_utils/tests/Android.bp and remove this file.
+
+INC=../include/audio_utils
+
+logplot_tests : logplot_tests.cpp $(INC)/LogPlot.h
+	g++ -I$(INC) -o logplot_tests logplot_tests.cpp
+
+clean :
+	rm -f logplot_tests
diff --git a/audio_utils/tests/build_and_run_all_unit_tests.sh b/audio_utils/tests/build_and_run_all_unit_tests.sh
index 78de692..3ca4b6e 100755
--- a/audio_utils/tests/build_and_run_all_unit_tests.sh
+++ b/audio_utils/tests/build_and_run_all_unit_tests.sh
@@ -18,6 +18,11 @@
 adb root && adb wait-for-device remount
 
 echo "========================================"
+
+echo "testing fdtostring"
+adb push $OUT/data/nativetest/fdtostring_tests/fdtostring_tests /system/bin
+adb shell /system/bin/fdtostring_tests
+
 echo "testing primitives"
 adb push $OUT/system/lib/libaudioutils.so /system/lib
 adb push $OUT/data/nativetest/primitives_tests/primitives_tests /system/bin
@@ -39,6 +44,30 @@
 adb push $OUT/data/nativetest/format_tests/format_tests /system/bin
 adb shell /system/bin/format_tests
 
+echo "simplelog tests"
+adb push $OUT/data/nativetest/simplelog_tests/simplelog_tests /system/bin
+adb shell /system/bin/simplelog_tests
+
+echo "statistics tests"
+adb push $OUT/data/nativetest/statistics_tests/statistics_tests /system/bin
+adb shell /system/bin/statistics_tests
+
+echo "timestampverifier tests"
+adb push $OUT/data/nativetest/timestampverifier_tests/timestampverifier_tests /system/bin
+adb shell /system/bin/timestampverifier_tests
+
+echo "variadic tests"
+adb push $OUT/data/nativetest/variadic_tests/variadic_tests /system/bin
+adb shell /system/bin/variadic_tests
+
+echo "logplot tests"
+adb push $OUT/data/nativetest/logplot_tests/logplot_tests /system/bin
+adb shell /system/bin/logplot_tests
+
+echo "benchmarking_statistics"
+adb push $OUT/system/bin/statistics_benchmark /system/bin
+adb shell /system/bin/statistics_benchmark
+
 echo "benchmarking primitives"
 adb push $OUT/system/bin/primitives_benchmark /system/bin
 adb shell /system/bin/primitives_benchmark
diff --git a/audio_utils/tests/channels_tests.cpp b/audio_utils/tests/channels_tests.cpp
index eb3b5e0..f454dd2 100644
--- a/audio_utils/tests/channels_tests.cpp
+++ b/audio_utils/tests/channels_tests.cpp
@@ -133,4 +133,71 @@
     expectEq(u16ary, u16ref);
 }
 
+TEST(audio_utils_channels, adjust_channels_non_destructive) {
+    constexpr size_t size = 65536; /* arbitrary large multiple of 8 */
+    std::vector<uint16_t> u16ref(size);
+    std::vector<uint16_t> u16contracted(size);
+    std::vector<uint16_t> u16expanded(size);
+    std::vector<uint16_t> u16inout(size);
 
+    // Reference buffer increases monotonically.
+    // For second test, in/out buffer begins identical to ref.
+    for (size_t i = 0; i < u16ref.size(); ++i) {
+        u16ref[i] = i;
+        u16inout[i] = i;
+    }
+
+    // *** First test: different in/out buffers ***
+
+    // Contract from quad to stereo.
+    adjust_channels_non_destructive(
+            u16ref.data() /*in_buff*/,
+            4 /*in_channels*/,
+            u16contracted.data() /*out_buff*/,
+            2 /*out_channels*/,
+            sizeof(u16ref[0]) /*sample_size_in_bytes*/,
+            sizeof(u16ref[0]) * u16ref.size() /*num_in_bytes*/);
+
+    // Each half of contracted buffer should increase monotonically.
+    checkMonotone(u16contracted.data(), u16contracted.size() / 2);
+    checkMonotone(&u16contracted[u16contracted.size() / 2], u16contracted.size() / 2);
+
+    // Expand stereo to quad
+    adjust_channels_non_destructive(
+            u16contracted.data() /*in_buff*/,
+            2 /*in_channels*/,
+            u16expanded.data() /*out_buff*/,
+            4 /*out_channels*/,
+            sizeof(u16contracted[0]) /*sample_size_in_bytes*/,
+            sizeof(u16contracted[0]) * (u16contracted.size() / 2) /*num_in_bytes*/);
+
+    // Comparison array must be identical to reference.
+    expectEq(u16expanded, u16ref);
+
+    // *** Second test: in_buff == out_buff ***
+
+    // Contract from eight channels to stereo.
+    adjust_channels_non_destructive(
+            u16inout.data() /*in_buff*/,
+            8 /*in_channels*/,
+            u16inout.data() /*out_buff*/,
+            2 /*out_channels*/,
+            sizeof(u16inout[0]) /*sample_size_in_bytes*/,
+            sizeof(u16inout[0]) * u16inout.size() /*num_in_bytes*/);
+
+    // Each section [1/4][3/4] of contracted buffer should increase monotonically.
+    checkMonotone(u16inout.data(), u16inout.size() / 4);
+    checkMonotone(&u16inout[u16inout.size() / 4], (u16inout.size() * 3) / 4);
+
+    // Expand stereo to eight channels.
+    adjust_channels_non_destructive(
+            u16inout.data() /*in_buff*/,
+            2 /*in_channels*/,
+            u16inout.data() /*out_buff*/,
+            8 /*out_channels*/,
+            sizeof(u16inout[0]) /*sample_size_in_bytes*/,
+            sizeof(u16inout[0]) * (u16inout.size() / 4) /*num_in_bytes*/);
+
+    // Comparison array must be identical to reference.
+    expectEq(u16inout, u16ref);
+}
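
The layout these assertions rely on, inferred from the expected monotone regions rather than from the implementation:

    // Inferred layout for a 4 -> 2 contraction of F frames over a shared buffer
    // holding 4*F samples:
    //
    //   [ f0:c0 c1 | f1:c0 c1 | ... ]   first 2*F samples: the contracted stereo data
    //   [ f0:c2 c3 | f1:c2 c3 | ... ]   last  2*F samples: the clipped channels, parked
    //
    // Each region stays in source order, hence the two monotonicity checks, and a
    // later 2 -> 4 expansion over num_in_bytes = 2*F samples can re-interleave the
    // parked channels to reproduce the reference exactly.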
diff --git a/audio_utils/tests/fdtostring_tests.cpp b/audio_utils/tests/fdtostring_tests.cpp
new file mode 100644
index 0000000..683ca56
--- /dev/null
+++ b/audio_utils/tests/fdtostring_tests.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audio_utils_fdtostring_tests"
+#include <log/log.h>
+
+#include <audio_utils/FdToString.h>
+#include <gtest/gtest.h>
+
+using namespace android::audio_utils;
+
+TEST(audio_utils_fdtostring, basic) {
+    const std::string PREFIX{"aa "};
+    const std::string TEST_STRING{"hello world"};
+
+    FdToString fdToString(PREFIX);
+    const int fd = fdToString.fd();
+    ASSERT_TRUE(fd >= 0);
+
+    write(fd, TEST_STRING.c_str(), TEST_STRING.size());
+
+    const std::string result = fdToString.getStringAndClose();
+
+    ASSERT_EQ((PREFIX + TEST_STRING), result);
+}
+
+TEST(audio_utils_fdtostring, multilines) {
+    const std::string PREFIX{"aa "};
+    const std::string DELIM{"\n"};
+    const std::string TEST_STRING1{"hello world\n"};
+    const std::string TEST_STRING2{"goodbye\n"};
+
+    FdToString fdToString(PREFIX);
+    const int fd = fdToString.fd();
+    ASSERT_TRUE(fd >= 0);
+
+    write(fd, TEST_STRING1.c_str(), TEST_STRING1.size());
+    write(fd, DELIM.c_str(), DELIM.size()); // double newline
+    write(fd, TEST_STRING2.c_str(), TEST_STRING2.size());
+
+    const std::string result = fdToString.getStringAndClose();
+
+    ASSERT_EQ((PREFIX + TEST_STRING1 + PREFIX + DELIM + PREFIX + TEST_STRING2), result);
+}
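
From these assertions, FdToString hands out a writable file descriptor and returns everything written to it with the prefix re-applied at each line start. A hypothetical usage sketch built only from the API exercised above:

    #include <stdio.h>
    #include <audio_utils/FdToString.h>

    std::string dumpWithPrefix() {
        android::audio_utils::FdToString fdToString("-- ");
        dprintf(fdToString.fd(), "status: ok\n");  // e.g. pass fd() to an object's dump()
        return fdToString.getStringAndClose();     // "-- status: ok\n"
    }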
diff --git a/audio_utils/tests/logplot_tests.cpp b/audio_utils/tests/logplot_tests.cpp
new file mode 100644
index 0000000..e89a6d6
--- /dev/null
+++ b/audio_utils/tests/logplot_tests.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <vector>
+
+#include <audio_utils/LogPlot.h>
+
+// TODO Make more rigorous unit tests.
+int main()
+{
+    static const float data[] = {-61.4, -61.7, -56.2, -54.5, -47.7, -51.1, -49.7, -47.2,
+        -47.8, -42.3, -38.9, -40.5, -39.4, -33.9, -26.3, -20.9};
+    size_t data_size = sizeof(data) / sizeof(*data);
+    std::vector<std::pair<float, bool>> vdata;
+    for (size_t i = 0; i < data_size; i++) {
+        vdata.emplace_back(data[i], (i + 1) % 10 == 0);
+    }
+
+    std::string graphstr = audio_utils_log_plot(vdata.begin(), vdata.end());
+    std::cout << graphstr << std::endl;
+
+    return EXIT_SUCCESS;
+}
diff --git a/audio_utils/tests/primitives_tests.cpp b/audio_utils/tests/primitives_tests.cpp
index d28516a..7eebb06 100644
--- a/audio_utils/tests/primitives_tests.cpp
+++ b/audio_utils/tests/primitives_tests.cpp
@@ -341,10 +341,16 @@
     }
 
     constexpr size_t testsize = std::min(u8size, size);
+    zeroFill(fary);
     memcpy_to_float_from_u8(fary.data(), u8ref.data(), testsize);
     memcpy_to_u8_from_float(u8ary.data(), fary.data(), testsize);
 
     EXPECT_EQ(0, memcmp(u8ary.data(), u8ref.data(), u8ary.size() * sizeof(u8ary[0])));
+
+    // test conversion from u8 to i32
+    zeroFill(i32ary);
+    memcpy_to_i32_from_u8(i32ary.data(), u8ref.data(), testsize);
+    checkMonotone(i32ary.data(), testsize);
 }
 
 template<typename T>
diff --git a/audio_utils/tests/sample_tests.cpp b/audio_utils/tests/sample_tests.cpp
new file mode 100644
index 0000000..aaeef43
--- /dev/null
+++ b/audio_utils/tests/sample_tests.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <array>
+#include <climits>
+#include <math.h>
+
+#include <audio_utils/sample.h>
+#include <gtest/gtest.h>
+
+static_assert(sizeof(sample_minifloat_t) == sizeof(uint16_t),
+        "sizeof(sample_minifloat_t) != sizeof(uint16_t)");
+
+static constexpr int signum(float f)
+{
+    return (f > 0) - (f < 0);
+}
+
+TEST(audio_utils_sample, Convert)
+{
+    std::vector<float> fvec;
+    // verify minifloat <-> float is a bijection, and monotonic as float
+    for (int i = 0; i <= 0xFFFF; i++) {
+        // construct floats in order
+        const int val = i < 0x8000 ? 0xFFFF - i : i ^ 0x8000;
+        // TODO shouldn't depend on representation in order to skip negative zero
+        if (val == 0x8000) {
+            // This is an undefined value and so we won't test its behavior
+            continue;
+        }
+        // TODO reinterpret_cast<sample_minifloat_t>(val) fails
+        const sample_minifloat_t in = (sample_minifloat_t) val;
+        const float f = float_from_sample(in);
+        const sample_minifloat_t out = sample_from_float(f);
+        ASSERT_EQ(in, out);
+        fvec.push_back(f);
+    }
+    // no longer needed since we construct floats in order
+    // #include <algorithm>
+    // std::sort(fvec.begin(), fvec.end());
+    float prev = -2.0f;
+    for (auto curr : fvec) {
+        // LT instead of LE because no negative zero
+        ASSERT_LT(prev, curr);
+        int signum_prev = signum(prev);
+        int signum_curr = signum(curr);
+        ASSERT_LE(signum_prev, signum_curr);
+        if (signum_prev == signum_curr) {
+            // confirm ratio between adjacent values (3:45 of "Will it float?" video)
+            float ratio = curr / prev;
+            float lower, upper;
+            // normal
+            if (fabsf(curr) >= 0.001f) {
+                upper = 1.005f;
+                lower = 0.995f;
+            // denormal
+            } else {
+                upper = 2.0f;
+                lower = 0.5f;
+            }
+            ASSERT_GE(ratio, lower) << "prev " << prev << " curr " << curr;
+            ASSERT_LE(ratio, upper) << "prev " << prev << " curr " << curr;
+        }
+        prev = curr;
+    }
+    ASSERT_LT(prev, 2.0f);
+}
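
How the loop above enumerates minifloats in ascending float order, an inference from the val expression:

    // i in [0x0000, 0x7FFF]: val = 0xFFFF - i  walks 0xFFFF -> 0x8000, i.e. the
    //     negative codes from most negative up toward negative zero;
    // i in [0x8000, 0xFFFF]: val = i ^ 0x8000  walks 0x0000 -> 0x7FFF, i.e. the
    //     positive codes from zero up to the maximum.
    // Negative zero (0x8000 itself) is skipped as unspecified, which is also why
    // the monotonicity check can use ASSERT_LT rather than ASSERT_LE.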
diff --git a/audio_utils/tests/simplelog_tests.cpp b/audio_utils/tests/simplelog_tests.cpp
index 1ef8a83..e2fa64e 100644
--- a/audio_utils/tests/simplelog_tests.cpp
+++ b/audio_utils/tests/simplelog_tests.cpp
@@ -38,28 +38,31 @@
     slog->log("Hello %d", nine);
     slog->log("World");
 
+    slog->logs(-1 /* nowNs */, std::string("ABC")); // may take a std::string as well
+
     // two lines (no header)
-    EXPECT_EQ((size_t)2, countNewLines(slog->dumpToString()));
+    EXPECT_EQ((size_t)3, countNewLines(slog->dumpToString()));
 
     // another two lines (this is out of time order, but the log doesn't care)
     slog->log(oneSecond /* nowNs */, "Hello World %d", 10);
     slog->log(oneSecond * 2 /* nowNs */, "%s", "Goodbye");
 
-    EXPECT_EQ((size_t)4, countNewLines(slog->dumpToString()));
+    EXPECT_EQ((size_t)5, countNewLines(slog->dumpToString()));
+
 
     // truncate on lines
     EXPECT_EQ((size_t)1, countNewLines(slog->dumpToString("" /* prefix */, 1 /* lines */)));
 
     // truncate on time
-    EXPECT_EQ((size_t)4, countNewLines(
+    EXPECT_EQ((size_t)5, countNewLines(
             slog->dumpToString("" /* prefix */, 0 /* lines */, oneSecond /* limitNs */)));
 
     // truncate on time (more)
-    EXPECT_EQ((size_t)3, countNewLines(
+    EXPECT_EQ((size_t)4, countNewLines(
             slog->dumpToString("" /* prefix */, 0 /* lines */, oneSecond * 2 /* limitNs */)));
 
     // truncate on time (more)
-    EXPECT_EQ((size_t)2, countNewLines(
+    EXPECT_EQ((size_t)3, countNewLines(
             slog->dumpToString("" /* prefix */, 0 /* lines */, oneSecond * 2 + 1 /* limitNs */)));
 
     std::cout << slog->dumpToString() << std::flush;
@@ -69,12 +72,14 @@
     // The output below depends on the local time zone and current time.
     // The indentation below is exact, check alignment.
     /*
-03-27 14:47:43.567 Hello 9
-03-27 14:47:43.567 World
+08-28 11:11:30.057 Hello 9
+08-28 11:11:30.057 World
+08-28 11:11:30.057 ABC
 12-31 16:00:01.000 Hello World 10
 12-31 16:00:02.000 Goodbye
-  03-27 14:47:43.567 Hello 9
-  03-27 14:47:43.567 World
+  08-28 11:11:30.057 Hello 9
+  08-28 11:11:30.057 World
+  08-28 11:11:30.057 ABC
   12-31 16:00:01.000 Hello World 10
   12-31 16:00:02.000 Goodbye
      */
diff --git a/audio_utils/tests/statistics_benchmark.cpp b/audio_utils/tests/statistics_benchmark.cpp
new file mode 100644
index 0000000..9613278
--- /dev/null
+++ b/audio_utils/tests/statistics_benchmark.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstddef>
+#include <random>
+#include <vector>
+
+#include <benchmark/benchmark.h>
+
+#include <audio_utils/Statistics.h>
+
+template <typename T>
+static void initUniform(std::vector<T> &data, T rangeMin, T rangeMax) {
+    const size_t count = data.capacity();
+    std::minstd_rand gen(count);
+    std::uniform_real_distribution<T> dis(rangeMin, rangeMax);
+    for (auto &datum : data) {
+        datum = dis(gen);
+    }
+}
+
+template <typename Stats>
+static void BM_MeanVariance(benchmark::State& state, int iterlimit, int alphalimit) {
+    const float alpha = 1. - alphalimit * std::numeric_limits<float>::epsilon();
+    Stats stat(alpha);
+    using T = decltype(stat.getMin());
+    constexpr size_t count = 1 << 20; // exactly one "mega" samples from the distribution.
+    constexpr T range = 1.;
+    std::vector<T> data(count);
+    initUniform(data, -range, range);
+
+    // Run the test
+    int iters = 0;
+    while (state.KeepRunning()) {
+        benchmark::DoNotOptimize(data.data());
+        for (const auto &datum : data) {
+            stat.add(datum);
+        }
+        benchmark::ClobberMemory();
+        if (++iters % iterlimit == 0) {
+            printf("%d>  alpha:%f  mean:%.17g  variance:%.17g\n",
+                    iters, alpha, (double)stat.getMean(), (double)stat.getPopVariance());
+            stat.reset();
+        }
+    }
+    state.SetComplexityN(count);
+}
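
Why the iterlimit/alphalimit pairs below are chosen this way (a sketch of the numerics, not taken from Statistics.h): the running mean loses its correction term once the accumulated weight nears float's 2^24 mantissa capacity,

    m_{n+1} = m_n + \frac{x_{n+1} - m_n}{w_{n+1}}, \qquad
    w_{n+1} = \alpha\, w_n + 1 \le \frac{1}{1-\alpha} \quad (\alpha < 1)

so 8 iterations of 2^20 samples probe the alpha == 1 boundary at 2^23, while the alpha < 1 cases bound the effective window and can collect indefinitely.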
+
+
+// Test case:
+// Do we work correctly within the capacity of float statistics when alpha == 1?
+//
+// 1 << 23 samples is the mantissa limited capacity of float statistics if alpha == 1.
+static constexpr int float_iterlimit = 8;
+// alphalimit of 0 means alpha exactly equals one.
+static constexpr int alpha_equals_one_alphalimit = 0;
+
+// benchmark running float
+static void BM_MeanVariance_float_float_float(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, float, float>>(state,
+        float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_float_float);
+
+// benchmark reference float
+static void BM_RefMeanVariance_float_float(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::ReferenceStatistics<float, float>>(state,
+        float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_RefMeanVariance_float_float);
+
+// benchmark running double
+static auto BM_MeanVariance_float_double_double(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, double, double>>(state,
+        float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_double_double);
+
+// benchmark reference double
+static auto BM_RefMeanVariance_float_double(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::ReferenceStatistics<float, double>>(state,
+        float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_RefMeanVariance_float_double);
+
+// benchmark running float + kahan
+static auto BM_MeanVariance_float_float_Kahan(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, float,
+        android::audio_utils::KahanSum<float>>>(state,
+            float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_float_Kahan);
+
+// benchmark running float + Neumaier
+static auto BM_MeanVariance_float_float_Neumaier(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, float,
+        android::audio_utils::NeumaierSum<float>>>(state,
+            float_iterlimit, alpha_equals_one_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_float_Neumaier);
+
+// Test case:
+// Do we work correctly for very large N statistics when alpha is 1 - 32 * epsilon?
+// This simulates long term statistics collection, where the alpha weighted windowing
+// permits us to exceed 1 << 23 samples reliably.
+//
+// 1 << 25 samples exceeds the mantissa limited capacity of float statistics if alpha == 1...
+static constexpr int float_overflow_iterlimit = 32;
+// but we use an alphalimit of 32, which means 1. - (alphalimit * epsilon) approx = 0.999996.
+// This should allow statistics collection indefinitely.
+static constexpr int alpha_safe_upperbound_alphalimit = 32;
+
+// benchmark running float at alpha
+static auto BM_MeanVariance_float_float_float_alpha(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, float, float>>(state,
+        float_overflow_iterlimit, alpha_safe_upperbound_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_float_float_alpha);
+
+// benchmark running double
+static auto BM_MeanVariance_float_double_double_alpha(benchmark::State &state) {
+    BM_MeanVariance<android::audio_utils::Statistics<float, double, double>>(state,
+        float_overflow_iterlimit, alpha_safe_upperbound_alphalimit);
+}
+
+BENCHMARK(BM_MeanVariance_float_double_double_alpha);
+
+BENCHMARK_MAIN();
diff --git a/audio_utils/tests/statistics_tests.cpp b/audio_utils/tests/statistics_tests.cpp
new file mode 100644
index 0000000..9d78e4e
--- /dev/null
+++ b/audio_utils/tests/statistics_tests.cpp
@@ -0,0 +1,571 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audio_utils_statistics_tests"
+#include <audio_utils/Statistics.h>
+
+#include <random>
+#include <stdio.h>
+#include <gtest/gtest.h>
+
+// create uniform distribution
+template <typename T, typename V>
+static void initUniform(V& data, T rangeMin, T rangeMax) {
+    const size_t count = data.capacity();
+    std::minstd_rand gen(count);
+    std::uniform_real_distribution<T> dis(rangeMin, rangeMax);
+
+    // for_each works for scalars
+    for (auto& datum : data) {
+        android::audio_utils::for_each(datum, [&](T &value) { return value = dis(gen);});
+    }
+}
+
+// create gaussian distribution
+template <typename T, typename V>
+static void initNormal(V& data, T mean, T stddev) {
+    const size_t count = data.capacity();
+    std::minstd_rand gen(count);
+
+    // values near the mean are the most likely
+    // standard deviation affects the dispersion of generated values from the mean
+    std::normal_distribution<> dis{mean, stddev};
+
+    // for_each works for scalars
+    for (auto& datum : data) {
+        android::audio_utils::for_each(datum, [&](T &value) { return value = dis(gen);});
+    }
+}
+
+// Used to create compile-time reference constants for variance testing.
+template <typename T>
+class ConstexprStatistics {
+public:
+    template <size_t N>
+    explicit constexpr ConstexprStatistics(const T (&a)[N])
+        : mN{N}
+        , mMax{android::audio_utils::max(a)}
+        , mMin{android::audio_utils::min(a)}
+        , mMean{android::audio_utils::sum(a) / mN}
+        , mM2{android::audio_utils::sumSqDiff(a, mMean)}
+        , mPopVariance{mM2 / mN}
+        , mPopStdDev{android::audio_utils::sqrt_constexpr(mPopVariance)}
+        , mVariance{mM2 / (mN - 1)}
+        , mStdDev{android::audio_utils::sqrt_constexpr(mVariance)}
+    { }
+
+    constexpr int64_t getN() const { return mN; }
+    constexpr T getMin() const { return mMin; }
+    constexpr T getMax() const { return mMax; }
+    constexpr double getWeight() const { return (double)mN; }
+    constexpr double getMean() const { return mMean; }
+    constexpr double getVariance() const { return mVariance; }
+    constexpr double getStdDev() const { return mStdDev; }
+    constexpr double getPopVariance() const { return mPopVariance; }
+    constexpr double getPopStdDev() const { return mPopStdDev; }
+
+private:
+    const size_t mN;
+    const T mMax;
+    const T mMin;
+    const double mMean;
+    const double mM2;
+    const double mPopVariance;
+    const double mPopStdDev;
+    const double mVariance;
+    const double mStdDev;
+};
+
+class StatisticsTest : public testing::TestWithParam<const char *>
+{
+};
+
+// find power of 2 that is small enough that it doesn't add to 1. due to finite mantissa.
+template <typename T>
+constexpr T smallp2() {
+    T smallOne{};
+    for (smallOne = T{1.}; smallOne + T{1.} > T{1.}; smallOne *= T(0.5));
+    return smallOne;
+}
+
+// Our near expectation is 8x the bit that doesn't fit the mantissa.
+// This works so long as we add values close in exponent with each other,
+// realizing that errors accumulate as the sqrt of N (random walk, lln, etc.).
+#define TEST_EXPECT_NEAR(e, v) \
+    EXPECT_NEAR((e), (v), abs((e) * std::numeric_limits<decltype(e)>::epsilon() * 8))
+
+#define PRINT_AND_EXPECT_EQ(expected, expr) { \
+    auto value = (expr); \
+    printf("(%s): %s\n", #expr, std::to_string(value).c_str()); \
+    if ((expected) == (expected)) { EXPECT_EQ((expected), (value)); } \
+    EXPECT_EQ((expected) != (expected), (value) != (value)); /* nan check */\
+}
+
+#define PRINT_AND_EXPECT_NEAR(expected, expr) { \
+    auto ref = (expected); \
+    auto value = (expr); \
+    printf("(%s): %s\n", #expr, std::to_string(value).c_str()); \
+    TEST_EXPECT_NEAR(ref, value); \
+}
+
+template <typename T, typename S>
+static void verify(const T &stat, const S &refstat) {
+    EXPECT_EQ(refstat.getN(), stat.getN());
+    EXPECT_EQ(refstat.getMin(), stat.getMin());
+    EXPECT_EQ(refstat.getMax(), stat.getMax());
+    TEST_EXPECT_NEAR(refstat.getWeight(), stat.getWeight());
+    TEST_EXPECT_NEAR(refstat.getMean(), stat.getMean());
+    TEST_EXPECT_NEAR(refstat.getVariance(), stat.getVariance());
+    TEST_EXPECT_NEAR(refstat.getStdDev(), stat.getStdDev());
+    TEST_EXPECT_NEAR(refstat.getPopVariance(), stat.getPopVariance());
+    TEST_EXPECT_NEAR(refstat.getPopStdDev(), stat.getPopStdDev());
+}
+
+// Test against fixed reference
+
+TEST(StatisticsTest, high_precision_sums)
+{
+    static const double simple[] = { 1., 2., 3. };
+
+    double rssum = android::audio_utils::sum<double, double>(simple);
+    PRINT_AND_EXPECT_EQ(6., rssum);
+    double kssum =
+        android::audio_utils::sum<double, android::audio_utils::KahanSum<double>>(simple);
+    PRINT_AND_EXPECT_EQ(6., kssum);
+    double nmsum =
+        android::audio_utils::sum<double, android::audio_utils::NeumaierSum<double>>(simple);
+    PRINT_AND_EXPECT_EQ(6., nmsum);
+
+    double rs{};
+    android::audio_utils::KahanSum<double> ks{};
+    android::audio_utils::NeumaierSum<double> ns{};
+
+    // add 1.
+    rs += 1.;
+    ks += 1.;
+    ns += 1.;
+
+    static constexpr double smallOne = std::numeric_limits<double>::epsilon() * 0.5;
+    // add lots of small values
+    static const int loop = 1000;
+    for (int i = 0; i < loop; ++i) {
+        rs += smallOne;
+        ks += smallOne;
+        ns += smallOne;
+    }
+
+    // remove 1.
+    rs += -1.;
+    ks += -1.;
+    ns += -1.;
+
+    const double totalAdded = smallOne * loop;
+    printf("totalAdded: %lg\n", totalAdded);
+    PRINT_AND_EXPECT_EQ(0., rs);            // normal count fails
+    PRINT_AND_EXPECT_EQ(totalAdded, ks);    // kahan succeeds
+    PRINT_AND_EXPECT_EQ(totalAdded, ns);    // neumaier succeeds
+
+    // test case where kahan fails and neumaier method succeeds.
+    static const double tricky[] = { 1e100, 1., -1e100 };
+
+    rssum = android::audio_utils::sum<double, double>(tricky);
+    PRINT_AND_EXPECT_EQ(0., rssum);
+    kssum = android::audio_utils::sum<double, android::audio_utils::KahanSum<double>>(tricky);
+    PRINT_AND_EXPECT_EQ(0., kssum);
+    nmsum = android::audio_utils::sum<double, android::audio_utils::NeumaierSum<double>>(tricky);
+    PRINT_AND_EXPECT_EQ(1., nmsum);
+}
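
For readers unfamiliar with the two compensated sums, a standalone sketch of the textbook Neumaier algorithm (not the Statistics.h implementation):

    #include <math.h>

    double neumaier_sum(const double *v, int n) {
        double sum = 0., comp = 0.;         // running sum and compensation
        for (int i = 0; i < n; ++i) {
            const double t = sum + v[i];
            if (fabs(sum) >= fabs(v[i])) {
                comp += (sum - t) + v[i];   // low-order bits of v[i] were lost
            } else {
                comp += (v[i] - t) + sum;   // low-order bits of sum were lost
            }
            sum = t;
        }
        return sum + comp;
    }

Unlike Kahan, the compensation also survives an addend larger than the running sum, which is exactly the { 1e100, 1., -1e100 } case that only the Neumaier sum gets right above.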
+
+TEST(StatisticsTest, minmax_bounds)
+{
+    // range based min and max use iterator forms of min and max.
+
+    static constexpr double one[] = { 1. };
+
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            android::audio_utils::min(&one[0], &one[0]));
+
+    PRINT_AND_EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            android::audio_utils::max(&one[0], &one[0]));
+
+    static constexpr int un[] = { 1 };
+
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<int>::max(),
+            android::audio_utils::min(&un[0], &un[0]));
+
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<int>::min(),
+            android::audio_utils::max(&un[0], &un[0]));
+
+    double nanarray[] = { nan(""), nan(""), nan("") };
+
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            android::audio_utils::min(nanarray));
+
+    PRINT_AND_EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            android::audio_utils::max(nanarray));
+
+    android::audio_utils::Statistics<double> s(nanarray);
+
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<double>::infinity(),
+           s.getMin());
+
+    PRINT_AND_EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+            s.getMax());
+}
+
+/*
+TEST(StatisticsTest, sqrt_convergence)
+{
+    union {
+        int i;
+        float f;
+    } u;
+
+    for (int i = 0; i < INT_MAX; ++i) {
+        u.i = i;
+        const float f = u.f;
+        if (!android::audio_utils::isnan(f)) {
+            const float sf = android::audio_utils::sqrt(f);
+            if ((i & (1 << 16) - 1) == 0) {
+                printf("i: %d  f:%f  sf:%f\n", i, f, sf);
+            }
+        }
+    }
+}
+*/
+
+TEST(StatisticsTest, minmax_simple_array)
+{
+    static constexpr double ary[] = { -1.5, 1.5, -2.5, 2.5 };
+
+    PRINT_AND_EXPECT_EQ(-2.5, android::audio_utils::min(ary));
+
+    PRINT_AND_EXPECT_EQ(2.5, android::audio_utils::max(ary));
+
+    static constexpr int ray[] = { -1, 1, -2, 2 };
+
+    PRINT_AND_EXPECT_EQ(-2, android::audio_utils::min(ray));
+
+    PRINT_AND_EXPECT_EQ(2, android::audio_utils::max(ray));
+}
+
+TEST(StatisticsTest, sqrt)
+{
+    // check doubles
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<double>::infinity(),
+            android::audio_utils::sqrt(std::numeric_limits<double>::infinity()));
+
+    PRINT_AND_EXPECT_EQ(std::nan(""),
+            android::audio_utils::sqrt(-std::numeric_limits<double>::infinity()));
+
+    PRINT_AND_EXPECT_NEAR(sqrt(std::numeric_limits<double>::epsilon()),
+            android::audio_utils::sqrt(std::numeric_limits<double>::epsilon()));
+
+    PRINT_AND_EXPECT_EQ(3.,
+            android::audio_utils::sqrt(9.));
+
+    PRINT_AND_EXPECT_EQ(0.,
+            android::audio_utils::sqrt(0.));
+
+    PRINT_AND_EXPECT_EQ(std::nan(""),
+            android::audio_utils::sqrt(-1.));
+
+    PRINT_AND_EXPECT_EQ(std::nan(""),
+            android::audio_utils::sqrt(std::nan("")));
+
+    // check floats
+    PRINT_AND_EXPECT_EQ(std::numeric_limits<float>::infinity(),
+            android::audio_utils::sqrt(std::numeric_limits<float>::infinity()));
+
+    PRINT_AND_EXPECT_EQ(std::nanf(""),
+            android::audio_utils::sqrt(-std::numeric_limits<float>::infinity()));
+
+    PRINT_AND_EXPECT_NEAR(sqrtf(std::numeric_limits<float>::epsilon()),
+            android::audio_utils::sqrt(std::numeric_limits<float>::epsilon()));
+
+    PRINT_AND_EXPECT_EQ(2.f,
+            android::audio_utils::sqrt(4.f));
+
+    PRINT_AND_EXPECT_EQ(0.f,
+            android::audio_utils::sqrt(0.f));
+
+    PRINT_AND_EXPECT_EQ(std::nanf(""),
+            android::audio_utils::sqrt(-1.f));
+
+    PRINT_AND_EXPECT_EQ(std::nanf(""),
+            android::audio_utils::sqrt(std::nanf("")));
+}
+
+TEST(StatisticsTest, stat_reference)
+{
+    // fixed reference compile time constants.
+    static constexpr double data[] = {0.1, -0.1, 0.2, -0.3};
+    static constexpr ConstexprStatistics<double> rstat(data); // use alpha = 1.
+    static constexpr android::audio_utils::Statistics<double> stat{data};
+
+    verify(stat, rstat);
+}
+
+TEST(StatisticsTest, stat_variable_alpha)
+{
+    constexpr size_t TEST_SIZE = 1 << 20;
+    std::vector<double> data(TEST_SIZE);
+    std::vector<double> alpha(TEST_SIZE);
+
+    initUniform(data, -1., 1.);
+    initUniform(alpha, .95, .99);
+
+    android::audio_utils::ReferenceStatistics<double> rstat;
+    android::audio_utils::Statistics<double> stat;
+
+    static_assert(std::is_trivially_copyable<decltype(stat)>::value,
+        "basic statistics must be trivially copyable");
+
+    for (size_t i = 0; i < TEST_SIZE; ++i) {
+        rstat.setAlpha(alpha[i]);
+        rstat.add(data[i]);
+
+        stat.setAlpha(alpha[i]);
+        stat.add(data[i]);
+    }
+
+    printf("statistics: %s\n", stat.toString().c_str());
+    printf("ref statistics: %s\n", rstat.toString().c_str());
+    verify(stat, rstat);
+}
+
+TEST(StatisticsTest, stat_vector)
+{
+    // for operator overloading...
+    using namespace android::audio_utils;
+
+    using data_t = std::tuple<double, double>;
+    using covariance_t = std::tuple<double, double, double, double>;
+    using covariance_ut_t = std::tuple<double, double, double>;
+
+    constexpr size_t TEST_SIZE = 1 << 20;
+    std::vector<data_t> data(TEST_SIZE);
+    // std::vector<double> alpha(TEST_SIZE);
+
+    initUniform(data, -1., 1.);
+
+    std::cout << "sample data[0]: " << data[0] << "\n";
+
+    Statistics<data_t, data_t, data_t, double, double, innerProduct_scalar<data_t>> stat;
+    Statistics<data_t, data_t, data_t, double,
+            covariance_t, outerProduct_tuple<data_t>> stat_outer;
+    Statistics<data_t, data_t, data_t, double,
+            covariance_ut_t, outerProduct_UT_tuple<data_t>> stat_outer_ut;
+
+    using pair_t = std::pair<double, double>;
+    std::vector<pair_t> pairs(TEST_SIZE);
+    initUniform(pairs, -1., 1.);
+    Statistics<pair_t, pair_t, pair_t, double, double, innerProduct_scalar<pair_t>> stat_pair;
+
+    using array_t = std::array<double, 2>;
+    using array_covariance_ut_t = std::array<double, 3>;
+    std::vector<array_t> arrays(TEST_SIZE);
+    initUniform(arrays, -1., 1.);
+    Statistics<array_t, array_t, array_t, double,
+               double, innerProduct_scalar<array_t>> stat_array;
+    Statistics<array_t, array_t, array_t, double,
+               array_covariance_ut_t, outerProduct_UT_array<array_t>> stat_array_ut;
+
+    for (size_t i = 0; i < TEST_SIZE; ++i) {
+        stat.add(data[i]);
+        stat_outer.add(data[i]);
+        stat_outer_ut.add(data[i]);
+        stat_pair.add(pairs[i]);
+        stat_array.add(arrays[i]);
+        stat_array_ut.add(arrays[i]);
+    }
+
+#if 0
+    // these aren't trivially copyable
+    static_assert(std::is_trivially_copyable<decltype(stat)>::value,
+        "tuple based inner product not trivially copyable");
+    static_assert(std::is_trivially_copyable<decltype(stat_outer)>::value,
+        "tuple based outer product not trivially copyable");
+    static_assert(std::is_trivially_copyable<decltype(stat_outer_ut)>::value,
+        "tuple based outer product not trivially copyable");
+#endif
+    static_assert(std::is_trivially_copyable<decltype(stat_array)>::value,
+        "array based inner product not trivially copyable");
+    static_assert(std::is_trivially_copyable<decltype(stat_array_ut)>::value,
+        "array based inner product not trivially copyable");
+
+    // inner product variance should be same as outer product diagonal sum
+    const double variance = stat.getPopVariance();
+    EXPECT_NEAR(variance,
+        std::get<0>(stat_outer.getPopVariance()) +
+        std::get<3>(stat_outer.getPopVariance()),
+        variance * std::numeric_limits<double>::epsilon() * 128);
+
+    // outer product covariance should be identical
+    PRINT_AND_EXPECT_NEAR(std::get<1>(stat_outer.getPopVariance()),
+        std::get<2>(stat_outer.getPopVariance()));
+
+    // upper triangular computation should be identical to outer product
+    PRINT_AND_EXPECT_NEAR(std::get<0>(stat_outer.getPopVariance()),
+        std::get<0>(stat_outer_ut.getPopVariance()));
+    PRINT_AND_EXPECT_NEAR(std::get<1>(stat_outer.getPopVariance()),
+        std::get<1>(stat_outer_ut.getPopVariance()));
+    PRINT_AND_EXPECT_NEAR(std::get<3>(stat_outer.getPopVariance()),
+        std::get<2>(stat_outer_ut.getPopVariance()));
+
+    PRINT_AND_EXPECT_EQ(variance, stat_pair.getPopVariance());
+
+    EXPECT_TRUE(equivalent(stat_array_ut.getPopVariance(), stat_outer_ut.getPopVariance()));
+
+    printf("statistics_inner: %s\n", stat.toString().c_str());
+    printf("statistics_outer: %s\n", stat_outer.toString().c_str());
+    printf("statistics_outer_ut: %s\n", stat_outer_ut.toString().c_str());
+}
+
+TEST(StatisticsTest, stat_linearfit)
+{
+    using namespace android::audio_utils; // for operator overload
+    LinearLeastSquaresFit<double> fit;
+
+    static_assert(std::is_trivially_copyable<decltype(fit)>::value,
+        "LinearLeastSquaresFit must be trivially copyable");
+
+    using array_t = std::array<double, 2>;
+    array_t data{0.0, 1.5};
+
+    for (size_t i = 0; i < 10; ++i) {
+        fit.add(data);
+        data = data + array_t{0.1, 0.2};
+    }
+
+    // check the y line equation
+    {
+        double a, b, r2;
+        fit.computeYLine(a, b, r2);
+        printf("y line - a:%lf  b:%lf  r2:%lf\n", a, b, r2);
+        PRINT_AND_EXPECT_NEAR(1.5, a); // y intercept
+        PRINT_AND_EXPECT_NEAR(2.0, b); // y slope
+        PRINT_AND_EXPECT_NEAR(1.0, r2); // correlation coefficient.
+
+        // check same as static variant
+        double ac, bc, r2c;
+        computeYLineFromStatistics(ac, bc, r2c,
+            std::get<0>(fit.getMean()), /* mean_x */
+            std::get<1>(fit.getMean()), /* mean_y */
+            std::get<0>(fit.getPopVariance()), /* var_x */
+            std::get<1>(fit.getPopVariance()), /* cov_xy */
+            std::get<2>(fit.getPopVariance())); /* var_y */
+
+        EXPECT_EQ(a, ac);
+        EXPECT_EQ(b, bc);
+        EXPECT_EQ(r2, r2c);
+
+        TEST_EXPECT_NEAR(1.9, fit.getYFromX(0.2));
+        TEST_EXPECT_NEAR(0.2, fit.getXFromY(1.9));
+        TEST_EXPECT_NEAR(1.0, fit.getR2());
+    }
+
+    // check the x line equation
+    {
+        double a, b, r2;
+        fit.computeXLine(a, b, r2);
+        printf("x line - a:%lf  b:%lf  r2:%lf\n", a, b, r2);
+        PRINT_AND_EXPECT_NEAR(-0.75, a); // x intercept
+        PRINT_AND_EXPECT_NEAR(0.5, b); // x slope
+        PRINT_AND_EXPECT_NEAR(1.0, r2); // correlation coefficient.
+    }
+}
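
The expected coefficients follow from the statistics form of the least-squares line (textbook identities, stated here for reference rather than read out of LinearLeastSquaresFit):

    b = \frac{\mathrm{cov}(x, y)}{\mathrm{var}(x)}, \qquad
    a = \bar{y} - b\,\bar{x}, \qquad
    r^2 = \frac{\mathrm{cov}(x, y)^2}{\mathrm{var}(x)\,\mathrm{var}(y)}

Here every step adds (0.1, 0.2), so b = 0.2 / 0.1 = 2.0, and with the first point at (0.0, 1.5) the intercept is a = 1.5; the noiseless data makes r^2 exactly 1.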
+
+TEST(StatisticsTest, stat_linearfit_noise)
+{
+    using namespace android::audio_utils; // for operator overload
+    using array_t = std::array<double, 2>;
+    LinearLeastSquaresFit<double> fit;
+
+    // We use 1000 steps for a linear line going from (0, 0) to (1, 1) as true data for
+    // our linear fit.
+    constexpr size_t ELEMENTS = 1000;
+    array_t incr{1. / ELEMENTS, 1. / ELEMENTS};
+
+    // To simulate additive noise, we use a Gaussian with stddev of 1, and then scale
+    // achieve the desired stddev. We precompute our noise here (1000 of them).
+    std::vector<array_t> noise(ELEMENTS);
+    initNormal(noise, 0. /* mean */, 1. /* stddev */);
+
+    for (int i = 0; i < 30; ++i) {
+        // We run through 30 trials, with noise stddev ranging from 0 to 1.
+        // The steps increment linearly from 0.001 to 0.01, linearly from 0.01 to 0.1, and
+        // linearly again from 0.1 to 1.0.
+        // 0.001, 0.002, ... 0.009, 0.01, 0.02, ....0.09, 0.1, 0.2, .... 1.0
+        const double stddev = (i <= 10) ? i / 1000. : (i <= 20) ? (i - 9) / 100. : (i - 19) / 10.;
+        fit.reset();
+
+        for (size_t j = 0; j < ELEMENTS; ++j) {
+            array_t data = j * incr + noise[j] * stddev;
+            fit.add(data);
+        }
+
+        double a, b, r2;
+        fit.computeYLine(a, b, r2);
+        printf("stddev: %lf y line - N:%lld a:%lf  b:%lf  r2:%lf\n",
+                stddev, (long long) fit.getN(), a, b, r2);
+    }
+}
+
+
+TEST_P(StatisticsTest, stat_simple_char)
+{
+    const char *param = GetParam();
+
+    android::audio_utils::Statistics<char> stat(0.9);
+    android::audio_utils::ReferenceStatistics<char> rstat(0.9);
+
+    // feed the string character by character to the statistics collectors.
+    for (size_t i = 0; param[i] != '\0'; ++i) {
+        stat.add(param[i]);
+        rstat.add(param[i]);
+    }
+
+    printf("statistics for %s: %s\n", param, stat.toString().c_str());
+    printf("ref statistics for %s: %s\n", param, rstat.toString().c_str());
+    // verify that the statistics are the same
+    verify(stat, rstat);
+}
+
+// find the variance of pet names as signed characters.
+const char *pets[] = {"cat", "dog", "elephant", "mountain lion"};
+INSTANTIATE_TEST_CASE_P(PetNameStatistics, StatisticsTest,
+                        ::testing::ValuesIn(pets));
+
+TEST(StatisticsTest, simple_stats)
+{
+    simple_stats_t ss{};
+
+    for (const double value : { -1., 1., 3.}) {
+        simple_stats_log(&ss, value);
+    }
+
+    PRINT_AND_EXPECT_EQ(3., ss.last);
+    PRINT_AND_EXPECT_EQ(1., ss.mean);
+    PRINT_AND_EXPECT_EQ(-1., ss.min);
+    PRINT_AND_EXPECT_EQ(3., ss.max);
+    PRINT_AND_EXPECT_EQ(3, ss.n);
+
+    char buffer[256];
+    simple_stats_to_string(&ss, buffer, sizeof(buffer));
+    printf("simple_stats: %s", buffer);
+}
diff --git a/audio_utils/tests/timestampverifier_tests.cpp b/audio_utils/tests/timestampverifier_tests.cpp
new file mode 100644
index 0000000..7b8afa0
--- /dev/null
+++ b/audio_utils/tests/timestampverifier_tests.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audio_utils_timestampverifier_tests"
+
+#include <stdio.h>
+
+#include <audio_utils/TimestampVerifier.h>
+#include <gtest/gtest.h>
+
+// Ensure that all TimestampVerifier mutators are really constexpr and free from
+// nasty system calls (in case called from a SCHED_FIFO thread).
+static constexpr auto makeVerifier(
+        size_t N, uint32_t sampleRate, size_t errors, size_t discontinuities) {
+    android::TimestampVerifier<int64_t, int64_t> tv;
+
+    int64_t f = 0;
+    int64_t t = 0;
+    for (size_t i = 0; i < N; ++i) {
+        tv.add(f, t, sampleRate);
+        f += sampleRate;
+        t += (int64_t)1e9;
+    }
+    for (size_t i = 0; i < discontinuities; ++i) {
+        tv.discontinuity();
+    }
+    for (size_t i = 0; i < errors; ++i) {
+        tv.error();
+    }
+    return tv;
+}
+
+TEST(TimestampVerifier, sanity)
+{
+    constexpr android::TimestampVerifier<int64_t, int64_t> tv;
+
+    // The timestamp verifier must be embeddable in a memcpy-able structure, just like a POD
+    // type. We use is_trivially_copyable and is_trivially_destructible for this test.
+    static_assert(std::is_trivially_copyable<decltype(tv)>::value,
+        "TimestampVerifier must be trivially copyable");
+    static_assert(std::is_trivially_destructible<decltype(tv)>::value,
+        "TimestampVerifier must be trivially destructible");
+
+    constexpr android::audio_utils::Statistics<double> s = tv.getJitterMs();
+
+    EXPECT_EQ(std::numeric_limits<double>::infinity(), s.getMin());
+    EXPECT_EQ(-std::numeric_limits<double>::infinity(), s.getMax());
+
+    constexpr int64_t frames[] { 0, 48000 };
+    constexpr int64_t timeNs[] { 0, 1000000000 };
+    constexpr android::TimestampVerifier<int64_t, int64_t> tv2(frames, timeNs, 48000);
+    EXPECT_EQ(0., tv2.getJitterMs().getMax());
+    EXPECT_EQ(0., tv2.getJitterMs().getMin());
+    EXPECT_EQ(0., tv2.getJitterMs().getMean());
+    EXPECT_EQ(1, tv2.getJitterMs().getN());
+
+    // We should get a perfect straight line estimate as there is no noise.
+    double a, b, r2;
+    tv2.estimateSampleRate(a, b, r2);
+    EXPECT_EQ(0., a);
+    EXPECT_EQ(48000., b);
+    EXPECT_NEAR(1., r2, std::numeric_limits<double>::epsilon());
+
+    constexpr android::TimestampVerifier<int64_t, int64_t> tv3 =
+            makeVerifier(8 /* N */, 48000 /* sampleRate */, 10 /* errors */, 10 /* disc */);
+    EXPECT_EQ(8, tv3.getN());
+    EXPECT_EQ(10, tv3.getErrors());
+    EXPECT_EQ(1, tv3.getDiscontinuities());  // consecutive discontinuities read as 1.
+    EXPECT_EQ(0., tv3.getJitterMs().getMax());
+    EXPECT_EQ(0., tv3.getJitterMs().getMin());
+    EXPECT_EQ(0., tv3.getJitterMs().getMean());
+
+    constexpr auto first = tv3.getFirstTimestamp();
+    constexpr auto last = tv3.getLastTimestamp();
+
+    EXPECT_EQ(0, first.mFrames);
+    EXPECT_EQ(0, first.mTimeNs);
+    EXPECT_EQ(48000 * (8 - 1), last.mFrames);
+    EXPECT_EQ((int64_t)1e9 * (8 - 1), last.mTimeNs);
+    EXPECT_EQ((uint32_t)48000, tv3.getSampleRate());
+    EXPECT_EQ(0, tv3.getColds());
+
+    tv3.estimateSampleRate(a, b, r2);
+    EXPECT_EQ(0., a);
+    EXPECT_EQ(48000., b);
+    EXPECT_NEAR(1., r2, std::numeric_limits<double>::epsilon());
+}
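
What "jitter" measures here can be read off the perfect-clock cases (an inference from the assertions, not from TimestampVerifier.h): each consecutive timestamp pair contributes the difference between the observed elapsed time and the elapsed frames converted at the nominal rate,

    \text{jitterMs} = \frac{\Delta t_{ns}}{10^6} - \frac{\Delta f}{f_s} \cdot 10^3

which is identically zero for the synthetic 48000-frames-per-second streams built above, and also explains getN() == 1 for two timestamps: jitter counts pairs, not samples.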
diff --git a/audio_utils/tests/variadic_tests.cpp b/audio_utils/tests/variadic_tests.cpp
new file mode 100644
index 0000000..8e24c9e
--- /dev/null
+++ b/audio_utils/tests/variadic_tests.cpp
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "audio_utils_variadic_tests"
+#include <audio_utils/variadic_utils.h>
+
+#include <stdio.h>
+#include <gtest/gtest.h>
+
+// Our near expectation is 8x the bit that doesn't fit the mantissa.
+// This works so long as we add values close in exponent with each other,
+// realizing that errors accumulate as the sqrt of N (random walk, lln, etc.).
+#define TEST_EXPECT_NEAR(e, v) \
+    EXPECT_NEAR((e), (v), abs((e) * std::numeric_limits<decltype(e)>::epsilon() * 8))
+
+#define PRINT_AND_EXPECT_EQ(expected, expr) { \
+    auto value = (expr); \
+    printf("(%s): %s\n", #expr, std::to_string(value).c_str()); \
+    if ((expected) == (expected)) { EXPECT_EQ((expected), (value)); } \
+    EXPECT_EQ((expected) != (expected), (value) != (value)); /* nan check */\
+}
+
+#define PRINT_AND_EXPECT_NEAR(expected, expr) { \
+    auto ref = (expected); \
+    auto value = (expr); \
+    printf("(%s): %s\n", #expr, std::to_string(value).c_str()); \
+    TEST_EXPECT_NEAR(ref, value); \
+}
+
+TEST(variadic_tests, printing)
+{
+    // for operator overloading...
+    using namespace android::audio_utils;
+
+    // print simple, deep value
+    std::cout << "std::make_tuple(1, 2, 3)= " << std::make_tuple(1, 2, 3) << "\n";
+    std::cout << "std::make_pair(1, std::make_pair(0, 1))= "
+              << std::make_pair(1, std::make_pair(0, 1)) << "\n";
+}
+
+TEST(variadic_tests, equivalence)
+{
+    using android::audio_utils::equivalent;
+    auto deep = std::make_pair(1., std::make_pair(2, 3));
+
+    EXPECT_TRUE(equivalent(deep, deep));
+    EXPECT_TRUE(equivalent(std::make_pair(1, 2), std::make_tuple(1, 2)));
+    EXPECT_FALSE(equivalent(std::make_pair(1, 2), std::make_pair(0, 2)));
+    EXPECT_FALSE(equivalent(std::make_pair(1, 2), 1));
+    EXPECT_FALSE(equivalent(0, 2));
+    EXPECT_TRUE(equivalent(1, 1.));
+}
+
+TEST(variadic_tests, template_checks)
+{
+    EXPECT_FALSE(android::audio_utils::is_variadic<double>::value);
+
+    using tuple_t = std::tuple<double, double>;
+
+    EXPECT_TRUE(android::audio_utils::is_variadic<tuple_t>::value);
+    EXPECT_TRUE(android::audio_utils::is_tuple<tuple_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_pair<tuple_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_array<tuple_t>::value);
+    EXPECT_FALSE(std::is_array<tuple_t>::value);
+
+    using pair_t = std::pair<double, double>;
+
+    EXPECT_TRUE(android::audio_utils::is_variadic<pair_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_tuple<pair_t>::value);
+    EXPECT_TRUE(android::audio_utils::is_pair<pair_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_array<pair_t>::value);
+    EXPECT_FALSE(std::is_array<pair_t>::value);
+
+    using array_t = std::array<double, 2>;
+
+    EXPECT_TRUE(android::audio_utils::is_variadic<array_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_tuple<array_t>::value);
+    EXPECT_FALSE(android::audio_utils::is_pair<array_t>::value);
+    EXPECT_TRUE(android::audio_utils::is_array<array_t>::value);
+    EXPECT_FALSE(std::is_array<array_t>::value);
+
+    EXPECT_FALSE(android::audio_utils::is_iterator<char>::value);
+    EXPECT_TRUE(android::audio_utils::is_iterator<char *>::value);
+    EXPECT_TRUE(android::audio_utils::is_iterator<decltype(std::vector<int>{}.begin())>::value);
+}
+
+TEST(variadic_tests, basic_math)
+{
+    // for operator overloading...
+    using namespace android::audio_utils;
+
+    using tuple_t = std::tuple<double, double>;
+    tuple_t x{1, 2};
+    tuple_t y{0, 3};
+    double z = 3;
+
+    std::cout << "x=" << x << " y=" << y << " x+y=" << (x + y) << "\n";
+    std::cout << "x=" << x << " y=" << y << " x*y=" << (x * y) << "\n";
+    std::cout << "x=" << x << " z=" << z << " x+z=" << (x + z) << "\n";
+    std::cout << "x=" << x << " z=" << z << " x*z=" << (x * z) << "\n";
+    std::cout << "x=" << x << " y=" << y << " innerProduct(x, y)=" << innerProduct(x, y) << "\n";
+    std::cout << "x=" << x << " y=" << y << " outerProduct(x, y)=" << outerProduct(x, y) << "\n";
+    std::cout << "x=" << x << " sqrt(x)=" << android::audio_utils::sqrt(x) << "\n";
+    std::cout << "x=" << x << " y=" << y
+            << " min(x, y)" <<  android::audio_utils::min(x, y) << "\n";
+
+    // check opequals mode
+    std::cout << "x=" << x;
+    std::cout << " x+=2" << (x += 2) << "\n";
+    std::cout << "x=" << x << " y=" << y;
+    std::cout << " x*=y" << (x *= y) << "\n";
+
+    using pair_t = std::pair<double, double>;
+    pair_t px{1, 2};
+    pair_t py{0, 3};
+
+    std::cout << "px=" << px << " py=" << py << " px+py=" << (px + py) << "\n";
+    std::cout << "px=" << px << " py=" << py << " px*py=" << (px * py) << "\n";
+    std::cout << "px=" << px << " z="  << z  << " px+z="  << (px + z) << "\n";
+    std::cout << "px=" << px << " z="  << z  << " px*z="  << (px * z) << "\n";
+    std::cout << "px=" << px << " py=" << py << " innerProduct(px, py)="
+            << innerProduct(px, py) << "\n";
+    std::cout << "px=" << px << " py=" << py << " outerProduct(px, py)="
+            << outerProduct(px, py) << "\n";
+
+    using array_t = std::array<double, 2>;
+    array_t ax{1, 2};
+    array_t ay{0, 3};
+
+    std::cout << "ax=" << ax << " ay=" << ay << " ax+ay=" << (ax + ay) << "\n";
+    std::cout << "ax=" << ax << " ay=" << ay << " ax*ay=" << (ax * ay) << "\n";
+    std::cout << "ax=" << ax << " z="  << z  << " ax+z="  << (ax + z) << "\n";
+    std::cout << "ax=" << ax << " z="  << z  << " ax*z="  << (ax * z) << "\n";
+    std::cout << "ax=" << px << " ay=" << ay << " innerProduct(ax, ay)="
+            << innerProduct(ax, ay) << "\n";
+    std::cout << "ax=" << px << " ay=" << ay << " outerProduct(ax, ay)="
+            << outerProduct(ax, ay) << "\n";
+
+    // deep math
+    auto deep = std::make_pair(1., std::make_pair(2, 3));
+    std::cout << "deep= " << deep << "\n";
+    std::cout << "deep + deep= " << deep + deep << "\n";
+    std::cout << "deep + 1= " << deep + 1 << "\n";
+}
diff --git a/audio_utils/tinysndfile.c b/audio_utils/tinysndfile.c
index 9c20862..e94bb65 100644
--- a/audio_utils/tinysndfile.c
+++ b/audio_utils/tinysndfile.c
@@ -489,11 +489,8 @@
     handle->remaining -= actualFrames;
     switch (format) {
     case SF_FORMAT_PCM_U8:
-#if 0
-        // TODO - implement
         memcpy_to_float_from_u8(ptr, (const unsigned char *) temp,
                 actualFrames * handle->info.channels);
-#endif
         free(temp);
         break;
     case SF_FORMAT_PCM_16:
@@ -540,11 +537,8 @@
     handle->remaining -= actualFrames;
     switch (format) {
     case SF_FORMAT_PCM_U8:
-#if 0
-        // TODO - implement
         memcpy_to_i32_from_u8(ptr, (const unsigned char *) temp,
                 actualFrames * handle->info.channels);
-#endif
         free(temp);
         break;
     case SF_FORMAT_PCM_16:
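
With the #if 0 stubs removed, both read paths now convert U8 data instead of freeing the temp buffer and leaving the caller's output unfilled; the converters they call are the ones implemented in primitives.c earlier in this change. A sketch of decoding an 8-bit WAV through the libsndfile-style surface that tinysndfile mirrors (per audio_utils/sndfile.h; error handling mostly omitted):

    #include <stdlib.h>
    #include <audio_utils/sndfile.h>

    int main(void) {
        SF_INFO info;
        SNDFILE *sf = sf_open("test_u8.wav", SFM_READ, &info);
        if (sf == NULL) return EXIT_FAILURE;
        float *buf = malloc(info.frames * info.channels * sizeof(float));
        /* The U8 case now converts via memcpy_to_float_from_u8. */
        sf_readf_float(sf, buf, info.frames);
        free(buf);
        sf_close(sf);
        return EXIT_SUCCESS;
    }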
diff --git a/camera/OWNERS b/camera/OWNERS
index f1967eb..4b6515e 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,7 +1,2 @@
 set noparent
-cychen@google.com
-epeev@google.com
-etalvala@google.com
-shuzhenwang@google.com
-yinchiayeh@google.com
-zhijunhe@google.com
+include platform/frameworks/av:/camera/OWNERS
diff --git a/camera/TEST_MAPPING b/camera/TEST_MAPPING
new file mode 100644
index 0000000..2174126
--- /dev/null
+++ b/camera/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+  "presubmit": [
+    {
+       "name": "camera_metadata_tests"
+    }
+  ]
+}
diff --git a/camera/docs/CameraMetadataKeys.mako b/camera/docs/CameraMetadataKeys.mako
index 6006c8d..c53f5d6 100644
--- a/camera/docs/CameraMetadataKeys.mako
+++ b/camera/docs/CameraMetadataKeys.mako
@@ -63,6 +63,7 @@
   % endif
   % if entry.applied_visibility in ('public', 'java_public'):
     @PublicKey
+    @NonNull
   % endif
   % if entry.synthetic:
     @SyntheticKey
diff --git a/camera/docs/HidlMetadata.mako b/camera/docs/HidlMetadata.mako
index 439c14d..2677021 100644
--- a/camera/docs/HidlMetadata.mako
+++ b/camera/docs/HidlMetadata.mako
@@ -110,7 +110,7 @@
     ${entry.name + " =" | csym} CameraMetadataSectionStart:${path_name(find_parent_section(entry)) | csym}_START,
           % else:
 <%      prevVersion = find_first_older_used_hal_version(sec, hal_major_version(), hal_minor_version()) %>\
-    ${entry.name + " =" | csym} ${'android.hardware.camera.metadata@%d.%d' % prevVersion}::CameraMetadataTag:${path_name(find_parent_section(entry)) | csym}${'_END' if find_first_older_used_hal_version(sec, prevVersion[0], prevVersion[1]) == (0,0) else '_END_%d_%d' % prevVersion},
+    ${entry.name + " =" | csym} ${'android.hardware.camera.metadata@%d.%d' % prevVersion}::CameraMetadataTag:${path_name(find_parent_section(entry)) | csym}${'_END' if prevVersion[1] == first_hal_minor_version(hal_major_version()) else '_END_%d_%d' % prevVersion},
           % endif
         % else:
     ${entry.name + "," | csym}
diff --git a/camera/docs/README.md b/camera/docs/README.md
index c720455..6f3cbd7 100644
--- a/camera/docs/README.md
+++ b/camera/docs/README.md
@@ -8,7 +8,7 @@
 
 ## Dependencies
 * Python 2.7.x+
-* Beautiful Soup 4+ - HTML/XML parser, used to parse `metadata_properties.xml`
+* Beautiful Soup 4+ - HTML/XML parser, used to parse `metadata_definitions.xml`
 * Mako 0.7+         - Template engine, needed to do file generation.
 * Markdown 2.1+     - Plain text to HTML converter, for docs formatting.
 * Tidy              - Cleans up the XML/HTML files.
diff --git a/camera/docs/camera_device_info.proto b/camera/docs/camera_device_info.proto
index 10b8f87..76119ce 100644
--- a/camera/docs/camera_device_info.proto
+++ b/camera/docs/camera_device_info.proto
@@ -117,6 +117,7 @@
     optional float android_scaler_availableMaxDigitalZoom = 917504;
     optional StreamConfigurations android_scaler_streamConfigurationMap = 917505;
     optional int32 android_scaler_croppingType = 917506;
+    repeated MandatoryStreamCombination android_scaler_mandatoryStreamCombinations = 917507;
     optional int32 android_sensor_referenceIlluminant1 = 983040;
     optional int32 android_sensor_referenceIlluminant2 = 983041;
     optional ColorSpaceTransform android_sensor_calibrationTransform1 = 983042;
diff --git a/camera/docs/camera_metadata_tag_info.mako b/camera/docs/camera_metadata_tag_info.mako
index 9dde7bf..d0db13a 100644
--- a/camera/docs/camera_metadata_tag_info.mako
+++ b/camera/docs/camera_metadata_tag_info.mako
@@ -61,6 +61,14 @@
   % endfor
 };
 
+static int32_t tag_permission_needed[${permission_needed_count(metadata)}] = {
+% for sec in find_all_sections(metadata):
+  % for entry in remove_synthetic(filter_has_permission_needed(find_unique_entries(sec))):
+    ${entry.name | csym},
+  % endfor
+% endfor
+};
+
 int camera_metadata_enum_snprint(uint32_t tag,
                                  uint32_t value,
                                  char *dst,
diff --git a/camera/docs/docs.html b/camera/docs/docs.html
index e834cd3..735ee1e 100644
--- a/camera/docs/docs.html
+++ b/camera/docs/docs.html
@@ -671,6 +671,8 @@
             ><a href="#static_android.request.availableSessionKeys">android.request.availableSessionKeys</a></li>
             <li
             ><a href="#static_android.request.availablePhysicalCameraRequestKeys">android.request.availablePhysicalCameraRequestKeys</a></li>
+            <li
+            ><a href="#static_android.request.characteristicKeysNeedingPermission">android.request.characteristicKeysNeedingPermission</a></li>
           </ul>
         </li>
         <li>
@@ -740,6 +742,12 @@
             ><a href="#static_android.scaler.streamConfigurationMap">android.scaler.streamConfigurationMap</a></li>
             <li
             ><a href="#static_android.scaler.croppingType">android.scaler.croppingType</a></li>
+            <li
+            ><a href="#static_android.scaler.availableRecommendedStreamConfigurations">android.scaler.availableRecommendedStreamConfigurations</a></li>
+            <li
+            ><a href="#static_android.scaler.availableRecommendedInputOutputFormatsMap">android.scaler.availableRecommendedInputOutputFormatsMap</a></li>
+            <li
+            ><a href="#static_android.scaler.mandatoryStreamCombinations">android.scaler.mandatoryStreamCombinations</a></li>
           </ul>
         </li>
         <li>
@@ -1083,6 +1091,8 @@
             ><a href="#static_android.info.supportedHardwareLevel">android.info.supportedHardwareLevel</a></li>
             <li
             ><a href="#static_android.info.version">android.info.version</a></li>
+            <li
+            ><a href="#static_android.info.supportedBufferManagementVersion">android.info.supportedBufferManagementVersion</a></li>
           </ul>
         </li>
       </ul> <!-- toc_section -->
@@ -1167,6 +1177,14 @@
             ><a href="#static_android.depth.availableDepthStallDurations">android.depth.availableDepthStallDurations</a></li>
             <li
             ><a href="#static_android.depth.depthIsExclusive">android.depth.depthIsExclusive</a></li>
+            <li
+            ><a href="#static_android.depth.availableRecommendedDepthStreamConfigurations">android.depth.availableRecommendedDepthStreamConfigurations</a></li>
+            <li
+            ><a href="#static_android.depth.availableDynamicDepthStreamConfigurations">android.depth.availableDynamicDepthStreamConfigurations</a></li>
+            <li
+            ><a href="#static_android.depth.availableDynamicDepthMinFrameDurations">android.depth.availableDynamicDepthMinFrameDurations</a></li>
+            <li
+            ><a href="#static_android.depth.availableDynamicDepthStallDurations">android.depth.availableDynamicDepthStallDurations</a></li>
           </ul>
         </li>
       </ul> <!-- toc_section -->
@@ -1183,6 +1201,13 @@
             ><a href="#static_android.logicalMultiCamera.sensorSyncType">android.logicalMultiCamera.sensorSyncType</a></li>
           </ul>
         </li>
+        <li>
+          <span class="toc_kind_header">dynamic</span>
+          <ul class="toc_section">
+            <li
+            ><a href="#dynamic_android.logicalMultiCamera.activePhysicalId">android.logicalMultiCamera.activePhysicalId</a></li>
+          </ul>
+        </li>
       </ul> <!-- toc_section -->
     </li>
     <li>
@@ -1211,6 +1236,28 @@
         </li>
       </ul> <!-- toc_section -->
     </li>
+    <li>
+      <span class="toc_section_header"><a href="#section_heic">heic</a></span>
+      <ul class="toc_section">
+        <li>
+          <span class="toc_kind_header">static</span>
+          <ul class="toc_section">
+
+            <li
+            ><a href="#static_android.heic.info.supported">android.heic.info.supported</a></li>
+            <li
+            ><a href="#static_android.heic.info.maxJpegAppSegmentsCount">android.heic.info.maxJpegAppSegmentsCount</a></li>
+
+            <li
+            ><a href="#static_android.heic.availableHeicStreamConfigurations">android.heic.availableHeicStreamConfigurations</a></li>
+            <li
+            ><a href="#static_android.heic.availableHeicMinFrameDurations">android.heic.availableHeicMinFrameDurations</a></li>
+            <li
+            ><a href="#static_android.heic.availableHeicStallDurations">android.heic.availableHeicStallDurations</a></li>
+          </ul>
+        </li>
+      </ul> <!-- toc_section -->
+    </li>
   </ul>
 
 
@@ -11380,6 +11427,7 @@
               <p>Setting a location object in a request will include the GPS coordinates of the location
 into any JPEG images captured based on the request.<wbr/> These coordinates can then be
 viewed by anyone who receives the JPEG image.<wbr/></p>
+<p>This tag is also used for HEIC image capture.<wbr/></p>
             </td>
           </tr>
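
A minimal Java sketch of attaching a location to a capture request; per the details above, the same key also populates the EXIF GPS fields of HEIC captures (the helper class and method names are illustrative):

    import android.hardware.camera2.CaptureRequest;
    import android.location.Location;

    class GpsTagging {
        static void tagWithLocation(CaptureRequest.Builder builder, Location location) {
            // Embeds GPS coordinates into the EXIF of JPEG captures, and equally of HEIC captures.
            builder.set(CaptureRequest.JPEG_GPS_LOCATION, location);
        }
    }
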
 
@@ -11390,7 +11438,7 @@
                 
           <tr class="entry" id="controls_android.jpeg.gpsCoordinates">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Coordinates
             </td>
             <td class="entry_type">
@@ -11434,6 +11482,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -11442,7 +11498,7 @@
                 
           <tr class="entry" id="controls_android.jpeg.gpsProcessingMethod">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Processing<wbr/>Method
             </td>
             <td class="entry_type">
@@ -11481,6 +11537,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -11489,7 +11553,7 @@
                 
           <tr class="entry" id="controls_android.jpeg.gpsTimestamp">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Timestamp
             </td>
             <td class="entry_type">
@@ -11528,6 +11592,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -11610,6 +11682,10 @@
 </code></pre>
 <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
 also be set to EXTERNAL.<wbr/> The above code is not relevant in such case.<wbr/></p>
+<p>This tag is also used to describe the orientation of the HEIC image capture,<wbr/> in which
+case the rotation is reflected by
+<a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>,<wbr/> and not by
+rotating the image data itself.<wbr/></p>
             </td>
           </tr>
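
For reference, a minimal Java sketch of the orientation computation this entry refers to, following the standard camera2 pattern (the helper class name is illustrative). For HEIC captures the resulting value is written to the EXIF orientation flag rather than applied by rotating pixel data:

    import android.hardware.camera2.CameraCharacteristics;
    import android.view.OrientationEventListener;

    class JpegOrientationHelper {
        static int getJpegOrientation(CameraCharacteristics c, int deviceOrientation) {
            if (deviceOrientation == OrientationEventListener.ORIENTATION_UNKNOWN) return 0;
            int sensorOrientation = c.get(CameraCharacteristics.SENSOR_ORIENTATION);
            // Round device orientation to a multiple of 90 degrees.
            deviceOrientation = (deviceOrientation + 45) / 90 * 90;
            // Reverse device orientation for front-facing cameras.
            boolean facingFront = c.get(CameraCharacteristics.LENS_FACING)
                    == CameraCharacteristics.LENS_FACING_FRONT;
            if (facingFront) deviceOrientation = -deviceOrientation;
            // Desired orientation relative to the camera sensor orientation.
            return (sensorOrientation + deviceOrientation + 360) % 360;
        }
    }
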
 
@@ -11664,7 +11740,8 @@
           </tr>
           <tr class="entry_cont">
             <td class="entry_details" colspan="6">
-              <p>85-95 is typical usage range.<wbr/></p>
+              <p>85-95 is the typical usage range.<wbr/> This tag is also used to describe the quality
+of the HEIC image capture.<wbr/></p>
             </td>
           </tr>
 
@@ -11675,7 +11752,7 @@
                 
           <tr class="entry" id="controls_android.jpeg.thumbnailQuality">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>thumbnail<wbr/>Quality
             </td>
             <td class="entry_type">
@@ -11714,6 +11791,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used to describe the quality of the HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -11791,6 +11876,10 @@
   orientation is requested.<wbr/> LEGACY device will always report unrotated thumbnail
   size.<wbr/></li>
 </ul>
+<p>The tag is also used as the thumbnail size for HEIC image format capture,<wbr/> in which case
+the thumbnail rotation is reflected by
+<a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>,<wbr/> and not by
+rotating the thumbnail data itself.<wbr/></p>
             </td>
           </tr>
 
@@ -11902,6 +11991,7 @@
 and vice versa.<wbr/></li>
 <li>All non-<code>(0,<wbr/> 0)</code> sizes will have non-zero widths and heights.<wbr/></li>
 </ul>
+<p>This list is also used as supported thumbnail sizes for HEIC image format capture.<wbr/></p>
             </td>
           </tr>
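
A minimal Java sketch of choosing a thumbnail size from the advertised list, which applies to HEIC captures as well as JPEG; it assumes the list is sorted by increasing pixel area, so the last entry is the largest (helper names are illustrative):

    import android.hardware.camera2.CameraCharacteristics;
    import android.hardware.camera2.CaptureRequest;
    import android.util.Size;

    class ThumbnailHelper {
        static void pickLargestThumbnail(CameraCharacteristics chars,
                CaptureRequest.Builder builder) {
            Size[] sizes = chars.get(CameraCharacteristics.JPEG_AVAILABLE_THUMBNAIL_SIZES);
            if (sizes != null && sizes.length > 0) {
                // The (0, 0) entry means "no thumbnail"; all other entries have
                // non-zero widths and heights, per the guarantees above.
                builder.set(CaptureRequest.JPEG_THUMBNAIL_SIZE, sizes[sizes.length - 1]);
            }
        }
    }
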
 
@@ -12036,6 +12126,7 @@
               <p>Setting a location object in a request will include the GPS coordinates of the location
 into any JPEG images captured based on the request.<wbr/> These coordinates can then be
 viewed by anyone who receives the JPEG image.<wbr/></p>
+<p>This tag is also used for HEIC image capture.<wbr/></p>
             </td>
           </tr>
 
@@ -12046,7 +12137,7 @@
                 
           <tr class="entry" id="dynamic_android.jpeg.gpsCoordinates">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Coordinates
             </td>
             <td class="entry_type">
@@ -12090,6 +12181,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -12098,7 +12197,7 @@
                 
           <tr class="entry" id="dynamic_android.jpeg.gpsProcessingMethod">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Processing<wbr/>Method
             </td>
             <td class="entry_type">
@@ -12137,6 +12236,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -12145,7 +12252,7 @@
                 
           <tr class="entry" id="dynamic_android.jpeg.gpsTimestamp">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>gps<wbr/>Timestamp
             </td>
             <td class="entry_type">
@@ -12184,6 +12291,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used for HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -12266,6 +12381,10 @@
 </code></pre>
 <p>For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
 also be set to EXTERNAL.<wbr/> The above code is not relevant in such case.<wbr/></p>
+<p>This tag is also used to describe the orientation of the HEIC image capture,<wbr/> in which
+case the rotation is reflected by
+<a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>,<wbr/> and not by
+rotating the image data itself.<wbr/></p>
             </td>
           </tr>
 
@@ -12320,7 +12439,8 @@
           </tr>
           <tr class="entry_cont">
             <td class="entry_details" colspan="6">
-              <p>85-95 is typical usage range.<wbr/></p>
+              <p>85-95 is the typical usage range.<wbr/> This tag is also used to describe the quality
+of the HEIC image capture.<wbr/></p>
             </td>
           </tr>
 
@@ -12392,7 +12512,7 @@
                 
           <tr class="entry" id="dynamic_android.jpeg.thumbnailQuality">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>jpeg.<wbr/>thumbnail<wbr/>Quality
             </td>
             <td class="entry_type">
@@ -12431,6 +12551,14 @@
             </td>
 
           </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This tag is also used to describe the quality of the HEIC image capture.<wbr/></p>
+            </td>
+          </tr>
 
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
@@ -12508,6 +12636,10 @@
   orientation is requested.<wbr/> LEGACY device will always report unrotated thumbnail
   size.<wbr/></li>
 </ul>
+<p>The tag is also used as the thumbnail size for HEIC image format capture,<wbr/> in which case
+the thumbnail rotation is reflected by
+<a href="https://developer.android.com/reference/android/media/ExifInterface.html#TAG_ORIENTATION">EXIF orientation flag</a>,<wbr/> and not by
+rotating the thumbnail data itself.<wbr/></p>
             </td>
           </tr>
 
@@ -12697,7 +12829,7 @@
                 
           <tr class="entry" id="controls_android.lens.focalLength">
             <td class="entry_name
-             " rowspan="3">
+             " rowspan="5">
               android.<wbr/>lens.<wbr/>focal<wbr/>Length
             </td>
             <td class="entry_type">
@@ -12753,6 +12885,19 @@
             </td>
           </tr>
 
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>For a logical camera device supporting both optical and digital zoom,<wbr/> if focalLength and
+cropRegion change in the same request,<wbr/> the camera device must make sure that the new
+focalLength and cropRegion take effect in the same frame.<wbr/> This is to make sure that there
+is no visible field-of-view jump during zoom.<wbr/> For example,<wbr/> if cropRegion is applied
+immediately,<wbr/> but focalLength takes more than 1 frame to take effect,<wbr/> the camera device
+will delay the cropRegion so that it's synchronized with focalLength.<wbr/></p>
+            </td>
+          </tr>
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
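
From the application side, the synchronization requirement in the HAL note above matters when optical and digital zoom are driven together; a minimal Java sketch that updates both controls in a single request (helper names are illustrative):

    import android.graphics.Rect;
    import android.hardware.camera2.CaptureRequest;

    class ZoomHelper {
        static void applyZoomStep(CaptureRequest.Builder builder,
                float focalLengthMm, Rect cropRegion) {
            // Both controls change in the same request, so per the HAL note they
            // must take effect in the same frame, avoiding a field-of-view jump.
            builder.set(CaptureRequest.LENS_FOCAL_LENGTH, focalLengthMm);
            builder.set(CaptureRequest.SCALER_CROP_REGION, cropRegion);
        }
    }
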
@@ -14224,7 +14369,7 @@
                 
           <tr class="entry" id="dynamic_android.lens.focalLength">
             <td class="entry_name
-             " rowspan="3">
+             " rowspan="5">
               android.<wbr/>lens.<wbr/>focal<wbr/>Length
             </td>
             <td class="entry_type">
@@ -14280,6 +14425,19 @@
             </td>
           </tr>
 
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>For a logical camera device supporting both optical and digital zoom,<wbr/> if focalLength and
+cropRegion change in the same request,<wbr/> the camera device must make sure that the new
+focalLength and cropRegion take effect in the same frame.<wbr/> This is to make sure that there
+is no visible field-of-view jump during zoom.<wbr/> For example,<wbr/> if cropRegion is applied
+immediately,<wbr/> but focalLength takes more than 1 frame to take effect,<wbr/> the camera device
+will delay the cropRegion so that it's synchronized with focalLength.<wbr/></p>
+            </td>
+          </tr>
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
@@ -16386,7 +16544,7 @@
   <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#RAW12">RAW12</a>.<wbr/></li>
 <li>Processed (but not-stalling): any non-RAW format without a stall duration.<wbr/>  Typically
   <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">YUV_<wbr/>420_<wbr/>888</a>,<wbr/>
-  <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#NV21">NV21</a>,<wbr/> or <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12">YV12</a>.<wbr/></li>
+  <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#NV21">NV21</a>,<wbr/> <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12">YV12</a>,<wbr/> or <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">Y8</a> .<wbr/></li>
 </ul>
             </td>
           </tr>
@@ -16523,6 +16681,7 @@
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#NV21">NV21</a></li>
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YV12">YV12</a></li>
 <li>Implementation-defined formats,<wbr/> i.<wbr/>e.<wbr/> <a href="https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap.html#isOutputSupportedFor(Class)">StreamConfigurationMap#isOutputSupportedFor(Class)</a></li>
+<li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">Y8</a></li>
 </ul>
 <p>For full guarantees,<wbr/> query <a href="https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap.html#getOutputStallDuration">StreamConfigurationMap#getOutputStallDuration</a> with a
 processed format -- it will return 0 for a non-stalling stream.<wbr/></p>
@@ -16949,7 +17108,11 @@
 but this capability only covers the above list of controls.<wbr/></p>
 <p>If this is supported,<wbr/> <a href="#static_android.scaler.streamConfigurationMap">android.<wbr/>scaler.<wbr/>stream<wbr/>Configuration<wbr/>Map</a> will
 additionally return a min frame duration that is greater than
-zero for each supported size-format combination.<wbr/></p></span>
+zero for each supported size-format combination.<wbr/></p>
+<p>For camera devices with LOGICAL_<wbr/>MULTI_<wbr/>CAMERA capability,<wbr/> when the underlying active
+physical camera switches,<wbr/> exposureTime,<wbr/> sensitivity,<wbr/> and lens properties may change
+even if AE/<wbr/>AF is locked.<wbr/> However,<wbr/> the overall auto exposure and auto focus experience
+for users will be consistent.<wbr/> Refer to LOGICAL_<wbr/>MULTI_<wbr/>CAMERA capability for details.<wbr/></p></span>
                   </li>
                   <li>
                     <span class="entry_type_enum_name">MANUAL_POST_PROCESSING (v3.2)</span>
@@ -16995,7 +17158,11 @@
 <p>If auto white balance is enabled,<wbr/> then the camera device
 will accurately report the values applied by AWB in the result.<wbr/></p>
 <p>A given camera device may also support additional post-processing
-controls,<wbr/> but this capability only covers the above list of controls.<wbr/></p></span>
+controls,<wbr/> but this capability only covers the above list of controls.<wbr/></p>
+<p>For camera devices with LOGICAL_<wbr/>MULTI_<wbr/>CAMERA capability,<wbr/> when the underlying active
+physical camera switches,<wbr/> tonemap,<wbr/> white balance,<wbr/> and shading map may change even if
+AWB is locked.<wbr/> However,<wbr/> the overall post-processing experience for users will be
+consistent.<wbr/> Refer to LOGICAL_<wbr/>MULTI_<wbr/>CAMERA capability for details.<wbr/></p></span>
                   </li>
                   <li>
                     <span class="entry_type_enum_name">RAW (v3.2)</span>
@@ -17032,6 +17199,8 @@
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a> will be reprocessable into both
   <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">Image<wbr/>Format#YUV_<wbr/>420_<wbr/>888</a> and
   <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">ImageFormat#JPEG</a> formats.<wbr/></li>
+<li>For a MONOCHROME camera supporting Y8 format,<wbr/> <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a> will be reprocessable into
+  <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a>.<wbr/></li>
 <li>The maximum available resolution for PRIVATE streams
   (both input/<wbr/>output) will match the maximum available
   resolution of JPEG streams.<wbr/></li>
@@ -17078,10 +17247,10 @@
                     <span class="entry_type_enum_name">BURST_CAPTURE (v3.2)</span>
                     <span class="entry_type_enum_optional">[optional]</span>
                     <span class="entry_type_enum_notes"><p>The camera device supports capturing high-resolution images at &gt;= 20 frames per
-second,<wbr/> in at least the uncompressed YUV format,<wbr/> when post-processing settings are set
-to FAST.<wbr/> Additionally,<wbr/> maximum-resolution images can be captured at &gt;= 10 frames
-per second.<wbr/>  Here,<wbr/> 'high resolution' means at least 8 megapixels,<wbr/> or the maximum
-resolution of the device,<wbr/> whichever is smaller.<wbr/></p></span>
+second,<wbr/> in at least the uncompressed YUV format,<wbr/> when post-processing settings are
+set to FAST.<wbr/> Additionally,<wbr/> all image resolutions less than 24 megapixels can be
+captured at &gt;= 10 frames per second.<wbr/> Here,<wbr/> 'high resolution' means at least 8
+megapixels,<wbr/> or the maximum resolution of the device,<wbr/> whichever is smaller.<wbr/></p></span>
                   </li>
                   <li>
                     <span class="entry_type_enum_name">YUV_REPROCESSING (v3.2)</span>
@@ -17102,6 +17271,8 @@
   <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">Image<wbr/>Format#YUV_<wbr/>420_<wbr/>888</a> and <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">ImageFormat#JPEG</a> formats.<wbr/></li>
 <li>The maximum available resolution for <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">Image<wbr/>Format#YUV_<wbr/>420_<wbr/>888</a> streams (both input/<wbr/>output) will match the
   maximum available resolution of <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">ImageFormat#JPEG</a> streams.<wbr/></li>
+<li>For a MONOCHROME camera with Y8 format support,<wbr/> all the requirements mentioned
+  above for YUV_<wbr/>420_<wbr/>888 apply for Y8 format as well.<wbr/></li>
 <li>Static metadata <a href="#static_android.reprocess.maxCaptureStall">android.<wbr/>reprocess.<wbr/>max<wbr/>Capture<wbr/>Stall</a>.<wbr/></li>
 <li>Only the below controls are effective for reprocessing requests and will be present
   in capture results.<wbr/> The reprocess requests are from the original capture results
@@ -17139,8 +17310,8 @@
 <li>The <a href="#static_android.depth.depthIsExclusive">android.<wbr/>depth.<wbr/>depth<wbr/>Is<wbr/>Exclusive</a> entry is listed by this device.<wbr/></li>
 <li>As of Android P,<wbr/> the <a href="#static_android.lens.poseReference">android.<wbr/>lens.<wbr/>pose<wbr/>Reference</a> entry is listed by this device.<wbr/></li>
 <li>A LIMITED camera with only the DEPTH_<wbr/>OUTPUT capability does not have to support
-  normal YUV_<wbr/>420_<wbr/>888,<wbr/> JPEG,<wbr/> and PRIV-format outputs.<wbr/> It only has to support the DEPTH16
-  format.<wbr/></li>
+  normal YUV_<wbr/>420_<wbr/>888,<wbr/> Y8,<wbr/> JPEG,<wbr/> and PRIV-format outputs.<wbr/> It only has to support the
+  DEPTH16 format.<wbr/></li>
 </ul>
 <p>Generally,<wbr/> depth output operates at a slower frame rate than standard color capture,<wbr/>
 so the DEPTH16 and DEPTH_<wbr/>POINT_<wbr/>CLOUD formats will commonly have a stall duration that
@@ -17230,8 +17401,23 @@
                   <li>
                     <span class="entry_type_enum_name">LOGICAL_MULTI_CAMERA (v3.3)</span>
                     <span class="entry_type_enum_optional">[optional]</span>
-                    <span class="entry_type_enum_notes"><p>The camera device is a logical camera backed by two or more physical cameras that are
-also exposed to the application.<wbr/></p>
+                    <span class="entry_type_enum_notes"><p>The camera device is a logical camera backed by two or more physical cameras.<wbr/></p>
+<p>In API level 28,<wbr/> the physical cameras must also be exposed to the application via
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>.<wbr/></p>
+<p>Starting from API level 29,<wbr/> some or all physical cameras may not be independently
+exposed to the application,<wbr/> in which case the physical camera IDs will not be
+available in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>.<wbr/> But the
+application can still query the physical cameras' characteristics by calling
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>.<wbr/> Additionally,<wbr/>
+if a physical camera is hidden from the camera ID list,<wbr/> the mandatory stream combinations
+for that physical camera must be supported through the logical camera using physical
+streams.<wbr/></p>
+<p>Combinations of logical and physical streams,<wbr/> or physical streams from different
+physical cameras are not guaranteed.<wbr/> However,<wbr/> if the camera device supports
+<a href="https://developer.android.com/reference/CameraDevice.html#isSessionConfigurationSupported">CameraDevice#isSessionConfigurationSupported</a>,<wbr/>
+the application must be able to query whether a stream combination involving physical
+streams is supported by calling
+<a href="https://developer.android.com/reference/CameraDevice.html#isSessionConfigurationSupported">CameraDevice#isSessionConfigurationSupported</a>.<wbr/></p>
 <p>Camera application shouldn't assume that there are at most 1 rear camera and 1 front
 camera in the system.<wbr/> For an application that switches between front and back cameras,<wbr/>
 the recommendation is to switch between the first rear camera and the first front
@@ -17254,32 +17440,95 @@
 </li>
 <li>The SENSOR_<wbr/>INFO_<wbr/>TIMESTAMP_<wbr/>SOURCE of the logical device and physical devices must be
   the same.<wbr/></li>
-<li>The logical camera device must be LIMITED or higher device.<wbr/></li>
+<li>The logical camera must be a LIMITED or higher device.<wbr/></li>
 </ul>
-<p>Both the logical camera device and its underlying physical devices support the
-mandatory stream combinations required for their device levels.<wbr/></p>
-<p>Additionally,<wbr/> for each guaranteed stream combination,<wbr/> the logical camera supports:</p>
+<p>A logical camera device's dynamic metadata may contain
+<a href="#dynamic_android.logicalMultiCamera.activePhysicalId">android.<wbr/>logical<wbr/>Multi<wbr/>Camera.<wbr/>active<wbr/>Physical<wbr/>Id</a> to notify the application of the current
+active physical camera Id.<wbr/> An active physical camera is the physical camera from which
+the logical camera's main image data outputs (YUV or RAW) and metadata come.<wbr/>
+In addition,<wbr/> this serves as an indication of which physical camera is used to output to
+a RAW stream,<wbr/> or,<wbr/> in case only physical cameras support RAW,<wbr/> which physical RAW stream
+the application should request (see the sketch after this enum list).<wbr/></p>
+<p>Logical camera's static metadata tags below describe the default active physical
+camera.<wbr/> An active physical camera is the default if it is the one used when the application
+directly uses requests built from a template.<wbr/> All templates will default to the same active
+physical camera.<wbr/></p>
 <ul>
-<li>For each guaranteed stream combination,<wbr/> the logical camera supports replacing one
-  logical <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">YUV_<wbr/>420_<wbr/>888</a>
-  or raw stream with two physical streams of the same size and format,<wbr/> each from a
-  separate physical camera,<wbr/> given that the size and format are supported by both
-  physical cameras.<wbr/></li>
-<li>If the logical camera doesn't advertise RAW capability,<wbr/> but the underlying physical
-  cameras do,<wbr/> the logical camera will support guaranteed stream combinations for RAW
-  capability,<wbr/> except that the RAW streams will be physical streams,<wbr/> each from a separate
-  physical camera.<wbr/> This is usually the case when the physical cameras have different
-  sensor sizes.<wbr/></li>
+<li><a href="#static_android.sensor.info.sensitivityRange">android.<wbr/>sensor.<wbr/>info.<wbr/>sensitivity<wbr/>Range</a></li>
+<li><a href="#static_android.sensor.info.colorFilterArrangement">android.<wbr/>sensor.<wbr/>info.<wbr/>color<wbr/>Filter<wbr/>Arrangement</a></li>
+<li><a href="#static_android.sensor.info.exposureTimeRange">android.<wbr/>sensor.<wbr/>info.<wbr/>exposure<wbr/>Time<wbr/>Range</a></li>
+<li><a href="#static_android.sensor.info.maxFrameDuration">android.<wbr/>sensor.<wbr/>info.<wbr/>max<wbr/>Frame<wbr/>Duration</a></li>
+<li><a href="#static_android.sensor.info.physicalSize">android.<wbr/>sensor.<wbr/>info.<wbr/>physical<wbr/>Size</a></li>
+<li><a href="#static_android.sensor.info.whiteLevel">android.<wbr/>sensor.<wbr/>info.<wbr/>white<wbr/>Level</a></li>
+<li><a href="#static_android.sensor.info.lensShadingApplied">android.<wbr/>sensor.<wbr/>info.<wbr/>lens<wbr/>Shading<wbr/>Applied</a></li>
+<li><a href="#static_android.sensor.referenceIlluminant1">android.<wbr/>sensor.<wbr/>reference<wbr/>Illuminant1</a></li>
+<li><a href="#static_android.sensor.referenceIlluminant2">android.<wbr/>sensor.<wbr/>reference<wbr/>Illuminant2</a></li>
+<li><a href="#static_android.sensor.calibrationTransform1">android.<wbr/>sensor.<wbr/>calibration<wbr/>Transform1</a></li>
+<li><a href="#static_android.sensor.calibrationTransform2">android.<wbr/>sensor.<wbr/>calibration<wbr/>Transform2</a></li>
+<li><a href="#static_android.sensor.colorTransform1">android.<wbr/>sensor.<wbr/>color<wbr/>Transform1</a></li>
+<li><a href="#static_android.sensor.colorTransform2">android.<wbr/>sensor.<wbr/>color<wbr/>Transform2</a></li>
+<li><a href="#static_android.sensor.forwardMatrix1">android.<wbr/>sensor.<wbr/>forward<wbr/>Matrix1</a></li>
+<li><a href="#static_android.sensor.forwardMatrix2">android.<wbr/>sensor.<wbr/>forward<wbr/>Matrix2</a></li>
+<li><a href="#static_android.sensor.blackLevelPattern">android.<wbr/>sensor.<wbr/>black<wbr/>Level<wbr/>Pattern</a></li>
+<li><a href="#static_android.sensor.maxAnalogSensitivity">android.<wbr/>sensor.<wbr/>max<wbr/>Analog<wbr/>Sensitivity</a></li>
+<li><a href="#static_android.sensor.opticalBlackRegions">android.<wbr/>sensor.<wbr/>optical<wbr/>Black<wbr/>Regions</a></li>
+<li><a href="#static_android.sensor.availableTestPatternModes">android.<wbr/>sensor.<wbr/>available<wbr/>Test<wbr/>Pattern<wbr/>Modes</a></li>
+<li><a href="#static_android.lens.info.hyperfocalDistance">android.<wbr/>lens.<wbr/>info.<wbr/>hyperfocal<wbr/>Distance</a></li>
+<li><a href="#static_android.lens.info.minimumFocusDistance">android.<wbr/>lens.<wbr/>info.<wbr/>minimum<wbr/>Focus<wbr/>Distance</a></li>
+<li><a href="#static_android.lens.info.focusDistanceCalibration">android.<wbr/>lens.<wbr/>info.<wbr/>focus<wbr/>Distance<wbr/>Calibration</a></li>
+<li><a href="#static_android.lens.poseRotation">android.<wbr/>lens.<wbr/>pose<wbr/>Rotation</a></li>
+<li><a href="#static_android.lens.poseTranslation">android.<wbr/>lens.<wbr/>pose<wbr/>Translation</a></li>
+<li><a href="#static_android.lens.intrinsicCalibration">android.<wbr/>lens.<wbr/>intrinsic<wbr/>Calibration</a></li>
+<li><a href="#static_android.lens.poseReference">android.<wbr/>lens.<wbr/>pose<wbr/>Reference</a></li>
+<li><a href="#static_android.lens.distortion">android.<wbr/>lens.<wbr/>distortion</a></li>
 </ul>
-<p>Using physical streams in place of a logical stream of the same size and format will
-not slow down the frame rate of the capture,<wbr/> as long as the minimum frame duration
-of the physical and logical streams are the same.<wbr/></p></span>
+<p>The field of view of all non-RAW physical streams must be the same or as close as
+possible to that of non-RAW logical streams.<wbr/> If the requested FOV is outside of the
+range supported by the physical camera,<wbr/> the physical stream for that physical camera
+will use either the maximum or minimum scaler crop region,<wbr/> depending on which one is
+closer to the requested FOV.<wbr/> For example,<wbr/> for a logical camera with wide-tele lens
+configuration where the wide lens is the default,<wbr/> if the logical camera's crop region
+is set to maximum,<wbr/> the physical stream for the tele lens will be configured to its
+maximum crop region.<wbr/> On the other hand,<wbr/> if the logical camera has a normal-wide lens
+configuration where the normal lens is the default,<wbr/> when the logical camera's crop
+region is set to maximum,<wbr/> the FOV of the logical streams will be that of the normal
+lens.<wbr/> The FOV of the physical streams for the wide lens will be the same as the
+logical stream,<wbr/> by making the crop region smaller than its active array size to
+compensate for the smaller focal length.<wbr/></p>
+<p>Even if the underlying physical cameras have different RAW characteristics (such as
+size or CFA pattern),<wbr/> a logical camera can still advertise RAW capability.<wbr/> In this
+case,<wbr/> when the application configures a RAW stream,<wbr/> the camera device will make sure
+the active physical camera will remain active to ensure consistent RAW output
+behavior,<wbr/> and not switch to other physical cameras.<wbr/></p>
+<p>The capture request and result metadata tags required for backward compatible camera
+functionalities will be based solely on the logical camera capability.<wbr/> On the other
+hand,<wbr/> the use of manual capture controls (sensor or post-processing) with a
+logical camera may result in unexpected behavior when the HAL decides to switch
+between physical cameras with different characteristics under the hood.<wbr/> For example,<wbr/>
+when the application manually sets exposure time and sensitivity while zooming in,<wbr/>
+the brightness of the camera images may suddenly change because HAL switches from one
+physical camera to the other.<wbr/></p></span>
                   </li>
                   <li>
                     <span class="entry_type_enum_name">MONOCHROME (v3.3)</span>
                     <span class="entry_type_enum_optional">[optional]</span>
                     <span class="entry_type_enum_notes"><p>The camera device is a monochrome camera that doesn't contain a color filter array,<wbr/>
-and the pixel values on U and V planes are all 128.<wbr/></p></span>
+and,<wbr/> for a YUV_<wbr/>420_<wbr/>888 stream,<wbr/> the pixel values on the U and V planes are all 128.<wbr/></p>
+<p>A MONOCHROME camera must support the guaranteed stream combinations required for
+its device level and capabilities.<wbr/> Additionally,<wbr/> if the monochrome camera device
+supports Y8 format,<wbr/> all mandatory stream combination requirements related to <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">YUV_<wbr/>420_<wbr/>888</a> apply
+to <a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">Y8</a> as well.<wbr/> There are no
+mandatory stream combination requirements with regard to
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">Y8</a> for Bayer camera devices.<wbr/></p>
+<p>Starting from Android Q,<wbr/> the SENSOR_<wbr/>INFO_<wbr/>COLOR_<wbr/>FILTER_<wbr/>ARRANGEMENT of a MONOCHROME
+camera will be either MONO or NIR.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">SECURE_IMAGE_DATA (v3.4)</span>
+                    <span class="entry_type_enum_optional">[optional]</span>
+                    <span class="entry_type_enum_notes"><p>The camera device is capable of writing image data into a region of memory
+inaccessible to Android userspace or the Android kernel,<wbr/> and only accessible to
+trusted execution environments (TEE).<wbr/></p></span>
                   </li>
                 </ul>
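
As referenced above for LOGICAL_MULTI_CAMERA, a minimal Java sketch of tracking the active physical camera from capture results (the class name is illustrative; the key is null when the device does not report it):

    import android.hardware.camera2.CameraCaptureSession;
    import android.hardware.camera2.CaptureRequest;
    import android.hardware.camera2.CaptureResult;
    import android.hardware.camera2.TotalCaptureResult;

    class ActivePhysicalIdTracker extends CameraCaptureSession.CaptureCallback {
        @Override
        public void onCaptureCompleted(CameraCaptureSession session,
                CaptureRequest request, TotalCaptureResult result) {
            // The physical camera currently producing the logical camera's main
            // image data and metadata.
            String activeId =
                    result.get(CaptureResult.LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID);
            // The ID can change between frames, e.g. when zoom crosses the
            // switch-over point between wide and tele physical cameras.
        }
    }
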
 
@@ -17370,18 +17619,35 @@
 It's recommended that its feature set is superset of that of individual physical cameras.<wbr/></p>
 <ul>
 <li>
-<p>In camera1 API,<wbr/> to maintain application compatibility,<wbr/> for each {logical_<wbr/>camera_<wbr/>id,<wbr/>
-physical_<wbr/>camera_<wbr/>1_<wbr/>id,<wbr/> physical_<wbr/>camera_<wbr/>2_<wbr/>id,<wbr/> ...<wbr/>} combination,<wbr/> where logical_<wbr/>camera_<wbr/>id
-is composed of physical_<wbr/>camera_<wbr/>N_<wbr/>id,<wbr/> camera framework will only advertise one camera id
-(within the combination) that is frontmost in the HAL published camera id list.<wbr/></p>
+<p>In the camera1 API,<wbr/> to maintain application compatibility,<wbr/> for each camera facing there
+may be one or more {logical_<wbr/>camera_<wbr/>id,<wbr/> physical_<wbr/>camera_<wbr/>1_<wbr/>id,<wbr/> physical_<wbr/>camera_<wbr/>2_<wbr/>id,<wbr/> ...<wbr/>}
+combinations,<wbr/> where logical_<wbr/>camera_<wbr/>id is composed of physical_<wbr/>camera_<wbr/>N_<wbr/>id.<wbr/> The camera
+framework will only advertise one camera id
+(within the combinations for the particular facing) that is frontmost in the HAL
+published camera id list.<wbr/>
+For example,<wbr/> suppose the HAL advertises 6 back facing camera IDs (ID0 to ID5),<wbr/> among which
+ID4 and ID5 are logical cameras backed by ID0+ID1 and ID2+ID3 respectively.<wbr/> In this case,<wbr/>
+only ID0 will be available for the camera1 API to use.<wbr/></p>
 </li>
 <li>
 <p>Camera HAL is strongly recommended to advertise camera devices with best feature,<wbr/>
 power,<wbr/> performance,<wbr/> and latency tradeoffs at the front of the camera id list.<wbr/></p>
 </li>
+<li>
+<p>Camera HAL may switch between physical cameras depending on focalLength or cropRegion.<wbr/>
+If physical cameras have different sizes,<wbr/> HAL must maintain a single logical camera
+active<wbr/>Array<wbr/>Size/<wbr/>pixel<wbr/>Array<wbr/>Size/<wbr/>pre<wbr/>Correction<wbr/>Active<wbr/>Array<wbr/>Size,<wbr/> and must do proper mapping
+between logical camera and underlying physical cameras for all related metadata tags,<wbr/>
+such as crop region,<wbr/> 3A regions,<wbr/> and intrinsicCalibration.<wbr/></p>
+</li>
+<li>
+<p>Starting from HIDL ICameraDevice version 3.<wbr/>5,<wbr/> camera HAL must support
+isStreamCombinationSupported so that applications can query whether a particular combination
+of logical and physical streams is supported (see the sketch after this list).<wbr/></p>
+</li>
 </ul>
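
The sketch referenced in the list above: at the application level, the HAL's isStreamCombinationSupported surfaces as CameraDevice#isSessionConfigurationSupported. A minimal Java sketch with one logical and one physical output (helper names are illustrative):

    import android.hardware.camera2.CameraAccessException;
    import android.hardware.camera2.CameraCaptureSession;
    import android.hardware.camera2.CameraDevice;
    import android.hardware.camera2.params.OutputConfiguration;
    import android.hardware.camera2.params.SessionConfiguration;
    import android.view.Surface;
    import java.util.Arrays;
    import java.util.concurrent.Executor;

    class PhysicalStreamCheck {
        static boolean isCombinationSupported(CameraDevice device, Surface logicalOut,
                Surface physicalOut, String physicalId, Executor executor,
                CameraCaptureSession.StateCallback callback) throws CameraAccessException {
            OutputConfiguration logical = new OutputConfiguration(logicalOut);
            OutputConfiguration physical = new OutputConfiguration(physicalOut);
            // Route this output to one specific physical camera of the logical device.
            physical.setPhysicalCameraId(physicalId);
            SessionConfiguration config = new SessionConfiguration(
                    SessionConfiguration.SESSION_REGULAR,
                    Arrays.asList(logical, physical), executor, callback);
            // Queries support without configuring anything.
            return device.isSessionConfigurationSupported(config);
        }
    }
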
-<p>For MONOCHROME,<wbr/> the camera device must also advertise BACKWARD_<wbr/>COMPATIBLE capability,<wbr/> and
-it is exclusive of both RAW and MANUAL_<wbr/>POST_<wbr/>PROCESSING capabilities:</p>
+<p>A MONOCHROME camera device must also advertise BACKWARD_<wbr/>COMPATIBLE capability,<wbr/> and must
+not advertise MANUAL_<wbr/>POST_<wbr/>PROCESSING capability.<wbr/></p>
 <ul>
 <li>
 <p>To maintain backward compatibility,<wbr/> the camera device must support all
@@ -17390,18 +17656,18 @@
 <a href="#controls_android.control.awbLock">android.<wbr/>control.<wbr/>awb<wbr/>Lock</a>.<wbr/></p>
 </li>
 <li>
-<p>A monochrome device doesn't need to advertise DNG related optional metadata tags.<wbr/></p>
-</li>
-<li>
 <p><a href="#controls_android.colorCorrection.mode">android.<wbr/>color<wbr/>Correction.<wbr/>mode</a>,<wbr/> <a href="#controls_android.colorCorrection.transform">android.<wbr/>color<wbr/>Correction.<wbr/>transform</a>,<wbr/> and
-<a href="#controls_android.colorCorrection.gains">android.<wbr/>color<wbr/>Correction.<wbr/>gains</a> are not applicable.<wbr/> So the camera device cannot
-be a FULL device.<wbr/> However,<wbr/> the HAL can still advertise other individual capabilites.<wbr/></p>
+<a href="#controls_android.colorCorrection.gains">android.<wbr/>color<wbr/>Correction.<wbr/>gains</a> must not be in available request and result keys.<wbr/>
+As a result,<wbr/> the camera device cannot be a FULL device.<wbr/> However,<wbr/> the HAL can
+still advertise other individual capabilities.<wbr/></p>
 </li>
 <li>
 <p>If the device supports tonemap control,<wbr/> only <a href="#controls_android.tonemap.curveRed">android.<wbr/>tonemap.<wbr/>curve<wbr/>Red</a> is used.<wbr/>
 CurveGreen and curveBlue are no-ops.<wbr/></p>
 </li>
 </ul>
+<p>In Android API level 28,<wbr/> a MONOCHROME camera device must not have RAW capability.<wbr/> From
+API level 29,<wbr/> a camera is allowed to have both MONOCHROME and RAW capabilities.<wbr/></p>
             </td>
           </tr>
 
@@ -17770,7 +18036,7 @@
                 <span class="entry_type_array">
                   n
                 </span>
-              <span class="entry_type_visibility"> [hidden]</span>
+              <span class="entry_type_visibility"> [ndk_public]</span>
 
 
               <span class="entry_type_hwlevel">[limited] </span>
@@ -17838,6 +18104,74 @@
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
         
+                
+          <tr class="entry" id="static_android.request.characteristicKeysNeedingPermission">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>request.<wbr/>characteristic<wbr/>Keys<wbr/>Needing<wbr/>Permission
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n
+                </span>
+              <span class="entry_type_visibility"> [hidden]</span>
+
+
+              <span class="entry_type_hwlevel">[legacy] </span>
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>A list of camera characteristics keys that are only available
+when the camera client holds the camera permission.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The entry contains a subset of
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getKeys">CameraCharacteristics#getKeys</a> that require camera clients
+to acquire the <a href="https://developer.android.com/reference/android/Manifest/permission.html#CAMERA">permission#CAMERA</a> permission before calling
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraCharacteristics">CameraManager#getCameraCharacteristics</a>.<wbr/> If the
+permission is not held by the camera client,<wbr/> then the values of the respective properties
+will not be present in <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html">CameraCharacteristics</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly;<wbr/> the camera service will overwrite any previous values.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
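
A minimal Java sketch of the client-side behavior described by this entry: without the CAMERA permission, the permission-protected subset of keys is simply absent from the returned characteristics (helper names are illustrative):

    import android.Manifest;
    import android.content.Context;
    import android.content.pm.PackageManager;
    import android.hardware.camera2.CameraAccessException;
    import android.hardware.camera2.CameraCharacteristics;
    import android.hardware.camera2.CameraManager;

    class CharacteristicsAccess {
        static CameraCharacteristics query(Context ctx, String cameraId)
                throws CameraAccessException {
            CameraManager manager =
                    (CameraManager) ctx.getSystemService(Context.CAMERA_SERVICE);
            CameraCharacteristics chars = manager.getCameraCharacteristics(cameraId);
            boolean mayLackKeys = ctx.checkSelfPermission(Manifest.permission.CAMERA)
                    != PackageManager.PERMISSION_GRANTED;
            // When mayLackKeys is true, the permission-protected subset listed in
            // characteristicKeysNeedingPermission is omitted from chars.getKeys().
            return chars;
        }
    }
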
+        
         
 
       <!-- end of kind -->
@@ -18407,7 +18741,7 @@
                     <span class="entry_type_enum_notes"><p>RAW16 is a standard,<wbr/> cross-platform format for raw image
 buffers with 16-bit pixels.<wbr/></p>
 <p>Buffers of this format are typically expected to have a
-Bayer Color Filter Array (CFA) layout,<wbr/> which is given in
+Color Filter Array (CFA) layout,<wbr/> which is given in
 <a href="#static_android.sensor.info.colorFilterArrangement">android.<wbr/>sensor.<wbr/>info.<wbr/>color<wbr/>Filter<wbr/>Arrangement</a>.<wbr/> Sensors with
 CFAs that are not representable by a format in
 <a href="#static_android.sensor.info.colorFilterArrangement">android.<wbr/>sensor.<wbr/>info.<wbr/>color<wbr/>Filter<wbr/>Arrangement</a> should not
@@ -18480,6 +18814,21 @@
                     <span class="entry_type_enum_value">0x21</span>
                     <span class="entry_type_enum_notes"><p>JPEG format</p></span>
                   </li>
+                  <li>
+                    <span class="entry_type_enum_name">RAW10 (v3.4)</span>
+                    <span class="entry_type_enum_value">0x25</span>
+                    <span class="entry_type_enum_notes"><p>RAW10</p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">RAW12 (v3.4)</span>
+                    <span class="entry_type_enum_value">0x26</span>
+                    <span class="entry_type_enum_notes"><p>RAW12</p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">Y8 (v3.4)</span>
+                    <span class="entry_type_enum_value">0x20203859</span>
+                    <span class="entry_type_enum_notes"><p>Y8</p></span>
+                  </li>
                 </ul>
 
             </td> <!-- entry_type -->
@@ -18530,7 +18879,8 @@
 usually used by preview and recording streams,<wbr/> where the application doesn't
 need access the image data.<wbr/></p>
 <p>YCb<wbr/>Cr_<wbr/>420_<wbr/>888 format must be supported by the HAL.<wbr/> When an image stream
-needs CPU/<wbr/>application direct access,<wbr/> this format will be used.<wbr/></p>
+needs CPU/<wbr/>application direct access,<wbr/> this format will be used.<wbr/> For a MONOCHROME
+camera device,<wbr/> the pixel values of the Cb and Cr planes are all 128.<wbr/></p>
 <p>The BLOB format must be supported by the HAL.<wbr/> This is used for the JPEG stream.<wbr/></p>
 <p>A RAW_<wbr/>OPAQUE buffer should contain only pixel data.<wbr/> It is strongly
 recommended that any information used by the camera device when
@@ -19101,6 +19451,35 @@
 or output will never hurt maximum frame rate (i.<wbr/>e.<wbr/>  <a href="https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap.html#getOutputStallDuration">getOutputStallDuration(ImageFormat.<wbr/>PRIVATE,<wbr/> size)</a> is always 0),<wbr/></p>
 <p>Attempting to configure an input stream with output streams not
 listed as available in this map is not valid.<wbr/></p>
+<p>Additionally,<wbr/> if the camera device is MONOCHROME with Y8 support,<wbr/> it will also support
+the following map of formats if its dependent capability
+(<a href="#static_android.request.availableCapabilities">android.<wbr/>request.<wbr/>available<wbr/>Capabilities</a>) is supported:</p>
+<table>
+<thead>
+<tr>
+<th align="left">Input Format</th>
+<th align="left">Output Format</th>
+<th align="left">Capability</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a></td>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a></td>
+<td align="left">PRIVATE_<wbr/>REPROCESSING</td>
+</tr>
+<tr>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a></td>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">ImageFormat#JPEG</a></td>
+<td align="left">YUV_<wbr/>REPROCESSING</td>
+</tr>
+<tr>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a></td>
+<td align="left"><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a></td>
+<td align="left">YUV_<wbr/>REPROCESSING</td>
+</tr>
+</tbody>
+</table>
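
A minimal Java sketch of probing the reprocessing map above at runtime (helper names are illustrative):

    import android.graphics.ImageFormat;
    import android.hardware.camera2.CameraCharacteristics;
    import android.hardware.camera2.params.StreamConfigurationMap;

    class Y8ReprocessCheck {
        static boolean supportsY8ToJpegReprocessing(CameraCharacteristics chars) {
            StreamConfigurationMap map =
                    chars.get(CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
            if (map == null) return false;
            for (int input : map.getInputFormats()) {
                if (input != ImageFormat.Y8) continue;
                // Output formats reachable from a Y8 input stream (Y8 or JPEG
                // per the table above, under YUV_REPROCESSING).
                for (int output : map.getValidOutputFormatsForInput(ImageFormat.Y8)) {
                    if (output == ImageFormat.JPEG) return true;
                }
            }
            return false;
        }
    }
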
             </td>
           </tr>
 
@@ -19517,6 +19896,7 @@
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">Image<wbr/>Format#YUV_<wbr/>420_<wbr/>888</a></li>
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#RAW10">ImageFormat#RAW10</a></li>
 <li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#RAW12">ImageFormat#RAW12</a></li>
+<li><a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#Y8">ImageFormat#Y8</a></li>
 </ul>
 <p>All other formats may or may not have an allowed stall duration on
 a per-capability basis; refer to <a href="#static_android.request.availableCapabilities">android.<wbr/>request.<wbr/>available<wbr/>Capabilities</a>
@@ -19799,6 +20179,315 @@
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
         
+                
+          <tr class="entry" id="static_android.scaler.availableRecommendedStreamConfigurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>scaler.<wbr/>available<wbr/>Recommended<wbr/>Stream<wbr/>Configurations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name entry_type_name_enum">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n x 5
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as recommendedStreamConfiguration]</span>
+
+
+
+
+
+                <ul class="entry_type_enum">
+                  <li>
+                    <span class="entry_type_enum_name">PREVIEW (v3.4)</span>
+                    <span class="entry_type_enum_value">0x0</span>
+                    <span class="entry_type_enum_notes"><p>Preview must only include non-stalling processed stream configurations with
+output formats like
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#YUV_420_888">Image<wbr/>Format#YUV_<wbr/>420_<wbr/>888</a>,<wbr/>
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#PRIVATE">ImageFormat#PRIVATE</a>,<wbr/> etc.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">RECORD (v3.4)</span>
+                    <span class="entry_type_enum_value">0x1</span>
+                    <span class="entry_type_enum_notes"><p>Video record must include stream configurations that match the advertised
+supported media profiles <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a> with
+IMPLEMENTATION_<wbr/>DEFINED format.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">VIDEO_SNAPSHOT (v3.4)</span>
+                    <span class="entry_type_enum_value">0x2</span>
+                    <span class="entry_type_enum_notes"><p>Video snapshot must include stream configurations at least as big as
+the maximum RECORD resolutions and only with
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">JPEG output format</a>.<wbr/>
+Additionally,<wbr/> the configurations shouldn't cause preview glitches and should be able to
+run at 30 fps.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">SNAPSHOT (v3.4)</span>
+                    <span class="entry_type_enum_value">0x3</span>
+                    <span class="entry_type_enum_notes"><p>Recommended snapshot stream configurations must include at least one with
+size close to <a href="#static_android.sensor.info.activeArraySize">android.<wbr/>sensor.<wbr/>info.<wbr/>active<wbr/>Array<wbr/>Size</a> and
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">JPEG output format</a>.<wbr/>
+Taking into account restrictions on aspect ratio,<wbr/> alignment,<wbr/> etc.,<wbr/> the area of the
+maximum suggested size shouldn't be less than 97% of the sensor array size area.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">ZSL (v3.4)</span>
+                    <span class="entry_type_enum_value">0x4</span>
+                    <span class="entry_type_enum_notes"><p>If supported,<wbr/> recommended input stream configurations must only be advertised with
+ZSL along with other processed and/<wbr/>or stalling output formats.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">RAW (v3.4)</span>
+                    <span class="entry_type_enum_value">0x5</span>
+                    <span class="entry_type_enum_notes"><p>If supported,<wbr/> recommended raw stream configurations must only include RAW based
+output formats.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">LOW_LATENCY_SNAPSHOT (v3.4)</span>
+                    <span class="entry_type_enum_value">0x6</span>
+                    <span class="entry_type_enum_notes"><p>If supported,<wbr/> the recommended low latency stream configurations must have
+end-to-end latency that does not exceed 200 ms under standard operating conditions
+(reasonable light levels,<wbr/> system not under load) and using template
+TEMPLATE_<wbr/>STILL_<wbr/>CAPTURE.<wbr/> This is primarily for listing configurations for the
+<a href="https://developer.android.com/reference/android/graphics/ImageFormat.html#JPEG">JPEG output format</a>;<wbr/>
+however,<wbr/> other supported output formats can be added as well.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">PUBLIC_END (v3.4)</span>
+                    <span class="entry_type_enum_value">0x7</span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">VENDOR_START (v3.4)</span>
+                    <span class="entry_type_enum_value">0x18</span>
+                    <span class="entry_type_enum_notes"><p>Vendor defined use cases.<wbr/> These depend on the vendor implementation.<wbr/></p></span>
+                  </li>
+                </ul>
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>Recommended stream configurations for common client use cases.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Optional subset of the <a href="#static_android.scaler.availableStreamConfigurations">android.<wbr/>scaler.<wbr/>available<wbr/>Stream<wbr/>Configurations</a> that contains
+similar tuples,<wbr/> listed as
+(width,<wbr/> height,<wbr/> format,<wbr/> output/<wbr/>input stream,<wbr/> usecase bit field) entries.<wbr/>
+Camera devices can use this to suggest particular stream configurations that are
+power- and performance-efficient for specific use cases.<wbr/> For more information about
+retrieving the suggestions see
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The following requirements apply to the use cases and the
+suggested configurations:</p>
+<ul>
+<li>If <a href="#static_android.scaler.availableRecommendedStreamConfigurations">android.<wbr/>scaler.<wbr/>available<wbr/>Recommended<wbr/>Stream<wbr/>Configurations</a> is set,<wbr/> then recommended
+stream configurations must be present for all of the mandatory use cases PREVIEW,<wbr/>
+SNAPSHOT,<wbr/> RECORD,<wbr/> and VIDEO_<wbr/>SNAPSHOT.<wbr/> ZSL and RAW are
+required depending on device capabilities; see <a href="#static_android.request.availableCapabilities">android.<wbr/>request.<wbr/>available<wbr/>Capabilities</a>.<wbr/></li>
+<li>Non-existent use cases and non-vendor use cases within the range
+(RAW : VENDOR_<wbr/>START] are prohibited,<wbr/> as are stream configurations not
+present in the exhaustive <a href="#static_android.scaler.availableStreamConfigurations">android.<wbr/>scaler.<wbr/>available<wbr/>Stream<wbr/>Configurations</a> list.<wbr/></li>
+</ul>
+<p>For example,<wbr/> suppose the camera device supports only 4K and 1080p,<wbr/> and both resolutions
+are recommended for all mandatory use cases except preview,<wbr/> which can run efficiently only
+at 1080p.<wbr/> The array may then look like this:</p>
+<p>[3840,<wbr/> 2160,<wbr/> HAL_<wbr/>PIXEL_<wbr/>FORMAT_<wbr/>IMPLEMENTATION_<wbr/>DEFINED,<wbr/>
+ ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>OUTPUT,<wbr/>
+ (1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>RECORD |<wbr/>
+ 1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>SNAPSHOT |<wbr/>
+ 1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>VIDEO_<wbr/>SNAPSHOT),<wbr/></p>
+<p>1920,<wbr/> 1080,<wbr/> HAL_<wbr/>PIXEL_<wbr/>FORMAT_<wbr/>IMPLEMENTATION_<wbr/>DEFINED,<wbr/>
+ ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>OUTPUT,<wbr/>
+ (1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>PREVIEW |<wbr/>
+ 1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>RECORD |<wbr/>
+ 1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>SNAPSHOT |<wbr/>
+ 1&lt;&lt; ANDROID_<wbr/>SCALER_<wbr/>AVAILABLE_<wbr/>RECOMMENDED_<wbr/>STREAM_<wbr/>CONFIGURATIONS_<wbr/>VIDEO_<wbr/>SNAPSHOT)]</p>
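+<p>As a non-normative sketch,<wbr/> a HAL could encode the example above as a flat int32_t
+array.<wbr/> The header paths below are the ones commonly found in AOSP; how the array is
+published into the static metadata is left out:</p>
+<pre><code>#include &lt;stdint.h&gt;
+#include &lt;system/camera_metadata_tags.h&gt;  // ANDROID_SCALER_* enums
+#include &lt;system/graphics.h&gt;              // HAL_PIXEL_FORMAT_*
+
+// Sketch only: the 4K/1080p example encoded as
+// (width, height, format, output/input, usecase bit field) tuples.
+static const int32_t kRecommendedConfigs[] = {
+    3840, 2160, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD) |
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT) |
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT),
+
+    1920, 1080, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+    ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW) |
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD) |
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT) |
+    (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT),
+};
+</code></pre>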
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.scaler.availableRecommendedInputOutputFormatsMap">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>scaler.<wbr/>available<wbr/>Recommended<wbr/>Input<wbr/>Output<wbr/>Formats<wbr/>Map
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int32</span>
+
+              <span class="entry_type_visibility"> [ndk_public as reprocessFormatsMap]</span>
+
+
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>Recommended mappings of image formats that are supported by this
+camera device for input streams,<wbr/> to their corresponding output formats.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_REPROC">REPROC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This is a recommended subset of the complete list of mappings found in
+<a href="#static_android.scaler.availableInputOutputFormatsMap">android.<wbr/>scaler.<wbr/>available<wbr/>Input<wbr/>Output<wbr/>Formats<wbr/>Map</a>.<wbr/> The same requirements apply here as well.<wbr/>
+The list,<wbr/> however,<wbr/> doesn't need to contain all available and supported mappings.<wbr/> Instead,<wbr/>
+developers must list only recommended and efficient entries.<wbr/>
+If set,<wbr/> the information will be available in the ZERO_<wbr/>SHUTTER_<wbr/>LAG recommended stream
+configuration; see
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>For a code sample of the required data encoding,<wbr/> please see
+<a href="#static_android.scaler.availableInputOutputFormatsMap">android.<wbr/>scaler.<wbr/>available<wbr/>Input<wbr/>Output<wbr/>Formats<wbr/>Map</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.scaler.mandatoryStreamCombinations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>scaler.<wbr/>mandatory<wbr/>Stream<wbr/>Combinations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n
+                </span>
+              <span class="entry_type_visibility"> [java_public as mandatoryStreamCombination]</span>
+
+              <span class="entry_type_synthetic">[synthetic] </span>
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>An array of mandatory stream combinations generated according to the camera device
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#INFO_SUPPORTED_HARDWARE_LEVEL">Camera<wbr/>Characteristics#INFO_<wbr/>SUPPORTED_<wbr/>HARDWARE_<wbr/>LEVEL</a>
+and <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#REQUEST_AVAILABLE_CAPABILITIES">Camera<wbr/>Characteristics#REQUEST_<wbr/>AVAILABLE_<wbr/>CAPABILITIES</a>.<wbr/>
+This is an app-readable conversion of the mandatory stream combination
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureSession">tables</a>.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>2</p>
+            </td>
+
+            <td class="entry_tags">
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The array of
+<a href="https://developer.android.com/reference/android/hardware/camera2/params/MandatoryStreamCombination.html">combinations</a> is
+generated according to the documented
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureSession">guideline</a> based on
+specific device level and capabilities.<wbr/>
+Clients can use the array as a quick reference to find an appropriate camera stream
+combination.<wbr/>
+As per the documentation,<wbr/> the stream combinations with the given PREVIEW,<wbr/> RECORD,<wbr/> and
+MAXIMUM resolutions,<wbr/> and anything smaller from the list given by
+<a href="https://developer.android.com/reference/android/hardware/camera2/params/StreamConfigurationMap.html#getOutputSizes">StreamConfigurationMap#getOutputSizes</a> are
+guaranteed to work.<wbr/>
+For a physical camera not independently exposed in
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#getCameraIdList">CameraManager#getCameraIdList</a>,<wbr/> the mandatory stream
+combinations for that physical camera ID are also generated,<wbr/> so that the application can
+configure them as physical streams via the logical camera.<wbr/>
+The mandatory stream combination array will be {@code null} if the device is not
+backward compatible.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly
+(it is synthetic and will not be available at the HAL layer).<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
         
 
       <!-- end of kind -->
@@ -20685,7 +21374,7 @@
                 
           <tr class="entry" id="static_android.sensor.info.colorFilterArrangement">
             <td class="entry_name
-             " rowspan="1">
+             " rowspan="3">
               android.<wbr/>sensor.<wbr/>info.<wbr/>color<wbr/>Filter<wbr/>Arrangement
             </td>
             <td class="entry_type">
@@ -20717,6 +21406,19 @@
 values for each pixel,<wbr/> instead of just 1 16-bit value
 per pixel.<wbr/></p></span>
                   </li>
+                  <li>
+                    <span class="entry_type_enum_name">MONO (v3.4)</span>
+                    <span class="entry_type_enum_notes"><p>Sensor doesn't have any Bayer color filter.<wbr/>
+Such a sensor captures visible light in monochrome.<wbr/> The exact weighting and
+wavelengths captured are not specified,<wbr/> but generally include only the visible
+frequencies.<wbr/> This value implies a MONOCHROME camera.<wbr/></p></span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">NIR (v3.4)</span>
+                    <span class="entry_type_enum_notes"><p>Sensor has a near infrared filter capturing light with wavelength between
+roughly 750nm and 1400nm,<wbr/> and the same filter covers the whole sensor array.<wbr/> This
+value implies a MONOCHROME camera.<wbr/></p></span>
+                  </li>
                 </ul>
 
             </td> <!-- entry_type -->
@@ -20724,7 +21426,8 @@
             <td class="entry_description">
               <p>The arrangement of color filters on sensor;
 represents the colors in the top-left 2x2 section of
-the sensor,<wbr/> in reading order.<wbr/></p>
+the sensor,<wbr/> in reading order,<wbr/> for a Bayer camera,<wbr/> or the
+light spectrum it captures for a MONOCHROME camera.<wbr/></p>
             </td>
 
             <td class="entry_units">
@@ -20745,6 +21448,15 @@
 
           </tr>
 
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Starting from Android Q,<wbr/> the colorFilterArrangement for a MONOCHROME camera must be
+a single-color pattern,<wbr/> such as MONO or NIR.<wbr/></p>
+            </td>
+          </tr>
 
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
@@ -21527,6 +22239,8 @@
 <p>Some devices may choose to provide a second set of calibration
 information for improved quality,<wbr/> including
 <a href="#static_android.sensor.referenceIlluminant2">android.<wbr/>sensor.<wbr/>reference<wbr/>Illuminant2</a> and its corresponding matrices.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21611,6 +22325,8 @@
 <p>If this key is present,<wbr/> then <a href="#static_android.sensor.colorTransform2">android.<wbr/>sensor.<wbr/>color<wbr/>Transform2</a>,<wbr/>
 <a href="#static_android.sensor.calibrationTransform2">android.<wbr/>sensor.<wbr/>calibration<wbr/>Transform2</a>,<wbr/> and
 <a href="#static_android.sensor.forwardMatrix2">android.<wbr/>sensor.<wbr/>forward<wbr/>Matrix2</a> will also be present.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21676,6 +22392,8 @@
 colorspace) into this camera device's native sensor color
 space under the first reference illuminant
 (<a href="#static_android.sensor.referenceIlluminant1">android.<wbr/>sensor.<wbr/>reference<wbr/>Illuminant1</a>).<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21744,6 +22462,8 @@
 (<a href="#static_android.sensor.referenceIlluminant2">android.<wbr/>sensor.<wbr/>reference<wbr/>Illuminant2</a>).<wbr/></p>
 <p>This matrix will only be present if the second reference
 illuminant is present.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21813,6 +22533,8 @@
 and the CIE XYZ colorspace when calculating this transform will
 match the standard white point for the first reference illuminant
 (i.<wbr/>e.<wbr/> no chromatic adaptation will be applied by this transform).<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21884,6 +22606,8 @@
 (i.<wbr/>e.<wbr/> no chromatic adaptation will be applied by this transform).<wbr/></p>
 <p>This matrix will only be present if the second reference
 illuminant is present.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -21951,6 +22675,8 @@
 this matrix is chosen so that the standard white point for this reference
 illuminant in the reference sensor colorspace is mapped to D50 in the
 CIE XYZ colorspace.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -22020,6 +22746,8 @@
 CIE XYZ colorspace.<wbr/></p>
 <p>This matrix will only be present if the second reference
 illuminant is present.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -22139,6 +22867,7 @@
 level values.<wbr/> For raw capture in particular,<wbr/> it is recommended to use
 pixels from <a href="#static_android.sensor.opticalBlackRegions">android.<wbr/>sensor.<wbr/>optical<wbr/>Black<wbr/>Regions</a> to calculate black
 level values for each frame.<wbr/></p>
+<p>For a MONOCHROME camera device,<wbr/> all of the 2x2 channels must have the same values.<wbr/></p>
             </td>
           </tr>
 
@@ -23033,6 +23762,8 @@
 used to interpolate between the provided color transforms when
 processing raw sensor data.<wbr/></p>
 <p>The order of the values is R,<wbr/> G,<wbr/> B; where R is in the lowest index.<wbr/></p>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -23106,6 +23837,8 @@
 that channel.<wbr/></p>
 <p>A more detailed description of the noise model can be found in the
 Adobe DNG specification for the NoiseProfile tag.<wbr/></p>
+<p>For a MONOCHROME camera,<wbr/> there is only one color channel,<wbr/> so the noise model
+coefficients will contain only one S and one O.<wbr/></p>
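+<p>As a small illustration,<wbr/> a sketch assuming the square-root form of the DNG
+NoiseProfile model,<wbr/> in which S scales the signal level and O is the constant offset:</p>
+<pre><code>#include &lt;math.h&gt;
+
+// Sketch: per-channel noise standard deviation under the DNG
+// NoiseProfile model, stddev(x) = sqrt(S * x + O), where x is the
+// normalized signal level. A MONOCHROME camera reports a single
+// (S, O) pair, which applies to its one channel.
+static double noise_stddev(double x, double s, double o) {
+    return sqrt(s * x + o);
+}
+</code></pre>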
             </td>
           </tr>
 
@@ -23332,6 +24065,8 @@
 <li>R &gt; 1.<wbr/>20 will require strong software correction to produce
 a usable image (&gt;20% divergence).<wbr/></li>
 </ul>
+<p>Starting from Android Q,<wbr/> this key will not be present for a MONOCHROME camera,<wbr/> even if
+the camera device has RAW capability.<wbr/></p>
             </td>
           </tr>
 
@@ -23722,6 +24457,7 @@
 layout key (see <a href="#static_android.sensor.info.colorFilterArrangement">android.<wbr/>sensor.<wbr/>info.<wbr/>color<wbr/>Filter<wbr/>Arrangement</a>),<wbr/> i.<wbr/>e.<wbr/> the
 nth value given corresponds to the black level offset for the nth
 color channel listed in the CFA.<wbr/></p>
+<p>For a MONOCHROME camera,<wbr/> all of the 2x2 channels must have the same values.<wbr/></p>
 <p>This key will be available if <a href="#static_android.sensor.opticalBlackRegions">android.<wbr/>sensor.<wbr/>optical<wbr/>Black<wbr/>Regions</a> is available or the
 camera device advertises this key via <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getAvailableCaptureResultKeys">CameraCharacteristics#getAvailableCaptureResultKeys</a>.<wbr/></p>
             </td>
@@ -25900,6 +26636,17 @@
 <p>As a visualization only,<wbr/> inverting the full-color map to recover an
 image of a gray wall (using bicubic interpolation for visual quality) as captured by the sensor gives:</p>
 <p><img alt="Image of a uniform white wall (inverse shading map)" src="images/camera2/metadata/android.statistics.lensShadingMap/inv_shading.png"/></p>
+<p>For a MONOCHROME camera,<wbr/> all of the 2x2 channels must have the same values.<wbr/> An example
+shading map for such a camera is defined as:</p>
+<pre><code><a href="#static_android.lens.info.shadingMapSize">android.<wbr/>lens.<wbr/>info.<wbr/>shading<wbr/>Map<wbr/>Size</a> = [ 4,<wbr/> 3 ]
+<a href="#dynamic_android.statistics.lensShadingMap">android.<wbr/>statistics.<wbr/>lens<wbr/>Shading<wbr/>Map</a> =
+[ 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+    1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/>  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>
+  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>  1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/>
+    1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/>  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>   1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+    1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3 ]
+</code></pre>
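+<p>A minimal validation sketch for this requirement (the helper name is hypothetical;
+the interleaved [R,<wbr/> Geven,<wbr/> Godd,<wbr/> B] layout is the one described for this key):</p>
+<pre><code>#include &lt;stdbool.h&gt;
+
+// Sketch: check that a MONOCHROME shading map carries identical
+// values in all four channels of every sample point.
+static bool mono_shading_map_valid(const float* map, int cols, int rows) {
+    for (int i = 0; i &lt; cols * rows; ++i) {
+        const float* p = &amp;map[i * 4];  // [R, Geven, Godd, B]
+        if (p[1] != p[0] || p[2] != p[0] || p[3] != p[0]) {
+            return false;
+        }
+    }
+    return true;
+}
+</code></pre>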
             </td>
           </tr>
 
@@ -25980,13 +26727,13 @@
 (x,<wbr/>y) ϵ (0 ...<wbr/> N-1,<wbr/> 0 ...<wbr/> M-1) is the value of the shading map at
 pixel ( ((W-1)/<wbr/>(N-1)) * x,<wbr/> ((H-1)/<wbr/>(M-1)) * y) for the four color channels.<wbr/>
 The map is assumed to be bilinearly interpolated between the sample points.<wbr/></p>
-<p>The channel order is [R,<wbr/> Geven,<wbr/> Godd,<wbr/> B],<wbr/> where Geven is the green
-channel for the even rows of a Bayer pattern,<wbr/> and Godd is the odd rows.<wbr/>
+<p>For a Bayer camera,<wbr/> the channel order is [R,<wbr/> Geven,<wbr/> Godd,<wbr/> B],<wbr/> where Geven is
+the green channel for the even rows of a Bayer pattern,<wbr/> and Godd is the odd rows.<wbr/>
 The shading map is stored in a fully interleaved format,<wbr/> and its size
 is provided in the camera static metadata by <a href="#static_android.lens.info.shadingMapSize">android.<wbr/>lens.<wbr/>info.<wbr/>shading<wbr/>Map<wbr/>Size</a>.<wbr/></p>
 <p>The shading map will generally have on the order of 30-40 rows and columns,<wbr/>
 and will be smaller than 64x64.<wbr/></p>
-<p>As an example,<wbr/> given a very small map defined as:</p>
+<p>As an example,<wbr/> given a very small map for a Bayer camera defined as:</p>
 <pre><code><a href="#static_android.lens.info.shadingMapSize">android.<wbr/>lens.<wbr/>info.<wbr/>shading<wbr/>Map<wbr/>Size</a> = [ 4,<wbr/> 3 ]
 <a href="#dynamic_android.statistics.lensShadingMap">android.<wbr/>statistics.<wbr/>lens<wbr/>Shading<wbr/>Map</a> =
 [ 1.<wbr/>3,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>15,<wbr/> 1.<wbr/>2,<wbr/>  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>15,<wbr/> 1.<wbr/>2,<wbr/>
@@ -26006,6 +26753,17 @@
 image of a gray wall (using bicubic interpolation for visual quality)
 as captured by the sensor gives:</p>
 <p><img alt="Image of a uniform white wall (inverse shading map)" src="images/camera2/metadata/android.statistics.lensShadingMap/inv_shading.png"/></p>
+<p>For a MONOCHROME camera,<wbr/> all of the 2x2 channels must have the same values.<wbr/> An example
+shading map for such a camera is defined as:</p>
+<pre><code><a href="#static_android.lens.info.shadingMapSize">android.<wbr/>lens.<wbr/>info.<wbr/>shading<wbr/>Map<wbr/>Size</a> = [ 4,<wbr/> 3 ]
+<a href="#dynamic_android.statistics.lensShadingMap">android.<wbr/>statistics.<wbr/>lens<wbr/>Shading<wbr/>Map</a> =
+[ 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+    1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/>  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>
+  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>  1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/> 1.<wbr/>1,<wbr/>
+    1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/> 1.<wbr/>0,<wbr/>  1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/>   1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>
+    1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/> 1.<wbr/>2,<wbr/>  1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3,<wbr/> 1.<wbr/>3 ]
+</code></pre>
 <p>Note that the RAW image data might be subject to lens shading
 correction not reported on this map.<wbr/> Query
 <a href="#static_android.sensor.info.lensShadingApplied">android.<wbr/>sensor.<wbr/>info.<wbr/>lens<wbr/>Shading<wbr/>Applied</a> to see if RAW image data has subject
@@ -26993,8 +27751,8 @@
 of points can be less than max (that is,<wbr/> the request doesn't have to
 always provide a curve with number of points equivalent to
 <a href="#static_android.tonemap.maxCurvePoints">android.<wbr/>tonemap.<wbr/>max<wbr/>Curve<wbr/>Points</a>).<wbr/></p>
-<p>For devices with MONOCHROME capability,<wbr/> only red channel is used.<wbr/> Green and blue channels
-are ignored.<wbr/></p>
+<p>For devices with MONOCHROME capability,<wbr/> all three channels must have the same set of
+control points.<wbr/></p>
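+<p>A minimal sketch of one way to satisfy this (hypothetical helper; the curve layout
+of n (Pin,<wbr/> Pout) float pairs follows this key's definition):</p>
+<pre><code>#include &lt;string.h&gt;
+
+// Sketch: for a MONOCHROME device, mirror the red curve into the
+// green and blue channels so all three carry the same control points.
+// Each curve holds n (Pin, Pout) pairs, i.e. 2 * n floats.
+static void mono_tonemap_fill(const float* curve_red, size_t n,
+                              float* curve_green, float* curve_blue) {
+    memcpy(curve_green, curve_red, 2 * n * sizeof(float));
+    memcpy(curve_blue, curve_red, 2 * n * sizeof(float));
+}
+</code></pre>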
 <p>A few examples,<wbr/> and their corresponding graphical mappings; these
 only specify the red channel and the precision is limited to 4
 digits,<wbr/> for conciseness.<wbr/></p>
@@ -27100,8 +27858,8 @@
 of points can be less than max (that is,<wbr/> the request doesn't have to
 always provide a curve with number of points equivalent to
 <a href="#static_android.tonemap.maxCurvePoints">android.<wbr/>tonemap.<wbr/>max<wbr/>Curve<wbr/>Points</a>).<wbr/></p>
-<p>For devices with MONOCHROME capability,<wbr/> only red channel is used.<wbr/> Green and blue channels
-are ignored.<wbr/></p>
+<p>For devices with MONOCHROME capability,<wbr/> all three channels must have the same set of
+control points.<wbr/></p>
 <p>A few examples,<wbr/> and their corresponding graphical mappings; these
 only specify the red channel and the precision is limited to 4
 digits,<wbr/> for conciseness.<wbr/></p>
@@ -27749,8 +28507,8 @@
 of points can be less than max (that is,<wbr/> the request doesn't have to
 always provide a curve with number of points equivalent to
 <a href="#static_android.tonemap.maxCurvePoints">android.<wbr/>tonemap.<wbr/>max<wbr/>Curve<wbr/>Points</a>).<wbr/></p>
-<p>For devices with MONOCHROME capability,<wbr/> only red channel is used.<wbr/> Green and blue channels
-are ignored.<wbr/></p>
+<p>For devices with MONOCHROME capability,<wbr/> all three channels must have the same set of
+control points.<wbr/></p>
 <p>A few examples,<wbr/> and their corresponding graphical mappings; these
 only specify the red channel and the precision is limited to 4
 digits,<wbr/> for conciseness.<wbr/></p>
@@ -27856,8 +28614,8 @@
 of points can be less than max (that is,<wbr/> the request doesn't have to
 always provide a curve with number of points equivalent to
 <a href="#static_android.tonemap.maxCurvePoints">android.<wbr/>tonemap.<wbr/>max<wbr/>Curve<wbr/>Points</a>).<wbr/></p>
-<p>For devices with MONOCHROME capability,<wbr/> only red channel is used.<wbr/> Green and blue channels
-are ignored.<wbr/></p>
+<p>For devices with MONOCHROME capability,<wbr/> all three channels must have the same set of
+control points.<wbr/></p>
 <p>A few examples,<wbr/> and their corresponding graphical mappings; these
 only specify the red channel and the precision is limited to 4
 digits,<wbr/> for conciseness.<wbr/></p>
@@ -28493,7 +29251,8 @@
 as triggering a still capture,<wbr/> internally executing a precapture trigger.<wbr/>  This may
 fire the flash for flash power metering during precapture,<wbr/> and then fire the flash
 for the final capture,<wbr/> if a flash is available on the device and the AE mode is set to
-enable the flash.<wbr/></p></span>
+enable the flash.<wbr/></p>
+<p>Devices that initially shipped with Android version <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES.html#Q">Q</a> or newer will not include any LEGACY-level devices.<wbr/></p></span>
                   </li>
                   <li>
                     <span class="entry_type_enum_name">3 (v3.2)</span>
@@ -28705,6 +29464,65 @@
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
         
+                
+          <tr class="entry" id="static_android.info.supportedBufferManagementVersion">
+            <td class="entry_name
+             " rowspan="3">
+              android.<wbr/>info.<wbr/>supported<wbr/>Buffer<wbr/>Management<wbr/>Version
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name entry_type_name_enum">byte</span>
+
+              <span class="entry_type_visibility"> [system]</span>
+
+
+
+
+
+                <ul class="entry_type_enum">
+                  <li>
+                    <span class="entry_type_enum_name">HIDL_DEVICE_3_5 (v3.4)</span>
+                    <span class="entry_type_enum_notes"><p>This camera device supports and opts in to the buffer management APIs provided by
+HIDL ICameraDevice version 3.<wbr/>5.<wbr/></p></span>
+                  </li>
+                </ul>
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>The version of the buffer management API this camera device supports and opts into.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>When this key is not present,<wbr/> the camera framework will interact with this camera
+device without using any buffer management HAL API.<wbr/> When this key is present and the
+camera framework supports the advertised buffer management API version,<wbr/> the framework
+will interact with the camera HAL using that version of the buffer management API.<wbr/></p>
+            </td>
+          </tr>
+
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
         
 
       <!-- end of kind -->
@@ -29975,6 +30793,308 @@
           <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
            <!-- end of entry -->
         
+                
+          <tr class="entry" id="static_android.depth.availableRecommendedDepthStreamConfigurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>depth.<wbr/>available<wbr/>Recommended<wbr/>Depth<wbr/>Stream<wbr/>Configurations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n x 5
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as recommendedStreamConfiguration]</span>
+
+
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>Recommended depth stream configurations for common client use cases.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Optional subset of the <a href="#static_android.depth.availableDepthStreamConfigurations">android.<wbr/>depth.<wbr/>available<wbr/>Depth<wbr/>Stream<wbr/>Configurations</a> that
+contains similar tuples,<wbr/> listed as
+(width,<wbr/> height,<wbr/> format,<wbr/> output/<wbr/>input stream,<wbr/> usecase bit field) entries.<wbr/>
+Camera devices can use this to suggest particular depth stream configurations that are
+power- and performance-efficient for specific use cases.<wbr/> For more information about
+retrieving the suggestions see
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Recommended depth configurations are expected to be declared with SNAPSHOT and/<wbr/>or
+ZSL if supported by the device.<wbr/>
+For additional details on how to declare recommended stream configurations,<wbr/> see
+<a href="#static_android.scaler.availableRecommendedStreamConfigurations">android.<wbr/>scaler.<wbr/>available<wbr/>Recommended<wbr/>Stream<wbr/>Configurations</a>.<wbr/>
+For additional requirements on depth streams,<wbr/> see
+<a href="#static_android.depth.availableDepthStreamConfigurations">android.<wbr/>depth.<wbr/>available<wbr/>Depth<wbr/>Stream<wbr/>Configurations</a>.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.depth.availableDynamicDepthStreamConfigurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>depth.<wbr/>available<wbr/>Dynamic<wbr/>Depth<wbr/>Stream<wbr/>Configurations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name entry_type_name_enum">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n x 4
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfiguration]</span>
+
+
+
+
+
+                <ul class="entry_type_enum">
+                  <li>
+                    <span class="entry_type_enum_name">OUTPUT (v3.4)</span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">INPUT (v3.4)</span>
+                  </li>
+                </ul>
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>The available dynamic depth dataspace stream
+configurations that this camera device supports
+(i.<wbr/>e.<wbr/> format,<wbr/> width,<wbr/> height,<wbr/> output/<wbr/>input stream).<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_DEPTH">DEPTH</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>These are output stream configurations for use with
+dataSpace DYNAMIC_<wbr/>DEPTH.<wbr/> The configurations are
+listed as <code>(format,<wbr/> width,<wbr/> height,<wbr/> input?)</code> tuples.<wbr/></p>
+<p>Only devices that support depth output for at least
+the HAL_<wbr/>PIXEL_<wbr/>FORMAT_<wbr/>Y16 dense depth map along with
+HAL_<wbr/>PIXEL_<wbr/>FORMAT_<wbr/>BLOB of the same size,<wbr/> or of a size with
+the same aspect ratio,<wbr/> can have dynamic depth dataspace
+stream configurations.<wbr/> <a href="#static_android.depth.depthIsExclusive">android.<wbr/>depth.<wbr/>depth<wbr/>Is<wbr/>Exclusive</a> also
+needs to be set to FALSE.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly.<wbr/>
+It is populated by the camera framework and must not be set
+at the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.depth.availableDynamicDepthMinFrameDurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>depth.<wbr/>available<wbr/>Dynamic<wbr/>Depth<wbr/>Min<wbr/>Frame<wbr/>Durations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int64</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  4 x n
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfigurationDuration]</span>
+
+
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>This lists the minimum frame duration for each
+format/<wbr/>size combination for dynamic depth output streams.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+              (format,<wbr/> width,<wbr/> height,<wbr/> ns) x n
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_DEPTH">DEPTH</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This should correspond to the frame duration when only that
+stream is active,<wbr/> with all processing (typically in android.<wbr/>*.<wbr/>mode)
+set to either OFF or FAST.<wbr/></p>
+<p>When multiple streams are used in a request,<wbr/> the minimum frame
+duration will be max(individual stream min durations).<wbr/></p>
+<p>The minimum frame duration of a stream (of a particular format,<wbr/> size)
+is the same regardless of whether the stream is input or output.<wbr/></p>
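+<p>A small sketch of the rule above (the helper name is hypothetical):</p>
+<pre><code>#include &lt;stddef.h&gt;
+#include &lt;stdint.h&gt;
+
+// Sketch: the effective minimum frame duration of a request that
+// uses several streams is the maximum of the per-stream minimums.
+static int64_t request_min_frame_duration(const int64_t* mins, size_t n) {
+    int64_t result = 0;
+    for (size_t i = 0; i &lt; n; ++i) {
+        if (mins[i] &gt; result) {
+            result = mins[i];
+        }
+    }
+    return result;
+}
+</code></pre>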
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly.<wbr/>
+It is populated by the camera framework and must not be set
+at the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.depth.availableDynamicDepthStallDurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>depth.<wbr/>available<wbr/>Dynamic<wbr/>Depth<wbr/>Stall<wbr/>Durations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int64</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  4 x n
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfigurationDuration]</span>
+
+
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>This lists the maximum stall duration for each
+output format/<wbr/>size combination for dynamic depth streams.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+              (format,<wbr/> width,<wbr/> height,<wbr/> ns) x n
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_DEPTH">DEPTH</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>A stall duration is how much extra time would get added
+to the normal minimum frame duration for a repeating request
+that has streams with non-zero stall.<wbr/></p>
+<p>All dynamic depth output streams may have a nonzero stall
+duration.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly.<wbr/>
+It is populated by the camera framework and must not be set
+at the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
         
 
       <!-- end of kind -->
@@ -30021,7 +31141,7 @@
                 <span class="entry_type_array">
                   n
                 </span>
-              <span class="entry_type_visibility"> [hidden]</span>
+              <span class="entry_type_visibility"> [ndk_public]</span>
 
 
               <span class="entry_type_hwlevel">[limited] </span>
@@ -30058,12 +31178,12 @@
           </tr>
           <tr class="entry_cont">
             <td class="entry_details" colspan="6">
-              <p>For a logical camera,<wbr/> this is concatenation of all underlying physical camera ids.<wbr/>
-The null terminator for physical camera id must be preserved so that the whole string
-can be tokenized using '\0' to generate list of physical camera ids.<wbr/></p>
-<p>For example,<wbr/> if the physical camera ids of the logical camera are "2" and "3",<wbr/> the
+              <p>For a logical camera,<wbr/> this is concatenation of all underlying physical camera IDs.<wbr/>
+The null terminator for physical camera ID must be preserved so that the whole string
+can be tokenized using '\0' to generate a list of physical camera IDs.<wbr/></p>
+<p>For example,<wbr/> if the physical camera IDs of the logical camera are "2" and "3",<wbr/> the
 value of this tag will be ['2',<wbr/> '\0',<wbr/> '3',<wbr/> '\0'].<wbr/></p>
-<p>The number of physical camera ids must be no less than 2.<wbr/></p>
+<p>The number of physical camera IDs must be no less than 2.<wbr/></p>
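+<p>A minimal client-side tokenizing sketch (the helper name is hypothetical; it relies
+on each ID keeping its trailing '\0',<wbr/> as described above):</p>
+<pre><code>#include &lt;stddef.h&gt;
+#include &lt;stdint.h&gt;
+#include &lt;stdio.h&gt;
+
+// Sketch: split the '\0'-separated physical camera ID list.
+// For the example value ['2', '\0', '3', '\0'] this prints "2" and "3".
+static void print_physical_ids(const uint8_t* ids, size_t len) {
+    size_t start = 0;
+    for (size_t i = 0; i &lt; len; ++i) {
+        if (ids[i] == '\0') {
+            printf("physical camera ID: %s\n", (const char*)&amp;ids[start]);
+            start = i + 1;
+        }
+    }
+}
+</code></pre>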
             </td>
           </tr>
 
@@ -30138,6 +31258,8 @@
 <p>In both cases,<wbr/> all images generated for a particular capture request still carry the same
 timestamps,<wbr/> so that they can be used to look up the matching frame number and
 onCaptureStarted callback.<wbr/></p>
+<p>This tag is only applicable if the logical camera device supports concurrent physical
+streams from different physical cameras.<wbr/></p>
             </td>
           </tr>
 
@@ -30149,6 +31271,108 @@
 
       <!-- end of kind -->
       </tbody>
+      <tr><td colspan="7" class="kind">dynamic</td></tr>
+
+      <thead class="entries_header">
+        <tr>
+          <th class="th_name">Property Name</th>
+          <th class="th_type">Type</th>
+          <th class="th_description">Description</th>
+          <th class="th_units">Units</th>
+          <th class="th_range">Range</th>
+          <th class="th_hal_version">Initial HIDL HAL version</th>
+          <th class="th_tags">Tags</th>
+        </tr>
+      </thead>
+
+      <tbody>
+
+        
+
+        
+
+        
+
+        
+
+                
+          <tr class="entry" id="dynamic_android.logicalMultiCamera.activePhysicalId">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>logical<wbr/>Multi<wbr/>Camera.<wbr/>active<wbr/>Physical<wbr/>Id
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">byte</span>
+
+              <span class="entry_type_visibility"> [public as string]</span>
+
+
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>String containing the ID of the underlying active physical camera.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+              UTF-8 null-terminated string
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_LOGICALCAMERA">LOGICALCAMERA</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The ID of the active physical camera that's backing the logical camera.<wbr/> All camera
+streams and metadata that are not physical-camera specific will originate from this
+physical camera.<wbr/></p>
+<p>For a logical camera made up of physical cameras where each camera's lenses have
+different characteristics,<wbr/> the camera device may choose to switch between the physical
+cameras when the application changes FOCAL_<wbr/>LENGTH or SCALER_<wbr/>CROP_<wbr/>REGION.<wbr/>
+At the time of lens switch,<wbr/> this result metadata reflects the new active physical camera
+ID.<wbr/></p>
+<p>This key will be available if the camera device advertises this key via <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getAvailableCaptureResultKeys">CameraCharacteristics#getAvailableCaptureResultKeys</a>.<wbr/>
+When available,<wbr/> this must be one of the valid physical IDs backing this logical multi-camera.<wbr/>
+If this key is not available for a logical multi-camera,<wbr/> the camera device implementation
+may still switch between different active physical cameras based on use case,<wbr/> but the
+current active physical camera information won't be available to the application.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Starting from HIDL ICameraDevice version 3.<wbr/>5,<wbr/> the tag must be available in the capture
+result metadata to indicate the current active physical camera ID.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+        
+
+      <!-- end of kind -->
+      </tbody>
 
   <!-- end of section -->
   <tr><td colspan="7" id="section_distortionCorrection" class="section">distortionCorrection</td></tr>
@@ -30247,8 +31471,8 @@
 will not slow down capture rate when applying correction.<wbr/> FAST may be the same as OFF if
 any correction at all would slow down capture rate.<wbr/>  Every output stream will have a
 similar amount of enhancement applied.<wbr/></p>
-<p>The correction only applies to processed outputs such as YUV,<wbr/> JPEG,<wbr/> or DEPTH16; it is not
-applied to any RAW output.<wbr/></p>
+<p>The correction only applies to processed outputs such as YUV,<wbr/> Y8,<wbr/> JPEG,<wbr/> or DEPTH16; it is
+not applied to any RAW output.<wbr/></p>
 <p>This control will be on by default on devices that support this control.<wbr/> Applications
 disabling distortion correction need to pay extra attention with the coordinate system of
 metering regions,<wbr/> crop region,<wbr/> and face rectangles.<wbr/> When distortion correction is OFF,<wbr/>
@@ -30474,8 +31698,8 @@
 will not slow down capture rate when applying correction.<wbr/> FAST may be the same as OFF if
 any correction at all would slow down capture rate.<wbr/>  Every output stream will have a
 similar amount of enhancement applied.<wbr/></p>
-<p>The correction only applies to processed outputs such as YUV,<wbr/> JPEG,<wbr/> or DEPTH16; it is not
-applied to any RAW output.<wbr/></p>
+<p>The correction only applies to processed outputs such as YUV,<wbr/> Y8,<wbr/> JPEG,<wbr/> or DEPTH16; it is
+not applied to any RAW output.<wbr/></p>
 <p>This control will be on by default on devices that support this control.<wbr/> Applications
 disabling distortion correction need to pay extra attention with the coordinate system of
 metering regions,<wbr/> crop region,<wbr/> and face rectangles.<wbr/> When distortion correction is OFF,<wbr/>
@@ -30510,6 +31734,457 @@
       </tbody>
 
   <!-- end of section -->
+  <tr><td colspan="7" id="section_heic" class="section">heic</td></tr>
+
+
+      <tr><td colspan="7" class="kind">static</td></tr>
+
+      <thead class="entries_header">
+        <tr>
+          <th class="th_name">Property Name</th>
+          <th class="th_type">Type</th>
+          <th class="th_description">Description</th>
+          <th class="th_units">Units</th>
+          <th class="th_range">Range</th>
+          <th class="th_hal_version">Initial HIDL HAL version</th>
+          <th class="th_tags">Tags</th>
+        </tr>
+      </thead>
+
+      <tbody>
+
+        
+
+        
+
+        
+
+        
+                
+            
+
+                
+          <tr class="entry" id="static_android.heic.info.supported">
+            <td class="entry_name
+             " rowspan="3">
+              android.<wbr/>heic.<wbr/>info.<wbr/>supported
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name entry_type_name_enum">byte</span>
+
+              <span class="entry_type_visibility"> [system as boolean]</span>
+
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+                <ul class="entry_type_enum">
+                  <li>
+                    <span class="entry_type_enum_name">FALSE (v3.4)</span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">TRUE (v3.4)</span>
+                  </li>
+                </ul>
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>Whether this camera device can support an identical set of stream combinations
+involving the HEIC image format,<wbr/> compared to the
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureSession">table of combinations</a>
+involving the JPEG image format required for the device's hardware level and capabilities.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_HEIC">HEIC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>All the static,<wbr/> control and dynamic metadata tags related to JPEG apply to HEIC formats
+as well.<wbr/> For example,<wbr/> the same <a href="#controls_android.jpeg.orientation">android.<wbr/>jpeg.<wbr/>orientation</a> and <a href="#controls_android.jpeg.quality">android.<wbr/>jpeg.<wbr/>quality</a> are
+used to control the orientation and quality of the HEIC image.<wbr/> Configuring JPEG and
+HEIC streams at the same time is not supported.<wbr/></p>
+<p>If a camera device supports HEIC format (ISO/<wbr/>IEC 23008-12),<wbr/> not only does it
+support the existing mandatory stream
+<a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureSession">combinations</a>
+required for the device's hardware level and capabilities,<wbr/> it also supports swapping
+each JPEG stream with an HEIC stream in all guaranteed combinations.<wbr/></p>
+<p>For every HEIC stream configured by the application,<wbr/> the camera framework sets up two
+internal streams with the camera HAL:</p>
+<ul>
+<li>A YUV_<wbr/>420_<wbr/>888 or IMPLEMENTATION_<wbr/>DEFINED HAL stream as input to HEIC or HEVC encoder.<wbr/></li>
+<li>A BLOB stream with JPEG_<wbr/>APPS_<wbr/>SEGMENTS dataspace to extract application markers
+including EXIF and thumbnail to be saved in the HEIF container.<wbr/></li>
+</ul>
+<p>A camera device can output HEIC format to the application if and only if:</p>
+<ul>
+<li>The system contains a HEIC or HEVC encoder with constant quality mode,<wbr/> and</li>
+<li>This tag is set to TRUE,<wbr/> meaning that the camera HAL supports replacing JPEG streams in
+all mandatory stream combinations with a [YUV_<wbr/>420_<wbr/>888/<wbr/>IMPLEMENTATION_<wbr/>DEFINED stream +
+JPEG_<wbr/>APPS_<wbr/>SEGMENT BLOB stream] combo.<wbr/></li>
+</ul>
+<p>As an example,<wbr/> if the camera device's hardware level is LIMITED and it supports HEIC,<wbr/>
+then in addition to the required stream combinations,<wbr/> the HAL must support the stream
+combinations below as well:</p>
+<ul>
+<li>IMPLEMENTATION_<wbr/>DEFINED/<wbr/>YUV_<wbr/>420_<wbr/>888 MAXIMUM + JPEG_<wbr/>APP_<wbr/>SEGMENTS BLOB,<wbr/></li>
+<li>PRIV PREVIEW + IMPLEMENTATION_<wbr/>DEFINED/<wbr/>YUV_<wbr/>420_<wbr/>888 MAXIMUM + JPEG_<wbr/>APP_<wbr/>SEGMENTS BLOB,<wbr/></li>
+<li>YUV PREVIEW + IMPLEMENTATION_<wbr/>DEFINED/<wbr/>YUV_<wbr/>420_<wbr/>888 MAXIMUM + JPEG_<wbr/>APP_<wbr/>SEGMENTS BLOB,<wbr/></li>
+<li>PRIV PREVIEW + YUV PREVIEW + IMPLEMENTATION_<wbr/>DEFINED/<wbr/>YUV_<wbr/>420_<wbr/>888 MAXIMUM +
+JPEG_<wbr/>APP_<wbr/>SEGMENTS BLOB</li>
+</ul>
+<p>The selection logic between YUV_<wbr/>420_<wbr/>888 and IMPLEMENTATION_<wbr/>DEFINED for the HAL internal
+stream is as follows:</p>
+<pre><code>if (HEIC encoder exists and supports the size) {
+    use IMPLEMENTATION_<wbr/>DEFINED with GRALLOC_<wbr/>USAGE_<wbr/>HW_<wbr/>IMAGE_<wbr/>ENCODER usage flag;
+} else {
+    //<wbr/> HEVC encoder exists
+    if (size is less than framework predefined tile size) {
+        use IMPLEMENTATION_<wbr/>DEFINED with GRALLOC_<wbr/>USAGE_<wbr/>HW_<wbr/>VIDEO_<wbr/>ENCODER usage flag;
+    } else {
+        use YUV_<wbr/>420_<wbr/>888;
+    }
+}
+</code></pre>
+            </td>
+          </tr>
+
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.heic.info.maxJpegAppSegmentsCount">
+            <td class="entry_name
+             " rowspan="3">
+              android.<wbr/>heic.<wbr/>info.<wbr/>max<wbr/>Jpeg<wbr/>App<wbr/>Segments<wbr/>Count
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">byte</span>
+
+              <span class="entry_type_visibility"> [system]</span>
+
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>The maximum number of JPEG APP segments supported by the camera HAL device.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_HEIC">HEIC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The camera framework will use this value to derive the size of the BLOB buffer with the
+JPEG_<wbr/>APP_<wbr/>SEGMENTS dataspace,<wbr/> with each APP segment occupying at most 64K bytes.<wbr/> If the
+value of this tag is n,<wbr/> the size of the framework-allocated buffer will be:</p>
+<pre><code>n * (2 + 0xFFFF) + sizeof(struct CameraBlob)
+</code></pre>
+<p>where 2 is the number of bytes for the APP marker,<wbr/> and 0xFFFF is the maximum size per APP
+segment (including the 2-byte segment size field).<wbr/></p>
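+<p>As a minimal sketch (the CameraBlob stand-in and helper name below are illustrative;<wbr/> the
+real blob transport header is defined by the camera HAL graphics headers),<wbr/> the allocation
+could be computed as:</p>
+<pre><code>#include &lt;stddef.h&gt;
+
+/* Illustrative stand-in for the camera HAL blob transport header. */
+struct CameraBlob { int blobId; int blobSize; };
+
+/* Derive the JPEG_APP_SEGMENTS BLOB buffer size from the
+ * maxJpegAppSegmentsCount value n. */
+size_t heic_app_segments_buffer_size(size_t n) {
+    const size_t app_marker_bytes = 2;      /* e.g. 0xFFE1 for APP1 */
+    const size_t max_segment_size = 0xFFFF; /* includes the 2-byte size field */
+    return n * (app_marker_bytes + max_segment_size) + sizeof(struct CameraBlob);
+}
+</code></pre>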
+<p>The value of this tag must be at least 1,<wbr/> and the APP1 marker (0xFFE1) segment must be the
+first segment stored in the JPEG_<wbr/>APP_<wbr/>SEGMENTS BLOB buffer.<wbr/> The APP1 segment stores the EXIF
+data and thumbnail.<wbr/></p>
+<p>Since the media encoder embeds the orientation in the metadata of the output image,<wbr/> to be
+consistent between the main image and thumbnail,<wbr/> the camera HAL must not rotate the thumbnail
+image data based on <a href="#controls_android.jpeg.orientation">android.<wbr/>jpeg.<wbr/>orientation</a>.<wbr/> The framework will write the orientation
+into the EXIF and HEIC containers.<wbr/></p>
+<p>The APP1 segment is followed immediately by one or more APP2 segments and APPn
+segments.<wbr/> After the HAL fills and returns the JPEG_<wbr/>APP_<wbr/>SEGMENTS buffer,<wbr/> the camera
+framework modifies the APP1 segment by filling in the EXIF tags that are related to the
+main image bitstream and the tags that can be derived from the capture result metadata,<wbr/>
+before saving them into the HEIC container.<wbr/></p>
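+<p>A minimal sketch of walking this layout (assuming standard JPEG APPn framing:<wbr/> a 2-byte
+marker followed by a 2-byte big-endian length that counts the length field but not the marker;<wbr/>
+the helper name is illustrative):</p>
+<pre><code>#include &lt;stdint.h&gt;
+#include &lt;stddef.h&gt;
+
+/* Walk the APP segments stored back to back in the BLOB buffer. */
+static void walk_app_segments(const uint8_t *buf, size_t len) {
+    size_t off = 0;
+    while (off + 4 &lt;= len &amp;&amp; buf[off] == 0xFF &amp;&amp; (buf[off + 1] &amp; 0xF0) == 0xE0) {
+        /* buf[off + 1] - 0xE0 is the APPn index; 1 means APP1 (EXIF + thumbnail). */
+        uint16_t seg_size = (uint16_t)((buf[off + 2] &lt;&lt; 8) | buf[off + 3]);
+        off += 2 + seg_size; /* 2 marker bytes, then the segment incl. its size field */
+    }
+}
+</code></pre>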
+<p>The value of this tag must not be more than 16.<wbr/></p>
+            </td>
+          </tr>
+
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+        
+        
+
+                
+          <tr class="entry" id="static_android.heic.availableHeicStreamConfigurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>heic.<wbr/>available<wbr/>Heic<wbr/>Stream<wbr/>Configurations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name entry_type_name_enum">int32</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  n x 4
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfiguration]</span>
+
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+                <ul class="entry_type_enum">
+                  <li>
+                    <span class="entry_type_enum_name">OUTPUT (v3.4)</span>
+                  </li>
+                  <li>
+                    <span class="entry_type_enum_name">INPUT (v3.4)</span>
+                  </li>
+                </ul>
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>The available HEIC (ISO/<wbr/>IEC 23008-12) stream
+configurations that this camera device supports
+(i.<wbr/>e.<wbr/> format,<wbr/> width,<wbr/> height,<wbr/> output/<wbr/>input stream).<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_HEIC">HEIC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>The configurations are listed as <code>(format,<wbr/> width,<wbr/> height,<wbr/> input?)</code> tuples.<wbr/></p>
+<p>If the camera device supports the HEIC image format,<wbr/> it will support an identical set of
+stream combinations involving the HEIC image format,<wbr/> compared to the combinations involving the
+JPEG image format as required by the device's hardware level and capabilities.<wbr/></p>
+<p>All the static,<wbr/> control,<wbr/> and dynamic metadata tags related to JPEG apply to HEIC formats.<wbr/>
+Configuring JPEG and HEIC streams at the same time is not supported.<wbr/></p>
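+<p>A minimal sketch of scanning the n x 4 int32 entries (assuming OUTPUT is 0,<wbr/> per the enum
+order above;<wbr/> the helper name is illustrative):</p>
+<pre><code>#include &lt;stdint.h&gt;
+#include &lt;stddef.h&gt;
+
+/* Count the HEIC output configurations among the n
+ * (format, width, height, input?) tuples. */
+static size_t count_heic_outputs(const int32_t *entries, size_t n) {
+    size_t outputs = 0;
+    for (size_t i = 0; i &lt; n; i++) {
+        if (entries[i * 4 + 3] == 0) {  /* 4th element: output/input flag */
+            outputs++;
+        }
+    }
+    return outputs;
+}
+</code></pre>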
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>These are output stream configurations for use with dataSpace HAL_<wbr/>DATASPACE_<wbr/>HEIF.<wbr/></p>
+<p>Do not set this property directly.<wbr/> It is populated by the camera framework and must not be
+set by the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.heic.availableHeicMinFrameDurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>heic.<wbr/>available<wbr/>Heic<wbr/>Min<wbr/>Frame<wbr/>Durations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int64</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  4 x n
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfigurationDuration]</span>
+
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>This lists the minimum frame duration for each
+format/<wbr/>size combination for HEIC output formats.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+              (format,<wbr/> width,<wbr/> height,<wbr/> ns) x n
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_HEIC">HEIC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>This should correspond to the frame duration when only that
+stream is active,<wbr/> with all processing (typically in android.<wbr/>*.<wbr/>mode)
+set to either OFF or FAST.<wbr/></p>
+<p>When multiple streams are used in a request,<wbr/> the minimum frame
+duration will be max(individual stream min durations).<wbr/></p>
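+<p>For example,<wbr/> if a request targets a HEIC stream whose minimum frame duration is 100 ms and a
+preview stream whose minimum frame duration is 33 ms (values illustrative),<wbr/> the minimum frame
+duration of that request is max(100 ms,<wbr/> 33 ms) = 100 ms.<wbr/></p>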
+<p>See <a href="#controls_android.sensor.frameDuration">android.<wbr/>sensor.<wbr/>frame<wbr/>Duration</a> and
+<a href="#static_android.scaler.availableStallDurations">android.<wbr/>scaler.<wbr/>available<wbr/>Stall<wbr/>Durations</a> for more details about
+calculating the max frame rate.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly.<wbr/> It is populated by the camera framework and must not be
+set by the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+                
+          <tr class="entry" id="static_android.heic.availableHeicStallDurations">
+            <td class="entry_name
+             " rowspan="5">
+              android.<wbr/>heic.<wbr/>available<wbr/>Heic<wbr/>Stall<wbr/>Durations
+            </td>
+            <td class="entry_type">
+                <span class="entry_type_name">int64</span>
+                <span class="entry_type_container">x</span>
+
+                <span class="entry_type_array">
+                  4 x n
+                </span>
+              <span class="entry_type_visibility"> [ndk_public as streamConfigurationDuration]</span>
+
+
+              <span class="entry_type_hwlevel">[limited] </span>
+
+
+
+
+            </td> <!-- entry_type -->
+
+            <td class="entry_description">
+              <p>This lists the maximum stall duration for each
+output format/<wbr/>size combination for HEIC streams.<wbr/></p>
+            </td>
+
+            <td class="entry_units">
+              (format,<wbr/> width,<wbr/> height,<wbr/> ns) x n
+            </td>
+
+            <td class="entry_range">
+            </td>
+
+            <td class="entry_hal_version">
+              <p>3.<wbr/>4</p>
+            </td>
+
+            <td class="entry_tags">
+              <ul class="entry_tags">
+                  <li><a href="#tag_HEIC">HEIC</a></li>
+              </ul>
+            </td>
+
+          </tr>
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>A stall duration is how much extra time would get added
+to the normal minimum frame duration for a repeating request
+that has streams with non-zero stall.<wbr/></p>
+<p>This functions similarly to
+<a href="#static_android.scaler.availableStallDurations">android.<wbr/>scaler.<wbr/>available<wbr/>Stall<wbr/>Durations</a> for HEIC
+streams.<wbr/></p>
+<p>All HEIC output stream formats may have a nonzero stall
+duration.<wbr/></p>
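+<p>For example,<wbr/> with a 33 ms minimum frame duration and a 100 ms HEIC stall duration (values
+illustrative),<wbr/> a repeating request that includes the HEIC stream cannot run faster than one
+frame every 133 ms.<wbr/></p>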
+            </td>
+          </tr>
+
+          <tr class="entries_header">
+            <th class="th_details" colspan="6">HAL Implementation Details</th>
+          </tr>
+          <tr class="entry_cont">
+            <td class="entry_details" colspan="6">
+              <p>Do not set this property directly.<wbr/> It is populated by the camera framework and must not be
+set by the HAL layer.<wbr/></p>
+            </td>
+          </tr>
+
+          <tr class="entry_spacer"><td class="entry_spacer" colspan="7"></td></tr>
+           <!-- end of entry -->
+        
+        
+
+      <!-- end of kind -->
+      </tbody>
+
+  <!-- end of section -->
 <!-- </namespace> -->
   </table>
 
@@ -30707,6 +32382,9 @@
           <li><a href="#static_android.depth.availableDepthStreamConfigurations">android.depth.availableDepthStreamConfigurations</a> (static)</li>
           <li><a href="#static_android.depth.availableDepthMinFrameDurations">android.depth.availableDepthMinFrameDurations</a> (static)</li>
           <li><a href="#static_android.depth.availableDepthStallDurations">android.depth.availableDepthStallDurations</a> (static)</li>
+          <li><a href="#static_android.depth.availableDynamicDepthStreamConfigurations">android.depth.availableDynamicDepthStreamConfigurations</a> (static)</li>
+          <li><a href="#static_android.depth.availableDynamicDepthMinFrameDurations">android.depth.availableDynamicDepthMinFrameDurations</a> (static)</li>
+          <li><a href="#static_android.depth.availableDynamicDepthStallDurations">android.depth.availableDynamicDepthStallDurations</a> (static)</li>
         </ul>
       </li> <!-- tag_DEPTH -->
       <li id="tag_REPROC">REPROC - 
@@ -30719,6 +32397,7 @@
           <li><a href="#static_android.noiseReduction.availableNoiseReductionModes">android.noiseReduction.availableNoiseReductionModes</a> (static)</li>
           <li><a href="#static_android.request.maxNumInputStreams">android.request.maxNumInputStreams</a> (static)</li>
           <li><a href="#static_android.scaler.availableInputOutputFormatsMap">android.scaler.availableInputOutputFormatsMap</a> (static)</li>
+          <li><a href="#static_android.scaler.availableRecommendedInputOutputFormatsMap">android.scaler.availableRecommendedInputOutputFormatsMap</a> (static)</li>
           <li><a href="#controls_android.reprocess.effectiveExposureFactor">android.reprocess.effectiveExposureFactor</a> (controls)</li>
           <li><a href="#static_android.reprocess.maxCaptureStall">android.reprocess.maxCaptureStall</a> (static)</li>
           <li><a href="#static_android.distortionCorrection.availableModes">android.distortionCorrection.availableModes</a> (static)</li>
@@ -30732,8 +32411,20 @@
         <ul class="tags_entries">
           <li><a href="#static_android.logicalMultiCamera.physicalIds">android.logicalMultiCamera.physicalIds</a> (static)</li>
           <li><a href="#static_android.logicalMultiCamera.sensorSyncType">android.logicalMultiCamera.sensorSyncType</a> (static)</li>
+          <li><a href="#dynamic_android.logicalMultiCamera.activePhysicalId">android.logicalMultiCamera.activePhysicalId</a> (dynamic)</li>
         </ul>
       </li> <!-- tag_LOGICALCAMERA -->
+      <li id="tag_HEIC">HEIC - 
+        Entry is required for devices with HEIC (High Efficiency Image Format) support.
+    
+        <ul class="tags_entries">
+          <li><a href="#static_android.heic.info.supported">android.heic.info.supported</a> (static)</li>
+          <li><a href="#static_android.heic.info.maxJpegAppSegmentsCount">android.heic.info.maxJpegAppSegmentsCount</a> (static)</li>
+          <li><a href="#static_android.heic.availableHeicStreamConfigurations">android.heic.availableHeicStreamConfigurations</a> (static)</li>
+          <li><a href="#static_android.heic.availableHeicMinFrameDurations">android.heic.availableHeicMinFrameDurations</a> (static)</li>
+          <li><a href="#static_android.heic.availableHeicStallDurations">android.heic.availableHeicStallDurations</a> (static)</li>
+        </ul>
+      </li> <!-- tag_HEIC -->
       <li id="tag_FUTURE">FUTURE - 
         Entry is  under-specified and is not required for now. This is for book-keeping purpose,
         do not implement or use it, it may be revised for future.
diff --git a/camera/docs/metadata-generate b/camera/docs/metadata-generate
index 98ebd67..b1eeac1 100755
--- a/camera/docs/metadata-generate
+++ b/camera/docs/metadata-generate
@@ -206,6 +206,8 @@
 gen_file_abs HidlMetadata.mako "$hidldir/3.2/types.hal" yes 3.2 || exit 1
 mkdir -p "${hidldir}/3.3"
 gen_file_abs HidlMetadata.mako "$hidldir/3.3/types.hal" yes 3.3 || exit 1
+mkdir -p "${hidldir}/3.4"
+gen_file_abs HidlMetadata.mako "$hidldir/3.4/types.hal" yes 3.4 || exit 1
 
 #Generate NDK header
 gen_file_abs ndk_camera_metadata_tags.mako "$ndk_header_dir/NdkCameraMetadataTags.h" yes || exit 1
diff --git a/camera/docs/metadata_definitions.xml b/camera/docs/metadata_definitions.xml
index c9d56be..2c49c13 100644
--- a/camera/docs/metadata_definitions.xml
+++ b/camera/docs/metadata_definitions.xml
@@ -15,7 +15,7 @@
 -->
 <metadata xmlns="http://schemas.android.com/service/camera/metadata/"
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_properties.xsd">
+xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_definitions.xsd">
 
   <tags>
     <tag id="BC">
@@ -42,6 +42,9 @@
     <tag id="LOGICALCAMERA">
         Entry is required for logical multi-camera capability.
     </tag>
+    <tag id="HEIC">
+        Entry is required for devices with HEIC (High Efficiency Image Format) support.
+    </tag>
     <tag id="FUTURE">
         Entry is  under-specified and is not required for now. This is for book-keeping purpose,
         do not implement or use it, it may be revised for future.
@@ -76,6 +79,10 @@
     <typedef name="streamConfiguration">
       <language name="java">android.hardware.camera2.params.StreamConfiguration</language>
     </typedef>
+    <typedef name="recommendedStreamConfiguration">
+      <language
+      name="java">android.hardware.camera2.params.RecommendedStreamConfiguration</language>
+    </typedef>
     <typedef name="streamConfigurationDuration">
       <language name="java">android.hardware.camera2.params.StreamConfigurationDuration</language>
     </typedef>
@@ -130,6 +137,9 @@
     <typedef name="oisSample">
       <language name="java">android.hardware.camera2.params.OisSample</language>
     </typedef>
+    <typedef name="mandatoryStreamCombination">
+      <language name="java">android.hardware.camera2.params.MandatoryStreamCombination</language>
+    </typedef>
   </types>
 
   <namespace name="android">
@@ -3359,6 +3369,8 @@
           Setting a location object in a request will include the GPS coordinates of the location
           into any JPEG images captured based on the request. These coordinates can then be
           viewed by anyone who receives the JPEG image.
+
+          This tag is also used for HEIC image capture.
           </details>
         </entry>
         <entry name="gpsCoordinates" type="double" visibility="ndk_public"
@@ -3370,6 +3382,7 @@
           <description>GPS coordinates to include in output JPEG
           EXIF.</description>
           <range>(-180 - 180], [-90,90], [-inf, inf]</range>
+          <details>This tag is also used for HEIC image capture.</details>
           <tag id="BC" />
         </entry>
         <entry name="gpsProcessingMethod" type="byte" visibility="ndk_public"
@@ -3377,12 +3390,14 @@
           <description>32 characters describing GPS algorithm to
           include in EXIF.</description>
           <units>UTF-8 null-terminated string</units>
+          <details>This tag is also used for HEIC image capture.</details>
           <tag id="BC" />
         </entry>
         <entry name="gpsTimestamp" type="int64" visibility="ndk_public" hwlevel="legacy">
           <description>Time GPS fix was made to include in
           EXIF.</description>
           <units>UTC in seconds since January 1, 1970</units>
+          <details>This tag is also used for HEIC image capture.</details>
           <tag id="BC" />
         </entry>
         <entry name="orientation" type="int32" visibility="public" hwlevel="legacy">
@@ -3424,6 +3439,11 @@
 
           For EXTERNAL cameras the sensor orientation will always be set to 0 and the facing will
           also be set to EXTERNAL. The above code is not relevant in such case.
+
+          This tag is also used to describe the orientation of the HEIC image capture, in which
+          case the rotation is reflected by
+          {@link android.media.ExifInterface#TAG_ORIENTATION EXIF orientation flag}, and not by
+          rotating the image data itself.
           </details>
           <tag id="BC" />
         </entry>
@@ -3431,13 +3451,15 @@
           <description>Compression quality of the final JPEG
           image.</description>
           <range>1-100; larger is higher quality</range>
-          <details>85-95 is typical usage range.</details>
+          <details>85-95 is the typical usage range. This tag is also used to describe the
+          quality of the HEIC image capture.</details>
           <tag id="BC" />
         </entry>
         <entry name="thumbnailQuality" type="byte" visibility="public" hwlevel="legacy">
           <description>Compression quality of JPEG
           thumbnail.</description>
           <range>1-100; larger is higher quality</range>
+          <details>This tag is also used to describe the quality of the HEIC image capture.</details>
           <tag id="BC" />
         </entry>
         <entry name="thumbnailSize" type="int32" visibility="public"
@@ -3471,6 +3493,11 @@
             capture result, so the width and height will be interchanged if 90 or 270 degree
             orientation is requested. LEGACY device will always report unrotated thumbnail
             size.
+
+          The tag is also used as the thumbnail size for HEIC image format capture, in which case
+          the thumbnail rotation is reflected by the
+          {@link android.media.ExifInterface#TAG_ORIENTATION EXIF orientation flag}, and not by
+          rotating the thumbnail data itself.
           </details>
           <hal_details>
           The HAL must not squeeze or stretch the downscaled primary image to generate thumbnail.
@@ -3505,7 +3532,10 @@
           * Each output JPEG size in android.scaler.availableStreamConfigurations will have at least
           one corresponding size that has the same aspect ratio in availableThumbnailSizes,
           and vice versa.
-          * All non-`(0, 0)` sizes will have non-zero widths and heights.</details>
+          * All non-`(0, 0)` sizes will have non-zero widths and heights.
+
+          This list is also used as supported thumbnail sizes for HEIC image format capture.
+          </details>
           <tag id="BC" />
         </entry>
         <entry name="maxSize" type="int32" visibility="system">
@@ -3620,6 +3650,14 @@
 
           Optical zoom will not be supported on most devices.
           </details>
+          <hal_details>
+          For a logical camera device supporting both optical and digital zoom, if focalLength and
+          cropRegion change in the same request, the camera device must make sure that the new
+          focalLength and cropRegion take effect in the same frame. This is to make sure that there
+          is no visible field-of-view jump during zoom. For example, if cropRegion is applied
+          immediately, but focalLength takes more than 1 frame to take effect, the camera device
+          will delay the cropRegion so that it's synchronized with focalLength.
+          </hal_details>
           <tag id="V1" />
         </entry>
         <entry name="focusDistance" type="float" visibility="public" hwlevel="full">
@@ -3764,7 +3802,7 @@
             <tag id="V1" />
           </entry>
           <entry name="hyperfocalDistance" type="float" visibility="public" optional="true"
-                 hwlevel="limited">
+                 hwlevel="limited" permission_needed="true">
             <description>Hyperfocal distance for this lens.</description>
             <units>See android.lens.info.focusDistanceCalibration for details</units>
             <range>If lens is fixed focus, &amp;gt;= 0. If lens has focuser unit, the value is
@@ -3775,7 +3813,7 @@
             </details>
           </entry>
           <entry name="minimumFocusDistance" type="float" visibility="public" optional="true"
-                 hwlevel="limited">
+                 hwlevel="limited" permission_needed="true">
             <description>Shortest distance from frontmost surface
             of the lens that can be brought into sharp focus.</description>
             <units>See android.lens.info.focusDistanceCalibration for details</units>
@@ -3894,7 +3932,7 @@
           device screen.</description>
         </entry>
         <entry name="poseRotation" type="float" visibility="public"
-               container="array">
+               container="array" permission_needed="true">
           <array>
             <size>4</size>
           </array>
@@ -3941,7 +3979,7 @@
           <tag id="DEPTH" />
         </entry>
         <entry name="poseTranslation" type="float" visibility="public"
-               container="array">
+               container="array" permission_needed="true">
           <array>
             <size>3</size>
           </array>
@@ -4066,7 +4104,7 @@
       </dynamic>
       <static>
         <entry name="intrinsicCalibration" type="float" visibility="public"
-               container="array">
+               container="array" permission_needed="true">
           <array>
             <size>5</size>
           </array>
@@ -4145,7 +4183,7 @@
           <tag id="DEPTH" />
         </entry>
         <entry name="radialDistortion" type="float" visibility="public"
-               deprecated="true" container="array">
+               deprecated="true" container="array" permission_needed="true">
           <array>
             <size>6</size>
           </array>
@@ -4202,7 +4240,8 @@
         </clone>
       </dynamic>
       <static>
-        <entry name="poseReference" type="byte" visibility="public" enum="true" hal_version="3.3">
+        <entry name="poseReference" type="byte" visibility="public" enum="true"
+            permission_needed="true" hal_version="3.3" >
           <enum>
             <value>PRIMARY_CAMERA
             <notes>The value of android.lens.poseTranslation is relative to the optical center of
@@ -4226,7 +4265,7 @@
           </details>
         </entry>
         <entry name="distortion" type="float" visibility="public" container="array"
-               hal_version="3.3">
+               permission_needed="true" hal_version="3.3" >
           <array>
             <size>5</size>
           </array>
@@ -4655,8 +4694,9 @@
             {@link android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12 RAW12}.
           * Processed (but not-stalling): any non-RAW format without a stall duration.  Typically
             {@link android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888 YUV_420_888},
-            {@link android.graphics.ImageFormat#NV21 NV21}, or {@link
-            android.graphics.ImageFormat#YV12 YV12}.
+            {@link android.graphics.ImageFormat#NV21 NV21}, {@link
+            android.graphics.ImageFormat#YV12 YV12}, or {@link
+            android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8 Y8}.
           </details>
           <tag id="BC" />
         </entry>
@@ -4716,6 +4756,7 @@
           * {@link android.graphics.ImageFormat#YV12 YV12}
           * Implementation-defined formats, i.e. {@link
             android.hardware.camera2.params.StreamConfigurationMap#isOutputSupportedFor(Class)}
+          * {@link android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8 Y8}
 
           For full guarantees, query {@link
           android.hardware.camera2.params.StreamConfigurationMap#getOutputStallDuration} with a
@@ -4939,6 +4980,11 @@
               If this is supported, android.scaler.streamConfigurationMap will
               additionally return a min frame duration that is greater than
               zero for each supported size-format combination.
+
+              For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+              physical camera switches, exposureTime, sensitivity, and lens properties may change
+              even if AE/AF is locked. However, the overall auto exposure and auto focus experience
+              for users will be consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.
               </notes>
             </value>
             <value optional="true">MANUAL_POST_PROCESSING
@@ -4973,6 +5019,11 @@
 
               A given camera device may also support additional post-processing
               controls, but this capability only covers the above list of controls.
+
+              For camera devices with LOGICAL_MULTI_CAMERA capability, when the underlying active
+              physical camera switches, the tonemap, white balance, and shading map may change even
+              if AWB is locked. However, the overall post-processing experience for users will be
+              consistent. Refer to LOGICAL_MULTI_CAMERA capability for details.
               </notes>
             </value>
             <value optional="true">RAW
@@ -5016,6 +5067,9 @@
               * {@link android.graphics.ImageFormat#PRIVATE} will be reprocessable into both
                 {@link android.graphics.ImageFormat#YUV_420_888} and
                 {@link android.graphics.ImageFormat#JPEG} formats.
+              * For a MONOCHROME camera supporting Y8 format, {@link
+                android.graphics.ImageFormat#PRIVATE} will be reprocessable into
+                {@link android.graphics.ImageFormat#Y8}.
               * The maximum available resolution for PRIVATE streams
                 (both input/output) will match the maximum available
                 resolution of JPEG streams.
@@ -5054,10 +5108,10 @@
             <value optional="true">BURST_CAPTURE
               <notes>
               The camera device supports capturing high-resolution images at &gt;= 20 frames per
-              second, in at least the uncompressed YUV format, when post-processing settings are set
-              to FAST. Additionally, maximum-resolution images can be captured at &gt;= 10 frames
-              per second.  Here, 'high resolution' means at least 8 megapixels, or the maximum
-              resolution of the device, whichever is smaller.
+              second, in at least the uncompressed YUV format, when post-processing settings are
+              set to FAST. Additionally, all image resolutions less than 24 megapixels can be
+              captured at &gt;= 10 frames per second. Here, 'high resolution' means at least 8
+              megapixels, or the maximum resolution of the device, whichever is smaller.
               </notes>
               <sdk_notes>
               More specifically, this means that a size matching the camera device's active array
@@ -5066,9 +5120,10 @@
               android.hardware.camera2.params.StreamConfigurationMap#getOutputSizes} or {@link
               android.hardware.camera2.params.StreamConfigurationMap#getHighResolutionOutputSizes},
               with a minimum frame duration for that format and size of either &lt;= 1/20 s, or
-              &lt;= 1/10 s, respectively; and the android.control.aeAvailableTargetFpsRanges entry
-              lists at least one FPS range where the minimum FPS is &gt;= 1 / minimumFrameDuration
-              for the maximum-size YUV_420_888 format.  If that maximum size is listed in {@link
+              &lt;= 1/10 s if the image size is less than 24 megapixels, respectively; and
+              the android.control.aeAvailableTargetFpsRanges entry lists at least one FPS range
+              where the minimum FPS is &gt;= 1 / minimumFrameDuration for the maximum-size
+              YUV_420_888 format.  If that maximum size is listed in {@link
               android.hardware.camera2.params.StreamConfigurationMap#getHighResolutionOutputSizes},
               then the list of resolutions for YUV_420_888 from {@link
               android.hardware.camera2.params.StreamConfigurationMap#getOutputSizes} contains at
@@ -5077,7 +5132,8 @@
 
               If the device supports the {@link
               android.graphics.ImageFormat#RAW10|AIMAGE_FORMAT_RAW10}, {@link
-              android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12}, then those can also be
+              android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12}, {@link
+              android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8}, then those can also be
               captured at the same rate as the maximum-size YUV_420_888 resolution is.
 
               If the device supports the PRIVATE_REPROCESSING capability, then the same guarantees
@@ -5099,13 +5155,15 @@
               android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888} size listed in
               {@link
               android.hardware.camera2.params.StreamConfigurationMap|ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS},
-              camera device can capture this size for at least 10 frames per second.  Also the
-              android.control.aeAvailableTargetFpsRanges entry lists at least one FPS range where
-              the minimum FPS is &gt;= 1 / minimumFrameDuration for the largest YUV_420_888 size.
+              camera device can capture this size for at least 10 frames per second if the size is
+              less than 24 megapixels. Also the android.control.aeAvailableTargetFpsRanges entry
+              lists at least one FPS range where the minimum FPS is &gt;= 1 / minimumFrameDuration
+              for the largest YUV_420_888 size.
 
               If the device supports the {@link
               android.graphics.ImageFormat#RAW10|AIMAGE_FORMAT_RAW10}, {@link
-              android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12}, then those can also be
+              android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12}, {@link
+              android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8}, then those can also be
               captured at the same rate as the maximum-size YUV_420_888 resolution is.
 
               In addition, the android.sync.maxLatency field is guaranted to have a value between 0
@@ -5142,6 +5200,8 @@
               * The maximum available resolution for {@link
                 android.graphics.ImageFormat#YUV_420_888} streams (both input/output) will match the
                 maximum available resolution of {@link android.graphics.ImageFormat#JPEG} streams.
+              * For a MONOCHROME camera with Y8 format support, all the requirements mentioned
+                above for YUV_420_888 apply for Y8 format as well.
               * Static metadata android.reprocess.maxCaptureStall.
               * Only the below controls are effective for reprocessing requests and will be present
                 in capture results. The reprocess requests are from the original capture results
@@ -5179,8 +5239,8 @@
               * The android.depth.depthIsExclusive entry is listed by this device.
               * As of Android P, the android.lens.poseReference entry is listed by this device.
               * A LIMITED camera with only the DEPTH_OUTPUT capability does not have to support
-                normal YUV_420_888, JPEG, and PRIV-format outputs. It only has to support the DEPTH16
-                format.
+                normal YUV_420_888, Y8, JPEG, and PRIV-format outputs. It only has to support the
+                DEPTH16 format.
 
               Generally, depth output operates at a slower frame rate than standard color capture,
               so the DEPTH16 and DEPTH_POINT_CLOUD formats will commonly have a stall duration that
@@ -5285,8 +5345,26 @@
             </value>
             <value optional="true" hal_version="3.3">LOGICAL_MULTI_CAMERA
               <notes>
-              The camera device is a logical camera backed by two or more physical cameras that are
-              also exposed to the application.
+              The camera device is a logical camera backed by two or more physical cameras.
+
+              In API level 28, the physical cameras must also be exposed to the application via
+              {@link android.hardware.camera2.CameraManager#getCameraIdList}.
+
+              Starting from API level 29, some or all physical cameras may not be independently
+              exposed to the application, in which case the physical camera IDs will not be
+              available in {@link android.hardware.camera2.CameraManager#getCameraIdList}. But the
+              application can still query the physical cameras' characteristics by calling
+              {@link android.hardware.camera2.CameraManager#getCameraCharacteristics}. Additionally,
+              if a physical camera is hidden from the camera ID list, the mandatory stream
+              combinations for that physical camera must be supported through the logical camera
+              using physical streams.
+
+              Combinations of logical and physical streams, or physical streams from different
+              physical cameras, are not guaranteed. However, if the camera device supports
+              {@link CameraDevice#isSessionConfigurationSupported|ACameraDevice_isSessionConfigurationSupported},
+              the application must be able to query whether a stream combination involving physical
+              streams is supported by calling
+              {@link CameraDevice#isSessionConfigurationSupported|ACameraDevice_isSessionConfigurationSupported}.
 
               Camera application shouldn't assume that there are at most 1 rear camera and 1 front
               camera in the system. For an application that switches between front and back cameras,
@@ -5309,35 +5387,104 @@
                   - android.lens.distortion
               * The SENSOR_INFO_TIMESTAMP_SOURCE of the logical device and physical devices must be
                 the same.
-              * The logical camera device must be LIMITED or higher device.
+              * The logical camera must be a LIMITED or higher device.
 
-              Both the logical camera device and its underlying physical devices support the
-              mandatory stream combinations required for their device levels.
+              A logical camera device's dynamic metadata may contain
+              android.logicalMultiCamera.activePhysicalId to notify the application of the current
+              active physical camera ID. An active physical camera is the physical camera from which
+              the logical camera's main image data outputs (YUV or RAW) and metadata come.
+              In addition, this serves as an indication of which physical camera is used to output
+              to a RAW stream, or, in case only physical cameras support RAW, which physical RAW
+              stream the application should request.
 
-              Additionally, for each guaranteed stream combination, the logical camera supports:
+              The logical camera's static metadata tags below describe the default active physical
+              camera. An active physical camera is the default if it is used when the application
+              directly uses requests built from a template. All templates will default to the same
+              active physical camera.
 
-              * For each guaranteed stream combination, the logical camera supports replacing one
-                logical {@link android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888 YUV_420_888}
-                or raw stream with two physical streams of the same size and format, each from a
-                separate physical camera, given that the size and format are supported by both
-                physical cameras.
-              * If the logical camera doesn't advertise RAW capability, but the underlying physical
-                cameras do, the logical camera will support guaranteed stream combinations for RAW
-                capability, except that the RAW streams will be physical streams, each from a separate
-                physical camera. This is usually the case when the physical cameras have different
-                sensor sizes.
+                - android.sensor.info.sensitivityRange
+                - android.sensor.info.colorFilterArrangement
+                - android.sensor.info.exposureTimeRange
+                - android.sensor.info.maxFrameDuration
+                - android.sensor.info.physicalSize
+                - android.sensor.info.whiteLevel
+                - android.sensor.info.lensShadingApplied
+                - android.sensor.referenceIlluminant1
+                - android.sensor.referenceIlluminant2
+                - android.sensor.calibrationTransform1
+                - android.sensor.calibrationTransform2
+                - android.sensor.colorTransform1
+                - android.sensor.colorTransform2
+                - android.sensor.forwardMatrix1
+                - android.sensor.forwardMatrix2
+                - android.sensor.blackLevelPattern
+                - android.sensor.maxAnalogSensitivity
+                - android.sensor.opticalBlackRegions
+                - android.sensor.availableTestPatternModes
+                - android.lens.info.hyperfocalDistance
+                - android.lens.info.minimumFocusDistance
+                - android.lens.info.focusDistanceCalibration
+                - android.lens.poseRotation
+                - android.lens.poseTranslation
+                - android.lens.intrinsicCalibration
+                - android.lens.poseReference
+                - android.lens.distortion
 
-              Using physical streams in place of a logical stream of the same size and format will
-              not slow down the frame rate of the capture, as long as the minimum frame duration
-              of the physical and logical streams are the same.
+              The field of view of all non-RAW physical streams must be the same or as close as
+              possible to that of non-RAW logical streams. If the requested FOV is outside of the
+              range supported by the physical camera, the physical stream for that physical camera
+              will use either the maximum or minimum scaler crop region, depending on which one is
+              closer to the requested FOV. For example, for a logical camera with wide-tele lens
+              configuration where the wide lens is the default, if the logical camera's crop region
+              is set to maximum, the physical stream for the tele lens will be configured to its
+              maximum crop region. On the other hand, if the logical camera has a normal-wide lens
+              configuration where the normal lens is the default, when the logical camera's crop
+              region is set to maximum, the FOV of the logical streams will be that of the normal
+              lens. The FOV of the physical streams for the wide lens will be the same as the
+              logical stream, by making the crop region smaller than its active array size to
+              compensate for the smaller focal length.
+
+              Even if the underlying physical cameras have different RAW characteristics (such as
+              size or CFA pattern), a logical camera can still advertise RAW capability. In this
+              case, when the application configures a RAW stream, the camera device will make sure
+              the active physical camera remains active, to ensure consistent RAW output
+              behavior, and will not switch to other physical cameras.
+
+              The capture request and result metadata tags required for backward compatible camera
+              functionalities will be solely based on the logical camera capability. On the other
+              hand, the use of manual capture controls (sensor or post-processing) with a
+              logical camera may result in unexpected behavior when the HAL decides to switch
+              between physical cameras with different characteristics under the hood. For example,
+              when the application manually sets exposure time and sensitivity while zooming in,
+              the brightness of the camera images may suddenly change because the HAL switches from
+              one physical camera to the other.
               </notes>
             </value>
             <value optional="true" hal_version="3.3" >MONOCHROME
               <notes>
               The camera device is a monochrome camera that doesn't contain a color filter array,
-              and the pixel values on U and V planes are all 128.
+              and for a YUV_420_888 stream, the pixel values on the U and V planes are all 128.
+
+              A MONOCHROME camera must support the guaranteed stream combinations required for
+              its device level and capabilities. Additionally, if the monochrome camera device
+              supports Y8 format, all mandatory stream combination requirements related to {@link
+              android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888 YUV_420_888} apply
+              to {@link android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8 Y8} as well. There are no
+              mandatory stream combination requirements with regard to
+              {@link android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8 Y8} for Bayer camera devices.
+
+              Starting from Android Q, the SENSOR_INFO_COLOR_FILTER_ARRANGEMENT of a MONOCHROME
+              camera will be either MONO or NIR.
               </notes>
             </value>
+            <value optional="true" hal_version="3.4" >SECURE_IMAGE_DATA
+              <notes>
+                The camera device is capable of writing image data into a region of memory
+                inaccessible to Android userspace or the Android kernel, and only accessible to
+                trusted execution environments (TEE).
+              </notes>
+            </value>
+
 
           </enum>
           <description>List of capabilities that this camera device
@@ -5409,30 +5556,47 @@
           same way as a physical camera device based on its hardware level and capabilities.
           It's recommended that its feature set is superset of that of individual physical cameras.
 
-          * In camera1 API, to maintain application compatibility, for each {logical_camera_id,
-          physical_camera_1_id, physical_camera_2_id, ...} combination, where logical_camera_id
-          is composed of physical_camera_N_id, camera framework will only advertise one camera id
-          (within the combination) that is frontmost in the HAL published camera id list.
+          * In the camera1 API, to maintain application compatibility, for each camera facing,
+          there may be one or more {logical_camera_id, physical_camera_1_id,
+          physical_camera_2_id, ...} combinations, where logical_camera_id is composed of
+          physical_camera_N_id; the camera framework will only advertise one camera id
+          (within the combinations for the particular facing) that is frontmost in the
+          HAL-published camera id list.
+          For example, if the HAL advertises 6 back-facing camera IDs (ID0 to ID5), among which
+          ID4 and ID5 are logical cameras backed by ID0+ID1 and ID2+ID3 respectively, only ID0
+          will be available for the camera1 API to use.
 
           * Camera HAL is strongly recommended to advertise camera devices with best feature,
           power, performance, and latency tradeoffs at the front of the camera id list.
 
-          For MONOCHROME, the camera device must also advertise BACKWARD_COMPATIBLE capability, and
-          it is exclusive of both RAW and MANUAL_POST_PROCESSING capabilities:
+          * The camera HAL may switch between physical cameras depending on focalLength or
+          cropRegion. If the physical cameras have different sizes, the HAL must maintain a single
+          logical camera activeArraySize/pixelArraySize/preCorrectionActiveArraySize, and must do
+          proper mapping between the logical camera and the underlying physical cameras for all
+          related metadata tags, such as crop region, 3A regions, and intrinsicCalibration.
+
+          * Starting from HIDL ICameraDevice version 3.5, the camera HAL must support
+          isStreamCombinationSupported so that applications can query whether a particular
+          combination of logical and physical streams is supported.
+
+          A MONOCHROME camera device must also advertise BACKWARD_COMPATIBLE capability, and must
+          not advertise MANUAL_POST_PROCESSING capability.
 
           * To maintain backward compatibility, the camera device must support all
           BACKWARD_COMPATIBLE required keys. The android.control.awbAvailableModes key only contains
           AUTO, and android.control.awbState are either CONVERGED or LOCKED depending on
           android.control.awbLock.
 
-          * A monochrome device doesn't need to advertise DNG related optional metadata tags.
-
           * android.colorCorrection.mode, android.colorCorrection.transform, and
-          android.colorCorrection.gains are not applicable. So the camera device cannot
-          be a FULL device. However, the HAL can still advertise other individual capabilites.
+          android.colorCorrection.gains must not be in the available request and result keys.
+          As a result, the camera device cannot be a FULL device. However, the HAL can
+          still advertise other individual capabilities.
 
           * If the device supports tonemap control, only android.tonemap.curveRed is used.
           CurveGreen and curveBlue are no-ops.
+
+          In Android API level 28, a MONOCHROME camera device must not have RAW capability. From
+          API level 29, a camera is allowed to have both MONOCHROME and RAW capabilities.
           </hal_details>
         </entry>
         <entry name="availableRequestKeys" type="int32" visibility="ndk_public"
@@ -5609,7 +5773,7 @@
           {@link android.hardware.camera2.CameraCharacteristics#getAvailableSessionKeys}.
           </hal_details>
         </entry>
-        <entry name="availablePhysicalCameraRequestKeys" type="int32" visibility="hidden"
+        <entry name="availablePhysicalCameraRequestKeys" type="int32" visibility="ndk_public"
                container="array" hwlevel="limited" hal_version="3.3">
           <array>
             <size>n</size>
@@ -5645,6 +5809,25 @@
           {@link android.hardware.camera2.CameraCharacteristics#getAvailablePhysicalCameraRequestKeys}.
           </hal_details>
         </entry>
+        <entry name="characteristicKeysNeedingPermission" type="int32" visibility="hidden"
+               container="array" hwlevel="legacy" hal_version="3.4">
+          <array>
+            <size>n</size>
+          </array>
+          <description>A list of camera characteristics keys that are only available
+          when the camera client holds the camera permission.</description>
+
+          <details>The entry contains a subset of
+          {@link android.hardware.camera2.CameraCharacteristics#getKeys} that require camera clients
+          to acquire the {@link android.Manifest.permission#CAMERA} permission before calling
+          {@link android.hardware.camera2.CameraManager#getCameraCharacteristics}. If the
+          permission is not held by the camera client, then the values of the respective properties
+          will not be present in {@link android.hardware.camera2.CameraCharacteristics}.
+          </details>
+          <hal_details>
+          Do not set this property directly; the camera service will overwrite any previous values.
+          </hal_details>
+        </entry>
       </static>
     </section>
     <section name="scaler">
@@ -5774,7 +5957,7 @@
               buffers with 16-bit pixels.
 
               Buffers of this format are typically expected to have a
-              Bayer Color Filter Array (CFA) layout, which is given in
+              Color Filter Array (CFA) layout, which is given in
               android.sensor.info.colorFilterArrangement. Sensors with
               CFAs that are not representable by a format in
               android.sensor.info.colorFilterArrangement should not
@@ -5840,6 +6023,15 @@
             <value id="0x21">BLOB
               <notes>JPEG format</notes>
             </value>
+            <value id="0x25" hal_version="3.4">RAW10
+              <notes>RAW10</notes>
+            </value>
+            <value id="0x26" hal_version="3.4">RAW12
+              <notes>RAW12</notes>
+            </value>
+            <value id="0x20203859" hal_version="3.4">Y8
+              <notes>Y8</notes>
+            </value>
           </enum>
           <description>The list of image formats that are supported by this
           camera device for output streams.</description>
@@ -5862,7 +6054,8 @@
           need access the image data.
 
           YCbCr_420_888 format must be supported by the HAL. When an image stream
-          needs CPU/application direct access, this format will be used.
+          needs CPU/application direct access, this format will be used. For a MONOCHROME
+          camera device, the pixel values of the Cb and Cr planes are 128.
 
           The BLOB format must be supported by the HAL. This is used for the JPEG stream.
 
@@ -6075,6 +6268,17 @@
 
           Attempting to configure an input stream with output streams not
           listed as available in this map is not valid.
+
+          Additionally, if the camera device is MONOCHROME with Y8 support, it will also support
+          the following map of formats if its dependent capability
+          (android.request.availableCapabilities) is supported:
+
+            Input Format                                    | Output Format                                     | Capability
+          :-------------------------------------------------|:--------------------------------------------------|:----------
+          {@link android.graphics.ImageFormat#PRIVATE}      | {@link android.graphics.ImageFormat#Y8}           | PRIVATE_REPROCESSING
+          {@link android.graphics.ImageFormat#Y8}           | {@link android.graphics.ImageFormat#JPEG}         | YUV_REPROCESSING
+          {@link android.graphics.ImageFormat#Y8}           | {@link android.graphics.ImageFormat#Y8}           | YUV_REPROCESSING
+
           </details>
           <hal_details>
           For the formats, see `system/core/include/system/graphics.h` for a definition
@@ -6323,6 +6527,7 @@
           * {@link android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888}
           * {@link android.graphics.ImageFormat#RAW10|AIMAGE_FORMAT_RAW10}
           * {@link android.graphics.ImageFormat#RAW12|AIMAGE_FORMAT_RAW12}
+          * {@link android.graphics.ImageFormat#Y8|AIMAGE_FORMAT_Y8}
 
           All other formats may or may not have an allowed stall duration on
           a per-capability basis; refer to android.request.availableCapabilities
@@ -6476,6 +6681,209 @@
           LEGACY capability devices will only support CENTER_ONLY cropping.
           </details>
         </entry>
+        <entry name="availableRecommendedStreamConfigurations" type="int32" visibility="ndk_public"
+            optional="true" enum="true" container="array" typedef="recommendedStreamConfiguration"
+            hal_version="3.4">
+          <array>
+            <size>n</size>
+            <size>5</size>
+          </array>
+          <enum>
+            <value id="0x0">PREVIEW
+            <notes>
+                Preview must only include non-stalling processed stream configurations with
+                output formats like
+                {@link android.graphics.ImageFormat#YUV_420_888|AIMAGE_FORMAT_YUV_420_888},
+                {@link android.graphics.ImageFormat#PRIVATE|AIMAGE_FORMAT_PRIVATE}, etc.
+            </notes>
+            </value>
+            <value id="0x1">RECORD
+            <notes>
+                Video record must include stream configurations that match the advertised
+                supported media profiles {@link android.media.CamcorderProfile} with
+                IMPLEMENTATION_DEFINED format.
+            </notes>
+            </value>
+            <value id="0x2">VIDEO_SNAPSHOT
+            <notes>
+                Video snapshot must include stream configurations at least as big as
+                the maximum RECORD resolutions and only with
+                {@link android.graphics.ImageFormat#JPEG|AIMAGE_FORMAT_JPEG JPEG output format}.
+                Additionally, the configurations shouldn't cause preview glitches and should be
+                able to run at 30 fps.
+            </notes>
+            </value>
+            <value id="0x3">SNAPSHOT
+            <notes>
+                Recommended snapshot stream configurations must include at least one with
+                size close to android.sensor.info.activeArraySize and
+                {@link android.graphics.ImageFormat#JPEG|AIMAGE_FORMAT_JPEG JPEG output format}.
+                Taking into account restrictions on aspect ratio, alignment, etc., the area of the
+                maximum suggested size shouldn't be less than 97% of the sensor array size area.
+            </notes>
+            </value>
+            <value id="0x4">ZSL
+            <notes>
+                If supported, recommended input stream configurations must only be advertised with
+                ZSL along with other processed and/or stalling output formats.
+            </notes>
+            </value>
+            <value id="0x5">RAW
+            <notes>
+                If supported, recommended raw stream configurations must only include RAW based
+                output formats.
+            </notes>
+            </value>
+            <value id="0x6">LOW_LATENCY_SNAPSHOT
+            <notes>
+                If supported, the recommended low latency stream configurations must have
+                end-to-end latency that does not exceed 200 ms under standard operating conditions
+                (reasonable light levels, system not under load) and using template
+                TEMPLATE_STILL_CAPTURE. This is primarily for listing configurations for the
+                {@link android.graphics.ImageFormat#JPEG|AIMAGE_FORMAT_JPEG JPEG output format}
+                however other supported output formats can be added as well.
+            </notes>
+            </value>
+            <value id="0x7">PUBLIC_END
+            </value>
+            <value id="0x18">VENDOR_START
+            <notes>
+                Vendor defined use cases. These depend on the vendor implementation.
+            </notes>
+            </value>
+          </enum>
+          <description>Recommended stream configurations for common client use cases.
+          </description>
+          <details>Optional subset of the android.scaler.availableStreamConfigurations that
+          contains similar tuples, listed as
+          (width, height, format, output/input stream, usecase bit field).
+          Camera devices will be able to suggest particular stream configurations which are
+          power and performance efficient for specific use cases. For more information about
+          retrieving the suggestions see
+          {@link android.hardware.camera2.CameraCharacteristics#getRecommendedStreamConfigurationMap}.
+          </details>
+          <ndk_details>
+          The data representation is int[5], which maps to
+          (width, height, format, output/input stream, usecase bit field). The array can be
+          parsed using the following pseudo code:
+
+          struct StreamConfiguration {
+              int32_t format;
+              int32_t width;
+              int32_t height;
+              int32_t isInput;
+          };
+
+          void getPreferredStreamConfigurations(
+              int32_t *array, size_t count, int32_t usecaseId,
+              Vector&lt;StreamConfiguration&gt; *scs) {
+              const size_t STREAM_CONFIGURATION_SIZE = 5;
+              const size_t STREAM_WIDTH_OFFSET = 0;
+              const size_t STREAM_HEIGHT_OFFSET = 1;
+              const size_t STREAM_FORMAT_OFFSET = 2;
+              const size_t STREAM_IS_INPUT_OFFSET = 3;
+              const size_t STREAM_USECASE_BITMAP_OFFSET = 4;
+
+              for (size_t i = 0; i &lt; count; i += STREAM_CONFIGURATION_SIZE) {
+                  int32_t width = array[i + STREAM_WIDTH_OFFSET];
+                  int32_t height = array[i + STREAM_HEIGHT_OFFSET];
+                  int32_t format = array[i + STREAM_FORMAT_OFFSET];
+                  int32_t isInput = array[i + STREAM_IS_INPUT_OFFSET];
+                  int32_t supportedUsecases = array[i + STREAM_USECASE_BITMAP_OFFSET];
+                  if (supportedUsecases &amp; (1 &lt;&lt; usecaseId)) {
+                      StreamConfiguration sc = {format, width, height, isInput};
+                      scs->add(sc);
+                  }
+              }
+          }
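+
+          As a usage sketch (assuming the NDK ACameraMetadata API and a characteristics object
+          `chars`; the tag constant shown is illustrative):
+
+              ACameraMetadata_const_entry entry;
+              camera_status_t status = ACameraMetadata_getConstEntry(chars,
+                      ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS, &amp;entry);
+              if (status == ACAMERA_OK) {
+                  Vector&lt;StreamConfiguration&gt; previewConfigs;
+                  // Collect all configurations advertised for the PREVIEW usecase (id 0x0).
+                  getPreferredStreamConfigurations((int32_t *) entry.data.i32, entry.count,
+                          0x0, &amp;previewConfigs);
+              }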
+
+          </ndk_details>
+          <hal_details>
+          There are some requirements that need to be considered regarding the usecases and the
+          suggested configurations:
+
+          * If android.scaler.availableRecommendedStreamConfigurations is set, then recommended
+          stream configurations must be present for all mandatory usecases PREVIEW,
+          SNAPSHOT, RECORD, VIDEO_SNAPSHOT. ZSL and RAW are
+          required depending on device capabilities; see android.request.availableCapabilities.
+          * Non-existing usecases and non-vendor usecases within the range
+          (RAW : VENDOR_START] are prohibited, as are stream configurations not
+          present in the exhaustive android.scaler.availableStreamConfigurations list.
+
+          For example, suppose the camera device supports only 4K and 1080p, and both resolutions
+          are recommended for all mandatory usecases except preview, which can run efficiently
+          only at 1080p. The array may then look like this:
+
+          [3840, 2160, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+           ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+           (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD |
+           1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT |
+           1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT),
+
+           1920, 1080, HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+           ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+           (1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW |
+           1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD |
+           1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT |
+           1 &lt;&lt; ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT)]
+
+          </hal_details>
+        </entry>
+        <entry name="availableRecommendedInputOutputFormatsMap" type="int32" visibility="ndk_public"
+          optional="true" typedef="reprocessFormatsMap" hal_version="3.4">
+          <description>Recommended mappings of image formats that are supported by this
+          camera device for input streams, to their corresponding output formats.
+          </description>
+          <details>
+          This is a recommended subset of the complete list of mappings found in
+          android.scaler.availableInputOutputFormatsMap. The same requirements apply here as well.
+          The list, however, doesn't need to contain all available and supported mappings.
+          Instead, developers must list only recommended and efficient entries.
+          If set, the information will be available in the ZERO_SHUTTER_LAG recommended stream
+          configuration; see
+          {@link android.hardware.camera2.CameraCharacteristics#getRecommendedStreamConfigurationMap}.
+          </details>
+          <hal_details>
+          For a code sample of the required data encoding please check
+          android.scaler.availableInputOutputFormatsMap.
+          </hal_details>
+          <tag id="REPROC" />
+        </entry>
+        <entry name="mandatoryStreamCombinations" type="int32" visibility="java_public"
+          synthetic="true" container="array" typedef="mandatoryStreamCombination" hwlevel="limited">
+          <array>
+            <size>n</size>
+          </array>
+          <description>
+          An array of mandatory stream combinations generated according to the camera device
+          {@link android.hardware.camera2.CameraCharacteristics#INFO_SUPPORTED_HARDWARE_LEVEL}
+          and {@link android.hardware.camera2.CameraCharacteristics#REQUEST_AVAILABLE_CAPABILITIES}.
+          This is an app-readable conversion of the mandatory stream combination
+          {@link android.hardware.camera2.CameraDevice#createCaptureSession tables}.
+          </description>
+          <details>
+          The array of
+          {@link android.hardware.camera2.params.MandatoryStreamCombination combinations} is
+          generated according to the documented
+          {@link android.hardware.camera2.CameraDevice#createCaptureSession guideline} based on
+          specific device level and capabilities.
+          Clients can use the array as a quick reference to find an appropriate camera stream
+          combination.
+          As per documentation, the stream combinations with given PREVIEW, RECORD and
+          MAXIMUM resolutions and anything smaller from the list given by
+          {@link android.hardware.camera2.params.StreamConfigurationMap#getOutputSizes} are
+          guaranteed to work.
+          For a physical camera not independently exposed in
+          {@link android.hardware.camera2.CameraManager#getCameraIdList}, the mandatory stream
+          combinations for that physical camera ID are also generated, so that the application can
+          configure them as physical streams via the logical camera.
+          The mandatory stream combination array will be {@code null} in case the device is not
+          backward compatible.
+          </details>
+          <hal_details>
+          Do not set this property directly
+          (it is synthetic and will not be available at the HAL layer).
+          </hal_details>
+        </entry>
       </static>
     </section>
     <section name="sensor">
@@ -6686,10 +7094,25 @@
               <notes>Sensor is not Bayer; output has 3 16-bit
               values for each pixel, instead of just 1 16-bit value
               per pixel.</notes></value>
+              <value hal_version="3.4">MONO
+              <notes>Sensor doesn't have any Bayer color filter.
+              Such a sensor captures visible light in monochrome. The exact weighting and
+              wavelengths captured are not specified, but generally only include the visible
+              frequencies. This value implies a MONOCHROME camera.</notes></value>
+              <value hal_version="3.4">NIR
+              <notes>Sensor has a near infrared filter capturing light with wavelengths between
+              roughly 750nm and 1400nm, and the same filter covers the whole sensor array. This
+              value implies a MONOCHROME camera.</notes></value>
             </enum>
             <description>The arrangement of color filters on sensor;
             represents the colors in the top-left 2x2 section of
-            the sensor, in reading order.</description>
+            the sensor, in reading order, for a Bayer camera, or the
+            light spectrum it captures for a MONOCHROME camera.
+            </description>
+            <hal_details>
+            Starting from Android Q, the colorFilterArrangement for a MONOCHROME camera must be a
+            single-color pattern, such as MONO or NIR.
+            </hal_details>
             <tag id="RAW" />
           </entry>
           <entry name="exposureTimeRange" type="int64" visibility="public"
@@ -6964,7 +7387,7 @@
           </entry>
         </namespace>
         <entry name="referenceIlluminant1" type="byte" visibility="public"
-               enum="true">
+               enum="true" permission_needed="true" >
           <enum>
             <value id="1">DAYLIGHT</value>
             <value id="2">FLUORESCENT</value>
@@ -7014,6 +7437,9 @@
           Some devices may choose to provide a second set of calibration
           information for improved quality, including
           android.sensor.referenceIlluminant2 and its corresponding matrices.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <hal_details>
           The first reference illuminant (android.sensor.referenceIlluminant1)
@@ -7040,7 +7466,8 @@
           </hal_details>
           <tag id="RAW" />
         </entry>
-        <entry name="referenceIlluminant2" type="byte" visibility="public">
+        <entry name="referenceIlluminant2" type="byte" visibility="public"
+        permission_needed="true" >
           <description>
           The standard reference illuminant used as the scene light source when
           calculating the android.sensor.colorTransform2,
@@ -7054,13 +7481,16 @@
           If this key is present, then android.sensor.colorTransform2,
           android.sensor.calibrationTransform2, and
           android.sensor.forwardMatrix2 will also be present.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="calibrationTransform1" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7079,13 +7509,16 @@
           colorspace) into this camera device's native sensor color
           space under the first reference illuminant
           (android.sensor.referenceIlluminant1).
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="calibrationTransform2" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7108,13 +7541,16 @@
 
           This matrix will only be present if the second reference
           illuminant is present.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="colorTransform1" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7138,13 +7574,16 @@
           and the CIE XYZ colorspace when calculating this transform will
           match the standard white point for the first reference illuminant
           (i.e. no chromatic adaptation will be applied by this transform).
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="colorTransform2" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7171,13 +7610,16 @@
 
           This matrix will only be present if the second reference
           illuminant is present.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="forwardMatrix1" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7199,13 +7641,16 @@
           this matrix is chosen so that the standard white point for this reference
           illuminant in the reference sensor colorspace is mapped to D50 in the
           CIE XYZ colorspace.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
         <entry name="forwardMatrix2" type="rational"
         visibility="public" optional="true"
         type_notes="3x3 matrix in row-major-order" container="array"
-        typedef="colorSpaceTransform">
+        typedef="colorSpaceTransform" permission_needed="true" >
           <array>
             <size>3</size>
             <size>3</size>
@@ -7230,6 +7675,9 @@
 
           This matrix will only be present if the second reference
           illuminant is present.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
@@ -7269,6 +7717,8 @@
           level values. For raw capture in particular, it is recommended to use
           pixels from android.sensor.opticalBlackRegions to calculate black
           level values for each frame.
+
+          For a MONOCHROME camera device, all of the 2x2 channels must have the same values.
           </details>
           <hal_details>
           The values are given in row-column scan order, with the first value
@@ -7407,6 +7857,9 @@
           processing raw sensor data.
 
           The order of the values is R, G, B; where R is in the lowest index.
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <tag id="RAW" />
         </entry>
@@ -7442,6 +7895,10 @@
 
           A more detailed description of the noise model can be found in the
           Adobe DNG specification for the NoiseProfile tag.
+
+          For a MONOCHROME camera, there is only one color channel, so the noise model
+          coefficients will contain only one S and one O.
+
           </details>
           <hal_details>
           For a CFA layout of RGGB, the list of coefficients would be given as
@@ -7546,6 +8003,9 @@
           correction to avoid demosaic errors (3-20% divergence).
           * R &amp;gt; 1.20 will require strong software correction to produce
           a usable image (&amp;gt;20% divergence).
+
+          Starting from Android Q, this key will not be present for a MONOCHROME camera, even if
+          the camera device has RAW capability.
           </details>
           <hal_details>
           The green split given may be a static value based on prior
@@ -7841,6 +8301,8 @@
           nth value given corresponds to the black level offset for the nth
           color channel listed in the CFA.
 
+          For a MONOCHROME camera, all of the 2x2 channels must have the same values.
+
           This key will be available if android.sensor.opticalBlackRegions is available or the
           camera device advertises this key via {@link
           android.hardware.camera2.CameraCharacteristics#getAvailableCaptureResultKeys|ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.
@@ -8368,6 +8830,19 @@
           image of a gray wall (using bicubic interpolation for visual quality) as captured by the sensor gives:
 
           ![Image of a uniform white wall (inverse shading map)](android.statistics.lensShadingMap/inv_shading.png)
+
+          For a MONOCHROME camera, all of the 2x2 channels must have the same values. An example
+          shading map for such a camera is defined as:
+
+              android.lens.info.shadingMapSize = [ 4, 3 ]
+              android.statistics.lensShadingMap =
+              [ 1.3, 1.3, 1.3, 1.3,  1.2, 1.2, 1.2, 1.2,
+                  1.1, 1.1, 1.1, 1.1,  1.3, 1.3, 1.3, 1.3,
+                1.2, 1.2, 1.2, 1.2,  1.1, 1.1, 1.1, 1.1,
+                  1.0, 1.0, 1.0, 1.0,  1.2, 1.2, 1.2, 1.2,
+                1.3, 1.3, 1.3, 1.3,   1.2, 1.2, 1.2, 1.2,
+                  1.2, 1.2, 1.2, 1.2,  1.3, 1.3, 1.3, 1.3 ]
+
           </details>
         </entry>
         <entry name="lensShadingMap" type="float" visibility="ndk_public"
@@ -8412,15 +8887,15 @@
           pixel ( ((W-1)/(N-1)) * x, ((H-1)/(M-1)) * y) for the four color channels.
           The map is assumed to be bilinearly interpolated between the sample points.
 
-          The channel order is [R, Geven, Godd, B], where Geven is the green
-          channel for the even rows of a Bayer pattern, and Godd is the odd rows.
+          For a Bayer camera, the channel order is [R, Geven, Godd, B], where Geven is
+          the green channel for the even rows of a Bayer pattern, and Godd is the odd rows.
           The shading map is stored in a fully interleaved format, and its size
           is provided in the camera static metadata by android.lens.info.shadingMapSize.
 
           The shading map will generally have on the order of 30-40 rows and columns,
           and will be smaller than 64x64.
 
-          As an example, given a very small map defined as:
+          As an example, given a very small map for a Bayer camera defined as:
 
               android.lens.info.shadingMapSize = [ 4, 3 ]
               android.statistics.lensShadingMap =
@@ -8445,6 +8920,18 @@
 
           ![Image of a uniform white wall (inverse shading map)](android.statistics.lensShadingMap/inv_shading.png)
 
+          For a MONOCHROME camera, all of the 2x2 channels must have the same values. An example
+          shading map for such a camera is defined as:
+
+              android.lens.info.shadingMapSize = [ 4, 3 ]
+              android.statistics.lensShadingMap =
+              [ 1.3, 1.3, 1.3, 1.3,  1.2, 1.2, 1.2, 1.2,
+                  1.1, 1.1, 1.1, 1.1,  1.3, 1.3, 1.3, 1.3,
+                1.2, 1.2, 1.2, 1.2,  1.1, 1.1, 1.1, 1.1,
+                  1.0, 1.0, 1.0, 1.0,  1.2, 1.2, 1.2, 1.2,
+                1.3, 1.3, 1.3, 1.3,   1.2, 1.2, 1.2, 1.2,
+                  1.2, 1.2, 1.2, 1.2,  1.3, 1.3, 1.3, 1.3 ]
+
           Note that the RAW image data might be subject to lens shading
           correction not reported on this map. Query
           android.sensor.info.lensShadingApplied to see if RAW image data has subject
@@ -8802,8 +9289,8 @@
           always provide a curve with number of points equivalent to
           android.tonemap.maxCurvePoints).
 
-          For devices with MONOCHROME capability, only red channel is used. Green and blue channels
-          are ignored.
+          For devices with MONOCHROME capability, all three channels must have the same set of
+          control points.
 
           A few examples, and their corresponding graphical mappings; these
           only specify the red channel and the precision is limited to 4
@@ -8875,8 +9362,8 @@
           always provide a curve with number of points equivalent to
           android.tonemap.maxCurvePoints).
 
-          For devices with MONOCHROME capability, only red channel is used. Green and blue channels
-          are ignored.
+          For devices with MONOCHROME capability, all three channels must have the same set of
+          control points.
 
           A few examples, and their corresponding graphical mappings; these
           only specify the red channel and the precision is limited to 4
@@ -9232,6 +9719,9 @@
               fire the flash for flash power metering during precapture, and then fire the flash
               for the final capture, if a flash is available on the device and the AE mode is set to
               enable the flash.
+
+              Devices that initially shipped with Android version {@link
+              android.os.Build.VERSION_CODES#Q Q} or newer will not include any LEGACY-level devices.
               </notes>
             </value>
             <value>
@@ -9379,6 +9869,27 @@
               It must not exceed 256 characters.
           </hal_details>
         </entry>
+        <entry name="supportedBufferManagementVersion" type="byte" visibility="system"
+               enum="true" hal_version="3.4">
+          <enum>
+            <value>
+              HIDL_DEVICE_3_5
+              <notes>
+              This camera device supports and opts in to the buffer management APIs provided by
+              HIDL ICameraDevice version 3.5.
+              </notes>
+            </value>
+          </enum>
+          <description>
+              The version of the buffer management API this camera device supports and opts into.
+          </description>
+          <details>
+              When this key is not present, the camera framework will interact with this camera
+              device without any buffer management HAL API. When this key is present and the
+              camera framework supports the buffer management API version, the camera framework
+              will interact with the camera HAL using that version of the buffer management API.
+          </details>
+        </entry>
       </static>
     </section>
     <section name="blackLevel">
@@ -9850,11 +10361,127 @@
           corrupted during depth measurement.
           </details>
         </entry>
+        <entry name="availableRecommendedDepthStreamConfigurations" type="int32"
+            visibility="ndk_public" optional="true" container="array"
+            typedef="recommendedStreamConfiguration" hal_version="3.4">
+          <array>
+            <size>n</size>
+            <size>5</size>
+          </array>
+          <description>Recommended depth stream configurations for common client use cases.
+          </description>
+          <details>Optional subset of the android.depth.availableDepthStreamConfigurations that
+          contains similar tuples, listed as
+          (width, height, format, output/input stream, usecase bit field).
+          Camera devices will be able to suggest particular depth stream configurations which are
+          power and performance efficient for specific use cases. For more information about
+          retrieving the suggestions see
+          {@link android.hardware.camera2.CameraCharacteristics#getRecommendedStreamConfigurationMap}.
+          </details>
+          <ndk_details>
+          For data representation, please refer to
+          android.scaler.availableRecommendedStreamConfigurations.
+          </ndk_details>
+          <hal_details>
+          Recommended depth configurations are expected to be declared with SNAPSHOT and/or
+          ZSL if supported by the device.
+          For additional details on how to declare recommended stream configurations, check
+          android.scaler.availableRecommendedStreamConfigurations.
+          For additional requirements on depth streams, please refer to
+          android.depth.availableDepthStreamConfigurations.
+          </hal_details>
+        </entry>
+        <entry name="availableDynamicDepthStreamConfigurations" type="int32" visibility="ndk_public"
+               enum="true" container="array" typedef="streamConfiguration" hal_version="3.4">
+          <array>
+            <size>n</size>
+            <size>4</size>
+          </array>
+          <enum>
+            <value>OUTPUT</value>
+            <value>INPUT</value>
+          </enum>
+          <description>The available dynamic depth dataspace stream
+          configurations that this camera device supports
+          (i.e. format, width, height, output/input stream).
+          </description>
+          <details>
+            These are output stream configurations for use with
+            dataSpace DYNAMIC_DEPTH. The configurations are
+            listed as `(format, width, height, input?)` tuples.
+
+            Only devices that support depth output for at least
+            the HAL_PIXEL_FORMAT_Y16 dense depth map, along with
+            HAL_PIXEL_FORMAT_BLOB with the same size or a size with
+            the same aspect ratio, can have a dynamic depth dataspace
+            stream configuration. android.depth.depthIsExclusive also
+            needs to be set to FALSE.
+          </details>
+          <hal_details>
+            Do not set this property directly.
+            It is populated by camera framework and must not be set
+            at the HAL layer.
+          </hal_details>
+          <tag id="DEPTH" />
+        </entry>
+        <entry name="availableDynamicDepthMinFrameDurations" type="int64" visibility="ndk_public"
+               container="array" typedef="streamConfigurationDuration" hal_version="3.4">
+          <array>
+            <size>4</size>
+            <size>n</size>
+          </array>
+          <description>This lists the minimum frame duration for each
+          format/size combination for dynamic depth output streams.
+          </description>
+          <units>(format, width, height, ns) x n</units>
+          <details>
+          This should correspond to the frame duration when only that
+          stream is active, with all processing (typically in android.*.mode)
+          set to either OFF or FAST.
+
+          When multiple streams are used in a request, the minimum frame
+          duration will be max(individual stream min durations).
+
+          The minimum frame duration of a stream (of a particular format, size)
+          is the same regardless of whether the stream is input or output.
+          </details>
+          <hal_details>
+            Do not set this property directly.
+            It is populated by camera framework and must not be set
+            at the HAL layer.
+          </hal_details>
+          <tag id="DEPTH" />
+        </entry>
+        <entry name="availableDynamicDepthStallDurations" type="int64" visibility="ndk_public"
+               container="array" typedef="streamConfigurationDuration" hal_version="3.4">
+          <array>
+            <size>4</size>
+            <size>n</size>
+          </array>
+          <description>This lists the maximum stall duration for each
+          output format/size combination for dynamic depth streams.
+          </description>
+          <units>(format, width, height, ns) x n</units>
+          <details>
+          A stall duration is how much extra time would get added
+          to the normal minimum frame duration for a repeating request
+          that has streams with non-zero stall.
+
+          All dynamic depth output streams may have a nonzero stall
+          duration.
+          </details>
+          <hal_details>
+            Do not set this property directly.
+            It is populated by camera framework and must not be set
+            at the HAL layer.
+          </hal_details>
+          <tag id="DEPTH" />
+        </entry>
       </static>
     </section>
     <section name="logicalMultiCamera">
       <static>
-        <entry name="physicalIds" type="byte" visibility="hidden"
+        <entry name="physicalIds" type="byte" visibility="ndk_public"
                container="array" hwlevel="limited" hal_version="3.3">
           <array>
             <size>n</size>
@@ -9863,14 +10490,14 @@
           </description>
           <units>UTF-8 null-terminated string</units>
           <details>
-            For a logical camera, this is concatenation of all underlying physical camera ids.
-            The null terminator for physical camera id must be preserved so that the whole string
-            can be tokenized using '\0' to generate list of physical camera ids.
+            For a logical camera, this is concatenation of all underlying physical camera IDs.
+            The null terminator for physical camera ID must be preserved so that the whole string
+            can be tokenized using '\0' to generate list of physical camera IDs.
 
-            For example, if the physical camera ids of the logical camera are "2" and "3", the
+            For example, if the physical camera IDs of the logical camera are "2" and "3", the
             value of this tag will be ['2', '\0', '3', '\0'].
 
-            The number of physical camera ids must be no less than 2.
+            The number of physical camera IDs must be no less than 2.
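+
+            As a minimal sketch (the helper name is hypothetical), the list can be tokenized
+            like this:
+
+                std::vector&lt;std::string&gt; parsePhysicalIds(const uint8_t *data, size_t count) {
+                    std::vector&lt;std::string&gt; ids;
+                    size_t start = 0;
+                    for (size_t i = 0; i &lt; count; i++) {
+                        if (data[i] == '\0') {
+                            // Each ID is a UTF-8 string ended by its null terminator.
+                            ids.push_back(std::string(
+                                    reinterpret_cast&lt;const char *&gt;(data + start), i - start));
+                            start = i + 1;
+                        }
+                    }
+                    return ids;
+                }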
           </details>
           <tag id="LOGICALCAMERA" />
         </entry>
@@ -9903,10 +10530,44 @@
           In both cases, all images generated for a particular capture request still carry the same
           timestamps, so that they can be used to look up the matching frame number and
           onCaptureStarted callback.
+
+          This tag is only applicable if the logical camera device supports concurrent physical
+          streams from different physical cameras.
           </details>
           <tag id="LOGICALCAMERA" />
         </entry>
       </static>
+      <dynamic>
+      <entry name="activePhysicalId" type="byte" visibility="public"
+             typedef="string" hal_version="3.4">
+        <description>String containing the ID of the underlying active physical camera.
+        </description>
+        <units>UTF-8 null-terminated string</units>
+        <details>
+          The ID of the active physical camera that's backing the logical camera. All camera
+          streams and metadata that are not physical camera specific will originate from this
+          physical camera.
+
+          For a logical camera made up of physical cameras where each camera's lenses have
+          different characteristics, the camera device may choose to switch between the physical
+          cameras when application changes FOCAL_LENGTH or SCALER_CROP_REGION.
+          At the time of lens switch, this result metadata reflects the new active physical camera
+          ID.
+
+          This key will be available if the camera device advertises this key via {@link
+          android.hardware.camera2.CameraCharacteristics#getAvailableCaptureResultKeys|ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS}.
+          When available, this must be one of the valid physical IDs backing this logical
+          multi-camera.
+          If this key is not available for a logical multi-camera, the camera device implementation
+          may still switch between different active physical cameras based on use case, but the
+          current active physical camera information won't be available to the application.
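+
+          A minimal sketch (assuming the NDK capture result callback; the callback body is
+          illustrative):
+
+              void onCaptureCompleted(void *context, ACameraCaptureSession *session,
+                      ACaptureRequest *request, const ACameraMetadata *result) {
+                  ACameraMetadata_const_entry entry;
+                  if (ACameraMetadata_getConstEntry(result,
+                          ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID,
+                          &amp;entry) == ACAMERA_OK) {
+                      // entry.data.u8 holds the UTF-8 null-terminated active physical camera ID.
+                      const char *activeId = reinterpret_cast&lt;const char *&gt;(entry.data.u8);
+                  }
+              }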
+        </details>
+        <hal_details>
+          Starting from HIDL ICameraDevice version 3.5, the tag must be available in the capture
+          result metadata to indicate the current active physical camera ID.
+        </hal_details>
+        <tag id="LOGICALCAMERA" />
+      </entry>
+      </dynamic>
     </section>
     <section name="distortionCorrection">
       <controls>
@@ -9935,8 +10596,8 @@
           any correction at all would slow down capture rate.  Every output stream will have a
           similar amount of enhancement applied.
 
-          The correction only applies to processed outputs such as YUV, JPEG, or DEPTH16; it is not
-          applied to any RAW output.
+          The correction only applies to processed outputs such as YUV, Y8, JPEG, or DEPTH16; it is
+          not applied to any RAW output.
 
           This control will be on by default on devices that support this control. Applications
           disabling distortion correction need to pay extra attention with the coordinate system of
@@ -9992,5 +10653,203 @@
         </clone>
       </dynamic>
     </section>
+    <section name="heic">
+      <static>
+        <namespace name="info">
+          <entry name="supported" type="byte" visibility="system" enum="true"
+                 typedef="boolean" hwlevel="limited" hal_version="3.4">
+            <enum>
+              <value>FALSE</value>
+              <value>TRUE</value>
+            </enum>
+            <description>Whether this camera device can support identical set of stream combinations
+            involving HEIC image format, compared to the
+            {@link android.hardware.camera2.CameraDevice#createCaptureSession table of combinations}
+            involving JPEG image format required for the device's hardware level and capabilities.
+            </description>
+            <details>
+            All the static, control and dynamic metadata tags related to JPEG apply to HEIC formats
+            as well. For example, the same android.jpeg.orientation and android.jpeg.quality are
+            used to control the orientation and quality of the HEIC image. Configuring JPEG and
+            HEIC streams at the same time is not supported.
+
+            If a camera device supports HEIC format (ISO/IEC 23008-12), not only does it
+            support the existing mandatory stream
+            {@link android.hardware.camera2.CameraDevice#createCaptureSession combinations}
+            required for the device's hardware level and capabilities, it also supports swapping
+            each JPEG stream with a HEIC stream in all guaranteed combinations.
+
+            For every HEIC stream configured by the application, the camera framework sets up 2
+            internal streams with the camera HAL:
+
+            * A YUV_420_888 or IMPLEMENTATION_DEFINED HAL stream as input to HEIC or HEVC encoder.
+            * A BLOB stream with JPEG_APPS_SEGMENTS dataspace to extract application markers,
+            including EXIF and thumbnail, to be saved in the HEIF container.
+
+            A camera device can output HEIC format to the application if and only if:
+
+            * The system contains a HEIC or HEVC encoder with constant quality mode, and
+            * This tag is set to TRUE, meaning that camera HAL supports replacing JPEG streams in
+            all mandatory stream combinations with a [YUV_420_888/IMPLEMENTATION_DEFINED stream +
+            JPEG_APPS_SEGMENT BLOB stream] combo.
+
+            As an example, if the camera device's hardware level is LIMITED, and it supports HEIC,
+            in addition to the required stream combinations, the HAL must support the stream
+            combinations below as well:
+
+            * IMPLEMENTATION_DEFINED/YUV_420_888 MAXIMUM + JPEG_SEGMENTS_BLOB,
+            * PRIV PREVIEW + IMPLEMENTATION_DEFINED/YUV_420_888 MAXIMUM + JPEG_SEGMENTS_BLOB,
+            * YUV PREVIEW + IMPLEMENTATION_DEFINED/YUV_420_888 MAXIMUM + JPEG_SEGMENTS_BLOB,
+            * PRIV PREVIEW + YUV PREVIEW + IMPLEMENTATION_DEFINED/YUV_420_888 MAXIMUM +
+            JPEG_SEGMENTS_BLOB
+
+            The selection logic between YUV_420_888 and IMPLEMENTATION_DEFINED for the HAL
+            internal stream is as follows:
+
+                if (HEIC encoder exists and supports the size) {
+                    use IMPLEMENTATION_DEFINED with GRALLOC_USAGE_HW_IMAGE_ENCODER usage flag;
+                } else {
+                    // HEVC encoder exists
+                    if (size is less than framework predefined tile size) {
+                        use IMPLEMENTATION_DEFINED with GRALLOC_USAGE_HW_VIDEO_ENCODER usage flag;
+                    } else {
+                        use YUV_420_888;
+                    }
+                }
+            </details>
+            <tag id="HEIC" />
+          </entry>
+          <entry name="maxJpegAppSegmentsCount" type="byte" visibility="system"
+                 hwlevel="limited" hal_version="3.4">
+            <description>The maximum number of JPEG APP segments supported by the camera HAL device.
+            </description>
+            <details>
+            The camera framework will use this value to derive the size of the BLOB buffer with
+            JPEG_APP_SEGMENTS dataspace, with each APP segment occupying at most 64K bytes. If the
+            value of this tag is n, the size of the framework allocated buffer will be:
+
+                n * (2 + 0xFFFF) + sizeof(struct CameraBlob)
+
+            where 2 is the number of bytes for the APP marker and 0xFFFF is the maximum size per
+            APP segment (including the segment size).
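+
+            As a sketch of that computation (the helper name is hypothetical; CameraBlob is the
+            transport header struct referenced above):
+
+                size_t jpegAppSegmentsBufferSize(size_t n) {
+                    // Each APP segment: 2 marker bytes plus at most 0xFFFF payload bytes
+                    // (the payload size includes the segment size field itself).
+                    return n * (2 + 0xFFFF) + sizeof(struct CameraBlob);
+                }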
+
+            The value of this tag must be at least 1, and the APP1 marker (0xFFE1) segment must be
+            the first segment stored in the JPEG_APPS_SEGMENTS BLOB buffer. The APP1 segment
+            stores EXIF data and the thumbnail.
+
+            Since the media encoder embeds the orientation in the metadata of the output image, to be
+            consistent between main image and thumbnail, camera HAL must not rotate the thumbnail
+            image data based on android.jpeg.orientation. The framework will write the orientation
+            into EXIF and HEIC container.
+
+            The APP1 segment is followed immediately by one or more APP2 segments, and APPn
+            segments. After the HAL fills and returns the JPEG_APP_SEGMENTS buffer, the camera
+            framework modifies the APP1 segment by filling in the EXIF tags that are related to
+            main image bitstream and the tags that can be derived from capture result metadata,
+            before saving them into the HEIC container.
+
+            The value of this tag must not be more than 16.
+            </details>
+            <tag id="HEIC" />
+          </entry>
+        </namespace>
+
+        <entry name="availableHeicStreamConfigurations" type="int32" visibility="ndk_public"
+            enum="true" container="array" typedef="streamConfiguration"
+            hwlevel="limited" hal_version="3.4">
+          <array>
+            <size>n</size>
+            <size>4</size>
+          </array>
+          <enum>
+            <value>OUTPUT</value>
+            <value>INPUT</value>
+          </enum>
+          <description>The available HEIC (ISO/IEC 23008-12) stream
+          configurations that this camera device supports
+          (i.e. format, width, height, output/input stream).
+          </description>
+          <details>
+          The configurations are listed as `(format, width, height, input?)` tuples.
+
+          If the camera device supports HEIC image format, it will support identical set of stream
+          combinations involving HEIC image format, compared to the combinations involving JPEG
+          image format as required by the device's hardware level and capabilities.
+
+          All the static, control, and dynamic metadata tags related to JPEG apply to HEIC formats.
+          Configuring JPEG and HEIC streams at the same time is not supported.
+          </details>
+          <ndk_details>
+          All the configuration tuples `(format, width, height, input?)` will contain
+          AIMAGE_FORMAT_HEIC format as OUTPUT only.
+          </ndk_details>
+          <hal_details>
+          These are output stream configurations for use with dataSpace HAL_DATASPACE_HEIF.
+
+          Do not set this property directly. It is populated by camera framework and must not be
+          set by the HAL layer.
+          </hal_details>
+         <tag id="HEIC" />
+        </entry>
+        <entry name="availableHeicMinFrameDurations" type="int64" visibility="ndk_public"
+               container="array" typedef="streamConfigurationDuration" hwlevel="limited"
+               hal_version="3.4">
+          <array>
+            <size>4</size>
+            <size>n</size>
+          </array>
+          <description>This lists the minimum frame duration for each
+          format/size combination for HEIC output formats.
+          </description>
+          <units>(format, width, height, ns) x n</units>
+          <details>
+          This should correspond to the frame duration when only that
+          stream is active, with all processing (typically in android.*.mode)
+          set to either OFF or FAST.
+
+          When multiple streams are used in a request, the minimum frame
+          duration will be max(individual stream min durations).
+
+          See android.sensor.frameDuration and
+          android.scaler.availableStallDurations for more details about
+          calculating the max frame rate.
+          </details>
+          <hal_details>
+          Do not set this property directly. It is populated by camera framework and must not be
+          set by the HAL layer.
+          </hal_details>
+          <tag id="HEIC" />
+        </entry>
+        <entry name="availableHeicStallDurations" type="int64" visibility="ndk_public"
+               container="array" typedef="streamConfigurationDuration" hwlevel="limited"
+               hal_version="3.4">
+          <array>
+            <size>4</size>
+            <size>n</size>
+          </array>
+          <description>This lists the maximum stall duration for each
+          output format/size combination for HEIC streams.
+          </description>
+          <units>(format, width, height, ns) x n</units>
+          <details>
+          A stall duration is how much extra time would get added
+          to the normal minimum frame duration for a repeating request
+          that has streams with non-zero stall.
+
+          This functions similarly to
+          android.scaler.availableStallDurations for HEIC
+          streams.
+
+          All HEIC output stream formats may have a nonzero stall
+          duration.
+          </details>
+          <hal_details>
+          Do not set this property directly. It is populated by camera framework and must not be
+          set by the HAL layer.
+          </hal_details>
+          <tag id="HEIC" />
+        </entry>
+      </static>
+    </section>
   </namespace>
 </metadata>
diff --git a/camera/docs/metadata_definitions.xsd b/camera/docs/metadata_definitions.xsd
index 8e46cb1..d938417 100644
--- a/camera/docs/metadata_definitions.xsd
+++ b/camera/docs/metadata_definitions.xsd
@@ -243,6 +243,14 @@
             </simpleType>
         </attribute>
         <attribute name="hal_version" type="decimal" default="3.2" />
+        <attribute name="permission_needed" default="false">
+            <simpleType>
+                <restriction base="string">
+                    <enumeration value="true" />
+                    <enumeration value="false" />
+                </restriction>
+            </simpleType>
+        </attribute>
     </complexType>
 
     <complexType name="EnumType">
diff --git a/camera/docs/metadata_helpers.py b/camera/docs/metadata_helpers.py
index ed303bb..b4a2f57 100644
--- a/camera/docs/metadata_helpers.py
+++ b/camera/docs/metadata_helpers.py
@@ -166,6 +166,7 @@
     "sizeF"                  : "SizeF",
     "rectangle"              : "Rect",
     "streamConfigurationMap" : "StreamConfigurations",
+    "mandatoryStreamCombination" : "MandatoryStreamCombination",
     "rangeInt"               : "RangeInt",
     "rangeLong"              : "RangeLong",
     "colorSpaceTransform"    : "ColorSpaceTransform",
@@ -750,7 +751,7 @@
   range.
   """
   def inner(text):
-    if entry.units:
+    if entry.units and not (entry.typedef and entry.typedef.name == 'string'):
       text += '\n\n<b>Units</b>: %s\n' % (dedent(entry.units))
     if entry.enum and not (entry.typedef and entry.typedef.languages.get('java')):
       text += '\n\n<b>Possible values:</b>\n<ul>\n'
@@ -765,7 +766,7 @@
         text += '\n\n<b>Range of valid values:</b><br>\n'
       text += '%s\n' % (dedent(entry.range))
     if entry.hwlevel != 'legacy': # covers any of (None, 'limited', 'full')
-      text += '\n\n<b>Optional</b> - This value may be {@code null} on some devices.\n'
+      text += '\n\n<b>Optional</b> - The value for this key may be {@code null} on some devices.\n'
     if entry.hwlevel == 'full':
       text += \
         '\n<b>Full capability</b> - \n' + \
@@ -778,6 +779,8 @@
         'android.info.supportedHardwareLevel key\n'
     if entry.hwlevel == 'legacy':
       text += "\nThis key is available on all devices."
+    if entry.permission_needed == "true":
+      text += "\n\n<b>Permission {@link android.Manifest.permission#CAMERA} is needed to access this property</b>\n\n"
 
     return text
   return inner
@@ -1379,6 +1382,35 @@
   """
   return (e for e in entries if e.has_new_values_added_in_hal_version(hal_major_version, hal_minor_version))
 
+def permission_needed_count(root):
+  """
+  Return the number of entries that need camera permission.
+
+  Args:
+    root: a Metadata instance
+
+  Returns:
+    The number of entries that need camera permission.
+
+  """
+  ret = 0
+  for sec in find_all_sections(root):
+    ret += len(list(filter_has_permission_needed(remove_synthetic(find_unique_entries(sec)))))
+
+  return ret
+
+def filter_has_permission_needed(entries):
+  """
+    Filter the given entries by removing those that don't need camera permission.
+
+    Args:
+      entries: An iterable of Entry nodes
+
+    Yields:
+      An iterable of Entry nodes
+  """
+  return (e for e in entries if e.permission_needed == 'true')
+
 def filter_ndk_visible(entries):
   """
   Filter the given entries by removing those that are not NDK visible.
diff --git a/camera/docs/metadata_helpers_test.py b/camera/docs/metadata_helpers_test.py
index 4264c6e..812a0d7 100644
--- a/camera/docs/metadata_helpers_test.py
+++ b/camera/docs/metadata_helpers_test.py
@@ -11,7 +11,7 @@
 <?xml version="1.0" encoding="utf-8"?>
 <metadata xmlns="http://schemas.android.com/service/camera/metadata/"
 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_properties.xsd">
+xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_definitions.xsd">
 
 <namespace name="testOuter1">
   <section name="testSection1">
diff --git a/camera/docs/metadata_model.py b/camera/docs/metadata_model.py
index 398e43a..1172971 100644
--- a/camera/docs/metadata_model.py
+++ b/camera/docs/metadata_model.py
@@ -1122,6 +1122,7 @@
              to be supported by the camera device. All devices with higher
              hwlevels will also include this entry. None means that the
              entry is optional on any hardware level.
+    permission_needed: Flags whether the tag needs extra camera permission.
     deprecated: Marks an entry as @Deprecated in the Java layer; if within an
                unreleased version this needs to be removed altogether. If applied
                to an entry from an older release, then this means the entry
@@ -1258,6 +1259,10 @@
   def deprecation_description(self):
     return self._deprecation_description
 
+  @property
+  def permission_needed(self):
+    return self._permission_needed or "false"
+
   # TODO: optional should just return hwlevel is None
   @property
   def optional(self):
@@ -1417,6 +1422,7 @@
     self._deprecated = kwargs.get('deprecated', False)
     self._deprecation_description = kwargs.get('deprecation_description')
 
+    self._permission_needed = kwargs.get('permission_needed')
     self._optional = kwargs.get('optional')
     self._ndk_visible = kwargs.get('ndk_visible')
 
@@ -1626,7 +1632,8 @@
                     'optional',
                     'typedef',
                     'hal_major_version',
-                    'hal_minor_version'
+                    'hal_minor_version',
+                    'permission_needed'
                    ]
 
     for p in props_common:
diff --git a/camera/docs/metadata_parser_xml.py b/camera/docs/metadata_parser_xml.py
index 91af192..6d42466 100755
--- a/camera/docs/metadata_parser_xml.py
+++ b/camera/docs/metadata_parser_xml.py
@@ -182,6 +182,11 @@
     d['synthetic'] = entry.get('synthetic') == 'true'
 
     #
+    # Permission needed ?
+    #
+    d['permission_needed'] = entry.get('permission_needed')
+
+    #
     # Hardware Level (one of limited, legacy, full)
     #
     d['hwlevel'] = entry.get('hwlevel')
diff --git a/camera/docs/metadata_template.mako b/camera/docs/metadata_template.mako
index 02689f0..7d9718d 100644
--- a/camera/docs/metadata_template.mako
+++ b/camera/docs/metadata_template.mako
@@ -17,7 +17,7 @@
 <metadata
     xmlns="http://schemas.android.com/service/camera/metadata/"
     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-    xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_properties.xsd">
+    xsi:schemaLocation="http://schemas.android.com/service/camera/metadata/ metadata_definitions.xsd">
 
 <tags>
 % for tag in metadata.tags:
@@ -124,6 +124,10 @@
                 hwlevel="${prop.hwlevel}"
           % endif
 
+          % if prop.permission_needed == "true":
+                permission_needed="true"
+          % endif
+
           % if (prop.hal_major_version, prop.hal_minor_version) != (3,2):
                 hal_version="${prop.hal_major_version}.${prop.hal_minor_version}"
           % endif
diff --git a/camera/include/system/camera_metadata.h b/camera/include/system/camera_metadata.h
index 46e7ac0..879757d 100644
--- a/camera/include/system/camera_metadata.h
+++ b/camera/include/system/camera_metadata.h
@@ -481,6 +481,12 @@
         const camera_metadata_t *meta);
 
 /**
+ * Retrieve all tags that need permission.
+ */
+ANDROID_API
+const int32_t *get_camera_metadata_permission_needed(uint32_t *tag_count);
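+
+/*
+ * Usage sketch (illustrative only):
+ *
+ *     uint32_t tag_count = 0;
+ *     const int32_t *tags = get_camera_metadata_permission_needed(&tag_count);
+ *     for (uint32_t i = 0; i < tag_count; i++) {
+ *         // tags[i] requires the CAMERA permission to be read by clients.
+ *     }
+ */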
+
+/**
  * Set up vendor-specific tag query methods. These are needed to properly add
  * entries with vendor-specified tags and to use the
  * get_camera_metadata_section_name, _tag_name, and _tag_type methods with
diff --git a/camera/include/system/camera_metadata_tags.h b/camera/include/system/camera_metadata_tags.h
index adf18b8..93403f3 100644
--- a/camera/include/system/camera_metadata_tags.h
+++ b/camera/include/system/camera_metadata_tags.h
@@ -63,6 +63,8 @@
     ANDROID_DEPTH,
     ANDROID_LOGICAL_MULTI_CAMERA,
     ANDROID_DISTORTION_CORRECTION,
+    ANDROID_HEIC,
+    ANDROID_HEIC_INFO,
     ANDROID_SECTION_COUNT,
 
     VENDOR_SECTION = 0x8000
@@ -105,6 +107,8 @@
     ANDROID_DISTORTION_CORRECTION_START
                                    = ANDROID_DISTORTION_CORRECTION
                                                                 << 16,
+    ANDROID_HEIC_START             = ANDROID_HEIC              << 16,
+    ANDROID_HEIC_INFO_START        = ANDROID_HEIC_INFO         << 16,
     VENDOR_SECTION_START           = VENDOR_SECTION            << 16
 } camera_metadata_section_start_t;
 
@@ -276,7 +280,9 @@
     ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,   // int32[]      | ndk_public   | HIDL v3.2
     ANDROID_REQUEST_AVAILABLE_SESSION_KEYS,           // int32[]      | ndk_public   | HIDL v3.3
     ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS,
-                                                      // int32[]      | hidden       | HIDL v3.3
+                                                      // int32[]      | ndk_public   | HIDL v3.3
+    ANDROID_REQUEST_CHARACTERISTIC_KEYS_NEEDING_PERMISSION,
+                                                      // int32[]      | hidden       | HIDL v3.4
     ANDROID_REQUEST_END,
 
     ANDROID_SCALER_CROP_REGION =                      // int32[]      | public       | HIDL v3.2
@@ -294,6 +300,10 @@
     ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,     // int64[]      | ndk_public   | HIDL v3.2
     ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,         // int64[]      | ndk_public   | HIDL v3.2
     ANDROID_SCALER_CROPPING_TYPE,                     // enum         | public       | HIDL v3.2
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS,
+                                                      // enum[]       | ndk_public   | HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_INPUT_OUTPUT_FORMATS_MAP,
+                                                      // int32        | ndk_public   | HIDL v3.4
     ANDROID_SCALER_END,
 
     ANDROID_SENSOR_EXPOSURE_TIME =                    // int64        | public       | HIDL v3.2
@@ -409,6 +419,7 @@
     ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL =           // enum         | public       | HIDL v3.2
             ANDROID_INFO_START,
     ANDROID_INFO_VERSION,                             // byte         | public       | HIDL v3.3
+    ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION, // enum         | system       | HIDL v3.4
     ANDROID_INFO_END,
 
     ANDROID_BLACK_LEVEL_LOCK =                        // enum         | public       | HIDL v3.2
@@ -432,11 +443,20 @@
     ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,// int64[]      | ndk_public   | HIDL v3.2
     ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,    // int64[]      | ndk_public   | HIDL v3.2
     ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE,                 // enum         | public       | HIDL v3.2
+    ANDROID_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS,
+                                                      // int32[]      | ndk_public   | HIDL v3.4
+    ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
+                                                      // enum[]       | ndk_public   | HIDL v3.4
+    ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS,
+                                                      // int64[]      | ndk_public   | HIDL v3.4
+    ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS,
+                                                      // int64[]      | ndk_public   | HIDL v3.4
     ANDROID_DEPTH_END,
 
-    ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS =       // byte[]       | hidden       | HIDL v3.3
+    ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS =       // byte[]       | ndk_public   | HIDL v3.3
             ANDROID_LOGICAL_MULTI_CAMERA_START,
     ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE,    // enum         | public       | HIDL v3.3
+    ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID,  // byte         | public       | HIDL v3.4
     ANDROID_LOGICAL_MULTI_CAMERA_END,
 
     ANDROID_DISTORTION_CORRECTION_MODE =              // enum         | public       | HIDL v3.3
@@ -444,6 +464,18 @@
     ANDROID_DISTORTION_CORRECTION_AVAILABLE_MODES,    // byte[]       | public       | HIDL v3.3
     ANDROID_DISTORTION_CORRECTION_END,
 
+    ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS =
+                                                      // enum[]       | ndk_public   | HIDL v3.4
+            ANDROID_HEIC_START,
+    ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS,  // int64[]      | ndk_public   | HIDL v3.4
+    ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS,      // int64[]      | ndk_public   | HIDL v3.4
+    ANDROID_HEIC_END,
+
+    ANDROID_HEIC_INFO_SUPPORTED =                     // enum         | system       | HIDL v3.4
+            ANDROID_HEIC_INFO_START,
+    ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT,    // byte         | system       | HIDL v3.4
+    ANDROID_HEIC_INFO_END,
+
 } camera_metadata_tag_t;
 
 /**
@@ -779,6 +811,7 @@
     ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MOTION_TRACKING          , // HIDL v3.3
     ANDROID_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA     , // HIDL v3.3
     ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME               , // HIDL v3.3
+    ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA        , // HIDL v3.4
 } camera_metadata_enum_android_request_available_capabilities_t;
 
 
@@ -791,6 +824,9 @@
     ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED          = 0x22, // HIDL v3.2
     ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888                   = 0x23, // HIDL v3.2
     ANDROID_SCALER_AVAILABLE_FORMATS_BLOB                            = 0x21, // HIDL v3.2
+    ANDROID_SCALER_AVAILABLE_FORMATS_RAW10                           = 0x25, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_FORMATS_RAW12                           = 0x26, // HIDL v3.4
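+    // 0x20203859 below is the FourCC 'Y8  ' (assumed to mirror
+    // HAL_PIXEL_FORMAT_Y8).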
+    ANDROID_SCALER_AVAILABLE_FORMATS_Y8                              = 0x20203859, // HIDL v3.4
 } camera_metadata_enum_android_scaler_available_formats_t;
 
 // ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
@@ -805,6 +841,26 @@
     ANDROID_SCALER_CROPPING_TYPE_FREEFORM                           , // HIDL v3.2
 } camera_metadata_enum_android_scaler_cropping_type_t;
 
+// ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS
+typedef enum camera_metadata_enum_android_scaler_available_recommended_stream_configurations {
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW
+                                                                      = 0x0, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD
+                                                                      = 0x1, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT
+                                                                      = 0x2, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT
+                                                                      = 0x3, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_ZSL   = 0x4, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RAW   = 0x5, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_LOW_LATENCY_SNAPSHOT
+                                                                      = 0x6, // HIDL v3.4
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PUBLIC_END
+                                                                      = 0x7, // HIDL v3.4
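+    // Values between PUBLIC_END and VENDOR_START are presumably reserved
+    // for future public stream configurations.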
+    ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VENDOR_START
+                                                                      = 0x18, // HIDL v3.4
+} camera_metadata_enum_android_scaler_available_recommended_stream_configurations_t;
+
 
 // ANDROID_SENSOR_REFERENCE_ILLUMINANT1
 typedef enum camera_metadata_enum_android_sensor_reference_illuminant1 {
@@ -847,6 +903,8 @@
     ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_GBRG               , // HIDL v3.2
     ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_BGGR               , // HIDL v3.2
     ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_RGB                , // HIDL v3.2
+    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_MONO               , // HIDL v3.4
+    ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR                , // HIDL v3.4
 } camera_metadata_enum_android_sensor_info_color_filter_arrangement_t;
 
 // ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE
@@ -953,6 +1011,11 @@
     ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_EXTERNAL                  , // HIDL v3.3
 } camera_metadata_enum_android_info_supported_hardware_level_t;
 
+// ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION
+typedef enum camera_metadata_enum_android_info_supported_buffer_management_version {
+    ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5, // HIDL v3.4
+} camera_metadata_enum_android_info_supported_buffer_management_version_t;
+
 
 // ANDROID_BLACK_LEVEL_LOCK
 typedef enum camera_metadata_enum_android_black_level_lock {
@@ -987,6 +1050,14 @@
     ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_TRUE                           , // HIDL v3.2
 } camera_metadata_enum_android_depth_depth_is_exclusive_t;
 
+// ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS
+typedef enum camera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations {
+    ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_OUTPUT
+                                                                     , // HIDL v3.4
+    ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT
+                                                                     , // HIDL v3.4
+} camera_metadata_enum_android_depth_available_dynamic_depth_stream_configurations_t;
+
 
 // ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE
 typedef enum camera_metadata_enum_android_logical_multi_camera_sensor_sync_type {
@@ -1003,3 +1074,17 @@
 } camera_metadata_enum_android_distortion_correction_mode_t;
 
 
+// ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS
+typedef enum camera_metadata_enum_android_heic_available_heic_stream_configurations {
+    ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_OUTPUT        , // HIDL v3.4
+    ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT         , // HIDL v3.4
+} camera_metadata_enum_android_heic_available_heic_stream_configurations_t;
+
+
+// ANDROID_HEIC_INFO_SUPPORTED
+typedef enum camera_metadata_enum_android_heic_info_supported {
+    ANDROID_HEIC_INFO_SUPPORTED_FALSE                               , // HIDL v3.4
+    ANDROID_HEIC_INFO_SUPPORTED_TRUE                                , // HIDL v3.4
+} camera_metadata_enum_android_heic_info_supported_t;
+
+
diff --git a/camera/src/camera_metadata.c b/camera/src/camera_metadata.c
index f33da8d..f9d524b 100644
--- a/camera/src/camera_metadata.c
+++ b/camera/src/camera_metadata.c
@@ -271,7 +271,11 @@
 
     size_t memory_needed = calculate_camera_metadata_size(entry_capacity,
                                                           data_capacity);
-    if (memory_needed > dst_size) return NULL;
+    if (memory_needed > dst_size) {
+        ALOGE("%s: Memory needed to place camera metadata (%zu) > dst size (%zu)", __FUNCTION__,
+                memory_needed, dst_size);
+        return NULL;
+    }
 
     camera_metadata_t *metadata = (camera_metadata_t*)dst;
     metadata->version = CURRENT_METADATA_VERSION;
@@ -343,7 +347,11 @@
     size_t memory_needed = get_camera_metadata_compact_size(src);
 
     if (dst == NULL) return NULL;
-    if (dst_size < memory_needed) return NULL;
+    if (dst_size < memory_needed) {
+        ALOGE("%s: Memory needed to place camera metadata (%zu) > dst size (%zu)", __FUNCTION__,
+                memory_needed, dst_size);
+        return NULL;
+    }
 
     camera_metadata_t *metadata =
         place_camera_metadata(dst, dst_size, src->entry_count, src->data_count);
@@ -426,14 +434,14 @@
         };
 
         for (size_t i = 0; i < sizeof(alignments)/sizeof(alignments[0]); ++i) {
-            uintptr_t aligned_ptr = ALIGN_TO((uintptr_t) metadata + alignmentOffset,
+            uintptr_t aligned_ptr_section = ALIGN_TO((uintptr_t) metadata + alignmentOffset,
                     alignments[i].alignment);
 
-            if ((uintptr_t)metadata + alignmentOffset != aligned_ptr) {
+            if ((uintptr_t)metadata + alignmentOffset != aligned_ptr_section) {
                 ALOGE("%s: Metadata pointer is not aligned (actual %p, "
                       "expected %p, offset %" PRIuPTR ") to type %s",
                       __FUNCTION__, metadata,
-                      (void*)aligned_ptr, alignmentOffset, alignments[i].name);
+                      (void*)aligned_ptr_section, alignmentOffset, alignments[i].name);
                 return CAMERA_METADATA_VALIDATION_ERROR;
             }
         }
@@ -442,56 +450,71 @@
     /**
      * Check that the metadata contents are correct
      */
-
-    if (expected_size != NULL && metadata->size > *expected_size) {
-        ALOGE("%s: Metadata size (%" PRIu32 ") should be <= expected size (%zu)",
-              __FUNCTION__, metadata->size, *expected_size);
+    if (expected_size != NULL && sizeof(camera_metadata_t) > *expected_size) {
+        ALOGE("%s: Metadata header size (%zu) should be <= expected size (%zu)",
+                __FUNCTION__, sizeof(camera_metadata_t), *expected_size);
         return CAMERA_METADATA_VALIDATION_ERROR;
     }
 
-    if (metadata->entry_count > metadata->entry_capacity) {
+    // Create an aligned copy of the header: when alignmentOffset != 0 the
+    // buffer may be misaligned, and reading struct fields through a
+    // misaligned pointer is undefined behavior.
+    camera_metadata_t headerCopy;
+    const camera_metadata_t *header;
+    if (alignmentOffset != 0) {
+        memcpy(&headerCopy, metadata, sizeof(camera_metadata_t));
+        header = &headerCopy;
+    } else {
+        header = metadata;
+    }
+
+    if (expected_size != NULL && header->size > *expected_size) {
+        ALOGE("%s: Metadata size (%" PRIu32 ") should be <= expected size (%zu)",
+              __FUNCTION__, header->size, *expected_size);
+        return CAMERA_METADATA_VALIDATION_ERROR;
+    }
+
+    if (header->entry_count > header->entry_capacity) {
         ALOGE("%s: Entry count (%" PRIu32 ") should be <= entry capacity "
               "(%" PRIu32 ")",
-              __FUNCTION__, metadata->entry_count, metadata->entry_capacity);
+              __FUNCTION__, header->entry_count, header->entry_capacity);
         return CAMERA_METADATA_VALIDATION_ERROR;
     }
 
-    if (metadata->data_count > metadata->data_capacity) {
+    if (header->data_count > header->data_capacity) {
         ALOGE("%s: Data count (%" PRIu32 ") should be <= data capacity "
               "(%" PRIu32 ")",
-              __FUNCTION__, metadata->data_count, metadata->data_capacity);
+              __FUNCTION__, header->data_count, header->data_capacity);
         android_errorWriteLog(SN_EVENT_LOG_ID, "30591838");
         return CAMERA_METADATA_VALIDATION_ERROR;
     }
 
     const metadata_uptrdiff_t entries_end =
-        metadata->entries_start + metadata->entry_capacity;
-    if (entries_end < metadata->entries_start || // overflow check
-        entries_end > metadata->data_start) {
+        header->entries_start + header->entry_capacity;
+    if (entries_end < header->entries_start || // overflow check
+        entries_end > header->data_start) {
 
         ALOGE("%s: Entry start + capacity (%" PRIu32 ") should be <= data start "
               "(%" PRIu32 ")",
                __FUNCTION__,
-              (metadata->entries_start + metadata->entry_capacity),
-              metadata->data_start);
+              (header->entries_start + header->entry_capacity),
+              header->data_start);
         return CAMERA_METADATA_VALIDATION_ERROR;
     }
 
     const metadata_uptrdiff_t data_end =
-        metadata->data_start + metadata->data_capacity;
-    if (data_end < metadata->data_start || // overflow check
-        data_end > metadata->size) {
+        header->data_start + header->data_capacity;
+    if (data_end < header->data_start || // overflow check
+        data_end > header->size) {
 
         ALOGE("%s: Data start + capacity (%" PRIu32 ") should be <= total size "
               "(%" PRIu32 ")",
                __FUNCTION__,
-              (metadata->data_start + metadata->data_capacity),
-              metadata->size);
+              (header->data_start + header->data_capacity),
+              header->size);
         return CAMERA_METADATA_VALIDATION_ERROR;
     }
 
     // Validate each entry
-    const metadata_size_t entry_count = metadata->entry_count;
+    const metadata_size_t entry_count = header->entry_count;
     camera_metadata_buffer_entry_t *entries = get_entries(metadata);
 
     for (size_t i = 0; i < entry_count; ++i) {
@@ -504,7 +527,12 @@
             return CAMERA_METADATA_VALIDATION_ERROR;
         }
 
-        camera_metadata_buffer_entry_t entry = entries[i];
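+        // Same alignment caveat as the header above: copy the entry out of
+        // the buffer when it may be misaligned.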
+        camera_metadata_buffer_entry_t entry;
+        if (alignmentOffset != 0) {
+            memcpy(&entry, entries + i, sizeof(camera_metadata_buffer_entry_t));
+        } else {
+            entry = entries[i];
+        }
 
         if (entry.type >= NUM_TYPES) {
             ALOGE("%s: Entry index %zu had a bad type %d",
@@ -515,7 +543,7 @@
         // TODO: fix vendor_tag_ops across processes so we don't need to special
         //       case vendor-specific tags
         uint32_t tag_section = entry.tag >> 16;
-        int tag_type = get_local_camera_metadata_tag_type(entry.tag, metadata);
+        int tag_type = get_local_camera_metadata_tag_type(entry.tag, header);
         if (tag_type != (int)entry.type && tag_section < VENDOR_SECTION) {
             ALOGE("%s: Entry index %zu had tag type %d, but the type was %d",
                   __FUNCTION__, i, tag_type, entry.type);
@@ -998,6 +1026,15 @@
     return get_local_camera_metadata_tag_type_vendor_id(tag, id);
 }
 
+const int32_t *get_camera_metadata_permission_needed(uint32_t *tag_count) {
+    if (NULL == tag_count) {
+        return NULL;
+    }
+
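+    // tag_permission_needed is the static table of permission-gated tags
+    // (see camera_metadata_tag_info.c); derive the count from its size.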
+    *tag_count = sizeof(tag_permission_needed) / sizeof(tag_permission_needed[0]);
+    return tag_permission_needed;
+}
+
 int set_camera_metadata_vendor_tag_ops(const vendor_tag_query_ops_t* ops) {
     // **DEPRECATED**
     (void) ops;
diff --git a/camera/src/camera_metadata_tag_info.c b/camera/src/camera_metadata_tag_info.c
index 75ad1f4..6b14275 100644
--- a/camera/src/camera_metadata_tag_info.c
+++ b/camera/src/camera_metadata_tag_info.c
@@ -60,6 +60,8 @@
     [ANDROID_LOGICAL_MULTI_CAMERA] = "android.logicalMultiCamera",
     [ANDROID_DISTORTION_CORRECTION]
                                     = "android.distortionCorrection",
+    [ANDROID_HEIC]                 = "android.heic",
+    [ANDROID_HEIC_INFO]            = "android.heic.info",
 };
 
 unsigned int camera_metadata_section_bounds[ANDROID_SECTION_COUNT][2] = {
@@ -120,6 +122,10 @@
     [ANDROID_DISTORTION_CORRECTION]
                                     = { ANDROID_DISTORTION_CORRECTION_START,
                                        ANDROID_DISTORTION_CORRECTION_END },
+    [ANDROID_HEIC]                 = { ANDROID_HEIC_START,
+                                       ANDROID_HEIC_END },
+    [ANDROID_HEIC_INFO]            = { ANDROID_HEIC_INFO_START,
+                                       ANDROID_HEIC_INFO_END },
 };
 
 static tag_info_t android_color_correction[ANDROID_COLOR_CORRECTION_END -
@@ -417,6 +423,9 @@
     [ ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS - ANDROID_REQUEST_START ] =
     { "availablePhysicalCameraRequestKeys",
                                         TYPE_INT32  },
+    [ ANDROID_REQUEST_CHARACTERISTIC_KEYS_NEEDING_PERMISSION - ANDROID_REQUEST_START ] =
+    { "characteristicKeysNeedingPermission",
+                                        TYPE_INT32  },
 };
 
 static tag_info_t android_scaler[ANDROID_SCALER_END -
@@ -451,6 +460,12 @@
     { "availableStallDurations",       TYPE_INT64  },
     [ ANDROID_SCALER_CROPPING_TYPE - ANDROID_SCALER_START ] =
     { "croppingType",                  TYPE_BYTE   },
+    [ ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS - ANDROID_SCALER_START ] =
+    { "availableRecommendedStreamConfigurations",
+                                        TYPE_INT32  },
+    [ ANDROID_SCALER_AVAILABLE_RECOMMENDED_INPUT_OUTPUT_FORMATS_MAP - ANDROID_SCALER_START ] =
+    { "availableRecommendedInputOutputFormatsMap",
+                                        TYPE_INT32  },
 };
 
 static tag_info_t android_sensor[ANDROID_SENSOR_END -
@@ -666,6 +681,9 @@
     { "supportedHardwareLevel",        TYPE_BYTE   },
     [ ANDROID_INFO_VERSION - ANDROID_INFO_START ] =
     { "version",                       TYPE_BYTE   },
+    [ ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION - ANDROID_INFO_START ] =
+    { "supportedBufferManagementVersion",
+                                        TYPE_BYTE   },
 };
 
 static tag_info_t android_black_level[ANDROID_BLACK_LEVEL_END -
@@ -704,6 +722,18 @@
     { "availableDepthStallDurations",  TYPE_INT64  },
     [ ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE - ANDROID_DEPTH_START ] =
     { "depthIsExclusive",              TYPE_BYTE   },
+    [ ANDROID_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS - ANDROID_DEPTH_START ] =
+    { "availableRecommendedDepthStreamConfigurations",
+                                        TYPE_INT32  },
+    [ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS - ANDROID_DEPTH_START ] =
+    { "availableDynamicDepthStreamConfigurations",
+                                        TYPE_INT32  },
+    [ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS - ANDROID_DEPTH_START ] =
+    { "availableDynamicDepthMinFrameDurations",
+                                        TYPE_INT64  },
+    [ ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS - ANDROID_DEPTH_START ] =
+    { "availableDynamicDepthStallDurations",
+                                        TYPE_INT64  },
 };
 
 static tag_info_t android_logical_multi_camera[ANDROID_LOGICAL_MULTI_CAMERA_END -
@@ -712,6 +742,8 @@
     { "physicalIds",                   TYPE_BYTE   },
     [ ANDROID_LOGICAL_MULTI_CAMERA_SENSOR_SYNC_TYPE - ANDROID_LOGICAL_MULTI_CAMERA_START ] =
     { "sensorSyncType",                TYPE_BYTE   },
+    [ ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID - ANDROID_LOGICAL_MULTI_CAMERA_START ] =
+    { "activePhysicalId",              TYPE_BYTE   },
 };
 
 static tag_info_t android_distortion_correction[ANDROID_DISTORTION_CORRECTION_END -
@@ -722,6 +754,26 @@
     { "availableModes",                TYPE_BYTE   },
 };
 
+static tag_info_t android_heic[ANDROID_HEIC_END -
+        ANDROID_HEIC_START] = {
+    [ ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS - ANDROID_HEIC_START ] =
+    { "availableHeicStreamConfigurations",
+                                        TYPE_INT32  },
+    [ ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS - ANDROID_HEIC_START ] =
+    { "availableHeicMinFrameDurations",
+                                        TYPE_INT64  },
+    [ ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS - ANDROID_HEIC_START ] =
+    { "availableHeicStallDurations",   TYPE_INT64  },
+};
+
+static tag_info_t android_heic_info[ANDROID_HEIC_INFO_END -
+        ANDROID_HEIC_INFO_START] = {
+    [ ANDROID_HEIC_INFO_SUPPORTED - ANDROID_HEIC_INFO_START ] =
+    { "supported",                     TYPE_BYTE   },
+    [ ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT - ANDROID_HEIC_INFO_START ] =
+    { "maxJpegAppSegmentsCount",       TYPE_BYTE   },
+};
+
 
 tag_info_t *tag_info[ANDROID_SECTION_COUNT] = {
     android_color_correction,
@@ -752,6 +804,27 @@
     android_depth,
     android_logical_multi_camera,
     android_distortion_correction,
+    android_heic,
+    android_heic_info,
+};
+
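+// Tags that need permission, as returned by
+// get_camera_metadata_permission_needed().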
+static int32_t tag_permission_needed[16] = {
+    ANDROID_LENS_POSE_ROTATION,
+    ANDROID_LENS_POSE_TRANSLATION,
+    ANDROID_LENS_INTRINSIC_CALIBRATION,
+    ANDROID_LENS_RADIAL_DISTORTION,
+    ANDROID_LENS_POSE_REFERENCE,
+    ANDROID_LENS_DISTORTION,
+    ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+    ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+    ANDROID_SENSOR_REFERENCE_ILLUMINANT1,
+    ANDROID_SENSOR_REFERENCE_ILLUMINANT2,
+    ANDROID_SENSOR_CALIBRATION_TRANSFORM1,
+    ANDROID_SENSOR_CALIBRATION_TRANSFORM2,
+    ANDROID_SENSOR_COLOR_TRANSFORM1,
+    ANDROID_SENSOR_COLOR_TRANSFORM2,
+    ANDROID_SENSOR_FORWARD_MATRIX1,
+    ANDROID_SENSOR_FORWARD_MATRIX2,
 };
 
 int camera_metadata_enum_snprint(uint32_t tag,
@@ -1930,6 +2003,10 @@
                     msg = "MONOCHROME";
                     ret = 0;
                     break;
+                case ANDROID_REQUEST_AVAILABLE_CAPABILITIES_SECURE_IMAGE_DATA:
+                    msg = "SECURE_IMAGE_DATA";
+                    ret = 0;
+                    break;
                 default:
                     msg = "error: enum value out of range";
             }
@@ -1950,6 +2027,9 @@
         case ANDROID_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS: {
             break;
         }
+        case ANDROID_REQUEST_CHARACTERISTIC_KEYS_NEEDING_PERMISSION: {
+            break;
+        }
 
         case ANDROID_SCALER_CROP_REGION: {
             break;
@@ -1984,6 +2064,18 @@
                     msg = "BLOB";
                     ret = 0;
                     break;
+                case ANDROID_SCALER_AVAILABLE_FORMATS_RAW10:
+                    msg = "RAW10";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_FORMATS_RAW12:
+                    msg = "RAW12";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_FORMATS_Y8:
+                    msg = "Y8";
+                    ret = 0;
+                    break;
                 default:
                     msg = "error: enum value out of range";
             }
@@ -2049,6 +2141,52 @@
             }
             break;
         }
+        case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS: {
+            switch (value) {
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW:
+                    msg = "PREVIEW";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD:
+                    msg = "RECORD";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT:
+                    msg = "VIDEO_SNAPSHOT";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT:
+                    msg = "SNAPSHOT";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_ZSL:
+                    msg = "ZSL";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RAW:
+                    msg = "RAW";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_LOW_LATENCY_SNAPSHOT:
+                    msg = "LOW_LATENCY_SNAPSHOT";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PUBLIC_END:
+                    msg = "PUBLIC_END";
+                    ret = 0;
+                    break;
+                case ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VENDOR_START:
+                    msg = "VENDOR_START";
+                    ret = 0;
+                    break;
+                default:
+                    msg = "error: enum value out of range";
+            }
+            break;
+        }
+        case ANDROID_SCALER_AVAILABLE_RECOMMENDED_INPUT_OUTPUT_FORMATS_MAP: {
+            break;
+        }
 
         case ANDROID_SENSOR_EXPOSURE_TIME: {
             break;
@@ -2280,6 +2418,14 @@
                     msg = "RGB";
                     ret = 0;
                     break;
+                case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_MONO:
+                    msg = "MONO";
+                    ret = 0;
+                    break;
+                case ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT_NIR:
+                    msg = "NIR";
+                    ret = 0;
+                    break;
                 default:
                     msg = "error: enum value out of range";
             }
@@ -2662,6 +2808,17 @@
         case ANDROID_INFO_VERSION: {
             break;
         }
+        case ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION: {
+            switch (value) {
+                case ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5:
+                    msg = "HIDL_DEVICE_3_5";
+                    ret = 0;
+                    break;
+                default:
+                    msg = "error: enum value out of range";
+            }
+            break;
+        }
 
         case ANDROID_BLACK_LEVEL_LOCK: {
             switch (value) {
@@ -2756,6 +2913,30 @@
             }
             break;
         }
+        case ANDROID_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS: {
+            break;
+        }
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS: {
+            switch (value) {
+                case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_OUTPUT:
+                    msg = "OUTPUT";
+                    ret = 0;
+                    break;
+                case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT:
+                    msg = "INPUT";
+                    ret = 0;
+                    break;
+                default:
+                    msg = "error: enum value out of range";
+            }
+            break;
+        }
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS: {
+            break;
+        }
+        case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS: {
+            break;
+        }
 
         case ANDROID_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS: {
             break;
@@ -2775,6 +2956,9 @@
             }
             break;
         }
+        case ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID: {
+            break;
+        }
 
         case ANDROID_DISTORTION_CORRECTION_MODE: {
             switch (value) {
@@ -2799,6 +2983,47 @@
             break;
         }
 
+        case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS: {
+            switch (value) {
+                case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_OUTPUT:
+                    msg = "OUTPUT";
+                    ret = 0;
+                    break;
+                case ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS_INPUT:
+                    msg = "INPUT";
+                    ret = 0;
+                    break;
+                default:
+                    msg = "error: enum value out of range";
+            }
+            break;
+        }
+        case ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS: {
+            break;
+        }
+        case ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS: {
+            break;
+        }
+
+        case ANDROID_HEIC_INFO_SUPPORTED: {
+            switch (value) {
+                case ANDROID_HEIC_INFO_SUPPORTED_FALSE:
+                    msg = "FALSE";
+                    ret = 0;
+                    break;
+                case ANDROID_HEIC_INFO_SUPPORTED_TRUE:
+                    msg = "TRUE";
+                    ret = 0;
+                    break;
+                default:
+                    msg = "error: enum value out of range";
+            }
+            break;
+        }
+        case ANDROID_HEIC_INFO_MAX_JPEG_APP_SEGMENTS_COUNT: {
+            break;
+        }
+
     }
 
     strncpy(dst, msg, size - 1);
diff --git a/camera/tests/Android.bp b/camera/tests/Android.bp
index e4d78c1..58db12c 100644
--- a/camera/tests/Android.bp
+++ b/camera/tests/Android.bp
@@ -2,6 +2,7 @@
 cc_test {
     name: "camera_metadata_tests",
     srcs: ["camera_metadata_tests.cpp"],
+    test_suites: ["device-tests"],
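+    // Included in the device-tests suite; runnable with e.g.
+    // `atest camera_metadata_tests` (assuming a standard AOSP checkout).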
 
     shared_libs: [
         "libutils",