[automerger skipped] Merge "[RESTRICT AUTOMERGE]: STS include missing apps from EphemeralTest" into oc-dr1-dev am: 9c9a9bb0eb -s ours am: 7e18a83ced -s ours am: fe2523cfcd -s ours

am skip reason: subject contains skip directive

Original change: https://googleplex-android-review.googlesource.com/c/platform/cts/+/13121098

MUST ONLY BE SUBMITTED BY AUTOMERGER

Change-Id: I72d39273d77bf9881df35a6494aee4b55f21112f
diff --git a/.gitignore b/.gitignore
index baf394f..33bfd97 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@
 gen/
 *.iml
 *.class
+*.sw*
diff --git a/Android.bp b/Android.bp
new file mode 100644
index 0000000..2dce5de
--- /dev/null
+++ b/Android.bp
@@ -0,0 +1,78 @@
+java_defaults {
+    name: "cts_error_prone_rules",
+    errorprone: {
+        javacflags: [
+            // Set of error prone rules to ensure code quality
+            // When updating this list, also update error_prone_rules.mk
+            "-Xep:ArrayToString:ERROR",
+            "-Xep:BoxedPrimitiveConstructor:ERROR",
+            "-Xep:ConstantField:ERROR",
+            "-Xep:EqualsIncompatibleType:ERROR",
+            "-Xep:FormatString:ERROR",
+            "-Xep:GetClassOnClass:ERROR",
+            "-Xep:IdentityBinaryExpression:ERROR",
+            "-Xep:JUnit3TestNotRun:ERROR",
+            "-Xep:JUnit4ClassUsedInJUnit3:ERROR",
+            "-Xep:JUnitAmbiguousTestClass:ERROR",
+            "-Xep:MissingFail:ERROR",
+            "-Xep:MissingOverride:ERROR",
+            "-Xep:Overrides:ERROR",
+            "-Xep:ReferenceEquality:ERROR",
+            "-Xep:RemoveUnusedImports:ERROR",
+            "-Xep:ReturnValueIgnored:ERROR",
+            "-Xep:SelfEquals:ERROR",
+            "-Xep:SizeGreaterThanOrEqualsZero:ERROR",
+            "-Xep:TryFailThrowable:ERROR",
+        ],
+    },
+}
+
+java_defaults {
+    name: "cts_error_prone_rules_tests",
+    errorprone: {
+        javacflags: [
+            // Set of error prone rules to ensure code quality of tests
+            // Goal is to eventually merge with cts_error_prone_rules
+            // When updating this list, also update error_prone_rules_tests.mk
+            "-Xep:ArrayToString:ERROR",
+            "-Xep:CollectionIncompatibleType:ERROR",
+            "-Xep:EqualsIncompatibleType:ERROR",
+            "-Xep:EqualsNaN:ERROR",
+            "-Xep:FormatString:ERROR",
+            "-Xep:IdentityBinaryExpression:ERROR",
+            "-Xep:JUnit3TestNotRun:ERROR",
+            "-Xep:JUnit4ClassUsedInJUnit3:ERROR",
+            "-Xep:JUnitAmbiguousTestClass:ERROR",
+            "-Xep:MissingFail:ERROR",
+            "-Xep:SizeGreaterThanOrEqualsZero:ERROR",
+            "-Xep:TryFailThrowable:ERROR",
+        ],
+    },
+}
+
+// Used with an android_test / android_test_helper_app, this is equivalent to
+// BUILD_CTS_SUPPORT_PACKAGE
+java_defaults {
+    name: "cts_support_defaults",
+    defaults: ["cts_error_prone_rules_tests"],
+    dex_preopt: {
+        enabled: false,
+    },
+    optimize: {
+        enabled: false,
+    },
+}
+
+// Used with different module types, this is equivalent to:
+//   android_test: BUILD_CTS_PACKAGE
+//   java_library: BUILD_CTS_TARGET_JAVA_LIBRARY
+//   java_library_host: BUILD_CTS_HOST_JAVA_LIBRARY
+java_defaults {
+    name: "cts_defaults",
+    defaults: ["cts_support_defaults"],
+    target: {
+        android: {
+            static_libs: ["platform-test-annotations"],
+        },
+    },
+}
diff --git a/CtsCoverage.mk b/CtsCoverage.mk
index 8a0eef8..0ac533b 100644
--- a/CtsCoverage.mk
+++ b/CtsCoverage.mk
@@ -22,12 +22,7 @@
 
 coverage_out := $(HOST_OUT)/cts-api-coverage
 
-api_text_description := frameworks/base/api/current.txt
-api_xml_description := $(coverage_out)/api.xml
-$(api_xml_description) : $(api_text_description) $(APICHECK)
-	$(hide) echo "Converting API file to XML: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) $(APICHECK_COMMAND) -convert2xml $< $@
+api_xml_description := $(TARGET_OUT_COMMON_INTERMEDIATES)/api.xml
 
 napi_text_description := cts/tools/cts-api-coverage/etc/ndk-api.xml
 napi_xml_description := $(coverage_out)/ndk-api.xml
@@ -94,7 +89,7 @@
 .PHONY: cts-combined-xml-coverage
 cts-combined-xml-coverage : $(cts-combined-xml-coverage-report)
 
-.PHONY: cts-api-coverage
+.PHONY: cts-coverage-report-all cts-api-coverage
 cts-coverage-report-all: cts-test-coverage cts-verifier-coverage cts-combined-coverage cts-combined-xml-coverage
 
 # Put the test coverage report in the dist dir if "cts-api-coverage" is among the build goals.
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 5f3e99f..aca54cd 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -1,13 +1,33 @@
+[Builtin Hooks]
+clang_format = true
+
+[Builtin Hooks Options]
+clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions c,h,cc,cpp
+               tests/tests/binder_ndk
+
 [Hook Scripts]
 checkstyle_hook = ${REPO_ROOT}/prebuilts/checkstyle/checkstyle.py --sha ${PREUPLOAD_COMMIT}
                   -fw apps/CtsVerifier/src/com/android/cts/verifier/usb/
                       apps/CtsVerifierUSBCompanion/
+                      libs/
                       tests/autofillservice/
+                      tests/contentcaptureservice/
                       tests/tests/animation/
+                      tests/tests/content/
+                      tests/tests/graphics/
+                      tests/tests/hardware/
+                      tests/tests/permission2/
+                      tests/tests/permission/
+                      tests/tests/preference/
                       tests/tests/print/
                       tests/tests/text/
-                      tests/tests/graphics/
+                      tests/tests/theme/
                       tests/tests/transition/
                       tests/tests/uirendering/
                       tests/tests/view/
                       tests/tests/widget/
+                      common/device-side/util/
+                      hostsidetests/stagedinstall/
+                      tests/tests/packageinstaller/atomicinstall/
+
+ktlint_hook = ${REPO_ROOT}/prebuilts/ktlint/ktlint.py -f ${PREUPLOAD_FILES}
diff --git a/apps/CameraITS/CameraITS.pdf b/apps/CameraITS/CameraITS.pdf
index 5eb3af3..5f1e481 100644
--- a/apps/CameraITS/CameraITS.pdf
+++ b/apps/CameraITS/CameraITS.pdf
Binary files differ
diff --git a/apps/CameraITS/OWNERS b/apps/CameraITS/OWNERS
new file mode 100644
index 0000000..fb8bbec
--- /dev/null
+++ b/apps/CameraITS/OWNERS
@@ -0,0 +1,3 @@
+# Bug component: 41727
+include platform/frameworks/av:/camera/OWNERS
+portmannc@google.com
diff --git a/apps/CameraITS/build/envsetup.sh b/apps/CameraITS/build/envsetup.sh
index a21108e..ae12e10 100644
--- a/apps/CameraITS/build/envsetup.sh
+++ b/apps/CameraITS/build/envsetup.sh
@@ -63,6 +63,4 @@
         echo ">> Unit test for $M failed" >&2
 done
 
-alias gpylint='gpylint --disable=F0401 --disable=C6304 --rcfile=$CAMERA_ITS_TOP"/build/scripts/gpylint_rcfile"'
-# F0401 ignores import errors since gpylint does not have the python paths
-# C6304 ignore Copyright line errors.
+alias gpylint='gpylint --rcfile=$CAMERA_ITS_TOP"/build/scripts/gpylint_rcfile"'
diff --git a/apps/CameraITS/build/scripts/gpylint_rcfile b/apps/CameraITS/build/scripts/gpylint_rcfile
index 37f43f7..f92c613 100644
--- a/apps/CameraITS/build/scripts/gpylint_rcfile
+++ b/apps/CameraITS/build/scripts/gpylint_rcfile
@@ -13,7 +13,10 @@
 # --enable=similarities". If you want to run only the classes checker, but have
 # no Warning level messages displayed, use"--disable=all --enable=classes
 # --disable=W"
-disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression
+disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression, F0401, C6304, C0111
+# F0401 ignores import errors since gpylint does not have the python paths
+# C6304 ignore Copyright line errors.
+# C0111 ignore Docstring at top of file.
 
 # Enable the message, report, category or checker with the given id(s). You can
 # either give multiple identifier separated by comma (,) or put this option
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
index d75532b..a4b6927 100644
--- a/apps/CameraITS/pymodules/its/caps.py
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -191,6 +191,17 @@
     """
     return raw16(props) or raw10(props) or raw12(props)
 
+def y8(props):
+    """Returns whether a device supports Y8 output.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return len(its.objects.get_available_output_sizes("y8", props)) > 0
+
 def post_raw_sensitivity_boost(props):
+    """Returns whether a device supports post RAW sensitivity boost.
 
@@ -513,6 +524,20 @@
     return False
 
 
+def sync_latency(props):
+    """Returns sync latency in number of frames.
+
+    If undefined, 8 frames.
+
+    Returns:
+        integer number of frames
+    """
+    sync_latency = props['android.sync.maxLatency']
+    if sync_latency < 0:
+        sync_latency = 8
+    return sync_latency
+
+
 def backward_compatible(props):
     """Returns whether a device supports BACKWARD_COMPATIBLE.
 
diff --git a/apps/CameraITS/pymodules/its/cv2image.py b/apps/CameraITS/pymodules/its/cv2image.py
index 2004846..b0a3d56 100644
--- a/apps/CameraITS/pymodules/its/cv2image.py
+++ b/apps/CameraITS/pymodules/its/cv2image.py
@@ -22,10 +22,41 @@
 import its.image
 import numpy
 
+CHART_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
+                          'test_images', 'ISO12233.png')
+CHART_HEIGHT = 13.5  # cm
+CHART_DISTANCE_RFOV = 30.0  # cm
+CHART_DISTANCE_WFOV = 22.0  # cm
+CHART_SCALE_START = 0.65
+CHART_SCALE_STOP = 1.35
+CHART_SCALE_STEP = 0.025
+
+FOV_THRESH_TELE = 60
+FOV_THRESH_WFOV = 90
+
+SCALE_RFOV_IN_WFOV_BOX = 0.67
+SCALE_TELE_IN_RFOV_BOX = 0.67
+SCALE_TELE_IN_WFOV_BOX = 0.5
+
 VGA_HEIGHT = 480
 VGA_WIDTH = 640
 
 
+def calc_chart_scaling(chart_distance, camera_fov):
+    chart_scaling = 1.0
+    camera_fov = float(camera_fov)
+    if (FOV_THRESH_TELE < camera_fov < FOV_THRESH_WFOV and
+                numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
+        chart_scaling = SCALE_RFOV_IN_WFOV_BOX
+    elif (camera_fov <= FOV_THRESH_TELE and
+          numpy.isclose(chart_distance, CHART_DISTANCE_WFOV, rtol=0.1)):
+        chart_scaling = SCALE_TELE_IN_WFOV_BOX
+    elif (camera_fov <= FOV_THRESH_TELE and
+          numpy.isclose(chart_distance, CHART_DISTANCE_RFOV, rtol=0.1)):
+        chart_scaling = SCALE_TELE_IN_RFOV_BOX
+    return chart_scaling
+
+
 def scale_img(img, scale=1.0):
     """Scale and image based on a real number scale factor."""
     dim = (int(img.shape[1]*scale), int(img.shape[0]*scale))
@@ -50,8 +81,9 @@
     Defines PNG reference file, chart size and distance, and scaling range.
     """
 
-    def __init__(self, chart_file, height, distance, scale_start, scale_stop,
-                 scale_step, camera_id=None):
+    def __init__(self, chart_file=None, height=None, distance=None,
+                 scale_start=None, scale_stop=None, scale_step=None,
+                 camera_id=None):
         """Initial constructor for class.
 
         Args:
@@ -63,12 +95,12 @@
             scale_step:     float; step value for scaling for chart search
             camera_id:      int; camera used for extractor
         """
-        self._file = chart_file
-        self._height = height
-        self._distance = distance
-        self._scale_start = scale_start
-        self._scale_stop = scale_stop
-        self._scale_step = scale_step
+        self._file = chart_file or CHART_FILE
+        self._height = height or CHART_HEIGHT
+        self._distance = distance or CHART_DISTANCE_RFOV
+        self._scale_start = scale_start or CHART_SCALE_START
+        self._scale_stop = scale_stop or CHART_SCALE_STOP
+        self._scale_step = scale_step or CHART_SCALE_STEP
         self.xnorm, self.ynorm, self.wnorm, self.hnorm, self.scale = its.image.chart_located_per_argv()
         if not self.xnorm:
             with its.device.ItsSession(camera_id) as cam:
@@ -160,7 +192,7 @@
         for scale in numpy.arange(scale_start, scale_stop, scale_step):
             scene_scaled = scale_img(scene_gray, scale)
             if (scene_scaled.shape[0] < chart.shape[0] or
-                scene_scaled.shape[1] < chart.shape[1]):
+                        scene_scaled.shape[1] < chart.shape[1]):
                 continue
             result = cv2.matchTemplate(scene_scaled, chart, cv2.TM_CCOEFF)
             _, opt_val, _, top_left_scaled = cv2.minMaxLoc(result)
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index a1de8cf..5fdc567 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -12,21 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.error
-import os
-import os.path
-import sys
-import re
 import json
-import time
-import unittest
+import os
 import socket
-import subprocess
-import hashlib
-import numpy
 import string
+import subprocess
+import sys
+import time
 import unicodedata
+import unittest
 
+import its.error
+import numpy
+
+from collections import namedtuple
 
 class ItsSession(object):
     """Controls a device over adb to run ITS scripts.
@@ -71,11 +70,15 @@
     INTENT_START = 'com.android.cts.verifier.camera.its.START'
     ACTION_ITS_RESULT = 'com.android.cts.verifier.camera.its.ACTION_ITS_RESULT'
     EXTRA_VERSION = 'camera.its.extra.VERSION'
-    CURRENT_ITS_VERSION = '1.0' # version number to sync with CtsVerifier
+    CURRENT_ITS_VERSION = '1.0'  # version number to sync with CtsVerifier
     EXTRA_CAMERA_ID = 'camera.its.extra.CAMERA_ID'
     EXTRA_RESULTS = 'camera.its.extra.RESULTS'
     ITS_TEST_ACTIVITY = 'com.android.cts.verifier/.camera.its.ItsTestActivity'
 
+    # This string must be in sync with ItsService. Updated when interface
+    # between script and ItsService is changed.
+    ITS_SERVICE_VERSION = "1.0"
+
     RESULT_PASS = 'PASS'
     RESULT_FAIL = 'FAIL'
     RESULT_NOT_EXECUTED = 'NOT_EXECUTED'
@@ -145,7 +148,7 @@
                 if forward_info[0] == self.device_id and \
                    remote_p == ItsSession.REMOTE_PORT:
                     port = local_p
-                    break;
+                    break
                 else:
                     used_ports.append(local_p)
 
@@ -218,8 +221,9 @@
                 break
         proc.kill()
 
-    def __init__(self, camera_id=None):
+    def __init__(self, camera_id=None, hidden_physical_id=None):
         self._camera_id = camera_id
+        self._hidden_physical_id = hidden_physical_id
 
     def __enter__(self):
         # Initialize device id and adb command.
@@ -230,7 +234,7 @@
         self.__init_socket_port()
 
         self.__close_camera()
-        self.__open_camera(self._camera_id)
+        self.__open_camera()
         return self
 
     def __exit__(self, type, value, traceback):
@@ -263,18 +267,26 @@
             buf = numpy.frombuffer(buf, dtype=numpy.uint8)
         return jobj, buf
 
-    def __open_camera(self, camera_id):
+    def __open_camera(self):
         # Get the camera ID to open if it is an argument as a single camera.
         # This allows passing camera=# to individual tests at command line
         # and camera=#,#,# or an no camera argv with tools/run_all_tests.py.
-        if not camera_id:
-            camera_id = 0
+        #
+        # In case the camera is a logical multi-camera, to run ITS on the
+        # hidden physical sub-camera, pass camera=[logical ID]:[physical ID]
+        # to an individual test at the command line, and same applies to multiple
+        # camera IDs for tools/run_all_tests.py: camera=#,#:#,#:#,#
+        if not self._camera_id:
+            self._camera_id = 0
             for s in sys.argv[1:]:
                 if s[:7] == "camera=" and len(s) > 7:
-                    camera_ids = s[7:].split(",")
-                    if len(camera_ids) == 1:
-                        camera_id = camera_ids[0]
-        cmd = {"cmdName":"open", "cameraId":camera_id}
+                    camera_ids = s[7:].split(',')
+                    camera_id_combos = parse_camera_ids(camera_ids)
+                    if len(camera_id_combos) == 1:
+                        self._camera_id = camera_id_combos[0].id
+                        self._hidden_physical_id = camera_id_combos[0].sub_id
+
+        cmd = {"cmdName":"open", "cameraId":self._camera_id}
         self.sock.send(json.dumps(cmd) + "\n")
         data,_ = self.__read_response_from_socket()
         if data['tag'] != 'cameraOpened':
@@ -385,6 +397,40 @@
             raise its.error.Error('Invalid command response')
         return data['objValue']['cameraIdArray']
 
+    def check_its_version_compatible(self):
+        """Check the java side ItsService is compatible with current host script.
+           Raise ItsException if versions are incompatible
+
+        Returns: None
+        """
+        cmd = {}
+        cmd["cmdName"] = "getItsVersion"
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'ItsVersion':
+            raise its.error.Error('ItsService is incompatible with host python script')
+        server_version = data['strValue']
+        if self.ITS_SERVICE_VERSION != server_version:
+            raise its.error.Error('Version mismatch ItsService(%s) vs host script(%s)' % (
+                    server_version, ITS_SERVICE_VERSION))
+
+    def override_with_hidden_physical_camera_props(self, props):
+        """If current session is for a hidden physical camera, check that it is a valid
+           sub-camera backing the logical camera, and return the
+           characteristics of sub-camera. Otherwise, return "props" directly.
+
+        Returns: The properties of the hidden physical camera if possible
+        """
+        if self._hidden_physical_id:
+            e_msg = 'Camera %s is not a logical multi-camera' % self._camera_id
+            assert its.caps.logical_multi_camera(props), e_msg
+            physical_ids = its.caps.logical_multi_camera_physical_ids(props)
+            e_msg = 'Camera %s is not a hidden sub-camera of camera %s' % (
+                self._hidden_physical_id, self._camera_id)
+            assert self._hidden_physical_id in physical_ids, e_msg
+            props = self.get_camera_properties_by_id(self._hidden_physical_id)
+        return props
+
     def get_camera_properties(self):
         """Get the camera properties object for the device.
 
@@ -478,6 +524,8 @@
             cmd["awbLock"] = True
         if ev_comp != 0:
             cmd["evComp"] = ev_comp
+        if self._hidden_physical_id:
+            cmd["physicalId"] = self._hidden_physical_id
         self.sock.send(json.dumps(cmd) + "\n")
 
         # Wait for each specified 3A to converge.
@@ -513,6 +561,33 @@
             raise its.error.Error('3A failed to converge')
         return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
 
+    def is_stream_combination_supported(self, out_surfaces):
+        """Query whether an output surface combination is supported by the camera device.
+
+        This function hooks up to the isSessionConfigurationSupported() camera API
+        to query whether a particular stream combination is supported.
+
+        Refer to do_capture function for specification of out_surfaces field.
+        """
+        cmd = {}
+        cmd['cmdName'] = 'isStreamCombinationSupported'
+
+        if not isinstance(out_surfaces, list):
+            cmd['outputSurfaces'] = [out_surfaces]
+        else:
+            cmd['outputSurfaces'] = out_surfaces
+        formats = [c['format'] if 'format' in c else 'yuv'
+                   for c in cmd['outputSurfaces']]
+        formats = [s if s != 'jpg' else 'jpeg' for s in formats]
+
+        self.sock.send(json.dumps(cmd) + '\n')
+
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'streamCombinationSupport':
+            its.error.Error('Failed to query stream combination')
+
+        return data['strValue'] == 'supportedCombination'
+
     def do_capture(self, cap_request,
             out_surfaces=None, reprocess_format=None, repeat_request=None):
         """Issue capture request(s), and read back the image(s) and metadata.
@@ -531,19 +606,12 @@
 
         The out_surfaces field can specify the width(s), height(s), and
         format(s) of the captured image. The formats may be "yuv", "jpeg",
-        "dng", "raw", "raw10", "raw12", or "rawStats". The default is a YUV420
+        "dng", "raw", "raw10", "raw12", "rawStats" or "y8". The default is a YUV420
         frame ("yuv") corresponding to a full sensor frame.
 
         Optionally the out_surfaces field can specify physical camera id(s) if the
         current camera device is a logical multi-camera. The physical camera id
-        must refer to a physical camera backing this logical camera device. And
-        only "yuv", "raw", "raw10", "raw12" support the physical camera id field.
-
-        Currently only 2 physical streams with the same format are supported, one
-        from each physical camera:
-        - yuv physical streams of the same size.
-        - raw physical streams with the same or different sizes, depending on
-          device capability. (Different physical cameras may have different raw sizes).
+        must refer to a physical camera backing this logical camera device.
 
         Note that one or more surfaces can be specified, allowing a capture to
         request images back in multiple formats (e.g.) raw+yuv, raw+jpeg,
@@ -707,54 +775,64 @@
                                       "width" : max_yuv_size[0],
                                       "height": max_yuv_size[1]}]
 
-        # Figure out requested physical camera ids, physical and logical
-        # streams.
-        physical_cam_ids = {}
-        physical_buffers = {}
-        physical_cam_format = None
-        logical_cam_formats = []
-        for i,s in enumerate(cmd["outputSurfaces"]):
-            if "format" in s and s["format"] in ["yuv", "raw", "raw10", "raw12"]:
-                if "physicalCamera" in s:
-                    if physical_cam_format is not None and s["format"] != physical_cam_format:
-                        raise its.error.Error('ITS does not support capturing multiple ' +
-                                              'physical formats yet')
-                    physical_cam_ids[i] = s["physicalCamera"]
-                    physical_buffers[s["physicalCamera"]] = []
-                    physical_cam_format = s["format"]
-                else:
-                    logical_cam_formats.append(s["format"])
-            else:
-                logical_cam_formats.append(s["format"])
-
         ncap = len(cmd["captureRequests"])
         nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])
-        # Only allow yuv output to multiple targets
-        logical_yuv_surfaces = [s for s in cmd["outputSurfaces"] if s["format"]=="yuv"\
-                        and "physicalCamera" not in s]
-        n_yuv = len(logical_yuv_surfaces)
-        # Compute the buffer size of YUV targets
-        yuv_maxsize_1d = 0
-        for s in logical_yuv_surfaces:
-            if not ("width" in s and "height" in s):
-                if self.props is None:
-                    raise its.error.Error('Camera props are unavailable')
-                yuv_maxsize_2d = its.objects.get_available_output_sizes(
-                    "yuv", self.props)[0]
-                yuv_maxsize_1d = yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3 / 2
-                break
-        yuv_sizes = [c["width"]*c["height"]*3/2
-                     if "width" in c and "height" in c
-                     else yuv_maxsize_1d
-                     for c in logical_yuv_surfaces]
-        # Currently we don't pass enough metadta from ItsService to distinguish
-        # different yuv stream of same buffer size
-        if len(yuv_sizes) != len(set(yuv_sizes)):
-            raise its.error.Error(
-                    'ITS does not support yuv outputs of same buffer size')
-        if len(logical_cam_formats) > len(set(logical_cam_formats)):
-          if n_yuv != len(logical_cam_formats) - len(set(logical_cam_formats)) + 1:
-                raise its.error.Error('Duplicate format requested')
+
+        cam_ids = []
+        bufs = {}
+        yuv_bufs = {}
+        for i,s in enumerate(cmd["outputSurfaces"]):
+            if self._hidden_physical_id:
+                s['physicalCamera'] = self._hidden_physical_id
+
+            if 'physicalCamera' in s:
+                cam_id = s['physicalCamera']
+            else:
+                cam_id = self._camera_id
+
+            if cam_id not in cam_ids:
+                cam_ids.append(cam_id)
+                bufs[cam_id] = {"raw":[], "raw10":[], "raw12":[],
+                        "rawStats":[], "dng":[], "jpeg":[], "y8":[]}
+
+        for cam_id in cam_ids:
+            # Only allow yuv output to multiple targets
+            if cam_id == self._camera_id:
+                yuv_surfaces = [s for s in cmd["outputSurfaces"] if s["format"]=="yuv"\
+                                and "physicalCamera" not in s]
+                formats_for_id = [s["format"] for s in cmd["outputSurfaces"] if \
+                                 "physicalCamera" not in s]
+            else:
+                yuv_surfaces = [s for s in cmd["outputSurfaces"] if s["format"]=="yuv"\
+                                and "physicalCamera" in s and s["physicalCamera"] == cam_id]
+                formats_for_id = [s["format"] for s in cmd["outputSurfaces"] if \
+                                 "physicalCamera" in s and s["physicalCamera"] == cam_id]
+
+            n_yuv = len(yuv_surfaces)
+            # Compute the buffer size of YUV targets
+            yuv_maxsize_1d = 0
+            for s in yuv_surfaces:
+                if not ("width" in s and "height" in s):
+                    if self.props is None:
+                        raise its.error.Error('Camera props are unavailable')
+                    yuv_maxsize_2d = its.objects.get_available_output_sizes(
+                        "yuv", self.props)[0]
+                    yuv_maxsize_1d = yuv_maxsize_2d[0] * yuv_maxsize_2d[1] * 3 / 2
+                    break
+            yuv_sizes = [c["width"]*c["height"]*3/2
+                         if "width" in c and "height" in c
+                         else yuv_maxsize_1d
+                         for c in yuv_surfaces]
+            # Currently we don't pass enough metadata from ItsService to distinguish
+            # different yuv stream of same buffer size
+            if len(yuv_sizes) != len(set(yuv_sizes)):
+                raise its.error.Error(
+                        'ITS does not support yuv outputs of same buffer size')
+            if len(formats_for_id) > len(set(formats_for_id)):
+                if n_yuv != len(formats_for_id) - len(set(formats_for_id)) + 1:
+                    raise its.error.Error('Duplicate format requested')
+
+            yuv_bufs[cam_id] = {size:[] for size in yuv_sizes}
 
         raw_formats = 0;
         raw_formats += 1 if "dng" in formats else 0
@@ -788,9 +866,6 @@
         # the burst, however individual images of different formats can come
         # out in any order for that capture.
         nbufs = 0
-        bufs = {"raw":[], "raw10":[], "raw12":[],
-                "rawStats":[], "dng":[], "jpeg":[]}
-        yuv_bufs = {size:[] for size in yuv_sizes}
         mds = []
         physical_mds = []
         widths = None
@@ -798,14 +873,14 @@
         while nbufs < ncap*nsurf or len(mds) < ncap:
             jsonObj,buf = self.__read_response_from_socket()
             if jsonObj['tag'] in ['jpegImage', 'rawImage', \
-                    'raw10Image', 'raw12Image', 'rawStatsImage', 'dngImage'] \
+                    'raw10Image', 'raw12Image', 'rawStatsImage', 'dngImage', 'y8Image'] \
                     and buf is not None:
                 fmt = jsonObj['tag'][:-5]
-                bufs[fmt].append(buf)
+                bufs[self._camera_id][fmt].append(buf)
                 nbufs += 1
             elif jsonObj['tag'] == 'yuvImage':
                 buf_size = numpy.product(buf.shape)
-                yuv_bufs[buf_size].append(buf)
+                yuv_bufs[self._camera_id][buf_size].append(buf)
                 nbufs += 1
             elif jsonObj['tag'] == 'captureResults':
                 mds.append(jsonObj['objValue']['captureResult'])
@@ -815,42 +890,73 @@
                 heights = [out['height'] for out in outputs]
             else:
                 tagString = unicodedata.normalize('NFKD', jsonObj['tag']).encode('ascii', 'ignore');
-                for x in ['rawImage', 'raw10Image', 'raw12Image', 'yuvImage']:
-                    if (tagString.startswith(x)):
-                        physicalId = jsonObj['tag'][len(x):];
-                        if physicalId in physical_cam_ids.values():
-                            physical_buffers[physicalId].append(buf)
-                            nbufs += 1
+                for x in ['jpegImage', 'rawImage', \
+                        'raw10Image', 'raw12Image', 'rawStatsImage', 'yuvImage']:
+                    if tagString.startswith(x):
+                        if x == 'yuvImage':
+                            physicalId = jsonObj['tag'][len(x):]
+                            if physicalId in cam_ids:
+                                buf_size = numpy.product(buf.shape)
+                                yuv_bufs[physicalId][buf_size].append(buf)
+                                nbufs += 1
+                        else:
+                            physicalId = jsonObj['tag'][len(x):]
+                            if physicalId in cam_ids:
+                                fmt = x[:-5]
+                                bufs[physicalId][fmt].append(buf)
+                                nbufs += 1
         rets = []
         for j,fmt in enumerate(formats):
             objs = []
+            if "physicalCamera" in cmd["outputSurfaces"][j]:
+                cam_id = cmd["outputSurfaces"][j]["physicalCamera"]
+            else:
+                cam_id = self._camera_id
+
             for i in range(ncap):
                 obj = {}
                 obj["width"] = widths[j]
                 obj["height"] = heights[j]
                 obj["format"] = fmt
-                if j in physical_cam_ids:
-                    for physical_md in physical_mds[i]:
-                        if physical_cam_ids[j] in physical_md:
-                            obj["metadata"] = physical_md[physical_cam_ids[j]]
-                            break
-                else:
+                if cam_id == self._camera_id:
                     obj["metadata"] = mds[i]
-
-                if j in physical_cam_ids:
-                    obj["data"] = physical_buffers[physical_cam_ids[j]][i]
-                elif fmt == 'yuv':
-                    buf_size = widths[j] * heights[j] * 3 / 2
-                    obj["data"] = yuv_bufs[buf_size][i]
                 else:
-                    obj["data"] = bufs[fmt][i]
+                    for physical_md in physical_mds[i]:
+                        if cam_id in physical_md:
+                            obj["metadata"] = physical_md[cam_id]
+                            break
+
+                if fmt == "yuv":
+                    buf_size = widths[j] * heights[j] * 3 / 2
+                    obj["data"] = yuv_bufs[cam_id][buf_size][i]
+                else:
+                    obj["data"] = bufs[cam_id][fmt][i]
                 objs.append(obj)
-            rets.append(objs if ncap>1 else objs[0])
+            rets.append(objs if ncap > 1 else objs[0])
         self.sock.settimeout(self.SOCK_TIMEOUT)
-        return rets if len(rets)>1 else rets[0]
+        if len(rets) > 1 or (isinstance(rets[0], dict) and
+                             isinstance(cap_request, list)):
+            return rets
+        else:
+            return rets[0]
+
+def do_capture_with_latency(cam, req, sync_latency, fmt=None):
+    """Helper function to take enough frames with do_capture to allow sync latency.
+
+    Args:
+        cam:            camera object
+        req:            request for camera
+        sync_latency:   integer number of frames
+        fmt:            format for the capture
+    Returns:
+        single capture with the unsettled frames discarded
+    """
+    caps = cam.do_capture([req]*(sync_latency+1), fmt)
+    return caps[-1]
+
 
 def get_device_id():
-    """ Return the ID of the device that the test is running on.
+    """Return the ID of the device that the test is running on.
 
     Return the device ID provided in the command line if it's connected. If no
     device ID is provided in the command line and there is only one device
@@ -986,6 +1092,21 @@
 
     return device_bfp
 
+def parse_camera_ids(ids):
+    """ Parse the string of camera IDs into array of CameraIdCombo tuples.
+    """
+    CameraIdCombo = namedtuple('CameraIdCombo', ['id', 'sub_id'])
+    id_combos = []
+    for one_id in ids:
+        one_combo = one_id.split(':')
+        if len(one_combo) == 1:
+            id_combos.append(CameraIdCombo(one_combo[0], None))
+        elif len(one_combo) == 2:
+            id_combos.append(CameraIdCombo(one_combo[0], one_combo[1]))
+        else:
+            assert(False), 'Camera id parameters must be either ID, or ID:SUB_ID'
+    return id_combos
+
 def _run(cmd):
     """Replacement for os.system, with hiding of stdout+stderr messages.
     """
@@ -993,6 +1114,7 @@
         subprocess.check_call(
                 cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)
 
+
 class __UnitTest(unittest.TestCase):
     """Run a suite of unit tests on this module.
     """
diff --git a/apps/CameraITS/pymodules/its/image.py b/apps/CameraITS/pymodules/its/image.py
index 3ea6fa3..289bf08 100644
--- a/apps/CameraITS/pymodules/its/image.py
+++ b/apps/CameraITS/pymodules/its/image.py
@@ -81,6 +81,9 @@
         assert(props is not None)
         r,gr,gb,b = convert_capture_to_planes(cap, props)
         return convert_raw_to_rgb_image(r,gr,gb,b, props, cap["metadata"])
+    elif cap["format"] == "y8":
+        y = cap["data"][0:w*h]
+        return convert_y8_to_rgb_image(y, w, h)
     else:
         raise its.error.Error('Invalid format %s' % (cap["format"]))
 
@@ -469,6 +472,21 @@
     rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
     return rgb.astype(numpy.float32) / 255.0
 
+def convert_y8_to_rgb_image(y_plane, w, h):
+    """Convert a Y 8-bit image to an RGB image.
+
+    Args:
+        y_plane: The packed 8-bit Y plane.
+        w: The width of the image.
+        h: The height of the image.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    y3 = numpy.dstack([y_plane, y_plane, y_plane])
+    rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
+    rgb.reshape(w*h*3)[:] = y3.reshape(w*h*3)[:]
+    return rgb.astype(numpy.float32) / 255.0
 
 def load_rgb_image(fname):
     """Load a standard image file (JPG, PNG, etc.).
diff --git a/apps/CameraITS/pymodules/its/objects.py b/apps/CameraITS/pymodules/its/objects.py
index a76c7d4..3c39205 100644
--- a/apps/CameraITS/pymodules/its/objects.py
+++ b/apps/CameraITS/pymodules/its/objects.py
@@ -154,7 +154,7 @@
 
     Args:
         fmt: the output format, as a string in
-            ["jpg", "yuv", "raw", "raw10", "raw12"].
+            ["jpg", "yuv", "raw", "raw10", "raw12", "y8"].
         props: the object returned from its.device.get_camera_properties().
         max_size: (Optional) A (w,h) tuple.
             Sizes larger than max_size (either w or h)  will be discarded.
@@ -167,7 +167,7 @@
     """
     AR_TOLERANCE = 0.03
     fmt_codes = {"raw":0x20, "raw10":0x25, "raw12":0x26,"yuv":0x23,
-                 "jpg":0x100, "jpeg":0x100}
+                 "jpg":0x100, "jpeg":0x100, "y8":0x20203859}
     configs = props['android.scaler.streamConfigurationMap']\
                    ['availableStreamConfigurations']
     fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_codes[fmt]]
diff --git a/apps/CameraITS/tests/dng_noise_model/DngNoiseModel.pdf b/apps/CameraITS/tests/dng_noise_model/DngNoiseModel.pdf
deleted file mode 100644
index d979a06..0000000
--- a/apps/CameraITS/tests/dng_noise_model/DngNoiseModel.pdf
+++ /dev/null
Binary files differ
diff --git a/apps/CameraITS/tests/dng_noise_model/dng_noise_model.py b/apps/CameraITS/tests/dng_noise_model/dng_noise_model.py
deleted file mode 100644
index e8c6d19..0000000
--- a/apps/CameraITS/tests/dng_noise_model/dng_noise_model.py
+++ /dev/null
@@ -1,331 +0,0 @@
-# Copyright 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import its.device
-import its.caps
-import its.objects
-import its.image
-import os.path
-from matplotlib import pylab
-import matplotlib
-import matplotlib.pyplot as plt
-import math
-import textwrap
-import time
-import numpy as np
-import scipy.stats
-import scipy.signal
-
-
-# Convert a 2D array a to a 4D array with dimensions [tile_size,
-# tile_size, row, col] where row, col are tile indices.
-def tile(a, tile_size):
-    tile_rows, tile_cols = a.shape[0]/tile_size, a.shape[1]/tile_size
-    a = a.reshape([tile_rows, tile_size, tile_cols, tile_size])
-    a = a.transpose([1, 3, 0, 2])
-    return a
-
-
-def main():
-    """Capture a set of raw images with increasing gains and measure the noise.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-    BAYER_LIST = ['R', 'GR', 'GB', 'B']
-
-    # How many sensitivities per stop to sample.
-    steps_per_stop = 2
-    # How large of tiles to use to compute mean/variance.
-    tile_size = 64
-    # Exposure bracketing range in stops
-    bracket_stops = 4
-    # How high to allow the mean of the tiles to go.
-    max_signal_level = 0.5
-    # Colors used for plotting the data for each exposure.
-    colors = 'rygcbm'
-
-    # Define a first order high pass filter to eliminate low frequency
-    # signal content when computing variance.
-    f = np.array([-1, 1]).astype('float32')
-    # Make it a higher order filter by convolving the first order
-    # filter with itself a few times.
-    f = np.convolve(f, f)
-    f = np.convolve(f, f)
-
-    # Compute the normalization of the filter to preserve noise
-    # power. Let a be the normalization factor we're looking for, and
-    # Let X and X' be the random variables representing the noise
-    # before and after filtering, respectively. First, compute
-    # Var[a*X']:
-    #
-    #   Var[a*X'] = a^2*Var[X*f_0 + X*f_1 + ... + X*f_N-1]
-    #             = a^2*(f_0^2*Var[X] + f_1^2*Var[X] + ... + (f_N-1)^2*Var[X])
-    #             = sum(f_i^2)*a^2*Var[X]
-    #
-    # We want Var[a*X'] to be equal to Var[X]:
-    #
-    #    sum(f_i^2)*a^2*Var[X] = Var[X] -> a = sqrt(1/sum(f_i^2))
-    #
-    # We can just bake this normalization factor into the high pass
-    # filter kernel.
-    f /= math.sqrt(np.dot(f, f))
-
-    bracket_factor = math.pow(2, bracket_stops)
-
-    with its.device.ItsSession() as cam:
-        props = cam.get_camera_properties()
-
-        # Get basic properties we need.
-        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
-        sens_max_analog = props['android.sensor.maxAnalogSensitivity']
-        white_level = props['android.sensor.info.whiteLevel']
-
-        print "Sensitivity range: [%f, %f]" % (sens_min, sens_max)
-        print "Max analog sensitivity: %f" % (sens_max_analog)
-
-        # Do AE to get a rough idea of where we are.
-        s_ae, e_ae, _, _, _  = \
-            cam.do_3a(get_results=True, do_awb=False, do_af=False)
-        # Underexpose to get more data for low signal levels.
-        auto_e = s_ae*e_ae/bracket_factor
-        # Focus at zero to intentionally blur the scene as much as possible.
-        f_dist = 0.0
-
-        # If the auto-exposure result is too bright for the highest
-        # sensitivity or too dark for the lowest sensitivity, report
-        # an error.
-        min_exposure_ns, max_exposure_ns = \
-            props['android.sensor.info.exposureTimeRange']
-        if auto_e < min_exposure_ns*sens_max:
-            raise its.error.Error("Scene is too bright to properly expose \
-                                  at the highest sensitivity")
-        if auto_e*bracket_factor > max_exposure_ns*sens_min:
-            raise its.error.Error("Scene is too dark to properly expose \
-                                  at the lowest sensitivity")
-
-        # Start the sensitivities at the minimum.
-        s = sens_min
-
-        samples = [[], [], [], []]
-        plots = []
-        measured_models = [[], [], [], []]
-        while s <= sens_max + 1:
-            print "ISO %d" % round(s)
-            fig = plt.figure()
-            plt_s = fig.gca()
-            plt_s.set_title("ISO %d" % round(s))
-            plt_s.set_xlabel("Mean signal level")
-            plt_s.set_ylabel("Variance")
-
-            samples_s = [[], [], [], []]
-            for b in range(0, bracket_stops + 1):
-                # Get the exposure for this sensitivity and exposure time.
-                e = int(math.pow(2, b)*auto_e/float(s))
-                req = its.objects.manual_capture_request(round(s), e, f_dist)
-                cap = cam.do_capture(req, cam.CAP_RAW)
-                planes = its.image.convert_capture_to_planes(cap, props)
-
-                for (pidx, p) in enumerate(planes):
-                    p = p.squeeze()
-
-                    # Crop the plane to be a multiple of the tile size.
-                    p = p[0:p.shape[0] - p.shape[0]%tile_size,
-                          0:p.shape[1] - p.shape[1]%tile_size]
-
-                    # convert_capture_to_planes normalizes the range
-                    # to [0, 1], but without subtracting the black
-                    # level.
-                    black_level = its.image.get_black_level(
-                        pidx, props, cap["metadata"])
-                    p *= white_level
-                    p = (p - black_level)/(white_level - black_level)
-
-                    # Use our high pass filter to filter this plane.
-                    hp = scipy.signal.sepfir2d(p, f, f).astype('float32')
-
-                    means_tiled = \
-                        np.mean(tile(p, tile_size), axis=(0, 1)).flatten()
-                    vars_tiled = \
-                        np.var(tile(hp, tile_size), axis=(0, 1)).flatten()
-
-                    samples_e = []
-                    for (mean, var) in zip(means_tiled, vars_tiled):
-                        # Don't include the tile if it has samples that might
-                        # be clipped.
-                        if mean + 2*math.sqrt(var) < max_signal_level:
-                            samples_e.append([mean, var])
-
-                    if len(samples_e) > 0:
-                        means_e, vars_e = zip(*samples_e)
-                        plt_s.plot(means_e, vars_e, colors[b%len(colors)] + ',')
-
-                        samples_s[pidx].extend(samples_e)
-
-            for (pidx, p) in enumerate(samples_s):
-                [S, O, R, p, stderr] = scipy.stats.linregress(samples_s[pidx])
-                measured_models[pidx].append([round(s), S, O])
-                print "Sensitivity %d: %e*y + %e (R=%f)" % (round(s), S, O, R)
-
-                # Add the samples for this sensitivity to the global samples list.
-                samples[pidx].extend([(round(s), mean, var) for (mean, var) in samples_s[pidx]])
-
-                # Add the linear fit to the plot for this sensitivity.
-                plt_s.plot([0, max_signal_level], [O, O + S*max_signal_level], 'rgkb'[pidx]+'--',
-                           label="Linear fit")
-
-            xmax = max([max([x for (x, _) in p]) for p in samples_s])*1.25
-            ymax = max([max([y for (_, y) in p]) for p in samples_s])*1.25
-            plt_s.set_xlim(xmin=0, xmax=xmax)
-            plt_s.set_ylim(ymin=0, ymax=ymax)
-
-            fig.savefig("%s_samples_iso%04d.png" % (NAME, round(s)))
-            plots.append([round(s), fig])
-
-            # Move to the next sensitivity.
-            s *= math.pow(2, 1.0/steps_per_stop)
-
-        (fig, (plt_S, plt_O)) = plt.subplots(2, 1)
-        plt_S.set_title("Noise model")
-        plt_S.set_ylabel("S")
-        plt_S.legend(loc=2)
-        plt_O.set_xlabel("ISO")
-        plt_O.set_ylabel("O")
-
-        A = []
-        B = []
-        C = []
-        D = []
-        for (pidx, p) in enumerate(measured_models):
-            # Grab the sensitivities and line parameters from each sensitivity.
-            S_measured = [e[1] for e in measured_models[pidx]]
-            O_measured = [e[2] for e in measured_models[pidx]]
-            sens = np.asarray([e[0] for e in measured_models[pidx]])
-            sens_sq = np.square(sens)
-
-            # Use a global linear optimization to fit the noise model.
-            gains = np.asarray([s[0] for s in samples[pidx]])
-            means = np.asarray([s[1] for s in samples[pidx]])
-            vars_ = np.asarray([s[2] for s in samples[pidx]])
-
-            # Define digital gain as the gain above the max analog gain
-            # per the Camera2 spec. Also, define a corresponding C
-            # expression snippet to use in the generated model code.
-            digital_gains = np.maximum(gains/sens_max_analog, 1)
-            digital_gain_cdef = "(sens / %d.0) < 1.0 ? 1.0 : (sens / %d.0)" % \
-                (sens_max_analog, sens_max_analog)
-
-            # Find the noise model parameters via least squares fit.
-            ad = gains*means
-            bd = means
-            cd = gains*gains
-            dd = digital_gains*digital_gains
-            a = np.asarray([ad, bd, cd, dd]).T
-            b = vars_
-
-            # To avoid overfitting to high ISOs (high variances), divide the system
-            # by the gains.
-            a /= (np.tile(gains, (a.shape[1], 1)).T)
-            b /= gains
-
-            [A_p, B_p, C_p, D_p], _, _, _ = np.linalg.lstsq(a, b)
-            A.append(A_p)
-            B.append(B_p)
-            C.append(C_p)
-            D.append(D_p)
-
-            # Plot the noise model components with the values predicted by the
-            # noise model.
-            S_model = A_p*sens + B_p
-            O_model = \
-                C_p*sens_sq + D_p*np.square(np.maximum(sens/sens_max_analog, 1))
-
-            plt_S.loglog(sens, S_measured, 'rgkb'[pidx]+'+', basex=10, basey=10,
-                         label="Measured")
-            plt_S.loglog(sens, S_model, 'rgkb'[pidx]+'x', basex=10, basey=10, label="Model")
-
-            plt_O.loglog(sens, O_measured, 'rgkb'[pidx]+'+', basex=10, basey=10,
-                         label="Measured")
-            plt_O.loglog(sens, O_model, 'rgkb'[pidx]+'x', basex=10, basey=10, label="Model")
-
-        fig.savefig("%s.png" % (NAME))
-
-        for [s, fig] in plots:
-            plt_s = fig.gca()
-
-            dg = max(s/sens_max_analog, 1)
-            for (pidx, p) in enumerate(measured_models):
-                S = A[pidx]*s + B[pidx]
-                O = C[pidx]*s*s + D[pidx]*dg*dg
-                plt_s.plot([0, max_signal_level], [O, O + S*max_signal_level], 'rgkb'[pidx]+'-',
-                           label="Model")
-
-            plt_s.legend(loc=2)
-            plt.figure(fig.number)
-
-            # Re-save the plot with the global model.
-            fig.savefig("%s_samples_iso%04d.png" % (NAME, round(s)))
-
-          # Generate the noise model implementation.
-        A_array = ",".join([str(i) for i in A])
-        B_array = ",".join([str(i) for i in B])
-        C_array = ",".join([str(i) for i in C])
-        D_array = ",".join([str(i) for i in D])
-        noise_model_code = textwrap.dedent("""\
-            /* Generated test code to dump a table of data for external validation
-             * of the noise model parameters.
-             */
-            #include <stdio.h>
-            #include <assert.h>
-            double compute_noise_model_entry_S(int plane, int sens);
-            double compute_noise_model_entry_O(int plane, int sens);
-            int main(void) {
-                for (int plane = 0; plane < %d; plane++) {
-                    for (int sens = %d; sens <= %d; sens += 100) {
-                        double o = compute_noise_model_entry_O(plane, sens);
-                        double s = compute_noise_model_entry_S(plane, sens);
-                        printf("%%d,%%d,%%lf,%%lf\\n", plane, sens, o, s);
-                    }
-                }
-                return 0;
-            }
-
-            /* Generated functions to map a given sensitivity to the O and S noise
-             * model parameters in the DNG noise model. The planes are in
-             * R, Gr, Gb, B order.
-             */
-            double compute_noise_model_entry_S(int plane, int sens) {
-                static double noise_model_A[] = { %s };
-                static double noise_model_B[] = { %s };
-                double A = noise_model_A[plane];
-                double B = noise_model_B[plane];
-                double s = A * sens + B;
-                return s < 0.0 ? 0.0 : s;
-            }
-
-            double compute_noise_model_entry_O(int plane, int sens) {
-                static double noise_model_C[] = { %s };
-                static double noise_model_D[] = { %s };
-                double digital_gain = %s;
-                double C = noise_model_C[plane];
-                double D = noise_model_D[plane];
-                double o = C * sens * sens + D * digital_gain * digital_gain;
-                return o < 0.0 ? 0.0 : o;
-            }
-            """ % (len(A), sens_min, sens_max, A_array, B_array, C_array, D_array, digital_gain_cdef))
-        print noise_model_code
-        text_file = open("noise_model.c", "w")
-        text_file.write("%s" % noise_model_code)
-        text_file.close()
-
-if __name__ == '__main__':
-    main()
diff --git a/apps/CameraITS/tests/scene0/test_burst_capture.py b/apps/CameraITS/tests/scene0/test_burst_capture.py
index c573584..b21884e 100644
--- a/apps/CameraITS/tests/scene0/test_burst_capture.py
+++ b/apps/CameraITS/tests/scene0/test_burst_capture.py
@@ -31,6 +31,7 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.backward_compatible(props))
         req = its.objects.auto_capture_request()
         caps = cam.do_capture([req]*NUM_TEST_FRAMES)
diff --git a/apps/CameraITS/tests/scene0/test_camera_properties.py b/apps/CameraITS/tests/scene0/test_camera_properties.py
deleted file mode 100644
index dbd528d..0000000
--- a/apps/CameraITS/tests/scene0/test_camera_properties.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import its.caps
-import its.device
-import its.objects
-import pprint
-
-def main():
-    """Basic test to query and print out camera properties.
-    """
-
-    with its.device.ItsSession() as cam:
-        props = cam.get_camera_properties()
-
-        pprint.pprint(props)
-
-        # Test that a handful of required keys are present.
-        assert(props.has_key('android.sensor.info.sensitivityRange'))
-        assert(props.has_key('android.sensor.orientation'))
-        assert(props.has_key('android.scaler.streamConfigurationMap'))
-        assert(props.has_key('android.lens.facing'))
-
-        print "JPG sizes:", its.objects.get_available_output_sizes("jpg", props)
-        print "RAW sizes:", its.objects.get_available_output_sizes("raw", props)
-        print "YUV sizes:", its.objects.get_available_output_sizes("yuv", props)
-
-if __name__ == '__main__':
-    main()
-
diff --git a/apps/CameraITS/tests/scene0/test_gyro_bias.py b/apps/CameraITS/tests/scene0/test_gyro_bias.py
index 44be95f..c860ac8 100644
--- a/apps/CameraITS/tests/scene0/test_gyro_bias.py
+++ b/apps/CameraITS/tests/scene0/test_gyro_bias.py
@@ -12,48 +12,45 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os
+import time
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import time
-from matplotlib import pylab
-import os.path
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
 import numpy
 
+NAME = os.path.basename(__file__).split('.')[0]
+N = 20  # Number of samples averaged together, in the plot.
+MEAN_THRESH = 0.01  # PASS/FAIL threshold for gyro mean drift
+VAR_THRESH = 0.001  # PASS/FAIL threshold for gyro variance drift
+
+
 def main():
     """Test if the gyro has stable output when device is stationary.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    # Number of samples averaged together, in the plot.
-    N = 20
-
-    # Pass/fail thresholds for gyro drift
-    MEAN_THRESH = 0.01
-    VAR_THRESH = 0.001
-
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
         # Only run test if the appropriate caps are claimed.
         its.caps.skip_unless(its.caps.sensor_fusion(props) and
             cam.get_sensors().get("gyro"))
 
-        print "Collecting gyro events"
+        print 'Collecting gyro events'
         cam.start_sensor_events()
         time.sleep(5)
-        gyro_events = cam.get_sensor_events()["gyro"]
+        gyro_events = cam.get_sensor_events()['gyro']
 
     nevents = (len(gyro_events) / N) * N
     gyro_events = gyro_events[:nevents]
-    times = numpy.array([(e["time"] - gyro_events[0]["time"])/1000000000.0
+    times = numpy.array([(e['time'] - gyro_events[0]['time'])/1000000000.0
                          for e in gyro_events])
-    xs = numpy.array([e["x"] for e in gyro_events])
-    ys = numpy.array([e["y"] for e in gyro_events])
-    zs = numpy.array([e["z"] for e in gyro_events])
+    xs = numpy.array([e['x'] for e in gyro_events])
+    ys = numpy.array([e['y'] for e in gyro_events])
+    zs = numpy.array([e['z'] for e in gyro_events])
 
     # Group samples into size-N groups and average each together, to get rid
     # of individual random spikes in the data.
@@ -62,17 +59,19 @@
     ys = ys.reshape(nevents/N, N).mean(1)
     zs = zs.reshape(nevents/N, N).mean(1)
 
-    pylab.plot(times, xs, 'r', label="x")
-    pylab.plot(times, ys, 'g', label="y")
-    pylab.plot(times, zs, 'b', label="z")
-    pylab.xlabel("Time (seconds)")
-    pylab.ylabel("Gyro readings (mean of %d samples)"%(N))
+    pylab.plot(times, xs, 'r', label='x')
+    pylab.plot(times, ys, 'g', label='y')
+    pylab.plot(times, zs, 'b', label='z')
+    pylab.xlabel('Time (seconds)')
+    pylab.ylabel('Gyro readings (mean of %d samples)'%(N))
     pylab.legend()
-    matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
+    matplotlib.pyplot.savefig('%s_plot.png' % (NAME))
 
-    for samples in [xs,ys,zs]:
-        assert(samples.mean() < MEAN_THRESH)
-        assert(numpy.var(samples) < VAR_THRESH)
+    for samples in [xs, ys, zs]:
+        mean = samples.mean()
+        var = numpy.var(samples)
+        assert mean < MEAN_THRESH, 'mean: %.3f, TOL=%.2f' % (mean, MEAN_THRESH)
+        assert var < VAR_THRESH, 'var: %.4f, TOL=%.3f' % (var, VAR_THRESH)
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene0/test_jitter.py b/apps/CameraITS/tests/scene0/test_jitter.py
index 6a156dd..1bc0855 100644
--- a/apps/CameraITS/tests/scene0/test_jitter.py
+++ b/apps/CameraITS/tests/scene0/test_jitter.py
@@ -12,24 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
-from matplotlib import pylab
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+# PASS/FAIL thresholds
+TEST_FPS = 30
+MIN_AVG_FRAME_DELTA = 30  # at least 30ms delta between frames
+MAX_VAR_FRAME_DELTA = 0.01  # variance of frame deltas
+MAX_FRAME_DELTA_JITTER = 0.3  # max ms gap from the average frame delta
+
+NAME = os.path.basename(__file__).split('.')[0]
+
 
 def main():
-    """Measure jitter in camera timestamps.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    # Pass/fail thresholds
-    MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames
-    MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas
-    MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta
+    """Measure jitter in camera timestamps."""
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -37,29 +40,37 @@
                              its.caps.sensor_fusion(props))
 
         req, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        req["android.control.aeTargetFpsRange"] = [TEST_FPS, TEST_FPS]
         caps = cam.do_capture([req]*50, [fmt])
 
         # Print out the millisecond delta between the start of each exposure
         tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]
-        deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]
+        deltas = [tstamps[i]-tstamps[i-1] for i in range(1, len(tstamps))]
         deltas_ms = [d/1000000.0 for d in deltas]
         avg = sum(deltas_ms) / len(deltas_ms)
         var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg
         range0 = min(deltas_ms) - avg
         range1 = max(deltas_ms) - avg
-        print "Average:", avg
-        print "Variance:", var
-        print "Jitter range:", range0, "to", range1
+        print 'Average:', avg
+        print 'Variance:', var
+        print 'Jitter range:', range0, 'to', range1
 
         # Draw a plot.
         pylab.plot(range(len(deltas_ms)), deltas_ms)
-        matplotlib.pyplot.savefig("%s_deltas.png" % (NAME))
+        pylab.title(NAME)
+        pylab.xlabel('frame number')
+        pylab.ylabel('jitter (ms)')
+        matplotlib.pyplot.savefig('%s_deltas.png' % (NAME))
 
         # Test for pass/fail.
-        assert(avg > MIN_AVG_FRAME_DELTA)
-        assert(var < MAX_VAR_FRAME_DELTA)
-        assert(abs(range0) < MAX_FRAME_DELTA_JITTER)
-        assert(abs(range1) < MAX_FRAME_DELTA_JITTER)
+        emsg = 'avg: %.4fms, TOL: %.fms' % (avg, MIN_AVG_FRAME_DELTA)
+        assert avg > MIN_AVG_FRAME_DELTA, emsg
+        emsg = 'var: %.4fms, TOL: %.2fms' % (var, MAX_VAR_FRAME_DELTA)
+        assert var < MAX_VAR_FRAME_DELTA, emsg
+        emsg = 'range0: %.4fms, range1: %.4fms, TOL: %.2fms' % (
+                range0, range1, MAX_FRAME_DELTA_JITTER)
+        assert abs(range0) < MAX_FRAME_DELTA_JITTER, emsg
+        assert abs(range1) < MAX_FRAME_DELTA_JITTER, emsg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene0/test_metadata.py b/apps/CameraITS/tests/scene0/test_metadata.py
index b8949b1..4468c5d 100644
--- a/apps/CameraITS/tests/scene0/test_metadata.py
+++ b/apps/CameraITS/tests/scene0/test_metadata.py
@@ -31,6 +31,7 @@
         # Arbitrary capture request exposure values; image content is not
         # important for this test, only the metadata.
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.backward_compatible(props))
         auto_req = its.objects.auto_capture_request()
         cap = cam.do_capture(auto_req)
diff --git a/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
index b716141..7923f95 100644
--- a/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
+++ b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
@@ -37,6 +37,7 @@
         sens_step = (sens_range[1] - sens_range[0]) / NUM_STEPS
         sens_list = range(sens_range[0], sens_range[1], sens_step)
         e = min(props['android.sensor.info.exposureTimeRange'])
+        assert e != 0
         reqs = [its.objects.manual_capture_request(s, e) for s in sens_list]
         _, fmt = its.objects.get_fastest_manual_capture_settings(props)
 
diff --git a/apps/CameraITS/tests/scene0/test_read_write.py b/apps/CameraITS/tests/scene0/test_read_write.py
index 1b76806..0f8a7a6 100644
--- a/apps/CameraITS/tests/scene0/test_read_write.py
+++ b/apps/CameraITS/tests/scene0/test_read_write.py
@@ -29,18 +29,13 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.manual_sensor(props) and
                              its.caps.per_frame_control(props))
 
-        # determine capture format
-        debug = its.caps.debug_mode()
-        largest_yuv = its.objects.get_largest_yuv_format(props)
-        if debug:
-            fmt = largest_yuv
-        else:
-            match_ar = (largest_yuv['width'], largest_yuv['height'])
-            fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
-
+        valid_formats = ['yuv', 'jpg']
+        if its.caps.raw16(props):
+            valid_formats.insert(0, 'raw')
         # grab exp/gain ranges from camera
         sensor_exp_range = props['android.sensor.info.exposureTimeRange']
         sens_range = props['android.sensor.info.sensitivityRange']
@@ -48,6 +43,7 @@
         print 'sensor s range:', sens_range
 
         # determine if exposure test range is within sensor reported range
+        assert sensor_exp_range[0] != 0
         exp_range = []
         if sensor_exp_range[0] < TEST_EXP_RANGE[0]:
             exp_range.append(TEST_EXP_RANGE[0])
@@ -58,55 +54,70 @@
         else:
             exp_range.append(sensor_exp_range[1])
 
-        # build requests
-        reqs = []
-        index_list = []
-        for exp in exp_range:
-            for sens in sens_range:
-                reqs.append(its.objects.manual_capture_request(sens, exp))
-                index_list.append((exp, sens))
-
-        # take shots
-        caps = cam.do_capture(reqs, fmt)
-
-        # extract exp/sensitivity data
         data = {}
-        for i, cap in enumerate(caps):
-            e_read = cap['metadata']['android.sensor.exposureTime']
-            s_read = cap['metadata']['android.sensor.sensitivity']
-            data[index_list[i]] = (e_read, s_read)
+        # build requests
+        for fmt in valid_formats:
+            print 'format: %s' % fmt
+            size = its.objects.get_available_output_sizes(fmt, props)[-1]
+            out_surface = {'width': size[0], 'height': size[1], 'format': fmt}
+            if cam._hidden_physical_id:
+                out_surface['physicalCamera'] = cam._hidden_physical_id
+            reqs = []
+            index_list = []
+            for exp in exp_range:
+                for sens in sens_range:
+                    reqs.append(its.objects.manual_capture_request(sens, exp))
+                    index_list.append((fmt, exp, sens))
+                    print 'exp_write: %d, sens_write: %d' % (exp, sens)
+
+            # take shots
+            caps = cam.do_capture(reqs, out_surface)
+
+            # extract exp/sensitivity data
+            for i, cap in enumerate(caps):
+                e_read = cap['metadata']['android.sensor.exposureTime']
+                s_read = cap['metadata']['android.sensor.sensitivity']
+                data[index_list[i]] = (fmt, e_read, s_read)
 
         # check read/write match across all shots
         e_failed = []
         s_failed = []
-        for e_write in exp_range:
-            for s_write in sens_range:
-                (e_read, s_read) = data[(e_write, s_write)]
-                if e_write < e_read or e_read/float(e_write) <= RTOL_EXP_GAIN:
-                    e_failed.append({'e_write': e_write,
-                                     'e_read': e_read,
-                                     's_write': s_write,
-                                     's_read': s_read})
-                if s_write < s_read or s_read/float(s_write) <= RTOL_EXP_GAIN:
-                    s_failed.append({'e_write': e_write,
-                                     'e_read': e_read,
-                                     's_write': s_write,
-                                     's_read': s_read})
+        for fmt_write in valid_formats:
+            for e_write in exp_range:
+                for s_write in sens_range:
+                    fmt_read, e_read, s_read = data[(
+                            fmt_write, e_write, s_write)]
+                    if (e_write < e_read or
+                                e_read/float(e_write) <= RTOL_EXP_GAIN):
+                        e_failed.append({'format': fmt_read,
+                                         'e_write': e_write,
+                                         'e_read': e_read,
+                                         's_write': s_write,
+                                         's_read': s_read})
+                    if (s_write < s_read or
+                                s_read/float(s_write) <= RTOL_EXP_GAIN):
+                        s_failed.append({'format': fmt_read,
+                                         'e_write': e_write,
+                                         'e_read': e_read,
+                                         's_write': s_write,
+                                         's_read': s_read})
 
         # print results
         if e_failed:
             print '\nFAILs for exposure time'
             for fail in e_failed:
-                print ' e_write: %d, e_read: %d, RTOL: %.2f, ' % (
-                        fail['e_write'], fail['e_read'], RTOL_EXP_GAIN),
+                print ' format: %s, e_write: %d, e_read: %d, RTOL: %.2f, ' % (
+                        fail['format'], fail['e_write'], fail['e_read'],
+                        RTOL_EXP_GAIN),
                 print 's_write: %d, s_read: %d, RTOL: %.2f' % (
                         fail['s_write'], fail['s_read'], RTOL_EXP_GAIN)
         if s_failed:
             print 'FAILs for sensitivity(ISO)'
             for fail in s_failed:
-                print 's_write: %d, s_read: %d, RTOL: %.2f, ' % (
-                        fail['s_write'], fail['s_read'], RTOL_EXP_GAIN),
-                print ' e_write: %d, e_read: %d, RTOL: %.2f' % (
+                print ' format: %s, s_write: %d, s_read: %d, RTOL: %.2f, ' % (
+                        fail['format'], fail['s_write'], fail['s_read'],
+                        RTOL_EXP_GAIN),
+                print 'e_write: %d, e_read: %d, RTOL: %.2f' % (
                         fail['e_write'], fail['e_read'], RTOL_EXP_GAIN)
 
         # assert PASS/FAIL
diff --git a/apps/CameraITS/tests/scene0/test_sensor_events.py b/apps/CameraITS/tests/scene0/test_sensor_events.py
index d3226b3..cc0e647 100644
--- a/apps/CameraITS/tests/scene0/test_sensor_events.py
+++ b/apps/CameraITS/tests/scene0/test_sensor_events.py
@@ -25,6 +25,7 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         # Only run test if the appropriate caps are claimed.
         its.caps.skip_unless(its.caps.sensor_fusion(props))
 
diff --git a/apps/CameraITS/tests/scene0/test_tonemap_curve.py b/apps/CameraITS/tests/scene0/test_tonemap_curve.py
new file mode 100644
index 0000000..96a495a
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_tonemap_curve.py
@@ -0,0 +1,171 @@
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import its.caps
+import its.device
+import its.image
+import its.objects
+import numpy as np
+
+NAME = os.path.basename(__file__).split('.')[0]
+PATTERN = 2  # Note scene0/test_test_patterns must PASS
+COLOR_BARS = ['WHITE', 'YELLOW', 'CYAN', 'GREEN', 'MAGENTA', 'RED',
+              'BLUE', 'BLACK']
+COLOR_CHECKER = {'BLACK': [0, 0, 0], 'RED': [1, 0, 0], 'GREEN': [0, 1, 0],
+                 'BLUE': [0, 0, 1], 'MAGENTA': [1, 0, 1], 'CYAN': [0, 1, 1],
+                 'YELLOW': [1, 1, 0], 'WHITE': [1, 1, 1]}
+DELTA = 0.0005  # crop on edge of color bars
+RAW_TOL = 0.001  # 1 DN in [0:1] (1/(1023-64)
+RGB_VAR_TOL = 0.0039  # 1/255
+RGB_MEAN_TOL = 0.1
+TONEMAP_MAX = 0.5
+
+
+def check_raw_pattern(img_raw):
+    """Check for RAW capture matches color bar pattern.
+
+    Args:
+        img_raw: RAW image
+    """
+
+    print 'Checking RAW/PATTERN match'
+    n_bars = len(COLOR_BARS)
+    color_match = []
+    for i in range(n_bars):
+        print 'patch:', i,
+        raw_patch = its.image.get_image_patch(
+                img_raw, float(i)/n_bars+DELTA, 0.0, 1.0/n_bars-2*DELTA, 1.0)
+        raw_means = its.image.compute_image_means(raw_patch)
+        for color in COLOR_BARS:
+            if np.allclose(COLOR_CHECKER[color], raw_means, atol=RAW_TOL):
+                color_match.append(color)
+                print '%s' % color
+    assert set(color_match) == set(COLOR_BARS), 'RAW does not have all colors'
+
+
+def check_yuv_vs_raw(img_raw, img_yuv):
+    """Check for YUV vs RAW match in 8 patches.
+
+    Check for correct values and color consistency
+
+    Args:
+        img_raw: RAW image
+        img_yuv: YUV image
+    """
+
+    print 'Checking YUV/RAW match'
+    n_bars = len(COLOR_BARS)
+    color_match_errs = []
+    color_variance_errs = []
+    for i in range(n_bars):
+        raw_patch = its.image.get_image_patch(
+                img_raw, float(i)/n_bars+DELTA, 0.0, 1.0/n_bars-2*DELTA, 1.0)
+        yuv_patch = its.image.get_image_patch(
+                img_yuv, float(i)/n_bars+DELTA, 0.0, 1.0/n_bars-2*DELTA, 1.0)
+        raw_means = np.array(its.image.compute_image_means(raw_patch))
+        raw_vars = np.array(its.image.compute_image_variances(raw_patch))
+        yuv_means = np.array(its.image.compute_image_means(yuv_patch))
+        yuv_means /= TONEMAP_MAX  # Normalize to tonemap max
+        yuv_vars = np.array(its.image.compute_image_variances(yuv_patch))
+        if not np.allclose(raw_means, yuv_means, atol=RGB_MEAN_TOL):
+            color_match_errs.append('RAW: %s, RGB(norm): %s, ATOL: %.2f' % (
+                    str(raw_means), str(np.round(yuv_means, 3)), RGB_MEAN_TOL))
+        if not np.allclose(raw_vars, yuv_vars, atol=RGB_VAR_TOL):
+            color_variance_errs.append('RAW: %s, RGB: %s, ATOL: %.4f' % (
+                    str(raw_vars), str(yuv_vars), RGB_VAR_TOL))
+    if color_match_errs:
+        print '\nColor match errors'
+        for err in color_match_errs:
+            print err
+    if color_variance_errs:
+        print '\nColor variance errors'
+        for err in color_variance_errs:
+            print err
+    assert not color_match_errs
+    assert not color_variance_errs
+
+
+def test_tonemap_curve(cam, props):
+    """test tonemap curve with sensor test pattern.
+
+    Args:
+        cam: An open device session.
+        props: Properties of cam
+    """
+
+    avail_patterns = props['android.sensor.availableTestPatternModes']
+    print 'avail_patterns: ', avail_patterns
+    sens_min, _ = props['android.sensor.info.sensitivityRange']
+    exp = min(props['android.sensor.info.exposureTimeRange'])
+
+    # Linear tonemap with maximum of 0.5
+    tmap = sum([[i/63.0, i/126.0] for i in range(64)], [])
+
+    if PATTERN in avail_patterns:
+        # RAW image
+        req_raw = its.objects.manual_capture_request(int(sens_min), exp)
+        req_raw['android.sensor.testPatternMode'] = PATTERN
+        fmt_raw = {'format': 'raw'}
+        cap_raw = cam.do_capture(req_raw, fmt_raw)
+        img_raw = its.image.convert_capture_to_rgb_image(
+                cap_raw, props=props)
+
+        # Save RAW pattern
+        its.image.write_image(img_raw, '%s_raw_%d.jpg' % (
+                NAME, PATTERN), True)
+        check_raw_pattern(img_raw)
+
+        # YUV image
+        req_yuv = its.objects.manual_capture_request(int(sens_min), exp)
+        req_yuv['android.sensor.testPatternMode'] = PATTERN
+        req_yuv['android.distortionCorrection.mode'] = 0
+        req_yuv['android.tonemap.mode'] = 0
+        req_yuv['android.tonemap.curve'] = {
+                'red': tmap, 'green': tmap, 'blue': tmap}
+        fmt_yuv = {'format': 'yuv', 'width': 640, 'height': 480}
+        cap_yuv = cam.do_capture(req_yuv, fmt_yuv)
+        img_yuv = its.image.convert_capture_to_rgb_image(cap_yuv, True)
+
+        # Save YUV pattern
+        its.image.write_image(img_yuv, '%s_yuv_%d.jpg' % (
+                NAME, PATTERN), True)
+
+        # Check pattern for correctness
+        check_yuv_vs_raw(img_raw, img_yuv)
+    else:
+        print 'Pattern not in android.sensor.availableTestPatternModes.'
+        assert 0
+
+
+def main():
+    """Test conversion of test pattern from RAW to YUV.
+
+    android.sensor.testPatternMode
+    2: COLOR_BARS
+    """
+
+    print '\nStarting %s' % NAME
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.raw16(props) and
+                             its.caps.manual_sensor(props) and
+                             its.caps.per_frame_control(props) and
+                             its.caps.manual_post_proc(props))
+
+        test_tonemap_curve(cam, props)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene0/test_unified_timestamps.py b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
index 5a9228e..008351d 100644
--- a/apps/CameraITS/tests/scene0/test_unified_timestamps.py
+++ b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import time
+
+import its.caps
 import its.device
 import its.objects
-import its.caps
-import time
+
 
 def main():
     """Test if image and motion sensor events are in the same time domain.
@@ -37,7 +39,7 @@
         ts_image0 = cap['metadata']['android.sensor.timestamp']
 
         # Get the timestamps of motion events.
-        print "Reading sensor measurements"
+        print 'Reading sensor measurements'
         sensors = cam.get_sensors()
         cam.start_sensor_events()
         time.sleep(2.0)
@@ -46,20 +48,20 @@
         ts_sensor_last = {}
         for sensor, existing in sensors.iteritems():
             if existing:
-                assert(len(events[sensor]) > 0)
-                ts_sensor_first[sensor] = events[sensor][0]["time"]
-                ts_sensor_last[sensor] = events[sensor][-1]["time"]
+                assert events[sensor], '%s sensor has no events!' % sensor
+                ts_sensor_first[sensor] = events[sensor][0]['time']
+                ts_sensor_last[sensor] = events[sensor][-1]['time']
 
         # Get the timestamp of another image.
         cap = cam.do_capture(req, fmt)
         ts_image1 = cap['metadata']['android.sensor.timestamp']
 
-        print "Image timestamps:", ts_image0, ts_image1
+        print 'Image timestamps:', ts_image0, ts_image1
 
         # The motion timestamps must be between the two image timestamps.
         for sensor, existing in sensors.iteritems():
             if existing:
-                print "%s timestamps: %d %d" % (sensor, ts_sensor_first[sensor],
+                print '%s timestamps: %d %d' % (sensor, ts_sensor_first[sensor],
                                                 ts_sensor_last[sensor])
                 assert ts_image0 < ts_sensor_first[sensor] < ts_image1
                 assert ts_image0 < ts_sensor_last[sensor] < ts_image1
diff --git a/apps/CameraITS/tests/scene1/scene1_0.5_scaled.pdf b/apps/CameraITS/tests/scene1/scene1_0.5_scaled.pdf
new file mode 100644
index 0000000..92753c4
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/scene1_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
index a7b5add..e5d33f3 100644
--- a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
+++ b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
@@ -12,14 +12,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
-import math
 import numpy as np
 
+NAME = os.path.basename(__file__).split(".")[0]
+
+
 def main():
     """Capture auto and manual shots that should look the same.
 
@@ -29,12 +32,10 @@
     however there can be variations in brightness/contrast due to different
     "auto" ISP blocks that may be disabled in the manual flows.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.manual_sensor(props) and
-                             its.caps.manual_post_proc(props) and
+        its.caps.skip_unless(its.caps.read_3a(props) and
                              its.caps.per_frame_control(props))
         mono_camera = its.caps.mono_camera(props)
 
@@ -44,7 +45,7 @@
         if debug:
             fmt = largest_yuv
         else:
-            match_ar = (largest_yuv['width'], largest_yuv['height'])
+            match_ar = (largest_yuv["width"], largest_yuv["height"])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
         sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
                                                    mono_camera=mono_camera)
@@ -79,10 +80,10 @@
         print "Manual wb transform:", xform_m1
 
         # Manual capture 2: WB + tonemap
-        gamma = sum([[i/63.0,math.pow(i/63.0,1/2.2)] for i in xrange(64)],[])
+        gamma = sum([[i/63.0, math.pow(i/63.0, 1/2.2)] for i in xrange(64)], [])
         req["android.tonemap.mode"] = 0
         req["android.tonemap.curve"] = {
-            "red": gamma, "green": gamma, "blue": gamma}
+                "red": gamma, "green": gamma, "blue": gamma}
         cap_man2 = cam.do_capture(req, fmt)
         img_man2 = its.image.convert_capture_to_rgb_image(cap_man2)
         its.image.write_image(img_man2, "%s_manual_wb_tm.jpg" % (NAME))
@@ -94,14 +95,14 @@
 
         # Check that the WB gains and transform reported in each capture
         # result match with the original AWB estimate from do_3a.
-        for g,x in [(gains_m1,xform_m1),(gains_m2,xform_m2)]:
-            assert(all([abs(xform[i] - x[i]) < 0.05 for i in range(9)]))
-            assert(all([abs(gains[i] - g[i]) < 0.05 for i in range(4)]))
+        for g, x in [(gains_m1, xform_m1), (gains_m2, xform_m2)]:
+            assert all([abs(xform[i] - x[i]) < 0.05 for i in range(9)])
+            assert all([abs(gains[i] - g[i]) < 0.05 for i in range(4)])
 
         # Check that auto AWB settings are close
-        assert(all([np.isclose(xform_a[i], xform[i], rtol=0.25, atol=0.1) for i in range(9)]))
-        assert(all([np.isclose(gains_a[i], gains[i], rtol=0.25, atol=0.1) for i in range(4)]))
+        assert all([np.isclose(xform_a[i], xform[i], rtol=0.25, atol=0.1) for i in range(9)])
+        assert all([np.isclose(gains_a[i], gains[i], rtol=0.25, atol=0.1) for i in range(4)])
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_black_white.py b/apps/CameraITS/tests/scene1/test_black_white.py
index 18bc001..bd5f096 100644
--- a/apps/CameraITS/tests/scene1/test_black_white.py
+++ b/apps/CameraITS/tests/scene1/test_black_white.py
@@ -12,19 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
-from matplotlib import pylab
-import os.path
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split(".")[0]
+
 
 def main():
-    """Test that the device will produce full black+white images.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
+    """Test that the device will produce full black+white images."""
 
     r_means = []
     g_means = []
@@ -32,62 +33,73 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.manual_sensor(props) and
-                             its.caps.per_frame_control(props))
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+        sync_latency = its.caps.sync_latency(props)
 
         debug = its.caps.debug_mode()
         largest_yuv = its.objects.get_largest_yuv_format(props)
         if debug:
             fmt = largest_yuv
         else:
-            match_ar = (largest_yuv['width'], largest_yuv['height'])
+            match_ar = (largest_yuv["width"], largest_yuv["height"])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        expt_range = props['android.sensor.info.exposureTimeRange']
-        sens_range = props['android.sensor.info.sensitivityRange']
+        expt_range = props["android.sensor.info.exposureTimeRange"]
+        sens_range = props["android.sensor.info.sensitivityRange"]
 
         # Take a shot with very low ISO and exposure time. Expect it to
         # be black.
-        print "Black shot: sens = %d, exp time = %.4fms" % (
-                sens_range[0], expt_range[0]/1000000.0)
         req = its.objects.manual_capture_request(sens_range[0], expt_range[0])
-        cap = cam.do_capture(req, fmt)
+        cap = its.device.do_capture_with_latency(cam, req, sync_latency, fmt)
         img = its.image.convert_capture_to_rgb_image(cap)
-        its.image.write_image(img, "%s_black.jpg" % (NAME))
+        its.image.write_image(img, "%s_black.jpg" % NAME)
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         black_means = its.image.compute_image_means(tile)
         r_means.append(black_means[0])
         g_means.append(black_means[1])
         b_means.append(black_means[2])
         print "Dark pixel means:", black_means
+        r_exp = cap["metadata"]["android.sensor.exposureTime"]
+        r_iso = cap["metadata"]["android.sensor.sensitivity"]
+        print "Black shot write values: sens = %d, exp time = %.4fms" % (
+                sens_range[0], expt_range[0]/1000000.0)
+        print "Black shot read values: sens = %d, exp time = %.4fms\n" % (
+                r_iso, r_exp/1000000.0)
 
         # Take a shot with very high ISO and exposure time. Expect it to
         # be white.
-        print "White shot: sens = %d, exp time = %.2fms" % (
-                sens_range[1], expt_range[1]/1000000.0)
         req = its.objects.manual_capture_request(sens_range[1], expt_range[1])
-        cap = cam.do_capture(req, fmt)
+        cap = its.device.do_capture_with_latency(cam, req, sync_latency, fmt)
         img = its.image.convert_capture_to_rgb_image(cap)
-        its.image.write_image(img, "%s_white.jpg" % (NAME))
+        its.image.write_image(img, "%s_white.jpg" % NAME)
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         white_means = its.image.compute_image_means(tile)
         r_means.append(white_means[0])
         g_means.append(white_means[1])
         b_means.append(white_means[2])
         print "Bright pixel means:", white_means
+        r_exp = cap["metadata"]["android.sensor.exposureTime"]
+        r_iso = cap["metadata"]["android.sensor.sensitivity"]
+        print "White shot write values: sens = %d, exp time = %.2fms" % (
+                sens_range[1], expt_range[1]/1000000.0)
+        print "White shot read values: sens = %d, exp time = %.2fms\n" % (
+                r_iso, r_exp/1000000.0)
 
         # Draw a plot.
-        pylab.plot([0,1], r_means, 'r')
-        pylab.plot([0,1], g_means, 'g')
-        pylab.plot([0,1], b_means, 'b')
-        pylab.ylim([0,1])
+        pylab.title("test_black_white")
+        pylab.plot([0, 1], r_means, "-ro")
+        pylab.plot([0, 1], g_means, "-go")
+        pylab.plot([0, 1], b_means, "-bo")
+        pylab.xlabel("Capture Number")
+        pylab.ylabel("Output Values (Normalized)")
+        pylab.ylim([0, 1])
         matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
 
-        for val in black_means:
-            assert(val < 0.025)
-        for val in white_means:
-            assert(val > 0.975)
+        for black_mean in black_means:
+            assert black_mean < 0.025
+        for white_mean in white_means:
+            assert white_mean > 0.975
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
index edb8995..92239db 100644
--- a/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
+++ b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
@@ -12,14 +12,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
+
+from matplotlib import pylab
+import matplotlib.pyplot
 import numpy
 
+BURST_LEN = 50
+BURSTS = 5
+COLORS = ["R", "G", "B"]
+FRAMES = BURST_LEN * BURSTS
+NAME = os.path.basename(__file__).split(".")[0]
+SPREAD_THRESH = 0.03
+
+
 def main():
     """Take long bursts of images and check that they're all identical.
 
@@ -27,25 +38,19 @@
     frames that are processed differently or have artifacts. Uses manual
     capture settings.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    BURST_LEN = 50
-    BURSTS = 5
-    FRAMES = BURST_LEN * BURSTS
-
-    SPREAD_THRESH = 0.03
 
     with its.device.ItsSession() as cam:
 
         # Capture at the smallest resolution.
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.manual_sensor(props) and
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
+        debug = its.caps.debug_mode()
 
         _, fmt = its.objects.get_fastest_manual_capture_settings(props)
         e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
         req = its.objects.manual_capture_request(s, e)
-        w,h = fmt["width"], fmt["height"]
+        w, h = fmt["width"], fmt["height"]
 
         # Capture bursts of YUV shots.
         # Get the mean values of a center patch for each.
@@ -53,10 +58,10 @@
         r_means = []
         g_means = []
         b_means = []
-        imgs = numpy.empty([FRAMES,h,w,3])
+        imgs = numpy.empty([FRAMES, h, w, 3])
         for j in range(BURSTS):
             caps = cam.do_capture([req]*BURST_LEN, [fmt])
-            for i,cap in enumerate(caps):
+            for i, cap in enumerate(caps):
                 n = j*BURST_LEN + i
                 imgs[n] = its.image.convert_capture_to_rgb_image(cap)
                 tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
@@ -65,21 +70,35 @@
                 g_means.append(means[1])
                 b_means.append(means[2])
 
-        # Dump all images.
-        print "Dumping images"
-        for i in range(FRAMES):
-            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+        # Dump all images if debug
+        if debug:
+            print "Dumping images"
+            for i in range(FRAMES):
+                its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME, i))
 
         # The mean image.
         img_mean = imgs.mean(0)
         its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
 
-        # Pass/fail based on center patch similarity.
-        for means in [r_means, g_means, b_means]:
-            spread = max(means) - min(means)
-            print spread
-            assert(spread < SPREAD_THRESH)
+        # Plot means vs frames
+        frames = range(FRAMES)
+        pylab.title(NAME)
+        pylab.plot(frames, r_means, "-ro")
+        pylab.plot(frames, g_means, "-go")
+        pylab.plot(frames, b_means, "-bo")
+        pylab.ylim([0, 1])
+        pylab.xlabel("frame number")
+        pylab.ylabel("RGB avg [0, 1]")
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
 
-if __name__ == '__main__':
+        # PASS/FAIL based on center patch similarity.
+        for plane, means in enumerate([r_means, g_means, b_means]):
+            spread = max(means) - min(means)
+            msg = "%s spread: %.5f, SPREAD_THRESH: %.3f" % (
+                    COLORS[plane], spread, SPREAD_THRESH)
+            print msg
+            assert spread < SPREAD_THRESH, msg
+
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_capture_result.py b/apps/CameraITS/tests/scene1/test_capture_result.py
index a3b81fa..19d0145 100644
--- a/apps/CameraITS/tests/scene1/test_capture_result.py
+++ b/apps/CameraITS/tests/scene1/test_capture_result.py
@@ -12,16 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
-import numpy
 import matplotlib.pyplot
+import mpl_toolkits.mplot3d  # Required for 3d plot to work
+import numpy
 
-# Required for 3d plot to work
-import mpl_toolkits.mplot3d
 
 def main():
     """Test that valid data comes back in CaptureResult objects.
@@ -38,13 +37,13 @@
                              its.caps.manual_post_proc(props) and
                              its.caps.per_frame_control(props))
 
-        manual_tonemap = [0,0, 1,1] # Linear
+        manual_tonemap = [0,0, 1,1]  # Linear
         manual_transform = its.objects.float_to_rational(
                 [-1.5,-1.0,-0.5, 0.0,0.5,1.0, 1.5,2.0,3.0])
         manual_gains = [1,1.5,2.0,3.0]
         manual_region = [{"x":8,"y":8,"width":128,"height":128,"weight":1}]
-        manual_exp_time = min(props['android.sensor.info.exposureTimeRange'])
-        manual_sensitivity = min(props['android.sensor.info.sensitivityRange'])
+        manual_exp_time = min(props["android.sensor.info.exposureTimeRange"])
+        manual_sensitivity = min(props["android.sensor.info.sensitivityRange"])
 
         # The camera HAL may not support different gains for two G channels.
         manual_gains_ok = [[1,1.5,2.0,3.0],[1,1.5,1.5,3.0],[1,2.0,2.0,3.0]]
@@ -53,67 +52,80 @@
         auto_req["android.statistics.lensShadingMapMode"] = 1
 
         manual_req = {
-            "android.control.mode": 0,
-            "android.control.aeMode": 0,
-            "android.control.awbMode": 0,
-            "android.control.afMode": 0,
-            "android.sensor.sensitivity": manual_sensitivity,
-            "android.sensor.exposureTime": manual_exp_time,
-            "android.colorCorrection.mode": 0,
-            "android.colorCorrection.transform": manual_transform,
-            "android.colorCorrection.gains": manual_gains,
-            "android.tonemap.mode": 0,
-            "android.tonemap.curve": {"red": manual_tonemap,
-                                      "green": manual_tonemap,
-                                      "blue": manual_tonemap},
-            "android.control.aeRegions": manual_region,
-            "android.control.afRegions": manual_region,
-            "android.control.awbRegions": manual_region,
-            "android.statistics.lensShadingMapMode":1
-            }
+                "android.control.mode": 0,
+                "android.control.aeMode": 0,
+                "android.control.awbMode": 0,
+                "android.control.afMode": 0,
+                "android.sensor.sensitivity": manual_sensitivity,
+                "android.sensor.exposureTime": manual_exp_time,
+                "android.colorCorrection.mode": 0,
+                "android.colorCorrection.transform": manual_transform,
+                "android.colorCorrection.gains": manual_gains,
+                "android.tonemap.mode": 0,
+                "android.tonemap.curve": {"red": manual_tonemap,
+                                          "green": manual_tonemap,
+                                          "blue": manual_tonemap},
+                "android.control.aeRegions": manual_region,
+                "android.control.afRegions": manual_region,
+                "android.control.awbRegions": manual_region,
+                "android.statistics.lensShadingMapMode": 1
+                }
 
+        sync_latency = its.caps.sync_latency(props)
         print "Testing auto capture results"
-        lsc_map_auto = test_auto(cam, props)
+        lsc_map_auto = test_auto(cam, props, sync_latency)
         print "Testing manual capture results"
-        test_manual(cam, lsc_map_auto, props)
+        test_manual(cam, lsc_map_auto, props, sync_latency)
         print "Testing auto capture results again"
-        test_auto(cam, props)
+        test_auto(cam, props, sync_latency)
 
-# A very loose definition for two floats being close to each other;
-# there may be different interpolation and rounding used to get the
-# two values, and all this test is looking at is whether there is
-# something obviously broken; it's not looking for a perfect match.
+
 def is_close_float(n1, n2):
+    """A very loose definition for two floats being close to each other.
+
+    There may be different interpolation and rounding used to get the
+    two values, and all this test is looking at is whether there is
+    something obviously broken; it's not looking for a perfect match.
+
+    Args:
+        n1:     float 1
+        n2:     float 2
+    Returns:
+        Boolean
+    """
     return abs(n1 - n2) < 0.05
 
+
 def is_close_rational(n1, n2):
     return is_close_float(its.objects.rational_to_float(n1),
                           its.objects.rational_to_float(n2))
 
+
 def draw_lsc_plot(w_map, h_map, lsc_map, name):
     for ch in range(4):
         fig = matplotlib.pyplot.figure()
-        ax = fig.gca(projection='3d')
+        ax = fig.gca(projection="3d")
         xs = numpy.array([range(w_map)] * h_map).reshape(h_map, w_map)
         ys = numpy.array([[i]*w_map for i in range(h_map)]).reshape(
                 h_map, w_map)
         zs = numpy.array(lsc_map[ch::4]).reshape(h_map, w_map)
         ax.plot_wireframe(xs, ys, zs)
-        matplotlib.pyplot.savefig("%s_plot_lsc_%s_ch%d.png"%(NAME,name,ch))
+        matplotlib.pyplot.savefig("%s_plot_lsc_%s_ch%d.png"%(NAME, name, ch))
 
-def test_auto(cam, props):
+
+def test_auto(cam, props, sync_latency):
     # Get 3A lock first, so the auto values in the capture result are
     # populated properly.
-    rect = [[0,0,1,1,1]]
+    rect = [[0, 0, 1, 1, 1]]
     mono_camera = its.caps.mono_camera(props)
     cam.do_3a(rect, rect, rect, do_af=False, mono_camera=mono_camera)
 
-    cap = cam.do_capture(auto_req)
+    cap = its.device.do_capture_with_latency(cam, auto_req, sync_latency)
     cap_res = cap["metadata"]
 
     gains = cap_res["android.colorCorrection.gains"]
     transform = cap_res["android.colorCorrection.transform"]
-    exp_time = cap_res['android.sensor.exposureTime']
+    exp_time = cap_res["android.sensor.exposureTime"]
     lsc_obj = cap_res["android.statistics.lensShadingCorrectionMap"]
     lsc_map = lsc_obj["map"]
     w_map = lsc_obj["width"]
@@ -125,11 +137,11 @@
     print "Transform:", [its.objects.rational_to_float(t)
                          for t in transform]
     if props["android.control.maxRegionsAe"] > 0:
-        print "AE region:", cap_res['android.control.aeRegions']
+        print "AE region:", cap_res["android.control.aeRegions"]
     if props["android.control.maxRegionsAf"] > 0:
-        print "AF region:", cap_res['android.control.afRegions']
+        print "AF region:", cap_res["android.control.afRegions"]
     if props["android.control.maxRegionsAwb"] > 0:
-        print "AWB region:", cap_res['android.control.awbRegions']
+        print "AWB region:", cap_res["android.control.awbRegions"]
     print "LSC map:", w_map, h_map, lsc_map[:8]
 
     assert(ctrl_mode == 1)
@@ -157,8 +169,9 @@
 
     return lsc_map
 
-def test_manual(cam, lsc_map_auto, props):
-    cap = cam.do_capture(manual_req)
+
+def test_manual(cam, lsc_map_auto, props, sync_latency):
+    cap = its.device.do_capture_with_latency(cam, manual_req, sync_latency)
     cap_res = cap["metadata"]
 
     gains = cap_res["android.colorCorrection.gains"]
@@ -166,7 +179,7 @@
     curves = [cap_res["android.tonemap.curve"]["red"],
               cap_res["android.tonemap.curve"]["green"],
               cap_res["android.tonemap.curve"]["blue"]]
-    exp_time = cap_res['android.sensor.exposureTime']
+    exp_time = cap_res["android.sensor.exposureTime"]
     lsc_obj = cap_res["android.statistics.lensShadingCorrectionMap"]
     lsc_map = lsc_obj["map"]
     w_map = lsc_obj["width"]
@@ -179,11 +192,11 @@
                          for t in transform]
     print "Tonemap:", curves[0][1::16]
     if props["android.control.maxRegionsAe"] > 0:
-        print "AE region:", cap_res['android.control.aeRegions']
+        print "AE region:", cap_res["android.control.aeRegions"]
     if props["android.control.maxRegionsAf"] > 0:
-        print "AF region:", cap_res['android.control.afRegions']
+        print "AF region:", cap_res["android.control.afRegions"]
     if props["android.control.maxRegionsAwb"] > 0:
-        print "AWB region:", cap_res['android.control.awbRegions']
+        print "AWB region:", cap_res["android.control.awbRegions"]
     print "LSC map:", w_map, h_map, lsc_map[:8]
 
     assert(ctrl_mode == 0)
@@ -218,6 +231,7 @@
 
     draw_lsc_plot(w_map, h_map, lsc_map, "manual")
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_channel_saturation.py b/apps/CameraITS/tests/scene1/test_channel_saturation.py
new file mode 100644
index 0000000..7d23c04
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_channel_saturation.py
@@ -0,0 +1,77 @@
+# Copyright 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import its.caps
+import its.device
+import its.image
+import its.objects
+import numpy as np
+
+NAME = os.path.basename(__file__).split('.')[0]
+RGB_FULL_SCALE = 255.0
+RGB_SAT_MIN = 253.0
+RGB_SAT_TOL = 1.0
+
+
+def main():
+    """Test that channels saturate evenly."""
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+        sync_latency = its.caps.sync_latency(props)
+
+        debug = its.caps.debug_mode()
+        largest_yuv = its.objects.get_largest_yuv_format(props)
+        if debug:
+            fmt = largest_yuv
+        else:
+            match_ar = (largest_yuv['width'], largest_yuv['height'])
+            fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
+
+        exp = props['android.sensor.info.exposureTimeRange'][1]
+        iso = props['android.sensor.info.sensitivityRange'][1]
+
+        # Take shot with very high ISO and exposure time. Expect saturation
+        req = its.objects.manual_capture_request(iso, exp)
+        cap = its.device.do_capture_with_latency(cam, req, sync_latency, fmt)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, '%s.jpg' % NAME)
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        white_means = its.image.compute_image_means(tile)
+        r = white_means[0] * RGB_FULL_SCALE
+        g = white_means[1] * RGB_FULL_SCALE
+        b = white_means[2] * RGB_FULL_SCALE
+        print ' Saturated pixels r, g, b:', white_means
+        r_exp = cap['metadata']['android.sensor.exposureTime']
+        r_iso = cap['metadata']['android.sensor.sensitivity']
+        print ' Saturated shot write values: iso = %d, exp = %.2fms' % (
+                iso, exp/1000000.0)
+        print ' Saturated shot read values: iso = %d, exp = %.2fms\n' % (
+                r_iso, r_exp/1000000.0)
+
+        # assert saturation
+        assert min(r, g, b) > RGB_SAT_MIN, (
+                'r: %.1f, g: %.1f, b: %.1f, MIN: %.f' % (r, g, b, RGB_SAT_MIN))
+        # assert channels saturate evenly
+        assert np.isclose(min(r, g, b), max(r, g, b), atol=RGB_SAT_TOL), (
+                'ch_sat not EQ!  r: %.1f, g: %.1f, b: %.1f, TOL: %.f' % (
+                        r, g, b, RGB_SAT_TOL))
+
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_crop_regions.py b/apps/CameraITS/tests/scene1/test_crop_regions.py
index 6d3dad1..59f884c 100644
--- a/apps/CameraITS/tests/scene1/test_crop_regions.py
+++ b/apps/CameraITS/tests/scene1/test_crop_regions.py
@@ -12,26 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
 import numpy
 
-def main():
-    """Test that crop regions work.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
+NAME = os.path.basename(__file__).split(".")[0]
+# A list of 5 regions, specified in normalized (x,y,w,h) coords.
+# The regions correspond to: TL, TR, BL, BR, CENT
+REGIONS = [(0.0, 0.0, 0.5, 0.5),
+           (0.5, 0.0, 0.5, 0.5),
+           (0.0, 0.5, 0.5, 0.5),
+           (0.5, 0.5, 0.5, 0.5),
+           (0.25, 0.25, 0.5, 0.5)]
 
-    # A list of 5 regions, specified in normalized (x,y,w,h) coords.
-    # The regions correspond to: TL, TR, BL, BR, CENT
-    REGIONS = [(0.0, 0.0, 0.5, 0.5),
-               (0.5, 0.0, 0.5, 0.5),
-               (0.0, 0.5, 0.5, 0.5),
-               (0.5, 0.5, 0.5, 0.5),
-               (0.25, 0.25, 0.5, 0.5)]
+
+def main():
+    """Test that crop regions work."""
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -39,29 +39,30 @@
                              its.caps.freeform_crop(props) and
                              its.caps.per_frame_control(props))
 
-        a = props['android.sensor.info.activeArraySize']
+        a = props["android.sensor.info.activeArraySize"]
         ax, ay = a["left"], a["top"]
         aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
         e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
         print "Active sensor region (%d,%d %dx%d)" % (ax, ay, aw, ah)
 
         # Uses a 2x digital zoom.
-        assert(its.objects.get_max_digital_zoom(props) >= 2)
+        assert its.objects.get_max_digital_zoom(props) >= 2
 
         # Capture a full frame.
-        req = its.objects.manual_capture_request(s,e)
+        req = its.objects.manual_capture_request(s, e)
         cap_full = cam.do_capture(req)
         img_full = its.image.convert_capture_to_rgb_image(cap_full)
-        its.image.write_image(img_full, "%s_full.jpg" % (NAME))
         wfull, hfull = cap_full["width"], cap_full["height"]
+        its.image.write_image(
+                img_full, "%s_full_%dx%d.jpg" % (NAME, wfull, hfull))
 
         # Capture a burst of crop region frames.
         # Note that each region is 1/2x1/2 of the full frame, and is digitally
         # zoomed into the full size output image, so must be downscaled (below)
         # by 2x when compared to a tile of the full image.
         reqs = []
-        for x,y,w,h in REGIONS:
-            req = its.objects.manual_capture_request(s,e)
+        for x, y, w, h in REGIONS:
+            req = its.objects.manual_capture_request(s, e)
             req["android.scaler.cropRegion"] = {
                     "top": int(ah * y),
                     "left": int(aw * x),
@@ -70,7 +71,7 @@
             reqs.append(req)
         caps_regions = cam.do_capture(reqs)
         match_failed = False
-        for i,cap in enumerate(caps_regions):
+        for i, cap in enumerate(caps_regions):
             a = cap["metadata"]["android.scaler.cropRegion"]
             ax, ay = a["left"], a["top"]
             aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
@@ -83,13 +84,14 @@
             its.image.write_image(img_crop, "%s_crop%d.jpg" % (NAME, i))
             min_diff = None
             min_diff_region = None
-            for j,(x,y,w,h) in enumerate(REGIONS):
-                tile_full = its.image.get_image_patch(img_full, x,y,w,h)
+            for j, (x, y, w, h) in enumerate(REGIONS):
+                tile_full = its.image.get_image_patch(img_full, x, y, w, h)
                 wtest = min(tile_full.shape[1], aw)
                 htest = min(tile_full.shape[0], ah)
                 tile_full = tile_full[0:htest:, 0:wtest:, ::]
                 tile_crop = img_crop[0:htest:, 0:wtest:, ::]
-                its.image.write_image(tile_full, "%s_fullregion%d.jpg"%(NAME,j))
+                its.image.write_image(
+                        tile_full, "%s_fullregion%d.jpg" % (NAME, j))
                 diff = numpy.fabs(tile_full - tile_crop).mean()
                 if min_diff is None or diff < min_diff:
                     min_diff = diff
@@ -99,8 +101,8 @@
             print "Crop image %d (%d,%d %dx%d) best match with region %d"%(
                     i, ax, ay, aw, ah, min_diff_region)
 
-        assert(not match_failed)
+        assert not match_failed
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_dng_noise_model.py b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
index c447ae5..98efd4e 100644
--- a/apps/CameraITS/tests/scene1/test_dng_noise_model.py
+++ b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import math
 import os.path
 import its.caps
 import its.device
@@ -25,7 +26,7 @@
 DIFF_THRESH = 0.0012  # absolute variance delta threshold
 FRAC_THRESH = 0.2  # relative variance delta threshold
 NUM_STEPS = 4
-STATS_GRID = 49  # center 2.04% of image for calculations
+SENS_TOL = 0.97  # specification is <= 3%
 
 
 def main():
@@ -39,52 +40,44 @@
     # (since ITS doesn't require a perfectly uniformly lit scene).
 
     with its.device.ItsSession() as cam:
-
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.raw(props) and
-                             its.caps.raw16(props) and
-                             its.caps.manual_sensor(props) and
-                             its.caps.read_3a(props) and
-                             its.caps.per_frame_control(props) and
-                             not its.caps.mono_camera(props))
-        debug = its.caps.debug_mode()
+        props = cam.override_with_hidden_physical_camera_props(props)
+        its.caps.skip_unless(
+                its.caps.raw(props) and
+                its.caps.raw16(props) and
+                its.caps.manual_sensor(props) and
+                its.caps.read_3a(props) and
+                its.caps.per_frame_control(props) and
+                not its.caps.mono_camera(props))
 
         white_level = float(props['android.sensor.info.whiteLevel'])
         cfa_idxs = its.image.get_canonical_cfa_order(props)
-        aax = props['android.sensor.info.preCorrectionActiveArraySize']['left']
-        aay = props['android.sensor.info.preCorrectionActiveArraySize']['top']
-        aaw = props['android.sensor.info.preCorrectionActiveArraySize']['right']-aax
-        aah = props['android.sensor.info.preCorrectionActiveArraySize']['bottom']-aay
 
         # Expose for the scene with min sensitivity
-        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
-        sens_step = (sens_max - sens_min) / NUM_STEPS
+        sens_min, _ = props['android.sensor.info.sensitivityRange']
+        sens_max_ana = props['android.sensor.maxAnalogSensitivity']
+        sens_step = (sens_max_ana - sens_min) / NUM_STEPS
         s_ae, e_ae, _, _, f_dist = cam.do_3a(get_results=True)
         s_e_prod = s_ae * e_ae
-        sensitivities = range(sens_min, sens_max, sens_step)
+        sensitivities = range(sens_min, sens_max_ana+1, sens_step)
 
         var_expected = [[], [], [], []]
         var_measured = [[], [], [], []]
-        x = STATS_GRID/2  # center in H of STATS_GRID
-        y = STATS_GRID/2  # center in W of STATS_GRID
+        sens_valid = []
         for sens in sensitivities:
-
             # Capture a raw frame with the desired sensitivity
             exp = int(s_e_prod / float(sens))
             req = its.objects.manual_capture_request(sens, exp, f_dist)
-            if debug:
-                cap = cam.do_capture(req, cam.CAP_RAW)
-                planes = its.image.convert_capture_to_planes(cap, props)
-            else:
-                cap = cam.do_capture(req, {'format': 'rawStats',
-                                           'gridWidth': aaw/STATS_GRID,
-                                           'gridHeight': aah/STATS_GRID})
-                mean_img, var_img = its.image.unpack_rawstats_capture(cap)
+            cap = cam.do_capture(req, cam.CAP_RAW)
+            planes = its.image.convert_capture_to_planes(cap, props)
+            s_read = cap['metadata']['android.sensor.sensitivity']
+            print 'iso_write: %d, iso_read: %d' % (sens, s_read)
 
             # Test each raw color channel (R, GR, GB, B)
             noise_profile = cap['metadata']['android.sensor.noiseProfile']
             assert len(noise_profile) == len(BAYER_LIST)
             for i in range(len(BAYER_LIST)):
+                print BAYER_LIST[i],
                 # Get the noise model parameters for this channel of this shot.
                 ch = cfa_idxs[i]
                 s, o = noise_profile[ch]
@@ -95,23 +88,43 @@
                 black_level = its.image.get_black_level(i, props,
                                                         cap['metadata'])
                 level_range = white_level - black_level
-                if debug:
-                    plane = ((planes[i] * white_level - black_level) /
-                             level_range)
-                    tile = its.image.get_image_patch(plane, 0.49, 0.49,
-                                                     0.02, 0.02)
-                    mean_img_ch = tile.mean()
-                    var_measured[i].append(
-                            its.image.compute_image_variances(tile)[0])
-                else:
-                    mean_img_ch = (mean_img[x, y, ch]-black_level)/level_range
-                    var_measured[i].append(var_img[x, y, ch]/level_range**2)
-                var_expected[i].append(s * mean_img_ch + o)
+                plane = its.image.get_image_patch(planes[i], 0.49, 0.49,
+                                                  0.02, 0.02)
+                tile_raw = plane * white_level
+                tile_norm = ((tile_raw - black_level) / level_range)
 
+                # exit if distribution is clipped at 0, otherwise continue
+                mean_img_ch = tile_norm.mean()
+                var_model = s * mean_img_ch + o
+                # This computation is suspicious because if the data were
+                # clipped, the mean and standard deviation could be affected
+                # in a way that affects this check. However, empirically,
+                # the mean and standard deviation change more slowly than the
+                # clipping point itself does, so the check remains correct
+                # even after the signal starts to clip.
+                mean_minus_3sigma = mean_img_ch - math.sqrt(var_model) * 3
+                if mean_minus_3sigma < 0:
+                    e_msg = '\nPixel distribution crosses 0.\n'
+                    e_msg += 'Likely black level over-clips.\n'
+                    e_msg += 'Linear model is not valid.\n'
+                    e_msg += 'mean: %.3e, var: %.3e, u-3s: %.3e' % (
+                            mean_img_ch, var_model, mean_minus_3sigma)
+                    assert 0, e_msg
+                else:
+                    print 'mean:', mean_img_ch,
+                    var_measured[i].append(
+                            its.image.compute_image_variances(tile_norm)[0])
+                    print 'var:', var_measured[i][-1],
+                    var_expected[i].append(var_model)
+                    print 'var_model:', var_expected[i][-1]
+            print ''
+            sens_valid.append(sens)
+
+    # plot data and models
     for i, ch in enumerate(BAYER_LIST):
-        pylab.plot(sensitivities, var_expected[i], 'rgkb'[i],
+        pylab.plot(sens_valid, var_expected[i], 'rgkb'[i],
                    label=ch+' expected')
-        pylab.plot(sensitivities, var_measured[i], 'rgkb'[i]+'--',
+        pylab.plot(sens_valid, var_measured[i], 'rgkb'[i]+'.--',
                    label=ch+' measured')
     pylab.xlabel('Sensitivity')
     pylab.ylabel('Center patch variance')
@@ -121,11 +134,11 @@
     # PASS/FAIL check
     for i, ch in enumerate(BAYER_LIST):
         diffs = [abs(var_measured[i][j] - var_expected[i][j])
-                 for j in range(len(sensitivities))]
+                 for j in range(len(sens_valid))]
         print 'Diffs (%s):'%(ch), diffs
         for j, diff in enumerate(diffs):
             thresh = max(DIFF_THRESH, FRAC_THRESH*var_expected[i][j])
-            assert diff <= thresh
+            assert diff <= thresh, 'diff: %.5f, thresh: %.4f' % (diff, thresh)
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
index d087ab1..cc36990 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
@@ -12,27 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
-import its.device
-import its.caps
-import its.objects
 import os.path
-from matplotlib import pylab
+import its.caps
+import its.device
+import its.image
+import its.objects
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
 import numpy
 
-#AE must converge within this number of auto requests for EV
-THREASH_CONVERGE_FOR_EV = 8
+LOCKED = 3
+MAX_LUMA_DELTA_THRESH = 0.05
+NAME = os.path.basename(__file__).split('.')[0]
+THRESH_CONVERGE_FOR_EV = 8  # AE must converge in this num auto reqs for EV
+
 
 def main():
-    """Tests that EV compensation is applied.
-    """
-    LOCKED = 3
-
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    MAX_LUMA_DELTA_THRESH = 0.05
+    """Tests that EV compensation is applied."""
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -76,34 +72,40 @@
             # by tone curves.
             req['android.tonemap.mode'] = 0
             req['android.tonemap.curve'] = {
-                'red': [0.0,0.0, 1.0,1.0],
-                'green': [0.0,0.0, 1.0,1.0],
-                'blue': [0.0,0.0, 1.0,1.0]}
-            caps = cam.do_capture([req]*THREASH_CONVERGE_FOR_EV, fmt)
+                    'red': [0.0, 0.0, 1.0, 1.0],
+                    'green': [0.0, 0.0, 1.0, 1.0],
+                    'blue': [0.0, 0.0, 1.0, 1.0]}
+            caps = cam.do_capture([req]*THRESH_CONVERGE_FOR_EV, fmt)
 
             for cap in caps:
-                if (cap['metadata']['android.control.aeState'] == LOCKED):
+                if cap['metadata']['android.control.aeState'] == LOCKED:
                     y = its.image.convert_capture_to_planes(cap)[0]
-                    tile = its.image.get_image_patch(y, 0.45,0.45,0.1,0.1)
+                    tile = its.image.get_image_patch(y, 0.45, 0.45, 0.1, 0.1)
                     lumas.append(its.image.compute_image_means(tile)[0])
                     break
-            assert(cap['metadata']['android.control.aeState'] == LOCKED)
+            assert cap['metadata']['android.control.aeState'] == LOCKED
 
-        print "ev_step_size_in_stops", ev_per_step
+        print 'ev_step_size_in_stops', ev_per_step
         shift_mid = ev_shifts[imid]
         luma_normal = lumas[imid] / shift_mid
-        expected_lumas = [min(1.0, luma_normal * ev_shift) for ev_shift in ev_shifts]
+        expected_lumas = [min(1.0, luma_normal*ev_shift) for ev_shift in ev_shifts]
 
-        pylab.plot(ev_steps, lumas, 'r')
-        pylab.plot(ev_steps, expected_lumas, 'b')
-        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+        pylab.plot(ev_steps, lumas, '-ro')
+        pylab.plot(ev_steps, expected_lumas, '-bo')
+        pylab.title(NAME)
+        pylab.xlabel('EV Compensation')
+        pylab.ylabel('Mean Luma (Normalized)')
 
-        luma_diffs = [expected_lumas[i] - lumas[i] for i in range(len(ev_steps))]
+        matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
+
+        luma_diffs = [expected_lumas[i]-lumas[i] for i in range(len(ev_steps))]
         max_diff = max(abs(i) for i in luma_diffs)
         avg_diff = abs(numpy.array(luma_diffs)).mean()
-        print "Max delta between modeled and measured lumas:", max_diff
-        print "Avg delta between modeled and measured lumas:", avg_diff
-        assert(max_diff < MAX_LUMA_DELTA_THRESH)
+        print 'Max delta between modeled and measured lumas:', max_diff
+        print 'Avg delta between modeled and measured lumas:', avg_diff
+        assert max_diff < MAX_LUMA_DELTA_THRESH, 'diff: %.3f, THRESH: %.2f' % (
+                max_diff, MAX_LUMA_DELTA_THRESH)
+
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
index 32e5001..61c89d8 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
@@ -27,8 +27,8 @@
 LUMA_LOCKED_TOL = 0.05
 THRESH_CONVERGE_FOR_EV = 8  # AE must converge within this num
 YUV_FULL_SCALE = 255.0
-YUV_SATURATION_MIN = 253.0
-YUV_SATURATION_TOL = 1.0
+YUV_SAT_MIN = 250.0
+YUV_SAT_TOL = 3.0
 
 
 def main():
@@ -49,13 +49,10 @@
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
         ev_per_step = its.objects.rational_to_float(
-            props['android.control.aeCompensationStep'])
+                props['android.control.aeCompensationStep'])
         steps_per_ev = int(1.0 / ev_per_step)
         evs = range(-2 * steps_per_ev, 2 * steps_per_ev + 1, steps_per_ev)
         lumas = []
-        reds = []
-        greens = []
-        blues = []
 
         # Converge 3A, and lock AE once converged. skip AF trigger as
         # dark/bright scene could make AF convergence fail and this test
@@ -77,45 +74,33 @@
                     luma_locked.append(luma)
                     if i == THRESH_CONVERGE_FOR_EV-1:
                         lumas.append(luma)
-                        rgb = its.image.convert_capture_to_rgb_image(cap)
-                        rgb_tile = its.image.get_image_patch(rgb,
-                                                             0.45, 0.45,
-                                                             0.1, 0.1)
-                        rgb_means = its.image.compute_image_means(rgb_tile)
-                        reds.append(rgb_means[0])
-                        greens.append(rgb_means[1])
-                        blues.append(rgb_means[2])
                         print 'lumas in AE locked captures: ', luma_locked
+                        msg = 'AE locked lumas: %s, RTOL: %.2f' % (
+                                str(luma_locked), LUMA_LOCKED_TOL)
                         assert np.isclose(min(luma_locked), max(luma_locked),
-                                          rtol=LUMA_LOCKED_TOL)
+                                          rtol=LUMA_LOCKED_TOL), msg
             assert caps[THRESH_CONVERGE_FOR_EV-1]['metadata']['android.control.aeState'] == LOCKED
 
         pylab.plot(evs, lumas, '-ro')
+        pylab.title(NAME)
         pylab.xlabel('EV Compensation')
         pylab.ylabel('Mean Luma (Normalized)')
         matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
 
         # Trim extra saturated images
-        while lumas and lumas[-1] >= YUV_SATURATION_MIN/YUV_FULL_SCALE:
-            if (np.isclose(reds[-1], greens[-1],
-                           YUV_SATURATION_TOL/YUV_FULL_SCALE) and
-                    np.isclose(blues[-1], greens[-1],
-                               YUV_SATURATION_TOL/YUV_FULL_SCALE)):
-                lumas.pop(-1)
-                reds.pop(-1)
-                greens.pop(-1)
-                blues.pop(-1)
-                print 'Removed saturated image.'
-            else:
-                break
+        while (lumas[-2] >= YUV_SAT_MIN/YUV_FULL_SCALE and
+               lumas[-1] >= YUV_SAT_MIN/YUV_FULL_SCALE and
+               len(lumas) > 2):
+            lumas.pop(-1)
+            print 'Removed saturated image.'
+
         # Only allow positive EVs to give saturated image
-        assert len(lumas) > 2
-        luma_diffs = np.diff(lumas)
-        min_luma_diffs = min(luma_diffs)
+        assert len(lumas) > 2, '3 or more unsaturated images needed'
+        min_luma_diffs = min(np.diff(lumas))
         print 'Min of the luma value difference between adjacent ev comp: ',
         print min_luma_diffs
         # All luma brightness should be increasing with increasing ev comp.
-        assert min_luma_diffs > 0
+        assert min_luma_diffs > 0, 'Luma is not increasing!'
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_exposure.py b/apps/CameraITS/tests/scene1/test_exposure.py
index cac49d0..a13f020 100644
--- a/apps/CameraITS/tests/scene1/test_exposure.py
+++ b/apps/CameraITS/tests/scene1/test_exposure.py
@@ -64,12 +64,10 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props))
-
-        process_raw = (its.caps.raw16(props) and
-                       its.caps.manual_sensor(props))
-
+        props = cam.override_with_hidden_physical_camera_props(props)
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+        sync_latency = its.caps.sync_latency(props)
+        process_raw = its.caps.raw16(props) and its.caps.manual_sensor(props)
         debug = its.caps.debug_mode()
         largest_yuv = its.objects.get_largest_yuv_format(props)
         if debug:
@@ -91,7 +89,8 @@
             print 'Testing s:', s_test, 'e:', e_test
             req = its.objects.manual_capture_request(
                     s_test, e_test, 0.0, True, props)
-            cap = cam.do_capture(req, fmt)
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             s_res = cap['metadata']['android.sensor.sensitivity']
             e_res = cap['metadata']['android.sensor.exposureTime']
             # determine exposure tolerance based on exposure time
@@ -104,12 +103,12 @@
                         (THRESH_EXP_KNEE - e_test) / THRESH_EXP_KNEE)
             s_msg = 's_write: %d, s_read: %d, TOL=%.f%%' % (
                     s_test, s_res, THRESH_ROUND_DOWN_GAIN*100)
-            e_msg = 'e_write: %.2fms, e_read: %.2fms, TOL=%.f%%' % (
+            e_msg = 'e_write: %.3fms, e_read: %.3fms, TOL=%.f%%' % (
                     e_test/1.0E6, e_res/1.0E6, thresh_round_down_exp*100)
             assert 0 <= s_test - s_res < s_test * THRESH_ROUND_DOWN_GAIN, s_msg
             assert 0 <= e_test - e_res < e_test * thresh_round_down_exp, e_msg
             s_e_product_res = s_res * e_res
-            request_result_ratio = s_e_product / s_e_product_res
+            request_result_ratio = float(s_e_product) / s_e_product_res
             print 'Capture result s:', s_res, 'e:', e_res
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, '%s_mult=%3.2f.jpg' % (NAME, m))
@@ -122,12 +121,13 @@
             # do same in RAW space if possible
             if process_raw and debug:
                 aaw, aah = get_raw_active_array_size(props)
-                raw_cap = cam.do_capture(req,
-                                         {'format': 'rawStats',
-                                          'gridWidth': aaw/IMG_STATS_GRID,
-                                          'gridHeight': aah/IMG_STATS_GRID})
-                r, gr, gb, b = its.image.convert_capture_to_planes(raw_cap,
-                                                                   props)
+                fmt_raw = {'format': 'rawStats',
+                           'gridWidth': aaw/IMG_STATS_GRID,
+                           'gridHeight': aah/IMG_STATS_GRID}
+                raw_cap = its.device.do_capture_with_latency(
+                        cam, req, sync_latency, fmt_raw)
+                r, gr, gb, b = its.image.convert_capture_to_planes(
+                        raw_cap, props)
                 raw_r_means.append(r[IMG_STATS_GRID/2, IMG_STATS_GRID/2]
                                    * request_result_ratio)
                 raw_gr_means.append(gr[IMG_STATS_GRID/2, IMG_STATS_GRID/2]
diff --git a/apps/CameraITS/tests/scene1/test_format_combos.py b/apps/CameraITS/tests/scene1/test_format_combos.py
deleted file mode 100644
index ca65e4f..0000000
--- a/apps/CameraITS/tests/scene1/test_format_combos.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# Copyright 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import its.image
-import its.caps
-import its.device
-import its.objects
-import its.error
-import its.target
-import sys
-import os
-
-NAME = os.path.basename(__file__).split(".")[0]
-STOP_AT_FIRST_FAILURE = False  # change to True to have test break @ 1st FAIL
-
-
-def main():
-    """Test different combinations of output formats."""
-
-    with its.device.ItsSession() as cam:
-
-        props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.raw16(props))
-
-        successes = []
-        failures = []
-        debug = its.caps.debug_mode()
-
-        # Two different requests: auto, and manual.
-        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
-        req_aut = its.objects.auto_capture_request()
-        req_man = its.objects.manual_capture_request(s, e)
-        reqs = [req_aut,  # R0
-                req_man]  # R1
-
-        # 10 different combos of output formats; some are single surfaces, and
-        # some are multiple surfaces.
-        wyuv, hyuv = its.objects.get_available_output_sizes("yuv", props)[-1]
-        wjpg, hjpg = its.objects.get_available_output_sizes("jpg", props)[-1]
-        fmt_yuv_prev = {"format": "yuv", "width": wyuv, "height": hyuv}
-        fmt_yuv_full = {"format": "yuv"}
-        fmt_jpg_prev = {"format": "jpeg", "width": wjpg, "height": hjpg}
-        fmt_jpg_full = {"format": "jpeg"}
-        fmt_raw_full = {"format": "raw"}
-        fmt_combos = [
-            [fmt_yuv_prev],                              # F0
-            [fmt_yuv_full],                              # F1
-            [fmt_jpg_prev],                              # F2
-            [fmt_jpg_full],                              # F3
-            [fmt_raw_full],                              # F4
-            [fmt_yuv_prev, fmt_jpg_prev],                # F5
-            [fmt_yuv_prev, fmt_jpg_full],                # F6
-            [fmt_yuv_prev, fmt_raw_full],                # F7
-            [fmt_yuv_prev, fmt_jpg_prev, fmt_raw_full],  # F8
-            [fmt_yuv_prev, fmt_jpg_full, fmt_raw_full]]  # F9
-
-        # Two different burst lengths: single frame, and 3 frames.
-        burst_lens = [1,  # B0
-                      3]  # B1
-
-        # There are 2x10x2=40 different combinations. Run through them all.
-        n = 0
-        for r,req in enumerate(reqs):
-            for f,fmt_combo in enumerate(fmt_combos):
-                for b,burst_len in enumerate(burst_lens):
-                    try:
-                        caps = cam.do_capture([req]*burst_len, fmt_combo)
-                        successes.append((n,r,f,b))
-                        print "==> Success[%02d]: R%d F%d B%d" % (n,r,f,b)
-
-                        # Dump the captures out to jpegs in debug mode.
-                        if debug:
-                            if not isinstance(caps, list):
-                                caps = [caps]
-                            elif isinstance(caps[0], list):
-                                caps = sum(caps, [])
-                            for c, cap in enumerate(caps):
-                                img = its.image.convert_capture_to_rgb_image(cap, props=props)
-                                its.image.write_image(img,
-                                    "%s_n%02d_r%d_f%d_b%d_c%d.jpg"%(NAME,n,r,f,b,c))
-
-                    except Exception as e:
-                        print e
-                        print "==> Failure[%02d]: R%d F%d B%d" % (n,r,f,b)
-                        failures.append((n,r,f,b))
-                        if STOP_AT_FIRST_FAILURE:
-                            sys.exit(1)
-                    n += 1
-
-        num_fail = len(failures)
-        num_success = len(successes)
-        num_total = len(reqs)*len(fmt_combos)*len(burst_lens)
-        num_not_run = num_total - num_success - num_fail
-
-        print "\nFailures (%d / %d):" % (num_fail, num_total)
-        for (n,r,f,b) in failures:
-            print "  %02d: R%d F%d B%d" % (n,r,f,b)
-        print "\nSuccesses (%d / %d):" % (num_success, num_total)
-        for (n,r,f,b) in successes:
-            print "  %02d: R%d F%d B%d" % (n,r,f,b)
-        if num_not_run > 0:
-            print "\nNumber of tests not run: %d / %d" % (num_not_run, num_total)
-        print ""
-
-        # The test passes if all the combinations successfully capture.
-        assert num_fail == 0
-        assert num_success == num_total
-
-if __name__ == '__main__':
-    main()
-
diff --git a/apps/CameraITS/tests/scene1/test_jpeg.py b/apps/CameraITS/tests/scene1/test_jpeg.py
index 6b14411..3abeef5 100644
--- a/apps/CameraITS/tests/scene1/test_jpeg.py
+++ b/apps/CameraITS/tests/scene1/test_jpeg.py
@@ -12,33 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
-import math
+
+NAME = os.path.basename(__file__).split(".")[0]
+THRESHOLD_MAX_RMS_DIFF = 0.01
+
 
 def main():
     """Test that converted YUV images and device JPEG images look the same.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    THRESHOLD_MAX_RMS_DIFF = 0.01
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props))
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+        sync_latency = its.caps.sync_latency(props)
 
         e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
         req = its.objects.manual_capture_request(s, e, 0.0, True, props)
 
         # YUV
         size = its.objects.get_available_output_sizes("yuv", props)[0]
-        out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
-        cap = cam.do_capture(req, out_surface)
+        out_surface = {"width": size[0], "height": size[1], "format": "yuv"}
+        cap = its.device.do_capture_with_latency(
+                cam, req, sync_latency, out_surface)
         img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_fmt=yuv.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -46,8 +49,9 @@
 
         # JPEG
         size = its.objects.get_available_output_sizes("jpg", props)[0]
-        out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
-        cap = cam.do_capture(req, out_surface)
+        out_surface = {"width": size[0], "height": size[1], "format": "jpg"}
+        cap = its.device.do_capture_with_latency(
+                cam, req, sync_latency, out_surface)
         img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
         its.image.write_image(img, "%s_fmt=jpg.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -56,7 +60,9 @@
         rms_diff = math.sqrt(
                 sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
         print "RMS difference:", rms_diff
-        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+        msg = "RMS difference: %.4f, spec: %.3f" % (rms_diff,
+                                                    THRESHOLD_MAX_RMS_DIFF)
+        assert rms_diff < THRESHOLD_MAX_RMS_DIFF, msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_latching.py b/apps/CameraITS/tests/scene1/test_latching.py
index 79f0f1a..362b7b8 100644
--- a/apps/CameraITS/tests/scene1/test_latching.py
+++ b/apps/CameraITS/tests/scene1/test_latching.py
@@ -12,15 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-from matplotlib import pylab
-import os.path
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+
 
 def main():
     """Test that settings latch on the right frame.
@@ -29,14 +33,13 @@
     request parameters between shots. Checks that the images that come back
     have the expected properties.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
         its.caps.skip_unless(its.caps.full_or_better(props))
 
-        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
-        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        e, s = its.target.get_target_exposure_combos(cam)['midExposureTime']
         e /= 2.0
 
         r_means = []
@@ -44,26 +47,26 @@
         b_means = []
 
         reqs = [
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s*2,e,   0.0, True, props),
-            its.objects.manual_capture_request(s*2,e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e*2, 0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s*2,e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e*2, 0.0, True, props),
-            its.objects.manual_capture_request(s,  e,   0.0, True, props),
-            its.objects.manual_capture_request(s,  e*2, 0.0, True, props),
-            its.objects.manual_capture_request(s,  e*2, 0.0, True, props),
-            ]
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s*2, e, 0.0, True, props),
+                its.objects.manual_capture_request(s*2, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e*2, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s*2, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e*2, 0.0, True, props),
+                its.objects.manual_capture_request(s, e, 0.0, True, props),
+                its.objects.manual_capture_request(s, e*2, 0.0, True, props),
+                its.objects.manual_capture_request(s, e*2, 0.0, True, props),
+                ]
 
         caps = cam.do_capture(reqs, fmt)
-        for i,cap in enumerate(caps):
+        for i, cap in enumerate(caps):
             img = its.image.convert_capture_to_rgb_image(cap)
-            its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
+            its.image.write_image(img, '%s_i=%02d.jpg' % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
             r_means.append(rgb_means[0])
@@ -72,15 +75,18 @@
 
         # Draw a plot.
         idxs = range(len(r_means))
-        pylab.plot(idxs, r_means, 'r')
-        pylab.plot(idxs, g_means, 'g')
-        pylab.plot(idxs, b_means, 'b')
-        pylab.ylim([0,1])
-        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+        pylab.plot(idxs, r_means, '-ro')
+        pylab.plot(idxs, g_means, '-go')
+        pylab.plot(idxs, b_means, '-bo')
+        pylab.ylim([0, 1])
+        pylab.title(NAME)
+        pylab.xlabel('capture')
+        pylab.ylabel('RGB means')
+        matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
 
         g_avg = sum(g_means) / len(g_means)
         g_ratios = [g / g_avg for g in g_means]
-        g_hilo = [g>1.0 for g in g_ratios]
+        g_hilo = [g > 1.0 for g in g_ratios]
         assert(g_hilo == [False, False, True, True, False, False, True,
                           False, True, False, True, False, True, True])
 
diff --git a/apps/CameraITS/tests/scene1/test_linearity.py b/apps/CameraITS/tests/scene1/test_linearity.py
index 1f4aa14..e029ac7 100644
--- a/apps/CameraITS/tests/scene1/test_linearity.py
+++ b/apps/CameraITS/tests/scene1/test_linearity.py
@@ -12,17 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import numpy
-import math
-from matplotlib import pylab
-import os.path
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+import numpy
 
 NAME = os.path.basename(__file__).split('.')[0]
 RESIDUAL_THRESHOLD = 0.0003  # approximately each sample is off by 2/255
@@ -40,14 +39,15 @@
     linear R,G,B pixel data.
     """
     gamma_lut = numpy.array(
-        sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
+            sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
     inv_gamma_lut = numpy.array(
-        sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
+            sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props))
+        props = cam.override_with_hidden_physical_camera_props(props)
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+        sync_latency = its.caps.sync_latency(props)
 
         debug = its.caps.debug_mode()
         largest_yuv = its.objects.get_largest_yuv_format(props)
@@ -57,7 +57,7 @@
             match_ar = (largest_yuv['width'], largest_yuv['height'])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        e, s = its.target.get_target_exposure_combos(cam)['midSensitivity']
         s /= 2
         sens_range = props['android.sensor.info.sensitivityRange']
         sensitivities = [s*1.0/3.0, s*2.0/3.0, s, s*4.0/3.0, s*5.0/3.0]
@@ -68,20 +68,21 @@
         req['android.blackLevel.lock'] = True
         req['android.tonemap.mode'] = 0
         req['android.tonemap.curve'] = {
-            'red': gamma_lut.tolist(),
-            'green': gamma_lut.tolist(),
-            'blue': gamma_lut.tolist()}
+                'red': gamma_lut.tolist(),
+                'green': gamma_lut.tolist(),
+                'blue': gamma_lut.tolist()}
 
         r_means = []
         g_means = []
         b_means = []
 
         for sens in sensitivities:
-            req["android.sensor.sensitivity"] = sens
-            cap = cam.do_capture(req, fmt)
+            req['android.sensor.sensitivity'] = sens
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
-                img, '%s_sens=%04d.jpg' % (NAME, sens))
+                    img, '%s_sens=%04d.jpg' % (NAME, sens))
             img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
@@ -104,7 +105,9 @@
             line, residuals, _, _, _ = numpy.polyfit(range(len(sensitivities)),
                                                      means, 1, full=True)
             print 'Line: m=%f, b=%f, resid=%f'%(line[0], line[1], residuals[0])
-            assert residuals[0] < RESIDUAL_THRESHOLD
+            msg = 'residual: %.5f, THRESH: %.4f' % (
+                    residuals[0], RESIDUAL_THRESHOLD)
+            assert residuals[0] < RESIDUAL_THRESHOLD, msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_locked_burst.py b/apps/CameraITS/tests/scene1/test_locked_burst.py
index befbbed..76a8203 100644
--- a/apps/CameraITS/tests/scene1/test_locked_burst.py
+++ b/apps/CameraITS/tests/scene1/test_locked_burst.py
@@ -12,15 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
-import its.device
-import its.objects
-import its.caps
 import os.path
-import numpy
-from matplotlib import pylab
-import matplotlib
-import matplotlib.pyplot
+import its.caps
+import its.device
+import its.image
+import its.objects
+
+BURST_LEN = 8
+COLORS = ['R', 'G', 'B']
+FPS_MAX_DIFF = 2.0
+NAME = os.path.basename(__file__).split('.')[0]
+SPREAD_THRESH_MANUAL_SENSOR = 0.01
+SPREAD_THRESH = 0.03
+VALUE_THRESH = 0.1
+
 
 def main():
     """Test 3A lock + YUV burst (using auto settings).
@@ -29,12 +34,6 @@
     don't have MANUAL_SENSOR or PER_FRAME_CONTROLS. The test checks
     YUV image consistency while the frame rate check is in CTS.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    BURST_LEN = 8
-    SPREAD_THRESH_MANUAL_SENSOR = 0.01
-    SPREAD_THRESH = 0.03
-    FPS_MAX_DIFF = 2.0
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -49,9 +48,10 @@
         fmt = its.objects.get_largest_yuv_format(props)
 
         # After 3A has converged, lock AE+AWB for the duration of the test.
+        print 'Locking AE & AWB'
         req = its.objects.fastest_auto_capture_request(props)
-        req["android.control.awbLock"] = True
-        req["android.control.aeLock"] = True
+        req['android.control.awbLock'] = True
+        req['android.control.aeLock'] = True
 
         # Capture bursts of YUV shots.
         # Get the mean values of a center patch for each.
@@ -59,23 +59,32 @@
         g_means = []
         b_means = []
         caps = cam.do_capture([req]*BURST_LEN, fmt)
-        for i,cap in enumerate(caps):
+        for i, cap in enumerate(caps):
             img = its.image.convert_capture_to_rgb_image(cap)
-            its.image.write_image(img, "%s_frame%d.jpg"%(NAME,i))
+            its.image.write_image(img, '%s_frame%d.jpg'%(NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             means = its.image.compute_image_means(tile)
             r_means.append(means[0])
             g_means.append(means[1])
             b_means.append(means[2])
 
-        # Pass/fail based on center patch similarity.
-        for means in [r_means, g_means, b_means]:
-            spread = max(means) - min(means)
-            print "Patch mean spread", spread, \
-                    " (min/max: ",  min(means), "/", max(means), ")"
+        # Assert center patch brightness & similarity
+        for i, means in enumerate([r_means, g_means, b_means]):
+            plane = COLORS[i]
+            min_means = min(means)
+            spread = max(means) - min_means
+            print '%s patch mean spread %.5f. means = [' % (plane, spread),
+            for j in range(BURST_LEN):
+                print '%.5f' % means[j],
+            print ']'
+            e_msg = 'Image too dark!  %s: %.5f, THRESH: %.2f' % (
+                    plane, min_means, VALUE_THRESH)
+            assert min_means > VALUE_THRESH, e_msg
             threshold = SPREAD_THRESH_MANUAL_SENSOR \
                     if its.caps.manual_sensor(props) else SPREAD_THRESH
-            assert(spread < threshold)
+            e_msg = '%s center patch spread: %.5f, THRESH: %.2f' % (
+                    plane, spread, threshold)
+            assert spread < threshold, e_msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_multi_camera_match.py b/apps/CameraITS/tests/scene1/test_multi_camera_match.py
index b5bd63c..4d87ca8 100644
--- a/apps/CameraITS/tests/scene1/test_multi_camera_match.py
+++ b/apps/CameraITS/tests/scene1/test_multi_camera_match.py
@@ -35,19 +35,15 @@
     yuv_sizes = {}
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props) and
-                             its.caps.logical_multi_camera(props) and
-                             its.caps.raw16(props) and
-                             its.caps.manual_sensor(props))
+        its.caps.skip_unless(its.caps.per_frame_control(props) and
+                             its.caps.logical_multi_camera(props))
         ids = its.caps.logical_multi_camera_physical_ids(props)
-        max_raw_size = its.objects.get_available_output_sizes('raw', props)[0]
         for i in ids:
             physical_props = cam.get_camera_properties_by_id(i)
             its.caps.skip_unless(not its.caps.mono_camera(physical_props))
             yuv_sizes[i] = its.objects.get_available_output_sizes(
-                    'yuv', physical_props, match_ar_size=max_raw_size)
-            if i == ids[0]:
+                    'yuv', physical_props)
+            if i == ids[0]:  # get_available_output_sizes returns sorted list
                 yuv_match_sizes = yuv_sizes[i]
             else:
                 list(set(yuv_sizes[i]).intersection(yuv_match_sizes))
@@ -59,12 +55,31 @@
         print 'Matched YUV size: (%d, %d)' % (w, h)
 
         # do 3a and create requests
-        avail_fls = props['android.lens.info.availableFocalLengths']
+        avail_fls = sorted(props['android.lens.info.availableFocalLengths'],
+                           reverse=True)
         cam.do_3a()
         reqs = []
         for i, fl in enumerate(avail_fls):
             reqs.append(its.objects.auto_capture_request())
             reqs[i]['android.lens.focalLength'] = fl
+            if i > 0:
+                # Calculate the active sensor region for a non-cropped image
+                zoom = avail_fls[0] / fl
+                a = props['android.sensor.info.activeArraySize']
+                ax, ay = a['left'], a['top']
+                aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
+
+                # Calculate a center crop region.
+                assert zoom >= 1
+                cropw = aw / zoom
+                croph = ah / zoom
+                crop_region = {
+                        'left': aw / 2 - cropw / 2,
+                        'top': ah / 2 - croph / 2,
+                        'right': aw / 2 + cropw / 2,
+                        'bottom': ah / 2 + croph / 2
+                }
+                reqs[i]['android.scaler.cropRegion'] = crop_region
 
         # capture YUVs
         y_means = {}
diff --git a/apps/CameraITS/tests/scene1/test_param_color_correction.py b/apps/CameraITS/tests/scene1/test_param_color_correction.py
index 83f4f7f..f49eba5 100644
--- a/apps/CameraITS/tests/scene1/test_param_color_correction.py
+++ b/apps/CameraITS/tests/scene1/test_param_color_correction.py
@@ -12,15 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-from matplotlib import pylab
-import os.path
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+THRESHOLD_MAX_DIFF = 0.1
+
 
 def main():
     """Test that the android.colorCorrection.* params are applied when set.
@@ -31,15 +36,12 @@
 
     Uses a linear tonemap.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    THRESHOLD_MAX_DIFF = 0.1
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props) and
                              not its.caps.mono_camera(props))
+        sync_latency = its.caps.sync_latency(props)
 
         # Baseline request
         debug = its.caps.debug_mode()
@@ -50,23 +52,23 @@
             match_ar = (largest_yuv['width'], largest_yuv['height'])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        e, s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        e, s = its.target.get_target_exposure_combos(cam)['midSensitivity']
         req = its.objects.manual_capture_request(s, e, 0.0, True, props)
-        req["android.colorCorrection.mode"] = 0
+        req['android.colorCorrection.mode'] = 0
 
         # Transforms:
         # 1. Identity
         # 2. Identity
         # 3. Boost blue
-        transforms = [its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
-                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
-                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,2])]
+        transforms = [its.objects.int_to_rational([1, 0, 0, 0, 1, 0, 0, 0, 1]),
+                      its.objects.int_to_rational([1, 0, 0, 0, 1, 0, 0, 0, 1]),
+                      its.objects.int_to_rational([1, 0, 0, 0, 1, 0, 0, 0, 2])]
 
         # Gains:
         # 1. Unit
         # 2. Boost red
         # 3. Unit
-        gains = [[1,1,1,1], [2,1,1,1], [1,1,1,1]]
+        gains = [[1, 1, 1, 1], [2, 1, 1, 1], [1, 1, 1, 1]]
 
         r_means = []
         g_means = []
@@ -77,36 +79,40 @@
         # 2. With a higher red gain, and identity transform.
         # 3. With unit gains, and a transform that boosts blue.
         for i in range(len(transforms)):
-            req["android.colorCorrection.transform"] = transforms[i]
-            req["android.colorCorrection.gains"] = gains[i]
-            cap = cam.do_capture(req, fmt)
+            req['android.colorCorrection.transform'] = transforms[i]
+            req['android.colorCorrection.gains'] = gains[i]
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
-            its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
+            its.image.write_image(img, '%s_req=%d.jpg' % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
             r_means.append(rgb_means[0])
             g_means.append(rgb_means[1])
             b_means.append(rgb_means[2])
             ratios = [rgb_means[0] / rgb_means[1], rgb_means[2] / rgb_means[1]]
-            print "Means = ", rgb_means, "   Ratios =", ratios
+            print 'Means = ', rgb_means, '   Ratios =', ratios
 
         # Draw a plot.
         domain = range(len(transforms))
-        pylab.plot(domain, r_means, 'r')
-        pylab.plot(domain, g_means, 'g')
-        pylab.plot(domain, b_means, 'b')
-        pylab.ylim([0,1])
-        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+        pylab.plot(domain, r_means, '-ro')
+        pylab.plot(domain, g_means, '-go')
+        pylab.plot(domain, b_means, '-bo')
+        pylab.ylim([0, 1])
+        pylab.title(NAME)
+        pylab.xlabel('Unity, R boost, B boost')
+        pylab.ylabel('RGB means')
+        matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
 
         # Expect G0 == G1 == G2, R0 == 0.5*R1 == R2, B0 == B1 == 0.5*B2
         # Also need to ensure that the image is not clamped to white/black.
-        assert(all(g_means[i] > 0.2 and g_means[i] < 0.8 for i in xrange(3)))
-        assert(abs(g_means[1] - g_means[0]) < THRESHOLD_MAX_DIFF)
-        assert(abs(g_means[2] - g_means[1]) < THRESHOLD_MAX_DIFF)
-        assert(abs(r_means[2] - r_means[0]) < THRESHOLD_MAX_DIFF)
-        assert(abs(r_means[1] - 2.0 * r_means[0]) < THRESHOLD_MAX_DIFF)
-        assert(abs(b_means[1] - b_means[0]) < THRESHOLD_MAX_DIFF)
-        assert(abs(b_means[2] - 2.0 * b_means[0]) < THRESHOLD_MAX_DIFF)
+        assert all(g_means[i] > 0.2 and g_means[i] < 0.8 for i in xrange(3))
+        assert abs(g_means[1] - g_means[0]) < THRESHOLD_MAX_DIFF
+        assert abs(g_means[2] - g_means[1]) < THRESHOLD_MAX_DIFF
+        assert abs(r_means[2] - r_means[0]) < THRESHOLD_MAX_DIFF
+        assert abs(r_means[1] - 2.0 * r_means[0]) < THRESHOLD_MAX_DIFF
+        assert abs(b_means[1] - b_means[0]) < THRESHOLD_MAX_DIFF
+        assert abs(b_means[2] - 2.0 * b_means[0]) < THRESHOLD_MAX_DIFF
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_param_exposure_time.py b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
index 90ad0b6..3995c6e 100644
--- a/apps/CameraITS/tests/scene1/test_param_exposure_time.py
+++ b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
@@ -12,20 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-from matplotlib import pylab
-import os.path
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+
 
 def main():
-    """Test that the android.sensor.exposureTime parameter is applied.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
+    """Test that the android.sensor.exposureTime parameter is applied."""
 
     exp_times = []
     r_means = []
@@ -34,8 +36,8 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props))
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+        sync_latency = its.caps.sync_latency(props)
 
         debug = its.caps.debug_mode()
         largest_yuv = its.objects.get_largest_yuv_format(props)
@@ -45,13 +47,15 @@
             match_ar = (largest_yuv['width'], largest_yuv['height'])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        e,s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
-        for i,e_mult in enumerate([0.8, 0.9, 1.0, 1.1, 1.2]):
-            req = its.objects.manual_capture_request(s, e * e_mult, 0.0, True, props)
-            cap = cam.do_capture(req, fmt)
+        e, s = its.target.get_target_exposure_combos(cam)['midExposureTime']
+        for i, e_mult in enumerate([0.8, 0.9, 1.0, 1.1, 1.2]):
+            req = its.objects.manual_capture_request(
+                    s, e * e_mult, 0.0, True, props)
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
-                    img, "%s_frame%d.jpg" % (NAME, i))
+                    img, '%s_frame%d.jpg' % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
             exp_times.append(e * e_mult)
@@ -60,16 +64,20 @@
             b_means.append(rgb_means[2])
 
     # Draw a plot.
-    pylab.plot(exp_times, r_means, 'r')
-    pylab.plot(exp_times, g_means, 'g')
-    pylab.plot(exp_times, b_means, 'b')
-    pylab.ylim([0,1])
-    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+    pylab.plot(exp_times, r_means, '-ro')
+    pylab.plot(exp_times, g_means, '-go')
+    pylab.plot(exp_times, b_means, '-bo')
+    pylab.ylim([0, 1])
+    pylab.title(NAME)
+    pylab.xlabel('Exposure times (ns)')
+    pylab.ylabel('RGB means')
+    plot_name = '%s_plot_means.png' % NAME
+    matplotlib.pyplot.savefig(plot_name)
 
     # Test for pass/fail: check that each shot is brighter than the previous.
     for means in [r_means, g_means, b_means]:
         for i in range(len(means)-1):
-            assert(means[i+1] > means[i])
+            assert means[i+1] > means[i], 'See %s' % plot_name
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_param_flash_mode.py b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
index cfd88d4..eb9628f 100644
--- a/apps/CameraITS/tests/scene1/test_param_flash_mode.py
+++ b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
@@ -22,7 +22,7 @@
 NAME = os.path.basename(__file__).split('.')[0]
 GRADIENT_DELTA = 0.1
 Y_RELATIVE_DELTA_FLASH = 0.1  # 10%
-Y_RELATIVE_DELTA_TORCH = 0.05 # 5%
+Y_RELATIVE_DELTA_TORCH = 0.05  # 5%
 
 
 def main():
@@ -31,8 +31,8 @@
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.flash(props) and
-                             its.caps.per_frame_control(props))
+                             its.caps.flash(props))
+        sync_latency = its.caps.sync_latency(props)
 
         flash_modes_reported = []
         flash_states_reported = []
@@ -56,7 +56,8 @@
 
         for f in [0, 1, 2]:
             req['android.flash.mode'] = f
-            cap = cam.do_capture(req, fmt)
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             flash_modes_reported.append(cap['metadata']['android.flash.mode'])
             flash_states_reported.append(cap['metadata']['android.flash.state'])
             y, _, _ = its.image.convert_capture_to_planes(cap, props)
diff --git a/apps/CameraITS/tests/scene1/test_param_sensitivity.py b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
index 0d40042..ea4b3ec 100644
--- a/apps/CameraITS/tests/scene1/test_param_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
@@ -12,22 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-from matplotlib import pylab
-import os.path
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_STEPS = 5
+
 
 def main():
-    """Test that the android.sensor.sensitivity parameter is applied.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    NUM_STEPS = 5
+    """Test that the android.sensor.sensitivity parameter is applied."""
 
     sensitivities = None
     r_means = []
@@ -36,8 +37,8 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
-                             its.caps.per_frame_control(props))
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+        sync_latency = its.caps.sync_latency(props)
 
         debug = its.caps.debug_mode()
         largest_yuv = its.objects.get_largest_yuv_format(props)
@@ -47,17 +48,18 @@
             match_ar = (largest_yuv['width'], largest_yuv['height'])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        expt, _ = its.target.get_target_exposure_combos(cam)['midSensitivity']
         sens_range = props['android.sensor.info.sensitivityRange']
         sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
-        sensitivities = [sens_range[0] + i * sens_step for i in range(NUM_STEPS)]
+        sensitivities = [
+                sens_range[0] + i * sens_step for i in range(NUM_STEPS)]
 
         for s in sensitivities:
             req = its.objects.manual_capture_request(s, expt)
-            cap = cam.do_capture(req, fmt)
+            cap = its.device.do_capture_with_latency(
+                    cam, req, sync_latency, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
-            its.image.write_image(
-                    img, "%s_iso=%04d.jpg" % (NAME, s))
+            its.image.write_image(img, '%s_iso=%04d.jpg' % (NAME, s))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
             r_means.append(rgb_means[0])
@@ -65,16 +67,19 @@
             b_means.append(rgb_means[2])
 
     # Draw a plot.
-    pylab.plot(sensitivities, r_means, 'r')
-    pylab.plot(sensitivities, g_means, 'g')
-    pylab.plot(sensitivities, b_means, 'b')
-    pylab.ylim([0,1])
-    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+    pylab.plot(sensitivities, r_means, '-ro')
+    pylab.plot(sensitivities, g_means, '-go')
+    pylab.plot(sensitivities, b_means, '-bo')
+    pylab.ylim([0, 1])
+    pylab.title(NAME)
+    pylab.xlabel('Gain (ISO)')
+    pylab.ylabel('RGB means')
+    matplotlib.pyplot.savefig('%s_plot_means.png' % (NAME))
 
     # Test for pass/fail: check that each shot is brighter than the previous.
     for means in [r_means, g_means, b_means]:
         for i in range(len(means)-1):
-            assert(means[i+1] > means[i])
+            assert means[i+1] > means[i]
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_param_shading_mode.py b/apps/CameraITS/tests/scene1/test_param_shading_mode.py
index 45c9a12..a5f85ca 100644
--- a/apps/CameraITS/tests/scene1/test_param_shading_mode.py
+++ b/apps/CameraITS/tests/scene1/test_param_shading_mode.py
@@ -12,15 +12,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 import its.caps
 import its.device
 import its.image
 import its.objects
+
 import matplotlib
-import numpy
-import os
-import os.path
 from matplotlib import pylab
+import numpy
+
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_FRAMES = 4  # number of frames for temporal info to settle
+NUM_SHADING_MODE_SWITCH_LOOPS = 3
+SHADING_MODES = ['OFF', 'FAST', 'HQ']
+THRESHOLD_DIFF_RATIO = 0.15
+
 
 def main():
     """Test that the android.shading.mode param is applied.
@@ -28,10 +36,6 @@
     Switching shading modes and checks that the lens shading maps are
     modified as expected.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    NUM_SHADING_MODE_SWITCH_LOOPS = 3
-    THRESHOLD_DIFF_RATIO = 0.15
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -43,8 +47,8 @@
         mono_camera = its.caps.mono_camera(props)
 
         # lsc_off devices should always support OFF(0), FAST(1), and HQ(2)
-        assert(props.has_key("android.shading.availableModes") and
-               set(props["android.shading.availableModes"]) == set([0, 1, 2]))
+        assert(props.has_key('android.shading.availableModes') and
+               set(props['android.shading.availableModes']) == set([0, 1, 2]))
 
         # Test 1: Switching shading modes several times and verify:
         #   1. Lens shading maps with mode OFF are all 1.0
@@ -52,69 +56,102 @@
         #      shading modes.
         #   3. Lens shading maps with mode HIGH_QUALITY are similar after
         #      switching shading modes.
-        cam.do_3a(mono_camera=mono_camera);
+        cam.do_3a(mono_camera=mono_camera)
+
+        # Use smallest yuv size matching the aspect ratio of largest yuv size to
+        # reduce some USB bandwidth overhead since we are only looking at output
+        # metadata in this test.
+        largest_yuv_fmt = its.objects.get_largest_yuv_format(props)
+        largest_yuv_size = (largest_yuv_fmt['width'], largest_yuv_fmt['height'])
+        cap_fmt = its.objects.get_smallest_yuv_format(props, largest_yuv_size)
 
         # Get the reference lens shading maps for OFF, FAST, and HIGH_QUALITY
         # in different sessions.
         # reference_maps[mode]
-        reference_maps = [[] for mode in range(3)]
+        num_shading_modes = len(SHADING_MODES)
+        reference_maps = [[] for mode in range(num_shading_modes)]
         num_map_gains = 0
-        for mode in range(1, 3):
-            req = its.objects.auto_capture_request();
-            req["android.statistics.lensShadingMapMode"] = 1
-            req["android.shading.mode"] = mode
-            cap_res = cam.do_capture(req)["metadata"]
-            lsc_map = cap_res["android.statistics.lensShadingCorrectionMap"]
-            assert(lsc_map.has_key("width") and
-                   lsc_map.has_key("height") and
-                   lsc_map["width"] != None and lsc_map["height"] != None)
+        for mode in range(1, num_shading_modes):
+            req = its.objects.auto_capture_request()
+            req['android.statistics.lensShadingMapMode'] = 1
+            req['android.shading.mode'] = mode
+            cap_res = cam.do_capture([req]*NUM_FRAMES, cap_fmt)[NUM_FRAMES-1]['metadata']
+            lsc_map = cap_res['android.statistics.lensShadingCorrectionMap']
+            assert(lsc_map.has_key('width') and
+                   lsc_map.has_key('height') and
+                   lsc_map['width'] is not None and
+                   lsc_map['height'] is not None)
             if mode == 1:
-                num_map_gains = lsc_map["width"] * lsc_map["height"] * 4
+                num_map_gains = lsc_map['width'] * lsc_map['height'] * 4
                 reference_maps[0] = [1.0] * num_map_gains
-            reference_maps[mode] = lsc_map["map"]
+            reference_maps[mode] = lsc_map['map']
 
         # Get the lens shading maps while switching modes in one session.
         reqs = []
         for i in range(NUM_SHADING_MODE_SWITCH_LOOPS):
-            for mode in range(3):
-                req = its.objects.auto_capture_request();
-                req["android.statistics.lensShadingMapMode"] = 1
-                req["android.shading.mode"] = mode
-                reqs.append(req);
+            for mode in range(num_shading_modes):
+                for _ in range(NUM_FRAMES):
+                    req = its.objects.auto_capture_request()
+                    req['android.statistics.lensShadingMapMode'] = 1
+                    req['android.shading.mode'] = mode
+                    reqs.append(req)
 
-        caps = cam.do_capture(reqs)
+        caps = cam.do_capture(reqs, cap_fmt)
 
         # shading_maps[mode][loop]
         shading_maps = [[[] for loop in range(NUM_SHADING_MODE_SWITCH_LOOPS)]
-                for mode in range(3)]
+                        for mode in range(num_shading_modes)]
 
         # Get the shading maps out of capture results
-        for i in range(len(caps)):
-            shading_maps[i % 3][i / 3] = \
-                    caps[i]["metadata"] \
-                    ["android.statistics.lensShadingCorrectionMap"]["map"]
+        for i in range(len(caps)/NUM_FRAMES):
+            shading_maps[i%num_shading_modes][i/NUM_SHADING_MODE_SWITCH_LOOPS] = \
+                    caps[(i+1)*NUM_FRAMES-1]['metadata']['android.statistics.lensShadingCorrectionMap']['map']
 
         # Draw the maps
-        for mode in range(3):
+        for mode in range(num_shading_modes):
             for i in range(NUM_SHADING_MODE_SWITCH_LOOPS):
                 pylab.clf()
-                pylab.plot(range(num_map_gains), shading_maps[mode][i], 'r')
-                pylab.plot(range(num_map_gains), reference_maps[mode], 'g')
+                pylab.figure(figsize=(5, 5))
+                pylab.subplot(2, 1, 1)
+                pylab.plot(range(num_map_gains), shading_maps[mode][i], '-r.',
+                           label='shading', alpha=0.7)
+                pylab.plot(range(num_map_gains), reference_maps[mode], '-g.',
+                           label='ref', alpha=0.7)
                 pylab.xlim([0, num_map_gains])
                 pylab.ylim([0.9, 4.0])
-                matplotlib.pyplot.savefig("%s_ls_maps_mode_%d_loop_%d.png" %
-                                          (NAME, mode, i))
+                name = '%s_ls_maps_mode_%d_loop_%d' % (NAME, mode, i)
+                pylab.title(name)
+                pylab.xlabel('Map gains')
+                pylab.ylabel('Lens shading maps')
+                pylab.legend(loc='upper center', numpoints=1, fancybox=True)
 
-        print "Verifying lens shading maps with mode OFF are all 1.0"
-        for i in range(NUM_SHADING_MODE_SWITCH_LOOPS):
-            assert(numpy.allclose(shading_maps[0][i], reference_maps[0]))
+                pylab.subplot(2, 1, 2)
+                shading_ref_ratio = numpy.divide(
+                        shading_maps[mode][i], reference_maps[mode])
+                pylab.plot(range(num_map_gains), shading_ref_ratio, '-b.',
+                           clip_on=False)
+                pylab.xlim([0, num_map_gains])
+                pylab.ylim([1.0-THRESHOLD_DIFF_RATIO, 1.0+THRESHOLD_DIFF_RATIO])
+                pylab.title('Shading/reference Maps Ratio vs Gain')
+                pylab.xlabel('Map gains')
+                pylab.ylabel('Shading/ref maps ratio')
 
-        for mode in range(1, 3):
-            print "Verifying lens shading maps with mode", mode, "are similar"
+                pylab.tight_layout()
+                matplotlib.pyplot.savefig('%s.png' % name)
+
+        for mode in range(num_shading_modes):
+            if mode == 0:
+                print 'Verifying lens shading maps with mode %s are all 1.0' % (
+                        SHADING_MODES[mode])
+            else:
+                print 'Verifying lens shading maps with mode %s are similar' % (
+                        SHADING_MODES[mode])
             for i in range(NUM_SHADING_MODE_SWITCH_LOOPS):
-                assert(numpy.allclose(shading_maps[mode][i],
-                                      reference_maps[mode],
-                                      THRESHOLD_DIFF_RATIO))
+                e_msg = 'FAIL mode: %s, loop: %d, THRESH: %.2f' % (
+                        SHADING_MODES[mode], i, THRESHOLD_DIFF_RATIO)
+                assert (numpy.allclose(shading_maps[mode][i],
+                                       reference_maps[mode],
+                                       rtol=THRESHOLD_DIFF_RATIO)), e_msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py b/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
index 73c001d..8b3ef86 100644
--- a/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
+++ b/apps/CameraITS/tests/scene1/test_post_raw_sensitivity_boost.py
@@ -12,28 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.device
+import os.path
+
 import its.caps
+import its.device
 import its.image
 import its.objects
 import its.target
-import os.path
-from matplotlib import pylab
+
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+RATIO_THRESHOLD = 0.1  # Each raw image
+# Waive the check if raw pixel value is below this level (signal too small
+# that small black level error converts to huge error in percentage)
+RAW_PIXEL_VAL_THRESHOLD = 0.03
+
 
 def main():
-    """Capture a set of raw/yuv images with different
-        sensitivity/post Raw sensitivity boost combination
+    """Check post RAW sensitivity boost.
+
+        Capture a set of raw/yuv images with different
+        sensitivity/post RAW sensitivity boost combination
         and check if the output pixel mean matches request settings
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    # Each raw image
-    RATIO_THRESHOLD = 0.1
-    # Waive the check if raw pixel value is below this level (signal too small
-    # that small black level error converts to huge error in percentage)
-    RAW_PIXEL_VAL_THRESHOLD = 0.03
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -43,8 +46,8 @@
                              its.caps.per_frame_control(props) and
                              not its.caps.mono_camera(props))
 
-        w,h = its.objects.get_available_output_sizes(
-                "yuv", props, (1920, 1080))[0]
+        w, h = its.objects.get_available_output_sizes(
+                'yuv', props, (1920, 1080))[0]
 
         if its.caps.raw16(props):
             raw_format = 'raw'
@@ -52,19 +55,18 @@
             raw_format = 'raw10'
         elif its.caps.raw12(props):
             raw_format = 'raw12'
-        else: # should not reach here
+        else:  # should not reach here
             raise its.error.Error('Cannot find available RAW output format')
 
-        out_surfaces = [{"format": raw_format},
-                        {"format": "yuv", "width": w, "height": h}]
+        out_surfaces = [{'format': raw_format},
+                        {'format': 'yuv', 'width': w, 'height': h}]
 
         sens_min, sens_max = props['android.sensor.info.sensitivityRange']
         sens_boost_min, sens_boost_max = \
                 props['android.control.postRawSensitivityBoostRange']
 
-
         e_target, s_target = \
-                its.target.get_target_exposure_combos(cam)["midSensitivity"]
+                its.target.get_target_exposure_combos(cam)['midSensitivity']
 
         reqs = []
         settings = []
@@ -97,51 +99,60 @@
             (s, s_boost) = settings[i]
             raw_cap = raw_caps[i]
             yuv_cap = yuv_caps[i]
-            raw_rgb = its.image.convert_capture_to_rgb_image(raw_cap, props=props)
+            raw_rgb = its.image.convert_capture_to_rgb_image(
+                    raw_cap, props=props)
             yuv_rgb = its.image.convert_capture_to_rgb_image(yuv_cap)
-            raw_tile = its.image.get_image_patch(raw_rgb, 0.45,0.45,0.1,0.1)
-            yuv_tile = its.image.get_image_patch(yuv_rgb, 0.45,0.45,0.1,0.1)
+            raw_tile = its.image.get_image_patch(raw_rgb, 0.45, 0.45, 0.1, 0.1)
+            yuv_tile = its.image.get_image_patch(yuv_rgb, 0.45, 0.45, 0.1, 0.1)
             raw_rgb_means.append(its.image.compute_image_means(raw_tile))
             yuv_rgb_means.append(its.image.compute_image_means(yuv_tile))
-            its.image.write_image(raw_tile,
-                    "%s_raw_s=%04d_boost=%04d.jpg" % (NAME,s,s_boost))
-            its.image.write_image(yuv_tile,
-                    "%s_yuv_s=%04d_boost=%04d.jpg" % (NAME,s,s_boost))
-            print "s=%d, s_boost=%d: raw_means %s, yuv_means %s"%(
-                    s,s_boost,raw_rgb_means[-1], yuv_rgb_means[-1])
+            its.image.write_image(raw_tile, '%s_raw_s=%04d_boost=%04d.jpg' % (
+                    NAME, s, s_boost))
+            its.image.write_image(yuv_tile, '%s_yuv_s=%04d_boost=%04d.jpg' % (
+                    NAME, s, s_boost))
+            print 's=%d, s_boost=%d: raw_means %s, yuv_means %s'%(
+                    s, s_boost, raw_rgb_means[-1], yuv_rgb_means[-1])
 
         xs = range(len(reqs))
-        pylab.plot(xs, [rgb[0] for rgb in raw_rgb_means], 'r')
-        pylab.plot(xs, [rgb[1] for rgb in raw_rgb_means], 'g')
-        pylab.plot(xs, [rgb[2] for rgb in raw_rgb_means], 'b')
-        pylab.ylim([0,1])
-        matplotlib.pyplot.savefig("%s_raw_plot_means.png" % (NAME))
+        pylab.plot(xs, [rgb[0] for rgb in raw_rgb_means], '-ro')
+        pylab.plot(xs, [rgb[1] for rgb in raw_rgb_means], '-go')
+        pylab.plot(xs, [rgb[2] for rgb in raw_rgb_means], '-bo')
+        pylab.ylim([0, 1])
+        name = '%s_raw_plot_means' % NAME
+        pylab.title(name)
+        pylab.xlabel('requests')
+        pylab.ylabel('RGB means')
+        matplotlib.pyplot.savefig('%s.png' % name)
         pylab.clf()
-        pylab.plot(xs, [rgb[0] for rgb in yuv_rgb_means], 'r')
-        pylab.plot(xs, [rgb[1] for rgb in yuv_rgb_means], 'g')
-        pylab.plot(xs, [rgb[2] for rgb in yuv_rgb_means], 'b')
-        pylab.ylim([0,1])
-        matplotlib.pyplot.savefig("%s_yuv_plot_means.png" % (NAME))
+        pylab.plot(xs, [rgb[0] for rgb in yuv_rgb_means], '-ro')
+        pylab.plot(xs, [rgb[1] for rgb in yuv_rgb_means], '-go')
+        pylab.plot(xs, [rgb[2] for rgb in yuv_rgb_means], '-bo')
+        pylab.ylim([0, 1])
+        name = '%s_yuv_plot_means' % NAME
+        pylab.title(name)
+        pylab.xlabel('requests')
+        pylab.ylabel('RGB means')
+        matplotlib.pyplot.savefig('%s.png' % name)
 
-        rgb_str = ["R", "G", "B"]
+        rgb_str = ['R', 'G', 'B']
         # Test that raw means is about 2x brighter than next step
         for step in range(1, len(reqs)):
-            (s_prev, s_boost_prev) = settings[step - 1]
+            (s_prev, _) = settings[step - 1]
             (s, s_boost) = settings[step]
             expect_raw_ratio = s_prev / float(s)
             raw_thres_min = expect_raw_ratio * (1 - RATIO_THRESHOLD)
             raw_thres_max = expect_raw_ratio * (1 + RATIO_THRESHOLD)
             for rgb in range(3):
                 ratio = raw_rgb_means[step - 1][rgb] / raw_rgb_means[step][rgb]
-                print ("Step (%d,%d) %s channel: %f, %f, ratio %f," +
-                       " threshold_min %f, threshold_max %f") % (
+                print 'Step (%d,%d) %s channel: %f, %f, ratio %f,' % (
                         step-1, step, rgb_str[rgb],
                         raw_rgb_means[step - 1][rgb],
-                        raw_rgb_means[step][rgb],
-                        ratio, raw_thres_min, raw_thres_max)
-                if (raw_rgb_means[step][rgb] <= RAW_PIXEL_VAL_THRESHOLD):
+                        raw_rgb_means[step][rgb], ratio),
+                print 'threshold_min %f, threshold_max %f' % (
+                        raw_thres_min, raw_thres_max)
+                if raw_rgb_means[step][rgb] <= RAW_PIXEL_VAL_THRESHOLD:
                     continue
-                assert(raw_thres_min < ratio < raw_thres_max)
+                assert raw_thres_min < ratio < raw_thres_max
 
         # Test that each yuv step is about the same bright as their mean
         yuv_thres_min = 1 - RATIO_THRESHOLD
@@ -149,13 +160,13 @@
         for rgb in range(3):
             vals = [val[rgb] for val in yuv_rgb_means]
             for step in range(len(reqs)):
-                if (raw_rgb_means[step][rgb] <= RAW_PIXEL_VAL_THRESHOLD):
+                if raw_rgb_means[step][rgb] <= RAW_PIXEL_VAL_THRESHOLD:
                     vals = vals[:step]
             mean = sum(vals) / len(vals)
-            print "%s channel vals %s mean %f"%(rgb_str[rgb], vals, mean)
+            print '%s channel vals %s mean %f'%(rgb_str[rgb], vals, mean)
             for step in range(len(vals)):
                 ratio = vals[step] / mean
-                assert(yuv_thres_min < ratio < yuv_thres_max)
+                assert yuv_thres_min < ratio < yuv_thres_max
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
index b6b0514..debf22c 100644
--- a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
@@ -12,13 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.device
-import its.caps
-import its.objects
-import its.image
 import os.path
-from matplotlib import pylab
+import its.caps
+import its.device
+import its.image
+import its.objects
 import matplotlib
+from matplotlib import pylab
 
 GR_PLANE = 1  # GR plane index in RGGB data
 IMG_STATS_GRID = 9  # find used to find the center 11.11%
@@ -106,7 +106,9 @@
         # Test that each shot is noisier than the previous one.
         x.pop()  # remove last element in x index
         for i in x:
-            assert variances[i] < variances[i+1] / VAR_THRESH
+            msg = 'variances [i]: %.5f, [i+1]: %.5f, THRESH: %.2f' % (
+                    variances[i], variances[i+1], VAR_THRESH)
+            assert variances[i] < variances[i+1] / VAR_THRESH, msg
 
 if __name__ == "__main__":
     main()
diff --git a/apps/CameraITS/tests/scene1/test_raw_exposure.py b/apps/CameraITS/tests/scene1/test_raw_exposure.py
index ca59aa8..b3fc98f 100644
--- a/apps/CameraITS/tests/scene1/test_raw_exposure.py
+++ b/apps/CameraITS/tests/scene1/test_raw_exposure.py
@@ -26,8 +26,9 @@
 NUM_ISO_STEPS = 5
 SATURATION_TOL = 0.01
 BLK_LVL_TOL = 0.1
-# Test 3 steps per 2x exposure
-EXP_MULT = pow(2, 1.0/3)
+EXP_MULT_SHORT = pow(2, 1.0/3)  # Test 3 steps per 2x exposure
+EXP_MULT_LONG = pow(10, 1.0/3)  # Test 3 steps per 10x exposure
+EXP_LONG = 1E6  # 1ms
 INCREASING_THR = 0.99
 # slice captures into burst of SLICE_LEN requests
 SLICE_LEN = 10
@@ -40,9 +41,9 @@
     with its.device.ItsSession() as cam:
 
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.raw16(props) and
                              its.caps.manual_sensor(props) and
-                             its.caps.read_3a(props) and
                              its.caps.per_frame_control(props) and
                              not its.caps.mono_camera(props))
         debug = its.caps.debug_mode()
@@ -68,7 +69,10 @@
         mult = 1.0
         while exp_min*mult < exp_max:
             e_test.append(int(exp_min*mult))
-            mult *= EXP_MULT
+            if exp_min*mult < EXP_LONG:
+                mult *= EXP_MULT_SHORT
+            else:
+                mult *= EXP_MULT_LONG
         if e_test[-1] < exp_max * INCREASING_THR:
             e_test.append(int(exp_max))
         e_test_ms = [e / 1000000.0 for e in e_test]
@@ -79,18 +83,18 @@
             reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]
             # Capture raw in debug mode, rawStats otherwise
             caps = []
-            for i in range(len(reqs) / SLICE_LEN):
+            slice_len = SLICE_LEN
+            # Eliminate cap burst of 1: returns [[]], not [{}, ...]
+            while len(reqs) % slice_len == 1:
+                slice_len -= 1
+            # Break caps into smaller bursts
+            for i in range(len(reqs) / slice_len):
                 if debug:
-                    caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)
+                    caps += cam.do_capture(reqs[i*slice_len:(i+1)*slice_len], cam.CAP_RAW)
                 else:
-                    caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)
-            last_n = len(reqs) % SLICE_LEN
-            if last_n == 1:
-                if debug:
-                    caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]
-                else:
-                    caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]
-            elif last_n > 0:
+                    caps += cam.do_capture(reqs[i*slice_len:(i+1)*slice_len], raw_stat_fmt)
+            last_n = len(reqs) % slice_len
+            if last_n:
                 if debug:
                     caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)
                 else:
@@ -110,7 +114,6 @@
                 else:
                     mean_image, _ = its.image.unpack_rawstats_capture(cap)
                     mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]
-
                 print "ISO=%d, exposure time=%.3fms, mean=%s" % (
                         s, e_test[i] / 1000000.0, str(mean))
                 means.append(mean)
diff --git a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
index db69e36..5eb6d47 100644
--- a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
@@ -12,13 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.device
-import its.caps
-import its.objects
-import its.image
 import os.path
-from matplotlib import pylab
+import its.caps
+import its.device
+import its.image
+import its.objects
 import matplotlib.pyplot
+from matplotlib import pylab
 
 GR_PLANE = 1  # GR plane index in RGGB data
 IMG_STATS_GRID = 9  # find used to find the center 11.11%
@@ -34,6 +34,7 @@
     with its.device.ItsSession() as cam:
 
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.raw16(props) and
                              its.caps.manual_sensor(props) and
                              its.caps.read_3a(props) and
@@ -94,7 +95,9 @@
 
         # Test that each shot is noisier than the previous one.
         for i in range(len(variances) - 1):
-            assert variances[i] < variances[i+1] / VAR_THRESH
+            msg = 'variances [i]: %.5f, [i+1]: %.5f, THRESH: %.2f' % (
+                    variances[i], variances[i+1], VAR_THRESH)
+            assert variances[i] < variances[i+1] / VAR_THRESH, msg
 
 if __name__ == "__main__":
     main()
diff --git a/apps/CameraITS/tests/scene1/test_reprocess_noise_reduction.py b/apps/CameraITS/tests/scene1/test_reprocess_noise_reduction.py
index f6eecae..0f84244 100644
--- a/apps/CameraITS/tests/scene1/test_reprocess_noise_reduction.py
+++ b/apps/CameraITS/tests/scene1/test_reprocess_noise_reduction.py
@@ -12,23 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import matplotlib
-import matplotlib.pyplot
-import numpy
-import os.path
-from matplotlib import pylab
 
+import matplotlib
+from matplotlib import pylab
+import numpy
+
+NAME = os.path.basename(__file__).split(".")[0]
 NR_MODES = [0, 1, 2, 3, 4]
+NUM_FRAMES = 4
+SNR_TOLERANCE = 3  # unit in dB
 
 
 def main():
-    """Test that the android.noiseReduction.mode param is applied when set for
-       reprocessing requests.
+    """Test android.noiseReduction.mode is applied for reprocessing requests.
 
     Capture reprocessed images with the camera dimly lit. Uses a high analog
     gain to ensure the captured image is noisy.
@@ -38,11 +41,6 @@
     variance of this as the baseline.
     """
 
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    NUM_SAMPLES_PER_MODE = 4
-    SNR_TOLERANCE = 3 # unit in db
-
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
 
@@ -53,15 +51,16 @@
                               its.caps.private_reprocess(props)))
 
         # If reprocessing is supported, ZSL NR mode must be avaiable.
-        assert(its.caps.noise_reduction_mode(props, 4))
+        assert its.caps.noise_reduction_mode(props, 4)
 
         reprocess_formats = []
-        if (its.caps.yuv_reprocess(props)):
+        if its.caps.yuv_reprocess(props):
             reprocess_formats.append("yuv")
-        if (its.caps.private_reprocess(props)):
+        if its.caps.private_reprocess(props):
             reprocess_formats.append("private")
 
         for reprocess_format in reprocess_formats:
+            print "\nreprocess format:", reprocess_format
             # List of variances for R, G, B.
             snrs = [[], [], []]
             nr_modes_reported = []
@@ -75,10 +74,10 @@
             # TODO: Switch to reprocess_format->YUV when YUV reprocessing is
             #       supported.
             size = its.objects.get_available_output_sizes("jpg", props)[0]
-            out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+            out_surface = {"width": size[0], "height": size[1], "format": "jpg"}
             cap = cam.do_capture(req, out_surface, reprocess_format)
             img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
-            its.image.write_image(img, "%s_low_gain_fmt=jpg.jpg" % (NAME))
+            its.image.write_image(img, "%s_low_gain_fmt=jpg.jpg" % NAME)
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             ref_snr = its.image.compute_image_snrs(tile)
             print "Ref SNRs:", ref_snr
@@ -95,19 +94,19 @@
                 rgb_snr_list = []
                 # Capture several images to account for per frame noise
                 # variations
-                for n in range(NUM_SAMPLES_PER_MODE):
-                    req = its.objects.manual_capture_request(s, e)
-                    req["android.noiseReduction.mode"] = nr_mode
-                    cap = cam.do_capture(req, out_surface, reprocess_format)
-
-                    img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+                req = its.objects.manual_capture_request(s, e)
+                req["android.noiseReduction.mode"] = nr_mode
+                caps = cam.do_capture(
+                        [req]*NUM_FRAMES, out_surface, reprocess_format)
+                for n in range(NUM_FRAMES):
+                    img = its.image.decompress_jpeg_to_rgb_image(
+                            caps[n]["data"])
                     if n == 0:
                         its.image.write_image(
-                                img,
-                                "%s_high_gain_nr=%d_fmt=jpg.jpg"
-                                        %(NAME, nr_mode))
+                                img, "%s_high_gain_nr=%d_fmt=jpg.jpg" % (
+                                        NAME, nr_mode))
                         nr_modes_reported.append(
-                                cap["metadata"]["android.noiseReduction.mode"])
+                                caps[n]["metadata"]["android.noiseReduction.mode"])
 
                     tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
                     # Get the variances for R, G, and B channels
@@ -121,12 +120,12 @@
                             numpy.mean(g_snrs),
                             numpy.mean(b_snrs)]
                 print "NR mode", nr_mode, "SNRs:"
-                print "    R SNR:", rgb_snrs[0],\
-                        "Min:", min(r_snrs), "Max:", max(r_snrs)
-                print "    G SNR:", rgb_snrs[1],\
-                        "Min:", min(g_snrs), "Max:", max(g_snrs)
-                print "    B SNR:", rgb_snrs[2],\
-                        "Min:", min(b_snrs), "Max:", max(b_snrs)
+                print "    R SNR:", rgb_snrs[0],
+                print "Min:", min(r_snrs), "Max:", max(r_snrs)
+                print "    G SNR:", rgb_snrs[1],
+                print "Min:", min(g_snrs), "Max:", max(g_snrs)
+                print "    B SNR:", rgb_snrs[2],
+                print "Min:", min(b_snrs), "Max:", max(b_snrs)
 
                 for chan in range(3):
                     snrs[chan].append(rgb_snrs[chan])
@@ -136,6 +135,7 @@
             for channel in range(3):
                 pylab.plot(NR_MODES, snrs[channel], "-"+"rgb"[channel]+"o")
 
+            pylab.title(NAME + ", reprocess_fmt=" + reprocess_format)
             pylab.xlabel("Noise Reduction Mode")
             pylab.ylabel("SNR (dB)")
             pylab.xticks(NR_MODES)
@@ -145,30 +145,38 @@
             assert nr_modes_reported == NR_MODES
 
             for j in range(3):
-                # Larger is better
                 # Verify OFF(0) is not better than FAST(1)
-                assert(snrs[j][0] <
-                       snrs[j][1] + SNR_TOLERANCE)
+                msg = "FAST(1): %.2f, OFF(0): %.2f, TOL: %f" % (
+                        snrs[j][1], snrs[j][0], SNR_TOLERANCE)
+                assert snrs[j][0] < snrs[j][1] + SNR_TOLERANCE, msg
                 # Verify FAST(1) is not better than HQ(2)
-                assert(snrs[j][1] <
-                       snrs[j][2] + SNR_TOLERANCE)
+                msg = "HQ(2): %.2f, FAST(1): %.2f, TOL: %f" % (
+                        snrs[j][2], snrs[j][1], SNR_TOLERANCE)
+                assert snrs[j][1] < snrs[j][2] + SNR_TOLERANCE, msg
                 # Verify HQ(2) is better than OFF(0)
-                assert(snrs[j][0] < snrs[j][2])
+                msg = "HQ(2): %.2f, OFF(0): %.2f" % (snrs[j][2], snrs[j][0])
+                assert snrs[j][0] < snrs[j][2], msg
                 if its.caps.noise_reduction_mode(props, 3):
                     # Verify OFF(0) is not better than MINIMAL(3)
-                    assert(snrs[j][0] <
-                           snrs[j][3] + SNR_TOLERANCE)
+                    msg = "MINIMAL(3): %.2f, OFF(0): %.2f, TOL: %f" % (
+                            snrs[j][3], snrs[j][0], SNR_TOLERANCE)
+                    assert snrs[j][0] < snrs[j][3] + SNR_TOLERANCE, msg
                     # Verify MINIMAL(3) is not better than HQ(2)
-                    assert(snrs[j][3] <
-                           snrs[j][2] + SNR_TOLERANCE)
+                    msg = "MINIMAL(3): %.2f, HQ(2): %.2f, TOL: %f" % (
+                            snrs[j][3], snrs[j][2], SNR_TOLERANCE)
+                    assert snrs[j][3] < snrs[j][2] + SNR_TOLERANCE, msg
                     # Verify ZSL(4) is close to MINIMAL(3)
-                    assert(numpy.isclose(snrs[j][4], snrs[j][3],
-                                         atol=SNR_TOLERANCE))
+                    msg = "ZSL(4): %.2f, MINIMAL(3): %.2f, TOL: %f" % (
+                            snrs[j][4], snrs[j][3], SNR_TOLERANCE)
+                    assert numpy.isclose(snrs[j][4], snrs[j][3],
+                                         atol=SNR_TOLERANCE), msg
                 else:
                     # Verify ZSL(4) is close to OFF(0)
-                    assert(numpy.isclose(snrs[j][4], snrs[j][0],
-                                         atol=SNR_TOLERANCE))
+                    msg = "ZSL(4): %.2f, OFF(0): %.2f, TOL: %f" % (
+                            snrs[j][4], snrs[j][0], SNR_TOLERANCE)
+                    assert numpy.isclose(snrs[j][4], snrs[j][0],
+                                         atol=SNR_TOLERANCE), msg
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
index a7b9f6d..685f6eb 100644
--- a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
+++ b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
@@ -12,22 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
 import numpy
 
+MAX_SAME_DELTA = 0.03  # match number in test_burst_sameness_manual
+MIN_DIFF_DELTA = 0.10
+NAME = os.path.basename(__file__).split(".")[0]
+
+
 def main():
     """Test a sequence of shots with different tonemap curves.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
 
-    # There should be 3 identical frames followed by a different set of
-    # 3 identical frames.
-    MAX_SAME_DELTA = 0.03  # match number in test_burst_sameness_manual
-    MIN_DIFF_DELTA = 0.10
+    There should be 3 identical frames followed by a different set of
+    3 identical frames.
+    """
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -41,16 +43,17 @@
         if debug:
             fmt = largest_yuv
         else:
-            match_ar = (largest_yuv['width'], largest_yuv['height'])
+            match_ar = (largest_yuv["width"], largest_yuv["height"])
             fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
 
-        sens, exp_time, _,_,f_dist = cam.do_3a(do_af=True,get_results=True)
+        sens, exp_time, _, _, f_dist = cam.do_3a(do_af=True, get_results=True)
 
         means = []
 
         # Capture 3 manual shots with a linear tonemap.
-        req = its.objects.manual_capture_request(sens, exp_time, f_dist, True, props)
-        for i in [0,1,2]:
+        req = its.objects.manual_capture_request(
+                sens, exp_time, f_dist, True, props)
+        for i in [0, 1, 2]:
             cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
@@ -59,7 +62,7 @@
 
         # Capture 3 manual shots with the default tonemap.
         req = its.objects.manual_capture_request(sens, exp_time, f_dist, False)
-        for i in [3,4,5]:
+        for i in [3, 4, 5]:
             cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
@@ -71,9 +74,12 @@
                   for i in range(len(means)-1)]
         print "Deltas between consecutive frames:", deltas
 
-        assert(all([abs(deltas[i]) < MAX_SAME_DELTA for i in [0,1,3,4]]))
-        assert(abs(deltas[2]) > MIN_DIFF_DELTA)
+        msg = "deltas: %s, MAX_SAME_DELTA: %.2f" % (
+                str(deltas), MAX_SAME_DELTA)
+        assert all([abs(deltas[i]) < MAX_SAME_DELTA for i in [0, 1, 3, 4]]), msg
+        assert abs(deltas[2]) > MIN_DIFF_DELTA, "delta: %.5f, THRESH: %.2f" % (
+                abs(deltas[2]), MIN_DIFF_DELTA)
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
index e7604c1..163437a 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
@@ -12,20 +12,25 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
-import math
+
+import matplotlib.pylab
+import matplotlib.pyplot
+
+NAME = os.path.basename(__file__).split(".")[0]
+THRESHOLD_MAX_RMS_DIFF = 0.03
+
 
 def main():
     """Test that the reported sizes and formats for image capture work.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    THRESHOLD_MAX_RMS_DIFF = 0.03
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -40,36 +45,48 @@
         rgbs = []
 
         for size in its.objects.get_available_output_sizes("yuv", props):
-            out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
+            out_surface = {"width": size[0], "height": size[1], "format": "yuv"}
             cap = cam.do_capture(req, out_surface)
-            assert(cap["format"] == "yuv")
-            assert(cap["width"] == size[0])
-            assert(cap["height"] == size[1])
-            print "Captured YUV %dx%d" % (cap["width"], cap["height"])
+            assert cap["format"] == "yuv"
+            assert cap["width"] == size[0]
+            assert cap["height"] == size[1]
+            print "Captured YUV %dx%d" % (cap["width"], cap["height"]),
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_yuv_w%d_h%d.jpg"%(
-                    NAME,size[0],size[1]))
+                    NAME, size[0], size[1]))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb = its.image.compute_image_means(tile)
+            print "rgb =", rgb
             rgbs.append(rgb)
 
         for size in its.objects.get_available_output_sizes("jpg", props):
-            out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+            out_surface = {"width": size[0], "height": size[1], "format": "jpg"}
             cap = cam.do_capture(req, out_surface)
-            assert(cap["format"] == "jpeg")
-            assert(cap["width"] == size[0])
-            assert(cap["height"] == size[1])
+            assert cap["format"] == "jpeg"
+            assert cap["width"] == size[0]
+            assert cap["height"] == size[1]
             img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
             its.image.write_image(img, "%s_jpg_w%d_h%d.jpg"%(
-                    NAME,size[0], size[1]))
-            assert(img.shape[0] == size[1])
-            assert(img.shape[1] == size[0])
-            assert(img.shape[2] == 3)
-            print "Captured JPEG %dx%d" % (cap["width"], cap["height"])
+                    NAME, size[0], size[1]))
+            assert img.shape[0] == size[1]
+            assert img.shape[1] == size[0]
+            assert img.shape[2] == 3
+            print "Captured JPEG %dx%d" % (cap["width"], cap["height"]),
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb = its.image.compute_image_means(tile)
+            print "rgb =", rgb
             rgbs.append(rgb)
 
+        # Plot means vs format
+        matplotlib.pylab.title(NAME)
+        matplotlib.pylab.plot(range(len(rgbs)), [r[0] for r in rgbs], "-ro")
+        matplotlib.pylab.plot(range(len(rgbs)), [g[1] for g in rgbs], "-go")
+        matplotlib.pylab.plot(range(len(rgbs)), [b[2] for b in rgbs], "-bo")
+        matplotlib.pylab.ylim([0, 1])
+        matplotlib.pylab.xlabel("format number")
+        matplotlib.pylab.ylabel("RGB avg [0, 1]")
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
         max_diff = 0
         rgb0 = rgbs[0]
         for rgb1 in rgbs[1:]:
@@ -77,8 +94,10 @@
                     sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
             max_diff = max(max_diff, rms_diff)
         print "Max RMS difference:", max_diff
-        assert(max_diff < THRESHOLD_MAX_RMS_DIFF)
+        msg = "Max RMS difference: %.4f, spec: %.3f" % (max_diff,
+                                                        THRESHOLD_MAX_RMS_DIFF)
+        assert max_diff < THRESHOLD_MAX_RMS_DIFF, msg
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
index 4a62120..1d4113f 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
@@ -12,16 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
+
+NAME = os.path.basename(__file__).split(".")[0]
+
 
 def main():
     """Test capturing a single frame as both DNG and YUV outputs.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -32,13 +35,12 @@
         cam.do_3a(mono_camera=mono_camera)
 
         req = its.objects.auto_capture_request()
-        max_dng_size = \
-                its.objects.get_available_output_sizes("raw", props)[0]
-        w,h = its.objects.get_available_output_sizes(
+        max_dng_size = its.objects.get_available_output_sizes("raw", props)[0]
+        w, h = its.objects.get_available_output_sizes(
                 "yuv", props, (1920, 1080), max_dng_size)[0]
-        out_surfaces = [{"format":"dng"},
-                        {"format":"yuv", "width":w, "height":h}]
-        cap_dng, cap_yuv = cam.do_capture(req, cam.CAP_DNG_YUV)
+        out_surfaces = [{"format": "dng"},
+                        {"format": "yuv", "width": w, "height": h}]
+        cap_dng, cap_yuv = cam.do_capture(req, out_surfaces)
 
         img = its.image.convert_capture_to_rgb_image(cap_yuv)
         its.image.write_image(img, "%s.jpg" % (NAME))
@@ -49,6 +51,6 @@
         # No specific pass/fail check; test is assumed to have succeeded if
         # it completes.
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
index f559c2b..821eb35 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
@@ -12,20 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
-import math
+
+NAME = os.path.basename(__file__).split(".")[0]
+THRESHOLD_MAX_RMS_DIFF = 0.01
+
 
 def main():
     """Test capturing a single frame as both YUV and JPEG outputs.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    THRESHOLD_MAX_RMS_DIFF = 0.01
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -33,10 +35,10 @@
 
         max_jpeg_size = \
                 its.objects.get_available_output_sizes("jpeg", props)[0]
-        w,h = its.objects.get_available_output_sizes(
+        w, h = its.objects.get_available_output_sizes(
                 "yuv", props, (1920, 1080), max_jpeg_size)[0]
-        fmt_yuv =  {"format":"yuv", "width":w, "height":h}
-        fmt_jpeg = {"format":"jpeg"}
+        fmt_yuv = {"format": "yuv", "width": w, "height": h}
+        fmt_jpeg = {"format": "jpeg"}
 
         # Use a manual request with a linear tonemap so that the YUV and JPEG
         # should look the same (once converted by the its.image module).
@@ -58,7 +60,9 @@
         rms_diff = math.sqrt(
                 sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
         print "RMS difference:", rms_diff
-        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+        msg = "RMS difference: %.4f, spec: %.3f" % (rms_diff,
+                                                    THRESHOLD_MAX_RMS_DIFF)
+        assert rms_diff < THRESHOLD_MAX_RMS_DIFF, msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
index cd284b7..75e70ae 100644
--- a/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw12.py
@@ -12,20 +12,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import math
+import os.path
+
 import its.caps
 import its.device
+import its.image
 import its.objects
 import its.target
-import os.path
-import math
+
+NAME = os.path.basename(__file__).split(".")[0]
+THRESHOLD_MAX_RMS_DIFF = 0.035
+
 
 def main():
     """Test capturing a single frame as both RAW12 and YUV outputs.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    THRESHOLD_MAX_RMS_DIFF = 0.035
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
@@ -41,11 +43,11 @@
 
         max_raw12_size = \
                 its.objects.get_available_output_sizes("raw12", props)[0]
-        w,h = its.objects.get_available_output_sizes(
+        w, h = its.objects.get_available_output_sizes(
                 "yuv", props, (1920, 1080), max_raw12_size)[0]
-        cap_raw, cap_yuv = cam.do_capture(req,
-                [{"format":"raw12"},
-                 {"format":"yuv", "width":w, "height":h}])
+        cap_raw, cap_yuv = cam.do_capture(
+                req, [{"format": "raw12"},
+                      {"format": "yuv", "width": w, "height": h}])
 
         img = its.image.convert_capture_to_rgb_image(cap_yuv)
         its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
@@ -62,7 +64,9 @@
         rms_diff = math.sqrt(
                 sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
         print "RMS difference:", rms_diff
-        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+        msg = "RMS difference: %.4f, spec: %.3f" % (rms_diff,
+                                                    THRESHOLD_MAX_RMS_DIFF)
+        assert rms_diff < THRESHOLD_MAX_RMS_DIFF, msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/scene2/scene2_0.5_scaled.pdf b/apps/CameraITS/tests/scene2/scene2_0.5_scaled.pdf
new file mode 100644
index 0000000..de036b6
--- /dev/null
+++ b/apps/CameraITS/tests/scene2/scene2_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2/test_auto_per_frame_control.py b/apps/CameraITS/tests/scene2/test_auto_per_frame_control.py
new file mode 100644
index 0000000..4b8185a
--- /dev/null
+++ b/apps/CameraITS/tests/scene2/test_auto_per_frame_control.py
@@ -0,0 +1,150 @@
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+
+import its.caps
+import its.device
+import its.image
+import its.objects
+
+import matplotlib
+from matplotlib import pylab
+import numpy as np
+
+AE_STATE_CONVERGED = 2
+CONTROL_AE_STATE_FLASH_REQUIRED = 4
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_CAPTURE = 30
+VALID_STABLE_LUMA_MIN = 0.1
+VALID_STABLE_LUMA_MAX = 0.9
+
+
+def is_awb_af_stable(prev_cap, cap):
+    awb_gains_0 = prev_cap['metadata']['android.colorCorrection.gains']
+    awb_gains_1 = cap['metadata']['android.colorCorrection.gains']
+    ccm_0 = prev_cap['metadata']['android.colorCorrection.transform']
+    ccm_1 = cap['metadata']['android.colorCorrection.transform']
+    focus_distance_0 = prev_cap['metadata']['android.lens.focusDistance']
+    focus_distance_1 = cap['metadata']['android.lens.focusDistance']
+
+    return (np.allclose(awb_gains_0, awb_gains_1, rtol=0.01) and
+            ccm_0 == ccm_1 and
+            np.isclose(focus_distance_0, focus_distance_1, rtol=0.01))
+
+
+def main():
+    """Tests PER_FRAME_CONTROL properties for auto capture requests.
+
+    If debug is required, MANUAL_POSTPROCESSING capability is implied
+    since its.caps.read_3a is valid for test. Debug can performed with
+    a defined tonemap curve:
+    req['android.tonemap.mode'] = 0
+    gamma = sum([[i/63.0,math.pow(i/63.0,1/2.2)] for i in xrange(64)],[])
+    req['android.tonemap.curve'] = {
+            'red': gamma, 'green': gamma, 'blue': gamma}
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.per_frame_control(props) and
+                             its.caps.read_3a(props))
+
+        debug = its.caps.debug_mode()
+        largest_yuv = its.objects.get_largest_yuv_format(props)
+        if debug:
+            fmt = largest_yuv
+        else:
+            match_ar = (largest_yuv['width'], largest_yuv['height'])
+            fmt = its.objects.get_smallest_yuv_format(props, match_ar=match_ar)
+
+        req = its.objects.auto_capture_request()
+        caps = cam.do_capture([req]*NUM_CAPTURE, fmt)
+
+        total_gains = []
+        lumas = []
+        ae_states = []
+        for i, cap in enumerate(caps):
+            print '=========== frame %d ==========' % i
+            y = its.image.convert_capture_to_planes(cap)[0]
+            tile = its.image.get_image_patch(y, 0.45, 0.45, 0.1, 0.1)
+            luma = its.image.compute_image_means(tile)[0]
+
+            ae_state = cap['metadata']['android.control.aeState']
+            iso = cap['metadata']['android.sensor.sensitivity']
+            isp_gain = cap['metadata']['android.control.postRawSensitivityBoost']
+            exp_time = cap['metadata']['android.sensor.exposureTime']
+            total_gain = iso*isp_gain/100.0*exp_time/1000000.0
+            awb_state = cap['metadata']['android.control.awbState']
+            awb_gains = cap['metadata']['android.colorCorrection.gains']
+            ccm = cap['metadata']['android.colorCorrection.transform']
+            focus_distance = cap['metadata']['android.lens.focusDistance']
+
+            # Convert CCM from rational to float, as numpy arrays.
+            awb_ccm = np.array(its.objects.rational_to_float(ccm)).reshape(3, 3)
+
+            print 'AE: %d ISO: %d ISP_sen: %d exp(ms): %d tot_gain: %f' % (
+                    ae_state, iso, isp_gain, exp_time, total_gain),
+            print 'luma: %f' % luma
+            print 'fd: %f' % focus_distance
+            print 'AWB: %d, AWB gains: %s\n AWB matrix: %s' % (
+                    awb_state, str(awb_gains), str(awb_ccm))
+            print 'Tonemap curve:', cap['metadata']['android.tonemap.curve']
+
+            lumas.append(luma)
+            total_gains.append(total_gain)
+            ae_states.append(ae_state)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, '%s_frame_%d.jpg'% (NAME, i))
+
+        norm_gains = [x / max(total_gains) * max(lumas) for x in total_gains]
+        pylab.plot(range(len(lumas)), lumas, '-g.',
+                   label='Center patch brightness')
+        pylab.plot(range(len(norm_gains)), norm_gains, '-r.',
+                   label='Metadata AE setting product')
+        pylab.title(NAME)
+        pylab.xlabel('frame index')
+        pylab.legend()
+        matplotlib.pyplot.savefig('%s_plot.png' % (NAME))
+
+        for i in range(1, len(caps)):
+            if is_awb_af_stable(caps[i-1], caps[i]):
+                prev_total_gain = total_gains[i-1]
+                total_gain = total_gains[i]
+                delta_gain = total_gain - prev_total_gain
+                prev_luma = lumas[i-1]
+                luma = lumas[i]
+                delta_luma = luma - prev_luma
+                # luma and total_gain should change in same direction
+                msg = 'Frame %d to frame %d:' % (i-1, i)
+                msg += ' metadata gain %f->%f (%s), luma %f->%f (%s)' % (
+                        prev_total_gain, total_gain,
+                        'increasing' if delta_gain > 0.0 else 'decreasing',
+                        prev_luma, luma,
+                        'increasing' if delta_luma > 0.0 else 'decreasing')
+                assert delta_gain * delta_luma >= 0.0, msg
+            else:
+                print 'Frame %d->%d AWB/AF changed' % (i-1, i)
+
+        for i in range(len(lumas)):
+            luma = lumas[i]
+            ae_state = ae_states[i]
+            if (ae_state == AE_STATE_CONVERGED or
+                        ae_state == CONTROL_AE_STATE_FLASH_REQUIRED):
+                msg = 'Frame %d AE converged luma %f. valid range: (%f, %f)' % (
+                        i, luma, VALID_STABLE_LUMA_MIN, VALID_STABLE_LUMA_MAX)
+                assert VALID_STABLE_LUMA_MIN < luma < VALID_STABLE_LUMA_MAX, msg
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene2/test_effects.py b/apps/CameraITS/tests/scene2/test_effects.py
new file mode 100644
index 0000000..e3ff30f
--- /dev/null
+++ b/apps/CameraITS/tests/scene2/test_effects.py
@@ -0,0 +1,105 @@
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import its.caps
+import its.device
+import its.image
+import its.objects
+import numpy as np
+
+# android.control.availableEffects
+EFFECTS = {0: 'OFF',
+           1: 'MONO',
+           2: 'NEGATIVE',
+           3: 'SOLARIZE',
+           4: 'SEPIA',
+           5: 'POSTERIZE',
+           6: 'WHITEBOARD',
+           7: 'BLACKBOARD',
+           8: 'AQUA'}
+MONO_UV_SPREAD_MAX = 2  # max spread for U & V channels [0:255] for mono image
+NAME = os.path.basename(__file__).split('.')[0]
+W, H = 640, 480
+YUV_MAX = 255.0  # normalization number for YUV images [0:1] --> [0:255]
+YUV_UV_SPREAD_MIN = 10  # min spread for U & V channels [0:255] for color image
+YUV_Y_SPREAD_MIN = 50  # min spread for Y channel [0:255] for color image
+
+
+def main():
+    """Test effects.
+
+    Test: capture frame for supported camera effects and check if generated
+    correctly. Note we only check effects OFF and MONO currently, but save
+    images for all supported effects.
+    """
+
+    print '\nStarting %s' % NAME
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        mono_camera = its.caps.mono_camera(props)
+        effects = props['android.control.availableEffects']
+        its.caps.skip_unless(effects != [0])
+        cam.do_3a(mono_camera=mono_camera)
+        print 'Supported effects:', effects
+        failed = []
+        for effect in effects:
+            req = its.objects.auto_capture_request()
+            req['android.control.effectMode'] = effect
+            fmt = {'format': 'yuv', 'width': W, 'height': H}
+            cap = cam.do_capture(req, fmt)
+
+            # Save image
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, '%s_%s.jpg' % (NAME, EFFECTS[effect]))
+
+            # Simple checks
+            if effect is 0:
+                print 'Checking effects OFF...'
+                y, u, v = its.image.convert_capture_to_planes(cap, props)
+                y_min, y_max = np.amin(y)*YUV_MAX, np.amax(y)*YUV_MAX
+                msg = 'Y_range:%.f,%.f THRESH:%d, ' % (
+                        y_min, y_max, YUV_Y_SPREAD_MIN)
+                if (y_max-y_min) < YUV_Y_SPREAD_MIN:
+                    failed.append({'effect': EFFECTS[effect], 'error': msg})
+                if not mono_camera:
+                    u_min, u_max = np.amin(u)*YUV_MAX, np.amax(u)*YUV_MAX
+                    v_min, v_max = np.amin(v)*YUV_MAX, np.amax(v)*YUV_MAX
+                    msg += 'U_range:%.f,%.f THRESH:%d, ' % (
+                            u_min, u_max, YUV_UV_SPREAD_MIN)
+                    msg += 'V_range:%.f,%.f THRESH:%d' % (
+                            v_min, v_max, YUV_UV_SPREAD_MIN)
+                    if ((u_max-u_min) < YUV_UV_SPREAD_MIN or
+                                (v_max-v_min) < YUV_UV_SPREAD_MIN):
+                        failed.append({'effect': EFFECTS[effect], 'error': msg})
+            if effect is 1:
+                print 'Checking MONO effect...'
+                _, u, v = its.image.convert_capture_to_planes(cap, props)
+                u_min, u_max = np.amin(u)*YUV_MAX, np.amax(u)*YUV_MAX
+                v_min, v_max = np.amin(v)*YUV_MAX, np.amax(v)*YUV_MAX
+                msg = 'U_range:%.f,%.f, ' % (u_min, u_max)
+                msg += 'V_range:%.f,%.f, TOL:%d' % (
+                        v_min, v_max, MONO_UV_SPREAD_MAX)
+                if ((u_max-u_min) > MONO_UV_SPREAD_MAX or
+                            (v_max-v_min) > MONO_UV_SPREAD_MAX):
+                    failed.append({'effect': EFFECTS[effect], 'error': msg})
+        if failed:
+            print 'Failed effects:'
+            for fail in failed:
+                print ' %s: %s' % (fail['effect'], fail['error'])
+        assert not failed
+
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene2/test_faces.py b/apps/CameraITS/tests/scene2/test_faces.py
index 16ec780..15dd0c7 100644
--- a/apps/CameraITS/tests/scene2/test_faces.py
+++ b/apps/CameraITS/tests/scene2/test_faces.py
@@ -12,24 +12,27 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
 import its.caps
 import its.device
+import its.image
 import its.objects
-import os.path
+
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_TEST_FRAMES = 20
+FD_MODE_OFF = 0
+FD_MODE_SIMPLE = 1
+FD_MODE_FULL = 2
+W, H = 640, 480
+
 
 def main():
     """Test face detection.
     """
-    NAME = os.path.basename(__file__).split(".")[0]
-    NUM_TEST_FRAMES = 20
-    FD_MODE_OFF = 0
-    FD_MODE_SIMPLE = 1
-    FD_MODE_FULL = 2
-    W, H = 640, 480
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.face_detect(props))
         mono_camera = its.caps.mono_camera(props)
         fd_modes = props['android.statistics.info.availableFaceDetectModes']
@@ -45,75 +48,95 @@
             else:
                 print 'fd = %.2fcm' % (1.0E2/focus)
         for fd_mode in fd_modes:
-            assert(FD_MODE_OFF <= fd_mode <= FD_MODE_FULL)
+            assert FD_MODE_OFF <= fd_mode <= FD_MODE_FULL
             req = its.objects.auto_capture_request()
             req['android.statistics.faceDetectMode'] = fd_mode
-            fmt = {"format":"yuv", "width":W, "height":H}
+            fmt = {'format': 'yuv', 'width': W, 'height': H}
             caps = cam.do_capture([req]*NUM_TEST_FRAMES, fmt)
-            for i,cap in enumerate(caps):
+            for i, cap in enumerate(caps):
                 md = cap['metadata']
-                assert(md['android.statistics.faceDetectMode'] == fd_mode)
+                assert md['android.statistics.faceDetectMode'] == fd_mode
                 faces = md['android.statistics.faces']
 
                 # 0 faces should be returned for OFF mode
                 if fd_mode == FD_MODE_OFF:
-                    assert(len(faces) == 0)
+                    assert not faces
                     continue
                 # Face detection could take several frames to warm up,
                 # but it should detect at least one face in last frame
                 if i == NUM_TEST_FRAMES - 1:
-                    img = its.image.convert_capture_to_rgb_image(cap, props=props)
+                    img = its.image.convert_capture_to_rgb_image(
+                            cap, props=props)
                     img = its.image.rotate_img_per_argv(img)
-                    img_name = "%s_fd_mode_%s.jpg" % (NAME, fd_mode)
+                    img_name = '%s_fd_mode_%s.jpg' % (NAME, fd_mode)
                     its.image.write_image(img, img_name)
-                    if len(faces) == 0:
-                        print "Error: no face detected in mode", fd_mode
-                        assert(0)
-                if len(faces) == 0:
+                    if not faces:
+                        print 'Error: no face detected in mode', fd_mode
+                        assert 0
+                if not faces:
                     continue
 
-                print "Frame %d face metadata:" % i
-                print "  Faces:", faces
-                print ""
+                print 'Frame %d face metadata:' % i
+                print '  Faces:', faces
+                print ''
 
                 face_scores = [face['score'] for face in faces]
                 face_rectangles = [face['bounds'] for face in faces]
                 for score in face_scores:
-                    assert(score >= 1 and score <= 100)
+                    assert score >= 1 and score <= 100
                 # Face bounds should be within active array
-                for rect in face_rectangles:
-                    assert(rect['top'] < rect['bottom'])
-                    assert(rect['left'] < rect['right'])
-                    assert(0 <= rect['top'] <= ah)
-                    assert(0 <= rect['bottom'] <= ah)
-                    assert(0 <= rect['left'] <= aw)
-                    assert(0 <= rect['right'] <= aw)
+                for j, rect in enumerate(face_rectangles):
+                    print 'Checking face rectangle %d...' % j
+                    rect_t = rect['top']
+                    rect_b = rect['bottom']
+                    rect_l = rect['left']
+                    rect_r = rect['right']
+                    assert rect_t < rect_b
+                    assert rect_l < rect_r
+                    l_msg = 'l: %d outside of active W: 0,%d' % (rect_l, aw)
+                    r_msg = 'r: %d outside of active W: 0,%d' % (rect_r, aw)
+                    t_msg = 't: %d outside active H: 0,%d' % (rect_t, ah)
+                    b_msg = 'b: %d outside active H: 0,%d' % (rect_b, ah)
+                    # Assert same order as face landmarks below
+                    assert 0 <= rect_l <= aw, l_msg
+                    assert 0 <= rect_r <= aw, r_msg
+                    assert 0 <= rect_t <= ah, t_msg
+                    assert 0 <= rect_b <= ah, b_msg
 
                 # Face landmarks are reported if and only if fd_mode is FULL
                 # Face ID should be -1 for SIMPLE and unique for FULL
                 if fd_mode == FD_MODE_SIMPLE:
                     for face in faces:
-                        assert('leftEye' not in face)
-                        assert('rightEye' not in face)
-                        assert('mouth' not in face)
-                        assert(face['id'] == -1)
+                        assert 'leftEye' not in face
+                        assert 'rightEye' not in face
+                        assert 'mouth' not in face
+                        assert face['id'] == -1
                 elif fd_mode == FD_MODE_FULL:
                     face_ids = [face['id'] for face in faces]
-                    assert(len(face_ids) == len(set(face_ids)))
+                    assert len(face_ids) == len(set(face_ids))
                     # Face landmarks should be within face bounds
-                    for face in faces:
-                        left_eye = face['leftEye']
-                        right_eye = face['rightEye']
+                    for k, face in enumerate(faces):
+                        print 'Checking landmarks in face %d...' % k
+                        l_eye = face['leftEye']
+                        r_eye = face['rightEye']
                         mouth = face['mouth']
                         l, r = face['bounds']['left'], face['bounds']['right']
                         t, b = face['bounds']['top'], face['bounds']['bottom']
-                        assert(l <= left_eye['x'] <= r)
-                        assert(t <= left_eye['y'] <= b)
-                        assert(l <= right_eye['x'] <= r)
-                        assert(t <= right_eye['y'] <= b)
-                        assert(l <= mouth['x'] <= r)
-                        assert(t <= mouth['y'] <= b)
+                        l_eye_x, l_eye_y = l_eye['x'], l_eye['y']
+                        r_eye_x, r_eye_y = r_eye['x'], r_eye['y']
+                        mouth_x, mouth_y = mouth['x'], mouth['y']
+                        lx_msg = 'l: %d, r: %d, x: %d' % (l, r, l_eye_x)
+                        ly_msg = 't: %d, b: %d, y: %d' % (t, b, l_eye_y)
+                        rx_msg = 'l: %d, r: %d, x: %d' % (l, r, r_eye_x)
+                        ry_msg = 't: %d, b: %d, y: %d' % (t, b, r_eye_y)
+                        mx_msg = 'l: %d, r: %d, x: %d' % (l, r, mouth_x)
+                        my_msg = 't: %d, b: %d, y: %d' % (t, b, mouth_y)
+                        assert l <= l_eye_x <= r, lx_msg
+                        assert t <= l_eye_y <= b, ly_msg
+                        assert l <= r_eye_x <= r, rx_msg
+                        assert t <= r_eye_y <= b, ry_msg
+                        assert l <= mouth_x <= r, mx_msg
+                        assert t <= mouth_y <= b, my_msg
 
 if __name__ == '__main__':
     main()
-
diff --git a/apps/CameraITS/tests/scene2/test_format_combos.py b/apps/CameraITS/tests/scene2/test_format_combos.py
new file mode 100644
index 0000000..ff08d55
--- /dev/null
+++ b/apps/CameraITS/tests/scene2/test_format_combos.py
@@ -0,0 +1,135 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.error
+import its.target
+import sys
+import os
+
+NAME = os.path.basename(__file__).split(".")[0]
+STOP_AT_FIRST_FAILURE = False  # change to True to have test break @ 1st FAIL
+
+
+def main():
+    """Test different combinations of output formats.
+    
+    Note the test does not require a specific target but does perform
+    both automatic and manual captures so it requires a fixed scene
+    where 3A can converge.
+    """
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.raw16(props))
+
+        successes = []
+        failures = []
+        debug = its.caps.debug_mode()
+
+        # Two different requests: auto, and manual.
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req_aut = its.objects.auto_capture_request()
+        req_man = its.objects.manual_capture_request(s, e)
+        reqs = [req_aut,  # R0
+                req_man]  # R1
+
+        # 10 different combos of output formats; some are single surfaces, and
+        # some are multiple surfaces.
+        wyuv, hyuv = its.objects.get_available_output_sizes("yuv", props)[-1]
+        wjpg, hjpg = its.objects.get_available_output_sizes("jpg", props)[-1]
+        fmt_yuv_prev = {"format": "yuv", "width": wyuv, "height": hyuv}
+        fmt_yuv_full = {"format": "yuv"}
+        fmt_jpg_prev = {"format": "jpeg", "width": wjpg, "height": hjpg}
+        fmt_jpg_full = {"format": "jpeg"}
+        fmt_raw_full = {"format": "raw"}
+        fmt_combos = [
+            [fmt_yuv_prev],                              # F0
+            [fmt_yuv_full],                              # F1
+            [fmt_jpg_prev],                              # F2
+            [fmt_jpg_full],                              # F3
+            [fmt_raw_full],                              # F4
+            [fmt_yuv_prev, fmt_jpg_prev],                # F5
+            [fmt_yuv_prev, fmt_jpg_full],                # F6
+            [fmt_yuv_prev, fmt_raw_full],                # F7
+            [fmt_yuv_prev, fmt_jpg_prev, fmt_raw_full],  # F8
+            [fmt_yuv_prev, fmt_jpg_full, fmt_raw_full]]  # F9
+
+        if its.caps.y8(props):
+            wy8, hy8 = its.objects.get_available_output_sizes("y8", props)[-1]
+            fmt_y8_prev = {"format": "y8", "width": wy8, "height": hy8}
+            fmt_y8_full = {"format": "y8"}
+            fmt_combos.append([fmt_y8_prev])
+            fmt_combos.append([fmt_y8_full])
+
+        # Two different burst lengths: single frame, and 3 frames.
+        burst_lens = [1,  # B0
+                      3]  # B1
+
+        # There are 2xlen(fmt_combos)x2 different combinations. Run through them all.
+        n = 0
+        for r,req in enumerate(reqs):
+            for f,fmt_combo in enumerate(fmt_combos):
+                for b,burst_len in enumerate(burst_lens):
+                    try:
+                        caps = cam.do_capture([req]*burst_len, fmt_combo)
+                        successes.append((n,r,f,b))
+                        print "==> Success[%02d]: R%d F%d B%d" % (n,r,f,b)
+
+                        # Dump the captures out to jpegs in debug mode.
+                        if debug:
+                            if not isinstance(caps, list):
+                                caps = [caps]
+                            elif isinstance(caps[0], list):
+                                caps = sum(caps, [])
+                            for c, cap in enumerate(caps):
+                                img = its.image.convert_capture_to_rgb_image(cap, props=props)
+                                its.image.write_image(img,
+                                    "%s_n%02d_r%d_f%d_b%d_c%d.jpg"%(NAME,n,r,f,b,c))
+
+                    except Exception as e:
+                        print e
+                        print "==> Failure[%02d]: R%d F%d B%d" % (n,r,f,b)
+                        failures.append((n,r,f,b))
+                        if STOP_AT_FIRST_FAILURE:
+                            sys.exit(1)
+                    n += 1
+
+        num_fail = len(failures)
+        num_success = len(successes)
+        num_total = len(reqs)*len(fmt_combos)*len(burst_lens)
+        num_not_run = num_total - num_success - num_fail
+
+        print "\nFailures (%d / %d):" % (num_fail, num_total)
+        for (n,r,f,b) in failures:
+            print "  %02d: R%d F%d B%d" % (n,r,f,b)
+        print "\nSuccesses (%d / %d):" % (num_success, num_total)
+        for (n,r,f,b) in successes:
+            print "  %02d: R%d F%d B%d" % (n,r,f,b)
+        if num_not_run > 0:
+            print "\nNumber of tests not run: %d / %d" % (num_not_run, num_total)
+        print ""
+
+        # The test passes if all the combinations successfully capture.
+        assert num_fail == 0
+        assert num_success == num_total
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene2/test_num_faces.py b/apps/CameraITS/tests/scene2/test_num_faces.py
index 044c154..274aef3 100644
--- a/apps/CameraITS/tests/scene2/test_num_faces.py
+++ b/apps/CameraITS/tests/scene2/test_num_faces.py
@@ -32,6 +32,7 @@
     """Test face detection."""
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.face_detect(props))
         mono_camera = its.caps.mono_camera(props)
         fd_modes = props['android.statistics.info.availableFaceDetectModes']
diff --git a/apps/CameraITS/tests/scene2b/scene2b.pdf b/apps/CameraITS/tests/scene2b/scene2b.pdf
new file mode 100644
index 0000000..9e9f960
--- /dev/null
+++ b/apps/CameraITS/tests/scene2b/scene2b.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2b/scene2b_0.5_scaled.pdf b/apps/CameraITS/tests/scene2b/scene2b_0.5_scaled.pdf
new file mode 100644
index 0000000..3a5bd85
--- /dev/null
+++ b/apps/CameraITS/tests/scene2b/scene2b_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2b/scene2b_0.67_scaled.pdf b/apps/CameraITS/tests/scene2b/scene2b_0.67_scaled.pdf
new file mode 100644
index 0000000..706140a
--- /dev/null
+++ b/apps/CameraITS/tests/scene2b/scene2b_0.67_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2b/test_num_faces.py b/apps/CameraITS/tests/scene2b/test_num_faces.py
new file mode 100644
index 0000000..044c154
--- /dev/null
+++ b/apps/CameraITS/tests/scene2b/test_num_faces.py
@@ -0,0 +1,100 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import cv2
+import its.caps
+import its.device
+import its.image
+import its.objects
+
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_TEST_FRAMES = 20
+NUM_FACES = 3
+FD_MODE_OFF = 0
+FD_MODE_SIMPLE = 1
+FD_MODE_FULL = 2
+W, H = 640, 480
+
+
+def main():
+    """Test face detection."""
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.face_detect(props))
+        mono_camera = its.caps.mono_camera(props)
+        fd_modes = props['android.statistics.info.availableFaceDetectModes']
+        a = props['android.sensor.info.activeArraySize']
+        aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
+
+        if its.caps.read_3a(props):
+            _, _, _, _, _ = cam.do_3a(get_results=True,
+                                      mono_camera=mono_camera)
+
+        for fd_mode in fd_modes:
+            assert FD_MODE_OFF <= fd_mode <= FD_MODE_FULL
+            req = its.objects.auto_capture_request()
+            req['android.statistics.faceDetectMode'] = fd_mode
+            fmt = {'format': 'yuv', 'width': W, 'height': H}
+            caps = cam.do_capture([req]*NUM_TEST_FRAMES, fmt)
+            for i, cap in enumerate(caps):
+                md = cap['metadata']
+                assert md['android.statistics.faceDetectMode'] == fd_mode
+                faces = md['android.statistics.faces']
+
+                # 0 faces should be returned for OFF mode
+                if fd_mode == FD_MODE_OFF:
+                    assert not faces
+                    continue
+                # Face detection could take several frames to warm up,
+                # but should detect the correct number of faces in last frame
+                if i == NUM_TEST_FRAMES - 1:
+                    img = its.image.convert_capture_to_rgb_image(cap,
+                                                                 props=props)
+                    fnd_faces = len(faces)
+                    print 'Found %d face(s), expected %d.' % (fnd_faces,
+                                                              NUM_FACES)
+                    # draw boxes around faces
+                    for rect in [face['bounds'] for face in faces]:
+                        top_left = (int(round(rect['left']*W/aw)),
+                                    int(round(rect['top']*H/ah)))
+                        bot_rght = (int(round(rect['right']*W/aw)),
+                                    int(round(rect['bottom']*H/ah)))
+                        cv2.rectangle(img, top_left, bot_rght, (0, 1, 0), 2)
+                        img_name = '%s_fd_mode_%s.jpg' % (NAME, fd_mode)
+                        its.image.write_image(img, img_name)
+                    assert fnd_faces == NUM_FACES
+                if not faces:
+                    continue
+
+                print 'Frame %d face metadata:' % i
+                print '  Faces:', faces
+                print ''
+
+                # Reasonable scores for faces
+                face_scores = [face['score'] for face in faces]
+                for score in face_scores:
+                    assert score >= 1 and score <= 100
+                # Face bounds should be within active array
+                face_rectangles = [face['bounds'] for face in faces]
+                for rect in face_rectangles:
+                    assert rect['top'] < rect['bottom']
+                    assert rect['left'] < rect['right']
+                    assert 0 <= rect['top'] <= ah
+                    assert 0 <= rect['bottom'] <= ah
+                    assert 0 <= rect['left'] <= aw
+                    assert 0 <= rect['right'] <= aw
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene2c/scene2c.pdf b/apps/CameraITS/tests/scene2c/scene2c.pdf
new file mode 100644
index 0000000..d11a02d
--- /dev/null
+++ b/apps/CameraITS/tests/scene2c/scene2c.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2c/scene2c_0.5_scaled.pdf b/apps/CameraITS/tests/scene2c/scene2c_0.5_scaled.pdf
new file mode 100644
index 0000000..9ac02a1
--- /dev/null
+++ b/apps/CameraITS/tests/scene2c/scene2c_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2c/scene2c_0.67_scaled.pdf b/apps/CameraITS/tests/scene2c/scene2c_0.67_scaled.pdf
new file mode 100644
index 0000000..4a8bb09
--- /dev/null
+++ b/apps/CameraITS/tests/scene2c/scene2c_0.67_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene2c/test_num_faces.py b/apps/CameraITS/tests/scene2c/test_num_faces.py
new file mode 100644
index 0000000..044c154
--- /dev/null
+++ b/apps/CameraITS/tests/scene2c/test_num_faces.py
@@ -0,0 +1,100 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os.path
+import cv2
+import its.caps
+import its.device
+import its.image
+import its.objects
+
+NAME = os.path.basename(__file__).split('.')[0]
+NUM_TEST_FRAMES = 20
+NUM_FACES = 3
+FD_MODE_OFF = 0
+FD_MODE_SIMPLE = 1
+FD_MODE_FULL = 2
+W, H = 640, 480
+
+
+def main():
+    """Test face detection."""
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.face_detect(props))
+        mono_camera = its.caps.mono_camera(props)
+        fd_modes = props['android.statistics.info.availableFaceDetectModes']
+        a = props['android.sensor.info.activeArraySize']
+        aw, ah = a['right'] - a['left'], a['bottom'] - a['top']
+
+        if its.caps.read_3a(props):
+            _, _, _, _, _ = cam.do_3a(get_results=True,
+                                      mono_camera=mono_camera)
+
+        for fd_mode in fd_modes:
+            assert FD_MODE_OFF <= fd_mode <= FD_MODE_FULL
+            req = its.objects.auto_capture_request()
+            req['android.statistics.faceDetectMode'] = fd_mode
+            fmt = {'format': 'yuv', 'width': W, 'height': H}
+            caps = cam.do_capture([req]*NUM_TEST_FRAMES, fmt)
+            for i, cap in enumerate(caps):
+                md = cap['metadata']
+                assert md['android.statistics.faceDetectMode'] == fd_mode
+                faces = md['android.statistics.faces']
+
+                # 0 faces should be returned for OFF mode
+                if fd_mode == FD_MODE_OFF:
+                    assert not faces
+                    continue
+                # Face detection could take several frames to warm up,
+                # but should detect the correct number of faces in last frame
+                if i == NUM_TEST_FRAMES - 1:
+                    img = its.image.convert_capture_to_rgb_image(cap,
+                                                                 props=props)
+                    fnd_faces = len(faces)
+                    print 'Found %d face(s), expected %d.' % (fnd_faces,
+                                                              NUM_FACES)
+                    # draw boxes around faces
+                    for rect in [face['bounds'] for face in faces]:
+                        top_left = (int(round(rect['left']*W/aw)),
+                                    int(round(rect['top']*H/ah)))
+                        bot_rght = (int(round(rect['right']*W/aw)),
+                                    int(round(rect['bottom']*H/ah)))
+                        cv2.rectangle(img, top_left, bot_rght, (0, 1, 0), 2)
+                        img_name = '%s_fd_mode_%s.jpg' % (NAME, fd_mode)
+                        its.image.write_image(img, img_name)
+                    assert fnd_faces == NUM_FACES
+                if not faces:
+                    continue
+
+                print 'Frame %d face metadata:' % i
+                print '  Faces:', faces
+                print ''
+
+                # Reasonable scores for faces
+                face_scores = [face['score'] for face in faces]
+                for score in face_scores:
+                    assert score >= 1 and score <= 100
+                # Face bounds should be within active array
+                face_rectangles = [face['bounds'] for face in faces]
+                for rect in face_rectangles:
+                    assert rect['top'] < rect['bottom']
+                    assert rect['left'] < rect['right']
+                    assert 0 <= rect['top'] <= ah
+                    assert 0 <= rect['bottom'] <= ah
+                    assert 0 <= rect['left'] <= aw
+                    assert 0 <= rect['right'] <= aw
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene3/scene3_0.5_scaled.pdf b/apps/CameraITS/tests/scene3/scene3_0.5_scaled.pdf
new file mode 100644
index 0000000..805611d
--- /dev/null
+++ b/apps/CameraITS/tests/scene3/scene3_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene3/test_edge_enhancement.py b/apps/CameraITS/tests/scene3/test_edge_enhancement.py
index 76093ef..5f6efc8 100644
--- a/apps/CameraITS/tests/scene3/test_edge_enhancement.py
+++ b/apps/CameraITS/tests/scene3/test_edge_enhancement.py
@@ -12,22 +12,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
+import its.cv2image
 import its.device
+import its.image
 import its.objects
 import its.target
-import math
-import matplotlib
-import matplotlib.pyplot
+
 import numpy
-import os.path
-from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split(".")[0]
+NUM_SAMPLES = 4
+THRESH_REL_SHARPNESS_DIFF = 0.1
 
 
-def test_edge_mode(cam, edge_mode, sensitivity, exp, fd, out_surface):
-    """Return sharpness of the output image and the capture result metadata
-       for a capture request with the given edge mode, sensitivity, exposure
+def test_edge_mode(cam, edge_mode, sensitivity, exp, fd, out_surface, chart):
+    """Return sharpness of the output image and the capture result metadata.
+
+       Processes a capture request with a given edge mode, sensitivity, exposure
        time, focus distance, output surface parameter.
 
     Args:
@@ -39,7 +43,8 @@
             android.sensor.exposureTime.
         fd: Focus distance for the request as defined in
             android.lens.focusDistance
-        output_surface: Specifications of the output image format and size.
+        out_surface: Specifications of the output image format and size.
+        chart: object that contains chart information
 
     Returns:
         Object containing reported edge mode and the sharpness of the output
@@ -48,23 +53,21 @@
             "sharpness"
     """
 
-    NAME = os.path.basename(__file__).split(".")[0]
-    NUM_SAMPLES = 4
-
     req = its.objects.manual_capture_request(sensitivity, exp)
     req["android.lens.focusDistance"] = fd
     req["android.edge.mode"] = edge_mode
 
     sharpness_list = []
-    test_fmt = out_surface["format"]
     for n in range(NUM_SAMPLES):
         cap = cam.do_capture(req, out_surface, repeat_request=req)
-        img = its.image.convert_capture_to_rgb_image(cap)
+        y, _, _ = its.image.convert_capture_to_planes(cap)
+        chart.img = its.image.normalize_img(its.image.get_image_patch(
+                y, chart.xnorm, chart.ynorm, chart.wnorm, chart.hnorm))
         if n == 0:
-            its.image.write_image(img, "%s_edge=%d.jpg" % (NAME, edge_mode))
+            its.image.write_image(
+                    chart.img, "%s_edge=%d.jpg" % (NAME, edge_mode))
             res_edge_mode = cap["metadata"]["android.edge.mode"]
-        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
-        sharpness_list.append(its.image.compute_image_sharpness(tile))
+        sharpness_list.append(its.image.compute_image_sharpness(chart.img))
 
     ret = {}
     ret["edge_mode"] = res_edge_mode
@@ -72,6 +75,7 @@
 
     return ret
 
+
 def main():
     """Test that the android.edge.mode param is applied correctly.
 
@@ -79,8 +83,6 @@
     sharpness as a baseline.
     """
 
-    THRESHOLD_RELATIVE_SHARPNESS_DIFF = 0.1
-
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
 
@@ -88,13 +90,17 @@
                              its.caps.per_frame_control(props) and
                              its.caps.edge_mode(props, 0))
 
+    # initialize chart class and locate chart in scene
+    chart = its.cv2image.Chart()
+
+    with its.device.ItsSession() as cam:
         mono_camera = its.caps.mono_camera(props)
         test_fmt = "yuv"
         size = its.objects.get_available_output_sizes(test_fmt, props)[0]
-        out_surface = {"width":size[0], "height":size[1], "format":test_fmt}
+        out_surface = {"width": size[0], "height": size[1], "format": test_fmt}
 
         # Get proper sensitivity, exposure time, and focus distance.
-        s,e,_,_,fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
+        s, e, _, _, fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
 
         # Get the sharpness for each edge mode for regular requests
         sharpness_regular = []
@@ -105,24 +111,29 @@
                 edge_mode_reported_regular.append(edge_mode)
                 sharpness_regular.append(0)
                 continue
-            ret = test_edge_mode(cam, edge_mode, s, e, fd, out_surface)
+            ret = test_edge_mode(cam, edge_mode, s, e, fd, out_surface, chart)
             edge_mode_reported_regular.append(ret["edge_mode"])
             sharpness_regular.append(ret["sharpness"])
 
         print "Reported edge modes:", edge_mode_reported_regular
         print "Sharpness with EE mode [0,1,2,3]:", sharpness_regular
 
-        # Verify HQ(2) is sharper than OFF(0)
-        assert(sharpness_regular[2] > sharpness_regular[0])
+        print "Verify HQ(2) is sharper than OFF(0)"
+        assert sharpness_regular[2] > sharpness_regular[0]
 
-        # Verify OFF(0) is not sharper than FAST(1)
-        assert(sharpness_regular[1] >
-               sharpness_regular[0] * (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
+        print "Verify OFF(0) is not sharper than FAST(1)"
+        msg = "FAST: %.3f, OFF: %.3f, TOL: %.2f" % (
+                sharpness_regular[1], sharpness_regular[0],
+                THRESH_REL_SHARPNESS_DIFF)
+        assert (sharpness_regular[1] >
+                sharpness_regular[0] * (1.0 - THRESH_REL_SHARPNESS_DIFF)), msg
 
         # Verify FAST(1) is not sharper than HQ(2)
-        assert(sharpness_regular[2] >
-               sharpness_regular[1] * (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
+        msg = "HQ: %.3f, FAST: %.3f, TOL: %.2f" % (
+                sharpness_regular[2], sharpness_regular[1],
+                THRESH_REL_SHARPNESS_DIFF)
+        assert (sharpness_regular[2] >
+                sharpness_regular[1] * (1.0 - THRESH_REL_SHARPNESS_DIFF)), msg
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
-
diff --git a/apps/CameraITS/tests/scene3/test_flip_mirror.py b/apps/CameraITS/tests/scene3/test_flip_mirror.py
index 9742e0b..b4677c7 100644
--- a/apps/CameraITS/tests/scene3/test_flip_mirror.py
+++ b/apps/CameraITS/tests/scene3/test_flip_mirror.py
@@ -25,11 +25,6 @@
 NAME = os.path.basename(__file__).split('.')[0]
 CHART_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
                           'test_images', 'ISO12233.png')
-CHART_HEIGHT = 13.5  # cm
-CHART_DISTANCE = 30.0  # cm
-CHART_SCALE_START = 0.65
-CHART_SCALE_STOP = 1.35
-CHART_SCALE_STEP = 0.025
 CHART_ORIENTATIONS = ['nominal', 'flip', 'mirror', 'rotate']
 VGA_WIDTH = 640
 VGA_HEIGHT = 480
@@ -125,9 +120,7 @@
         props = cam.get_camera_properties()
         its.caps.skip_unless(its.caps.read_3a(props))
     # initialize chart class and locate chart in scene
-    chart = its.cv2image.Chart(CHART_FILE, CHART_HEIGHT, CHART_DISTANCE,
-                               CHART_SCALE_START, CHART_SCALE_STOP,
-                               CHART_SCALE_STEP)
+    chart = its.cv2image.Chart()
 
     with its.device.ItsSession() as cam:
         fmt = {'format': 'yuv', 'width': VGA_WIDTH, 'height': VGA_HEIGHT}
diff --git a/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py b/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
index 6fea633..0862b3b 100644
--- a/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
+++ b/apps/CameraITS/tests/scene3/test_lens_movement_reporting.py
@@ -28,13 +28,6 @@
 VGA_WIDTH = 640
 VGA_HEIGHT = 480
 NAME = os.path.basename(__file__).split('.')[0]
-CHART_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
-                          'test_images', 'ISO12233.png')
-CHART_HEIGHT = 13.5  # cm
-CHART_DISTANCE = 30.0  # cm
-CHART_SCALE_START = 0.65
-CHART_SCALE_STOP = 1.35
-CHART_SCALE_STEP = 0.025
 
 
 def test_lens_movement_reporting(cam, props, fmt, gain, exp, af_fd, chart):
@@ -109,9 +102,7 @@
         its.caps.skip_unless(its.caps.read_3a(props) and
                              its.caps.lens_approx_calibrated(props))
     # initialize chart class
-    chart = its.cv2image.Chart(CHART_FILE, CHART_HEIGHT, CHART_DISTANCE,
-                               CHART_SCALE_START, CHART_SCALE_STOP,
-                               CHART_SCALE_STEP)
+    chart = its.cv2image.Chart()
 
     with its.device.ItsSession() as cam:
         mono_camera = its.caps.mono_camera(props)
diff --git a/apps/CameraITS/tests/scene3/test_lens_position.py b/apps/CameraITS/tests/scene3/test_lens_position.py
index 3978081..101b44c 100644
--- a/apps/CameraITS/tests/scene3/test_lens_position.py
+++ b/apps/CameraITS/tests/scene3/test_lens_position.py
@@ -29,13 +29,6 @@
 VGA_WIDTH = 640
 VGA_HEIGHT = 480
 NAME = os.path.basename(__file__).split('.')[0]
-CHART_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
-                          'test_images', 'ISO12233.png')
-CHART_HEIGHT = 13.5  # cm
-CHART_DISTANCE = 30.0  # cm
-CHART_SCALE_START = 0.65
-CHART_SCALE_STOP = 1.35
-CHART_SCALE_STEP = 0.025
 
 
 def test_lens_position(cam, props, fmt, sensitivity, exp, chart):
@@ -126,9 +119,7 @@
         its.caps.skip_unless(its.caps.read_3a(props) and
                              its.caps.lens_calibrated(props))
     # initialize chart class
-    chart = its.cv2image.Chart(CHART_FILE, CHART_HEIGHT, CHART_DISTANCE,
-                               CHART_SCALE_START, CHART_SCALE_STOP,
-                               CHART_SCALE_STEP)
+    chart = its.cv2image.Chart()
 
     with its.device.ItsSession() as cam:
         mono_camera = its.caps.mono_camera(props)
diff --git a/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py b/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
index 049426a..722af18 100644
--- a/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
+++ b/apps/CameraITS/tests/scene3/test_reprocess_edge_enhancement.py
@@ -12,23 +12,47 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.image
+import os.path
+
 import its.caps
+import its.cv2image
 import its.device
+import its.image
 import its.objects
 import its.target
-import math
+
 import matplotlib
-import matplotlib.pyplot
-import numpy
-import os.path
 from matplotlib import pylab
+import numpy
+
+NAME = os.path.basename(__file__).split(".")[0]
+NUM_SAMPLES = 4
+THRESH_REL_SHARPNESS_DIFF = 0.15
 
 
-def test_edge_mode(cam, edge_mode, sensitivity, exp, fd, out_surface,
+def check_edge_modes(sharpness):
+    """Check that the sharpness for the different edge modes is correct."""
+    print " Verify HQ(2) is sharper than OFF(0)"
+    assert sharpness[2] > sharpness[0]
+
+    print " Verify ZSL(3) is similar to OFF(0)"
+    e_msg = "ZSL: %.5f, OFF: %.5f, RTOL: %.2f" % (
+            sharpness[3], sharpness[0], THRESH_REL_SHARPNESS_DIFF)
+    assert numpy.isclose(sharpness[3], sharpness[0],
+                         THRESH_REL_SHARPNESS_DIFF), e_msg
+
+    print " Verify OFF(0) is not sharper than FAST(1)"
+    assert sharpness[1] > sharpness[0] * (1.0 - THRESH_REL_SHARPNESS_DIFF)
+
+    print " Verify FAST(1) is not sharper than HQ(2)"
+    assert sharpness[2] > sharpness[1] * (1.0 - THRESH_REL_SHARPNESS_DIFF)
+
+
+def test_edge_mode(cam, edge_mode, sensitivity, exp, fd, out_surface, chart,
                    reprocess_format=None):
-    """Return sharpness of the output image and the capture result metadata
-       for a capture request with the given edge mode, sensitivity, exposure
+    """Return sharpness of the output images and the capture result metadata.
+
+       Processes a capture request with a given edge mode, sensitivity, exposure
        time, focus distance, output surface parameter, and reprocess format
        (None for a regular request.)
 
@@ -41,7 +65,8 @@
             android.sensor.exposureTime.
         fd: Focus distance for the request as defined in
             android.lens.focusDistance
-        output_surface: Specifications of the output image format and size.
+        out_surface: Specifications of the output image format and size.
+        chart: object containing chart information
         reprocess_format: (Optional) The reprocessing format. If not None,
                 reprocessing will be enabled.
 
@@ -52,25 +77,23 @@
             "sharpness"
     """
 
-    NAME = os.path.basename(__file__).split(".")[0]
-    NUM_SAMPLES = 4
-
     req = its.objects.manual_capture_request(sensitivity, exp)
     req["android.lens.focusDistance"] = fd
     req["android.edge.mode"] = edge_mode
-    if (reprocess_format != None):
+    if reprocess_format:
         req["android.reprocess.effectiveExposureFactor"] = 1.0
 
     sharpness_list = []
+    caps = cam.do_capture([req]*NUM_SAMPLES, [out_surface], reprocess_format)
     for n in range(NUM_SAMPLES):
-        cap = cam.do_capture(req, out_surface, reprocess_format)
-        img = its.image.convert_capture_to_rgb_image(cap)
+        y, _, _ = its.image.convert_capture_to_planes(caps[n])
+        chart.img = its.image.normalize_img(its.image.get_image_patch(
+                y, chart.xnorm, chart.ynorm, chart.wnorm, chart.hnorm))
         if n == 0:
-            its.image.write_image(img, "%s_reprocess_fmt_%s_edge=%d.jpg" %
-                (NAME, reprocess_format, edge_mode))
-            res_edge_mode = cap["metadata"]["android.edge.mode"]
-        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
-        sharpness_list.append(its.image.compute_image_sharpness(tile))
+            its.image.write_image(chart.img, "%s_reprocess_fmt_%s_edge=%d.jpg" %
+                                  (NAME, reprocess_format, edge_mode))
+            res_edge_mode = caps[n]["metadata"]["android.edge.mode"]
+        sharpness_list.append(its.image.compute_image_sharpness(chart.img))
 
     ret = {}
     ret["edge_mode"] = res_edge_mode
@@ -78,9 +101,9 @@
 
     return ret
 
+
 def main():
-    """Test that the android.edge.mode param is applied when set for
-       reprocessing requests.
+    """Test android.edge.mode param applied when set for reprocessing requests.
 
     Capture non-reprocess images for each edge mode and calculate their
     sharpness as a baseline.
@@ -90,8 +113,6 @@
     the sharpess of non-reprocess images.
     """
 
-    THRESHOLD_RELATIVE_SHARPNESS_DIFF = 0.15
-
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
 
@@ -101,21 +122,29 @@
                              (its.caps.yuv_reprocess(props) or
                               its.caps.private_reprocess(props)))
 
+    # initialize chart class and locate chart in scene
+    chart = its.cv2image.Chart()
+
+    with its.device.ItsSession() as cam:
         mono_camera = its.caps.mono_camera(props)
         # If reprocessing is supported, ZSL EE mode must be avaiable.
-        assert(its.caps.edge_mode(props, 3))
+        assert its.caps.edge_mode(props, 3), "EE mode not available!"
 
         reprocess_formats = []
-        if (its.caps.yuv_reprocess(props)):
+        if its.caps.yuv_reprocess(props):
             reprocess_formats.append("yuv")
-        if (its.caps.private_reprocess(props)):
+        if its.caps.private_reprocess(props):
             reprocess_formats.append("private")
 
         size = its.objects.get_available_output_sizes("jpg", props)[0]
-        out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+        out_surface = {"width": size[0], "height": size[1], "format": "jpg"}
 
         # Get proper sensitivity, exposure time, and focus distance.
-        s,e,_,_,fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
+        s, e, _, _, fd = cam.do_3a(get_results=True, mono_camera=mono_camera)
+
+        # Intialize plot
+        pylab.figure("reprocess_result")
+        gr_color = {"yuv": "r", "private": "g", "none": "b"}
 
         # Get the sharpness for each edge mode for regular requests
         sharpness_regular = []
@@ -126,12 +155,15 @@
                 edge_mode_reported_regular.append(edge_mode)
                 sharpness_regular.append(0)
                 continue
-            ret = test_edge_mode(cam, edge_mode, s, e, fd, out_surface)
+            ret = test_edge_mode(cam, edge_mode, s, e, fd, out_surface, chart)
             edge_mode_reported_regular.append(ret["edge_mode"])
             sharpness_regular.append(ret["sharpness"])
 
-        print "Reported edge modes:", edge_mode_reported_regular
+        pylab.plot(range(4), sharpness_regular, "-"+gr_color["none"]+"o")
+        print "Reported edge modes",
+        print "regular requests:", edge_mode_reported_regular
         print "Sharpness with EE mode [0,1,2,3]:", sharpness_regular
+        print ""
 
         # Get the sharpness for each reprocess format and edge mode for
         # reprocess requests.
@@ -150,60 +182,44 @@
                     continue
 
                 ret = test_edge_mode(cam, edge_mode, s, e, fd, out_surface,
-                    reprocess_format)
+                                     chart, reprocess_format)
                 edge_mode_reported.append(ret["edge_mode"])
                 sharpnesses.append(ret["sharpness"])
 
             sharpnesses_reprocess.append(sharpnesses)
             edge_mode_reported_reprocess.append(edge_mode_reported)
 
-            print "Reported edge modes:", edge_mode_reported
-            print "Sharpness with EE mode [0,1,2,3] for %s reprocess:" % \
-                (reprocess_format) , sharpnesses
+            pylab.plot(range(4), sharpnesses,
+                       "-"+gr_color[reprocess_format]+"o")
+            print "Reported edge modes w/ request fmt %s:" % reprocess_format
+            print "Sharpness with EE mode [0,1,2,3] for %s reprocess:" % (
+                    reprocess_format), sharpnesses
+            print ""
 
-
-        # Verify HQ(2) is sharper than OFF(0)
-        assert(sharpness_regular[2] > sharpness_regular[0])
-
-        # Verify ZSL(3) is similar to OFF(0)
-        assert(numpy.isclose(sharpness_regular[3], sharpness_regular[0],
-                             THRESHOLD_RELATIVE_SHARPNESS_DIFF))
-
-        # Verify OFF(0) is not sharper than FAST(1)
-        assert(sharpness_regular[1] >
-               sharpness_regular[0] * (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
-
-        # Verify FAST(1) is not sharper than HQ(2)
-        assert(sharpness_regular[2] >
-               sharpness_regular[1] * (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
+        # Finalize plot
+        pylab.title("Red-YUV Reprocess  Green-Private Reprocess  Blue-None")
+        pylab.xlabel("Edge Enhance Mode")
+        pylab.ylabel("Sharpness")
+        pylab.xticks(range(4))
+        matplotlib.pyplot.savefig("%s_plot_EE.png" %
+                                  ("test_reprocess_edge_enhancement"))
+        print "regular requests:"
+        check_edge_modes(sharpness_regular)
 
         for reprocess_format in range(len(reprocess_formats)):
-            # Verify HQ(2) is sharper than OFF(0)
-            assert(sharpnesses_reprocess[reprocess_format][2] >
-                   sharpnesses_reprocess[reprocess_format][0])
+            print "\nreprocess format:", reprocess_format
+            check_edge_modes(sharpnesses_reprocess[reprocess_format])
 
-            # Verify ZSL(3) is similar to OFF(0)
-            assert(numpy.isclose(sharpnesses_reprocess[reprocess_format][3],
-                                 sharpnesses_reprocess[reprocess_format][0],
-                                 THRESHOLD_RELATIVE_SHARPNESS_DIFF))
+            hq_div_off_reprocess = (sharpnesses_reprocess[reprocess_format][2] /
+                                    sharpnesses_reprocess[reprocess_format][0])
+            hq_div_off_regular = sharpness_regular[2] / sharpness_regular[0]
+            e_msg = "HQ/OFF_reprocess: %.4f, HQ/OFF_reg: %.4f, RTOL: %.2f" % (
+                    hq_div_off_reprocess, hq_div_off_regular,
+                    THRESH_REL_SHARPNESS_DIFF)
+            print " Verify reprocess HQ(2) ~= reg HQ(2) relative to OFF(0)"
+            assert numpy.isclose(hq_div_off_reprocess, hq_div_off_regular,
+                                 THRESH_REL_SHARPNESS_DIFF), e_msg
 
-            # Verify OFF(0) is not sharper than FAST(1)
-            assert(sharpnesses_reprocess[reprocess_format][1] >
-                   sharpnesses_reprocess[reprocess_format][0] *
-                   (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
-
-            # Verify FAST(1) is not sharper than HQ(2)
-            assert(sharpnesses_reprocess[reprocess_format][2] >
-                   sharpnesses_reprocess[reprocess_format][1] *
-                   (1.0 - THRESHOLD_RELATIVE_SHARPNESS_DIFF))
-
-            # Verify reprocessing HQ(2) is similar to regular HQ(2) relative to
-            # OFF(0)
-            assert(numpy.isclose(sharpnesses_reprocess[reprocess_format][2] /
-                                    sharpnesses_reprocess[reprocess_format][0],
-                                 sharpness_regular[2] / sharpness_regular[0],
-                                 THRESHOLD_RELATIVE_SHARPNESS_DIFF))
-
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 
diff --git a/apps/CameraITS/tests/scene4/scene4_0.5_scaled.pdf b/apps/CameraITS/tests/scene4/scene4_0.5_scaled.pdf
new file mode 100644
index 0000000..589a3b4
--- /dev/null
+++ b/apps/CameraITS/tests/scene4/scene4_0.5_scaled.pdf
Binary files differ
diff --git a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
index 564e3e7..de134a7 100644
--- a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
+++ b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
@@ -192,6 +192,7 @@
     aspect_ratio_gt = 1  # ground truth
     failed_ar = []  # streams failed the aspect ration test
     failed_crop = []  # streams failed the crop test
+    failed_fov = []  # streams that fail FoV test
     format_list = []  # format list for multiple capture objects.
     # Do multi-capture of "iter" and "cmpr". Iterate through all the
     # available sizes of "iter", and only use the size specified for "cmpr"
@@ -209,6 +210,7 @@
     ref_fov = {}
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         its.caps.skip_unless(its.caps.read_3a(props))
         full_device = its.caps.full_or_better(props)
         limited_device = its.caps.limited(props)
@@ -248,7 +250,7 @@
             if its.caps.distortion_correction(props):
                 # The intrinsics and distortion coefficients are meant for full
                 # size RAW. Resize back to full size here.
-                img_raw = cv2.resize(img_raw, (0,0), fx=2.0, fy=2.0)
+                img_raw = cv2.resize(img_raw, (0, 0), fx=2.0, fy=2.0)
                 # Intrinsic cal is of format: [f_x, f_y, c_x, c_y, s]
                 # [f_x, f_y] is the horizontal and vertical focal lengths,
                 # [c_x, c_y] is the position of the optical axis,
@@ -261,7 +263,7 @@
                 sensor_w = props["android.sensor.info.physicalSize"]["width"]
                 pixel_h = props["android.sensor.info.pixelArraySize"]["height"]
                 pixel_w = props["android.sensor.info.pixelArraySize"]["width"]
-                fd = float(props["android.lens.info.availableFocalLengths"][0])
+                fd = float(cap_raw["metadata"]["android.lens.focalLength"])
                 fd_w_pix = pixel_w * fd / sensor_w
                 fd_h_pix = pixel_h * fd / sensor_h
                 # transformation matrix
@@ -383,15 +385,15 @@
                                   atol=FMT_ATOL):
                         # scale check value based on aspect ratio
                         chk_percent = ref_fov["percent"] * ar_scaling[ar_check]
-
-                        msg = "FoV %%: %.2f, Ref FoV %%: %.2f, TOL=%.f%%, " % (
-                                fov_percent, chk_percent,
-                                FOV_PERCENT_RTOL*100)
-                        msg += "img: %dx%d, ref: %dx%d" % (w_iter, h_iter,
-                                                           ref_fov["w"],
-                                                           ref_fov["h"])
-                        assert np.isclose(fov_percent, chk_percent,
-                                          rtol=FOV_PERCENT_RTOL), msg
+                        if not np.isclose(fov_percent, chk_percent,
+                                          rtol=FOV_PERCENT_RTOL):
+                            msg = "FoV %%: %.2f, Ref FoV %%: %.2f, " % (
+                                    fov_percent, chk_percent)
+                            msg += "TOL=%.f%%, img: %dx%d, ref: %dx%d" % (
+                                    FOV_PERCENT_RTOL*100, w_iter, h_iter,
+                                    ref_fov["w"], ref_fov["h"])
+                            failed_fov.append(msg)
+                            its.image.write_image(img/255, img_name, True)
                 # check pass/fail for aspect ratio
                 # image size >= LARGE_SIZE: use THRESH_L_AR
                 # image size == 0 (extreme case): THRESH_XS_AR
@@ -455,11 +457,20 @@
             print "\nAspect ratio test summary"
             print "Images failed in the aspect ratio test:"
             print "Aspect ratio value: width / height"
-        for fa in failed_ar:
-            print "%s with %s %dx%d: %.3f;" % (fa["fmt_iter"], fa["fmt_cmpr"],
-                                               fa["w"], fa["h"], fa["ar"]),
-            print "valid range: %.3f ~ %.3f" % (fa["valid_range"][0],
-                                                fa["valid_range"][1])
+            for fa in failed_ar:
+                print "%s with %s %dx%d: %.3f;" % (
+                        fa["fmt_iter"], fa["fmt_cmpr"],
+                        fa["w"], fa["h"], fa["ar"]),
+                print "valid range: %.3f ~ %.3f" % (
+                        fa["valid_range"][0], fa["valid_range"][1])
+
+        # Print FoV test results
+        failed_image_number_for_fov_test = len(failed_fov)
+        if failed_image_number_for_fov_test > 0:
+            print "\nFoV test summary"
+            print "Images failed in the FoV test:"
+            for fov in failed_fov:
+                print fov
 
         # Print crop test results
         failed_image_number_for_crop_test = len(failed_crop)
@@ -468,16 +479,17 @@
             print "Images failed in the crop test:"
             print "Circle center position, (horizontal x vertical), listed",
             print "below is relative to the image center."
-        for fc in failed_crop:
-            print "%s with %s %dx%d: %.3f x %.3f;" % (
-                    fc["fmt_iter"], fc["fmt_cmpr"], fc["w"], fc["h"],
-                    fc["ct_hori"], fc["ct_vert"]),
-            print "valid horizontal range: %.3f ~ %.3f;" % (
-                    fc["valid_range_h"][0], fc["valid_range_h"][1]),
-            print "valid vertical range: %.3f ~ %.3f" % (
-                    fc["valid_range_v"][0], fc["valid_range_v"][1])
+            for fc in failed_crop:
+                print "%s with %s %dx%d: %.3f x %.3f;" % (
+                        fc["fmt_iter"], fc["fmt_cmpr"], fc["w"], fc["h"],
+                        fc["ct_hori"], fc["ct_vert"]),
+                print "valid horizontal range: %.3f ~ %.3f;" % (
+                        fc["valid_range_h"][0], fc["valid_range_h"][1]),
+                print "valid vertical range: %.3f ~ %.3f" % (
+                        fc["valid_range_v"][0], fc["valid_range_v"][1])
 
         assert failed_image_number_for_aspect_ratio_test == 0
+        assert failed_image_number_for_fov_test == 0
         if level3_device:
             assert failed_image_number_for_crop_test == 0
 
diff --git a/apps/CameraITS/tests/scene4/test_multi_camera_alignment.py b/apps/CameraITS/tests/scene4/test_multi_camera_alignment.py
index 4c3c4d9..9a70e65 100644
--- a/apps/CameraITS/tests/scene4/test_multi_camera_alignment.py
+++ b/apps/CameraITS/tests/scene4/test_multi_camera_alignment.py
@@ -33,7 +33,6 @@
 NAME = os.path.basename(__file__).split('.')[0]
 TRANS_REF_MATRIX = np.array([0, 0, 0])
 
-
 def convert_to_world_coordinates(x, y, r, t, k, z_w):
     """Convert x,y coordinates to world coordinates.
 
@@ -177,7 +176,7 @@
         print 'More than one black circle was detected. Background of scene',
         print 'may be too complex.\n'
         assert num_circle == 1
-    return (circle_ctx, circle_cty, (circle_w+circle_h)/4.0)
+    return [circle_ctx, circle_cty, (circle_w+circle_h)/4.0]
 
 
 def component_shape(contour):
@@ -263,188 +262,223 @@
 
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+        its.caps.skip_unless(its.caps.read_3a(props) and
                              its.caps.per_frame_control(props) and
-                             its.caps.logical_multi_camera(props) and
-                             its.caps.raw16(props) and
-                             its.caps.manual_sensor(props))
+                             its.caps.logical_multi_camera(props))
+
+        # Find physical camera IDs that support raw, and skip if less than 2
+        ids = its.caps.logical_multi_camera_physical_ids(props)
+        props_physical = {}
+        physical_ids = []
+        for i in ids:
+            prop = cam.get_camera_properties_by_id(i)
+            if its.caps.raw16(prop) and len(physical_ids) < 2:
+                physical_ids.append(i)
+                props_physical[i] = cam.get_camera_properties_by_id(i)
+
         debug = its.caps.debug_mode()
         avail_fls = props['android.lens.info.availableFocalLengths']
         pose_reference = props['android.lens.poseReference']
 
-        max_raw_size = its.objects.get_available_output_sizes('raw', props)[0]
-        w, h = its.objects.get_available_output_sizes(
-                'yuv', props, match_ar_size=max_raw_size)[0]
-
-        # Do 3A and get the values
-        s, e, _, _, fd = cam.do_3a(get_results=True,
-                                   lock_ae=True, lock_awb=True)
-        e *= 2  # brighten RAW images
-        req = its.objects.manual_capture_request(s, e, fd, True, props)
-
-        # get physical camera properties
-        ids = its.caps.logical_multi_camera_physical_ids(props)
-        props_physical = {}
-        for i in ids:
-            props_physical[i] = cam.get_camera_properties_by_id(i)
-
-        # capture RAWs of 1st 2 cameras
-        cap_raw = {}
-        out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
-                        {'format': 'raw', 'physicalCamera': ids[0]},
-                        {'format': 'raw', 'physicalCamera': ids[1]}]
-        _, cap_raw[ids[0]], cap_raw[ids[1]] = cam.do_capture(req, out_surfaces)
-
-    size_raw = {}
-    k = {}
-    cam_reference = {}
-    r = {}
-    t = {}
-    circle = {}
-    fl = {}
-    sensor_diag = {}
-    for i in ids:
-        print 'Camera %s' % i
-        # process image
-        img_raw = its.image.convert_capture_to_rgb_image(
-                cap_raw[i], props=props)
-        size_raw[i] = (cap_raw[i]['width'], cap_raw[i]['height'])
-
-        # save images if debug
-        if debug:
-            its.image.write_image(img_raw, '%s_raw_%s.jpg' % (NAME, i))
-
-        # convert to [0, 255] images
-        img_raw *= 255
-
-        # scale to match calibration data
-        img = cv2.resize(img_raw.astype(np.uint8), None, fx=2, fy=2)
-
-        # load parameters for each physical camera
-        ical = props_physical[i]['android.lens.intrinsicCalibration']
-        assert len(ical) == 5, 'android.lens.instrisicCalibration incorrect.'
-        k[i] = np.array([[ical[0], ical[4], ical[2]],
-                         [0, ical[1], ical[3]],
-                         [0, 0, 1]])
-        print ' k:', k[i]
-
-        rotation = np.array(props_physical[i]['android.lens.poseRotation'])
-        print ' rotation:', rotation
-        assert len(rotation) == 4, 'poseRotation has wrong # of params.'
-        r[i] = rotation_matrix(rotation)
-
-        t[i] = np.array(props_physical[i]['android.lens.poseTranslation'])
-        print ' translation:', t[i]
-        assert len(t[i]) == 3, 'poseTranslation has wrong # of params.'
-        if (t[i] == TRANS_REF_MATRIX).all():
-            cam_reference[i] = True
+        # Find highest resolution image and determine formats
+        fmts = ['yuv']
+        if len(physical_ids) == 2:
+            fmts.insert(0, 'raw')  # insert in first location in list
         else:
-            cam_reference[i] = False
+            physical_ids = ids[0:1]
 
-        # API spec defines poseTranslation as the world coordinate p_w_cam of
-        # optics center. When applying [R|t] to go from world coordinates to
-        # camera coordinates, we need -R*p_w_cam of the coordinate reported in
-        # metadata.
-        # ie. for a camera with optical center at world coordinate (5, 4, 3)
-        # and identity rotation, to convert a world coordinate into the
-        # camera's coordinate, we need a translation vector of [-5, -4, -3]
-        # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0,0,0]^T
-        t[i] = -1.0 * np.dot(r[i], t[i])
-        if debug:
-            print 't:', t[i]
-            print 'r:', r[i]
+        w, h = its.objects.get_available_output_sizes('yuv', props)[0]
 
-        # Do operation on distorted image
-        print 'Detecting pre-correction circle'
-        circle_distorted = find_circle(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
-                                       '%s_gray_precorr_cam_%s.jpg' % (NAME, i))
-        print 'camera %s circle pre-distortion correction: x, y: %.2f, %.2f' % (
-                i, circle_distorted[0], circle_distorted[1])
+        # do captures on 2 cameras
+        caps = {}
+        for i, fmt in enumerate(fmts):
+            out_surfaces = [{'format': 'yuv', 'width': w, 'height': h},
+                            {'format': fmt, 'physicalCamera': physical_ids[0]},
+                            {'format': fmt, 'physicalCamera': physical_ids[1]}]
 
-        # Apply correction to image (if available)
-        if its.caps.distortion_correction(props):
-            distort = np.array(props_physical[i]['android.lens.distortion'])
-            assert len(distort) == 5, 'distortion has wrong # of params.'
-            cv2_distort = np.array([distort[0], distort[1],
-                                    distort[3], distort[4],
-                                    distort[2]])
-            print ' cv2 distortion params:', cv2_distort
-            its.image.write_image(img/255.0, '%s_raw_%s.jpg' % (
-                    NAME, i))
-            img = cv2.undistort(img, k[i], cv2_distort)
-            its.image.write_image(img/255.0, '%s_correct_%s.jpg' % (
-                    NAME, i))
+            out_surfaces_supported = cam.is_stream_combination_supported(out_surfaces)
+            its.caps.skip_unless(out_surfaces_supported)
 
-        # Find the circles in grayscale image
-        circle[i] = find_circle(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
-                                '%s_gray_%s.jpg' % (NAME, i))
+            # Do 3A and get the values
+            s, e, _, _, fd = cam.do_3a(get_results=True,
+                                       lock_ae=True, lock_awb=True)
+            if fmt == 'raw':
+                e_corrected = e * 2  # brighten RAW images
+            else:
+                e_corrected = e
+            print 'out_surfaces:', out_surfaces
+            req = its.objects.manual_capture_request(s, e_corrected, fd)
+            _, caps[(fmt, physical_ids[0])], caps[(fmt, physical_ids[1])] = cam.do_capture(
+                    req, out_surfaces);
 
-        # Find focal length & sensor size
-        fl[i] = props_physical[i]['android.lens.info.availableFocalLengths'][0]
-        sensor_diag[i] = math.sqrt(size_raw[i][0] ** 2 + size_raw[i][1] ** 2)
+    for j, fmt in enumerate(fmts):
+        size = {}
+        k = {}
+        cam_reference = {}
+        r = {}
+        t = {}
+        circle = {}
+        fl = {}
+        sensor_diag = {}
+        print '\nFormat:', fmt
+        for i in physical_ids:
+            # process image
+            img = its.image.convert_capture_to_rgb_image(
+                    caps[(fmt, i)], props=props_physical[i])
+            size[i] = (caps[fmt, i]['width'], caps[fmt, i]['height'])
 
-    i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
-    print 'reference camera: %s, secondary camera: %s' % (i_ref, i_2nd)
+            # save images if debug
+            if debug:
+                its.image.write_image(img, '%s_%s_%s.jpg' % (NAME, fmt, i))
 
-    # Convert circle centers to real world coordinates
-    x_w = {}
-    y_w = {}
-    if props['android.lens.facing']:
-        print 'lens facing BACK'
-        chart_distance *= -1  # API spec defines +z i pointing out from screen
-    for i in [i_ref, i_2nd]:
-        x_w[i], y_w[i] = convert_to_world_coordinates(
-                circle[i][0], circle[i][1], r[i], t[i], k[i], chart_distance)
+            # convert to [0, 255] images
+            img *= 255
 
-    # Back convert to image coordinates for sanity check
-    x_p = {}
-    y_p = {}
-    x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
-            [x_w[i_ref], y_w[i_ref], chart_distance],
-            r[i_2nd], t[i_2nd], k[i_2nd])
-    x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
-            [x_w[i_2nd], y_w[i_2nd], chart_distance],
-            r[i_ref], t[i_ref], k[i_ref])
+            # scale to match calibration data if RAW
+            if fmt == 'raw':
+                img = cv2.resize(img.astype(np.uint8), None, fx=2, fy=2)
+            else:
+                img = img.astype(np.uint8)
 
-    # Summarize results
-    for i in [i_ref, i_2nd]:
-        print ' Camera: %s' % i
-        print ' x, y (pixels): %.1f, %.1f' % (circle[i][0], circle[i][1])
-        print ' x_w, y_w (mm): %.2f, %.2f' % (x_w[i]*1.0E3, y_w[i]*1.0E3)
-        print ' x_p, y_p (pixels): %.1f, %.1f' % (x_p[i], y_p[i])
+            # load parameters for each physical camera
+            ical = props_physical[i]['android.lens.intrinsicCalibration']
+            assert len(ical) == 5, 'android.lens.instrisicCalibration incorrect.'
+            k[i] = np.array([[ical[0], ical[4], ical[2]],
+                             [0, ical[1], ical[3]],
+                             [0, 0, 1]])
+            if j == 0:
+                print 'Camera %s' % i
+                print ' k:', k[i]
 
-    # Check center locations
-    err = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
-                         np.array([x_w[i_2nd], y_w[i_2nd]]))
-    print '\nCenter location err (mm): %.2f' % (err*1E3)
-    msg = 'Center locations %s <-> %s too different!' % (i_ref, i_2nd)
-    msg += ' val=%.2fmm, THRESH=%.fmm' % (err*1E3, ALIGN_TOL_MM*1E3)
-    assert err < ALIGN_TOL, msg
+            rotation = np.array(props_physical[i]['android.lens.poseRotation'])
+            if j == 0:
+                print ' rotation:', rotation
+            assert len(rotation) == 4, 'poseRotation has wrong # of params.'
+            r[i] = rotation_matrix(rotation)
 
-    # Check projections back into pixel space
-    for i in [i_ref, i_2nd]:
-        err = np.linalg.norm(np.array([circle[i][0], circle[i][1]]) -
-                             np.array([x_p[i], y_p[i]]))
-        print 'Camera %s projection error (pixels): %.1f' % (i, err)
-        tol = ALIGN_TOL * sensor_diag[i]
-        msg = 'Camera %s project locations too different!' % i
-        msg += ' diff=%.2f, TOL=%.2f' % (err, tol)
-        assert err < tol, msg
+            t[i] = np.array(props_physical[i]['android.lens.poseTranslation'])
+            if j == 0:
+                print ' translation:', t[i]
+            assert len(t[i]) == 3, 'poseTranslation has wrong # of params.'
+            if (t[i] == TRANS_REF_MATRIX).all():
+                cam_reference[i] = True
+            else:
+                cam_reference[i] = False
 
-    # Check focal length and circle size if more than 1 focal length
-    if len(avail_fls) > 1:
-        print 'Circle radii (pixels); ref: %.1f, 2nd: %.1f' % (
-                circle[i_ref][2], circle[i_2nd][2])
-        print 'Focal lengths (diopters); ref: %.2f, 2nd: %.2f' % (
-                fl[i_ref], fl[i_2nd])
-        print 'Sensor diagonals (pixels); ref: %.2f, 2nd: %.2f' % (
-                sensor_diag[i_ref], sensor_diag[i_2nd])
-        msg = 'Circle size scales improperly! RTOL=%.1f' % CIRCLE_RTOL
-        msg += '\nMetric: radius/focal_length*sensor_diag should be equal.'
-        assert np.isclose(circle[i_ref][2]/fl[i_ref]*sensor_diag[i_ref],
-                          circle[i_2nd][2]/fl[i_2nd]*sensor_diag[i_2nd],
-                          rtol=CIRCLE_RTOL), msg
+            # API spec defines poseTranslation as the world coordinate p_w_cam of
+            # optics center. When applying [R|t] to go from world coordinates to
+            # camera coordinates, we need -R*p_w_cam of the coordinate reported in
+            # metadata.
+            # ie. for a camera with optical center at world coordinate (5, 4, 3)
+            # and identity rotation, to convert a world coordinate into the
+            # camera's coordinate, we need a translation vector of [-5, -4, -3]
+            # so that: [I|[-5, -4, -3]^T] * [5, 4, 3]^T = [0,0,0]^T
+            t[i] = -1.0 * np.dot(r[i], t[i])
+            if debug and j == 1:
+                print 't:', t[i]
+                print 'r:', r[i]
 
+            # Correct lens distortion to image (if available)
+            if its.caps.distortion_correction(props_physical[i]) and fmt == 'raw':
+                distort = np.array(props_physical[i]['android.lens.distortion'])
+                assert len(distort) == 5, 'distortion has wrong # of params.'
+                cv2_distort = np.array([distort[0], distort[1],
+                                        distort[3], distort[4],
+                                        distort[2]])
+                print ' cv2 distortion params:', cv2_distort
+                its.image.write_image(img/255.0, '%s_%s_%s.jpg' % (
+                        NAME, fmt, i))
+                img = cv2.undistort(img, k[i], cv2_distort)
+                its.image.write_image(img/255.0, '%s_%s_correct_%s.jpg' % (
+                        NAME, fmt, i))
+
+            # Find the circles in grayscale image
+            circle[i] = find_circle(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),
+                                    '%s_%s_gray_%s.jpg' % (NAME, fmt, i))
+            print "Circle radius ", i, ": ", circle[i][2]
+
+            # Undo zoom to image (if applicable). Assume that the maximum
+            # physical YUV image size is close to active array size.
+            if fmt == 'yuv':
+                ar = props_physical[i]['android.sensor.info.activeArraySize']
+                arw = ar['right'] - ar['left']
+                arh = ar['bottom'] - ar['top']
+                cr = caps[(fmt, i)]['metadata']['android.scaler.cropRegion'];
+                crw = cr['right'] - cr['left']
+                crh = cr['bottom'] - cr['top']
+                # Assume pixels remain square after zoom, so use same zoom
+                # ratios for x and y.
+                zoom_ratio = min(1.0 * arw / crw, 1.0 * arh / crh);
+                circle[i][0] = cr['left'] + circle[i][0] / zoom_ratio
+                circle[i][1] = cr['top'] + circle[i][1] / zoom_ratio
+                circle[i][2] = circle[i][2] / zoom_ratio
+
+            # Find focal length & sensor size
+            fl[i] = props_physical[i]['android.lens.info.availableFocalLengths'][0]
+            sensor_diag[i] = math.sqrt(size[i][0] ** 2 + size[i][1] ** 2)
+
+        i_ref, i_2nd = define_reference_camera(pose_reference, cam_reference)
+        print 'reference camera: %s, secondary camera: %s' % (i_ref, i_2nd)
+
+        # Convert circle centers to real world coordinates
+        x_w = {}
+        y_w = {}
+        if props['android.lens.facing']:
+            # API spec defines +z is pointing out from screen
+            print 'lens facing BACK'
+            chart_distance *= -1
+        for i in [i_ref, i_2nd]:
+            x_w[i], y_w[i] = convert_to_world_coordinates(
+                    circle[i][0], circle[i][1], r[i], t[i], k[i], chart_distance)
+
+        # Back convert to image coordinates for sanity check
+        x_p = {}
+        y_p = {}
+        x_p[i_2nd], y_p[i_2nd] = convert_to_image_coordinates(
+                [x_w[i_ref], y_w[i_ref], chart_distance],
+                r[i_2nd], t[i_2nd], k[i_2nd])
+        x_p[i_ref], y_p[i_ref] = convert_to_image_coordinates(
+                [x_w[i_2nd], y_w[i_2nd], chart_distance],
+                r[i_ref], t[i_ref], k[i_ref])
+
+        # Summarize results
+        for i in [i_ref, i_2nd]:
+            print ' Camera: %s' % i
+            print ' x, y (pixels): %.1f, %.1f' % (circle[i][0], circle[i][1])
+            print ' x_w, y_w (mm): %.2f, %.2f' % (x_w[i]*1.0E3, y_w[i]*1.0E3)
+            print ' x_p, y_p (pixels): %.1f, %.1f' % (x_p[i], y_p[i])
+
+        # Check center locations
+        err = np.linalg.norm(np.array([x_w[i_ref], y_w[i_ref]]) -
+                             np.array([x_w[i_2nd], y_w[i_2nd]]))
+        print 'Center location err (mm): %.2f' % (err*1E3)
+        msg = 'Center locations %s <-> %s too different!' % (i_ref, i_2nd)
+        msg += ' val=%.2fmm, THRESH=%.fmm' % (err*1E3, ALIGN_TOL_MM*1E3)
+        assert err < ALIGN_TOL, msg
+
+        # Check projections back into pixel space
+        for i in [i_ref, i_2nd]:
+            err = np.linalg.norm(np.array([circle[i][0], circle[i][1]]) -
+                                 np.array([x_p[i], y_p[i]]))
+            print 'Camera %s projection error (pixels): %.1f' % (i, err)
+            tol = ALIGN_TOL * sensor_diag[i]
+            msg = 'Camera %s project locations too different!' % i
+            msg += ' diff=%.2f, TOL=%.2f' % (err, tol)
+            assert err < tol, msg
+
+        # Check focal length and circle size if more than 1 focal length
+        if len(avail_fls) > 1:
+            print 'Circle radii (pixels); ref: %.1f, 2nd: %.1f' % (
+                    circle[i_ref][2], circle[i_2nd][2])
+            print 'Focal lengths (diopters); ref: %.2f, 2nd: %.2f' % (
+                    fl[i_ref], fl[i_2nd])
+            print 'Sensor diagonals (pixels); ref: %.2f, 2nd: %.2f' % (
+                    sensor_diag[i_ref], sensor_diag[i_2nd])
+            msg = 'Circle size scales improperly! RTOL=%.1f' % CIRCLE_RTOL
+            msg += '\nMetric: radius/focal_length*sensor_diag should be equal.'
+            assert np.isclose(circle[i_ref][2]/fl[i_ref]*sensor_diag[i_ref],
+                              circle[i_2nd][2]/fl[i_2nd]*sensor_diag[i_2nd],
+                              rtol=CIRCLE_RTOL), msg
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/sensor_fusion/test_multi_camera_frame_sync.py b/apps/CameraITS/tests/sensor_fusion/test_multi_camera_frame_sync.py
index 63ddbdd..2ebac1e 100644
--- a/apps/CameraITS/tests/sensor_fusion/test_multi_camera_frame_sync.py
+++ b/apps/CameraITS/tests/sensor_fusion/test_multi_camera_frame_sync.py
@@ -39,11 +39,9 @@
 def _check_available_capabilities(props):
     """Returns True if all required test capabilities are present."""
     return all([
-            its.caps.compute_target_exposure(props),
+            its.caps.read_3a(props),
             its.caps.per_frame_control(props),
             its.caps.logical_multi_camera(props),
-            its.caps.raw16(props),
-            its.caps.manual_sensor(props),
             its.caps.sensor_fusion(props)])
 
 
@@ -100,9 +98,9 @@
     pylab.ylabel("Rotation angle difference (degrees)")
     matplotlib.pyplot.savefig("%s_angle_diffs_plot.png" % (NAME))
 
+
 def _collect_data():
     """Returns list of pair of gray frames and camera ids used for captures."""
-    yuv_sizes = {}
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
 
@@ -116,7 +114,12 @@
         # Define capture request
         s, e, _, _, _ = cam.do_3a(get_results=True, do_af=False)
         req = its.objects.manual_capture_request(s, e)
-        req["android.lens.focusDistance"] = 1 / (CHART_DISTANCE * CM_TO_M)
+        fd_min = props["android.lens.info.minimumFocusDistance"]
+        fd_chart = 1 / (CHART_DISTANCE * CM_TO_M)
+        if fd_min < fd_chart:
+            req["android.lens.focusDistance"] = fd_min
+        else:
+            req["android.lens.focusDistance"] = fd_chart
 
         # capture YUVs
         out_surfaces = [{"format": "yuv", "width": W, "height": H,
@@ -124,6 +127,9 @@
                         {"format": "yuv", "width": W, "height": H,
                          "physicalCamera": ids[1]}]
 
+        out_surfaces_supported = cam.is_stream_combination_supported(out_surfaces)
+        its.caps.skip_unless(out_surfaces_supported)
+
         capture_1_list, capture_2_list = cam.do_capture(
             [req]*NUM_CAPTURES, out_surfaces)
 
@@ -145,6 +151,7 @@
 
         return frame_pairs_gray, ids
 
+
 def main():
     """Test frame timestamps captured by logical camera are within 10ms."""
     frame_pairs_gray, ids = _collect_data()
@@ -156,12 +163,12 @@
     # Remove frames where not enough squares were detected.
     filtered_pairs_angles = []
     for angle_1, angle_2 in frame_pairs_angles:
-        if angle_1 == None or angle_2 == None:
+        if angle_1 is None or angle_2 is None:
             continue
         filtered_pairs_angles.append([angle_1, angle_2])
 
-    print 'Using {} image pairs to compute angular difference.'.format(
-        len(filtered_pairs_angles))
+    print "Using {} image pairs to compute angular difference.".format(
+            len(filtered_pairs_angles))
 
     assert len(filtered_pairs_angles) > 20, (
         "Unable to identify enough frames with detected squares.")
diff --git a/apps/CameraITS/tests/sensor_fusion/test_sensor_fusion.py b/apps/CameraITS/tests/sensor_fusion/test_sensor_fusion.py
index fbf7bcd..25296b6 100644
--- a/apps/CameraITS/tests/sensor_fusion/test_sensor_fusion.py
+++ b/apps/CameraITS/tests/sensor_fusion/test_sensor_fusion.py
@@ -226,7 +226,7 @@
     if abs(best_shift - exact_best_shift) > 2.0 or a <= 0 or c <= 0:
         print "Test failed; bad fit to time-shift curve"
         print "best_shift %f, exact_best_shift %f, a %f, c %f" % (
-            best_shift, exact_best_shift, a, c)
+                best_shift, exact_best_shift, a, c)
         assert 0
 
     xfit = numpy.arange(candidates[0], candidates[-1], 0.05).tolist()
@@ -343,12 +343,13 @@
         p0_filtered = p0[mask]
         num_features = len(p0_filtered)
         if num_features < MIN_FEATURE_PTS:
-            print "Not enough feature points in frame", i
+            print "Not enough feature points in frame %s" % str(i-1).zfill(3)
             print "Need at least %d features, got %d" % (
-                MIN_FEATURE_PTS, num_features)
+                    MIN_FEATURE_PTS, num_features)
             assert 0
         else:
-            print "Number of features in frame %d is %d" % (i, num_features)
+            print "Number of features in frame %s is %d" % (
+                    str(i-1).zfill(3), num_features)
         p1, st, _ = cv2.calcOpticalFlowPyrLK(gframe0, gframe1, p0_filtered,
                                              None, **LK_PARAMS)
         tform = procrustes_rotation(p0_filtered[st == 1], p1[st == 1])
@@ -428,8 +429,9 @@
     """
     with its.device.ItsSession() as cam:
         props = cam.get_camera_properties()
-        its.caps.skip_unless(its.caps.sensor_fusion(props) and
-                             its.caps.manual_sensor(props) and
+        props = cam.override_with_hidden_physical_camera_props(props)
+        its.caps.skip_unless(its.caps.read_3a and
+                             its.caps.sensor_fusion(props) and
                              props["android.lens.facing"] != FACING_EXTERNAL and
                              cam.get_sensors().get("gyro"))
 
@@ -449,11 +451,16 @@
         s, e, _, _, _ = cam.do_3a(get_results=True, do_af=False)
         req = its.objects.manual_capture_request(s, e)
         its.objects.turn_slow_filters_off(props, req)
-        req["android.lens.focusDistance"] = 1 / (CHART_DISTANCE * CM_TO_M)
+        fd_min = props["android.lens.info.minimumFocusDistance"]
+        fd_chart = 1 / (CHART_DISTANCE * CM_TO_M)
+        if fd_min < fd_chart:
+            req["android.lens.focusDistance"] = fd_min
+        else:
+            req["android.lens.focusDistance"] = fd_chart
         req["android.control.aeTargetFpsRange"] = [fps, fps]
         req["android.sensor.frameDuration"] = int(1000.0/fps * MSEC_TO_NSEC)
         print "Capturing %dx%d with sens. %d, exp. time %.1fms at %dfps" % (
-            w, h, s, e*NSEC_TO_MSEC, fps)
+                w, h, s, e*NSEC_TO_MSEC, fps)
         caps = cam.do_capture([req]*int(fps*test_length), fmt)
 
         # Capture a bit more gyro samples for use in
diff --git a/apps/CameraITS/tools/DngNoiseModel.pdf b/apps/CameraITS/tools/DngNoiseModel.pdf
new file mode 100644
index 0000000..fee9e70
--- /dev/null
+++ b/apps/CameraITS/tools/DngNoiseModel.pdf
Binary files differ
diff --git a/apps/CameraITS/tools/dng_noise_model.py b/apps/CameraITS/tools/dng_noise_model.py
new file mode 100644
index 0000000..b490d19
--- /dev/null
+++ b/apps/CameraITS/tools/dng_noise_model.py
@@ -0,0 +1,363 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import os.path
+import textwrap
+
+import its.caps
+import its.device
+import its.image
+import its.objects
+
+import matplotlib
+from matplotlib import pylab
+import matplotlib.pyplot as plt
+import numpy as np
+import scipy.signal
+import scipy.stats
+
+BAYER_LIST = ['R', 'GR', 'GB', 'B']
+NAME = os.path.basename(__file__).split('.')[0]
+RTOL_EXP_GAIN = 0.97
+
+
+def tile(a, tile_size):
+    """Convert a 2D array to 4D w/ dims [tile_size, tile_size, row, col] where row, col are tile indices.
+    """
+    tile_rows, tile_cols = a.shape[0]/tile_size, a.shape[1]/tile_size
+    a = a.reshape([tile_rows, tile_size, tile_cols, tile_size])
+    a = a.transpose([1, 3, 0, 2])
+    return a
+
+
+def main():
+    """Capture a set of raw images with increasing analog gains and measure the noise.
+    """
+
+    # How many sensitivities per stop to sample.
+    steps_per_stop = 2
+    # How large of tiles to use to compute mean/variance.
+    tile_size = 64
+    # Exposure bracketing range in stops
+    bracket_stops = 4
+    # How high to allow the mean of the tiles to go.
+    max_signal_level = 0.25
+    # Colors used for plotting the data for each exposure.
+    colors = 'rygcbm'
+
+    # Define a first order high pass filter to eliminate low frequency
+    # signal content when computing variance.
+    f = np.array([-1, 1]).astype('float32')
+    # Make it a higher order filter by convolving the first order
+    # filter with itself a few times.
+    f = np.convolve(f, f)
+    f = np.convolve(f, f)
+
+    # Compute the normalization of the filter to preserve noise
+    # power. Let a be the normalization factor we're looking for, and
+    # Let X and X' be the random variables representing the noise
+    # before and after filtering, respectively. First, compute
+    # Var[a*X']:
+    #
+    #   Var[a*X'] = a^2*Var[X*f_0 + X*f_1 + ... + X*f_N-1]
+    #             = a^2*(f_0^2*Var[X] + f_1^2*Var[X] + ... + (f_N-1)^2*Var[X])
+    #             = sum(f_i^2)*a^2*Var[X]
+    #
+    # We want Var[a*X'] to be equal to Var[X]:
+    #
+    #    sum(f_i^2)*a^2*Var[X] = Var[X] -> a = sqrt(1/sum(f_i^2))
+    #
+    # We can just bake this normalization factor into the high pass
+    # filter kernel.
+    f /= math.sqrt(np.dot(f, f))
+
+    bracket_factor = math.pow(2, bracket_stops)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
+
+        # Get basic properties we need.
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        sens_max_analog = props['android.sensor.maxAnalogSensitivity']
+        sens_max_meas = sens_max_analog
+        white_level = props['android.sensor.info.whiteLevel']
+
+        print "Sensitivity range: [%f, %f]" % (sens_min, sens_max)
+        print "Max analog sensitivity: %f" % (sens_max_analog)
+
+        # Do AE to get a rough idea of where we are.
+        s_ae, e_ae, _, _, _ = \
+            cam.do_3a(get_results=True, do_awb=False, do_af=False)
+        # Underexpose to get more data for low signal levels.
+        auto_e = s_ae*e_ae/bracket_factor
+        # Focus at zero to intentionally blur the scene as much as possible.
+        f_dist = 0.0
+
+        # If the auto-exposure result is too bright for the highest
+        # sensitivity or too dark for the lowest sensitivity, report
+        # an error.
+        min_exposure_ns, max_exposure_ns = \
+            props['android.sensor.info.exposureTimeRange']
+        if auto_e < min_exposure_ns*sens_max_meas:
+            raise its.error.Error("Scene is too bright to properly expose \
+                                  at the highest sensitivity")
+        if auto_e*bracket_factor > max_exposure_ns*sens_min:
+            raise its.error.Error("Scene is too dark to properly expose \
+                                  at the lowest sensitivity")
+
+        # Start the sensitivities at the minimum.
+        s = sens_min
+
+        samples = [[], [], [], []]
+        plots = []
+        measured_models = [[], [], [], []]
+        color_plane_plots = {}
+        while int(round(s)) <= sens_max_meas:
+            s_int = int(round(s))
+            print 'ISO %d' % s_int
+            fig, [[plt_r, plt_gr], [plt_gb, plt_b]] = plt.subplots(2, 2, figsize=(11, 11))
+            fig.gca()
+            color_plane_plots[s_int] = [plt_r, plt_gr, plt_gb, plt_b]
+            fig.suptitle('ISO %d' % s_int, x=0.54, y=0.99)
+            for i, plot in enumerate(color_plane_plots[s_int]):
+                plot.set_title('%s' % BAYER_LIST[i])
+                plot.set_xlabel('Mean signal level')
+                plot.set_ylabel('Variance')
+
+            samples_s = [[], [], [], []]
+            for b in range(0, bracket_stops):
+                # Get the exposure for this sensitivity and exposure time.
+                e = int(math.pow(2, b)*auto_e/float(s))
+                print 'exp %.3fms' % round(e*1.0E-6, 3)
+                req = its.objects.manual_capture_request(s_int, e, f_dist)
+                cap = cam.do_capture(req, cam.CAP_RAW)
+                planes = its.image.convert_capture_to_planes(cap, props)
+                e_read = cap['metadata']['android.sensor.exposureTime']
+                s_read = cap['metadata']['android.sensor.sensitivity']
+                s_err = 's_write: %d, s_read: %d, RTOL: %.2f' % (
+                        s, s_read, RTOL_EXP_GAIN)
+                assert (1.0 >= s_read/float(s_int) >= RTOL_EXP_GAIN), s_err
+                print 'ISO_write: %d, ISO_read: %d' %  (s_int, s_read)
+
+                for (pidx, p) in enumerate(planes):
+                    plot = color_plane_plots[s_int][pidx]
+
+                    p = p.squeeze()
+
+                    # Crop the plane to be a multiple of the tile size.
+                    p = p[0:p.shape[0] - p.shape[0]%tile_size,
+                          0:p.shape[1] - p.shape[1]%tile_size]
+
+                    # convert_capture_to_planes normalizes the range
+                    # to [0, 1], but without subtracting the black
+                    # level.
+                    black_level = its.image.get_black_level(
+                            pidx, props, cap["metadata"])
+                    p *= white_level
+                    p = (p - black_level)/(white_level - black_level)
+
+                    # Use our high pass filter to filter this plane.
+                    hp = scipy.signal.sepfir2d(p, f, f).astype('float32')
+
+                    means_tiled = \
+                        np.mean(tile(p, tile_size), axis=(0, 1)).flatten()
+                    vars_tiled = \
+                        np.var(tile(hp, tile_size), axis=(0, 1)).flatten()
+
+                    samples_e = []
+                    for (mean, var) in zip(means_tiled, vars_tiled):
+                        # Don't include the tile if it has samples that might
+                        # be clipped.
+                        if mean + 2*math.sqrt(var) < max_signal_level:
+                            samples_e.append([mean, var])
+
+                    if samples_e:
+                        means_e, vars_e = zip(*samples_e)
+                        color_plane_plots[s_int][pidx].plot(
+                                means_e, vars_e, colors[b%len(colors)] + '.',
+                                alpha=0.5)
+                        samples_s[pidx].extend(samples_e)
+
+            for (pidx, p) in enumerate(samples_s):
+                [S, O, R, _, _] = scipy.stats.linregress(samples_s[pidx])
+                measured_models[pidx].append([s_int, S, O])
+                print "Sensitivity %d: %e*y + %e (R=%f)" % (s_int, S, O, R)
+
+                # Add the samples for this sensitivity to the global samples list.
+                samples[pidx].extend([(s_int, mean, var) for (mean, var) in samples_s[pidx]])
+
+                # Add the linear fit to subplot for this sensitivity.
+                # pylab.subplot(2, 2, pidx+1)
+                #pylab.plot([0, max_signal_level], [O, O + S*max_signal_level], 'rgkb'[pidx]+'--',
+                           #label="Linear fit")
+                color_plane_plots[s_int][pidx].plot([0, max_signal_level],
+                        [O, O + S*max_signal_level], 'rgkb'[pidx]+'--',
+                        label="Linear fit")
+
+                xmax = max([max([x for (x, _) in p]) for p in samples_s])*1.25
+                ymax = max([max([y for (_, y) in p]) for p in samples_s])*1.25
+                color_plane_plots[s_int][pidx].set_xlim(xmin=0, xmax=xmax)
+                color_plane_plots[s_int][pidx].set_ylim(ymin=0, ymax=ymax)
+                color_plane_plots[s_int][pidx].legend()
+                pylab.tight_layout()
+
+            fig.savefig('%s_samples_iso%04d.png' % (NAME, s_int))
+            plots.append([s_int, fig])
+
+            # Move to the next sensitivity.
+            s *= math.pow(2, 1.0/steps_per_stop)
+
+        # do model plots
+        (fig, (plt_S, plt_O)) = plt.subplots(2, 1, figsize=(11, 8.5))
+        plt_S.set_title("Noise model")
+        plt_S.set_ylabel("S")
+        plt_O.set_xlabel("ISO")
+        plt_O.set_ylabel("O")
+
+        A = []
+        B = []
+        C = []
+        D = []
+        for (pidx, p) in enumerate(measured_models):
+            # Grab the sensitivities and line parameters from each sensitivity.
+            S_measured = [e[1] for e in measured_models[pidx]]
+            O_measured = [e[2] for e in measured_models[pidx]]
+            sens = np.asarray([e[0] for e in measured_models[pidx]])
+            sens_sq = np.square(sens)
+
+            # Use a global linear optimization to fit the noise model.
+            gains = np.asarray([s[0] for s in samples[pidx]])
+            means = np.asarray([s[1] for s in samples[pidx]])
+            vars_ = np.asarray([s[2] for s in samples[pidx]])
+
+            # Define digital gain as the gain above the max analog gain
+            # per the Camera2 spec. Also, define a corresponding C
+            # expression snippet to use in the generated model code.
+            digital_gains = np.maximum(gains/sens_max_analog, 1)
+            digital_gain_cdef = "(sens / %d.0) < 1.0 ? 1.0 : (sens / %d.0)" % \
+                (sens_max_analog, sens_max_analog)
+
+            # Find the noise model parameters via least squares fit.
+            ad = gains*means
+            bd = means
+            cd = gains*gains
+            dd = digital_gains*digital_gains
+            a = np.asarray([ad, bd, cd, dd]).T
+            b = vars_
+
+            # To avoid overfitting to high ISOs (high variances), divide the system
+            # by the gains.
+            a /= (np.tile(gains, (a.shape[1], 1)).T)
+            b /= gains
+
+            [A_p, B_p, C_p, D_p], _, _, _ = np.linalg.lstsq(a, b)
+            A.append(A_p)
+            B.append(B_p)
+            C.append(C_p)
+            D.append(D_p)
+
+            # Plot the noise model components with the values predicted by the
+            # noise model.
+            S_model = A_p*sens + B_p
+            O_model = \
+                C_p*sens_sq + D_p*np.square(np.maximum(sens/sens_max_analog, 1))
+
+            plt_S.loglog(sens, S_measured, 'rgkb'[pidx]+'+', basex=10, basey=10,
+                         label="Measured")
+            plt_S.loglog(sens, S_model, 'rgkb'[pidx]+'x', basex=10, basey=10, label="Model")
+
+            plt_O.loglog(sens, O_measured, 'rgkb'[pidx]+'+', basex=10, basey=10,
+                         label="Measured")
+            plt_O.loglog(sens, O_model, 'rgkb'[pidx]+'x', basex=10, basey=10, label="Model")
+        plt_S.legend()
+        plt_O.legend()
+
+        fig.savefig("%s.png" % (NAME))
+
+        # add models to subplots and re-save
+        for [s, fig] in plots:  # re-step through figs...
+            dg = max(s/sens_max_analog, 1)
+            fig.gca()
+            for (pidx, p) in enumerate(measured_models):
+                S = A[pidx]*s + B[pidx]
+                O = C[pidx]*s*s + D[pidx]*dg*dg
+                color_plane_plots[s][pidx].plot([0, max_signal_level],
+                        [O, O + S*max_signal_level], 'rgkb'[pidx]+'-',
+                        label="Model", alpha=0.5)
+                color_plane_plots[s][pidx].legend(loc='upper left')
+            fig.savefig('%s_samples_iso%04d.png' % (NAME, s))
+
+        # Generate the noise model implementation.
+        A_array = ",".join([str(i) for i in A])
+        B_array = ",".join([str(i) for i in B])
+        C_array = ",".join([str(i) for i in C])
+        D_array = ",".join([str(i) for i in D])
+        noise_model_code = textwrap.dedent("""\
+            /* Generated test code to dump a table of data for external validation
+             * of the noise model parameters.
+             */
+            #include <stdio.h>
+            #include <assert.h>
+            double compute_noise_model_entry_S(int plane, int sens);
+            double compute_noise_model_entry_O(int plane, int sens);
+            int main(void) {
+                for (int plane = 0; plane < %d; plane++) {
+                    for (int sens = %d; sens <= %d; sens += 100) {
+                        double o = compute_noise_model_entry_O(plane, sens);
+                        double s = compute_noise_model_entry_S(plane, sens);
+                        printf("%%d,%%d,%%lf,%%lf\\n", plane, sens, o, s);
+                    }
+                }
+                return 0;
+            }
+
+            /* Generated functions to map a given sensitivity to the O and S noise
+             * model parameters in the DNG noise model. The planes are in
+             * R, Gr, Gb, B order.
+             */
+            double compute_noise_model_entry_S(int plane, int sens) {
+                static double noise_model_A[] = { %s };
+                static double noise_model_B[] = { %s };
+                double A = noise_model_A[plane];
+                double B = noise_model_B[plane];
+                double s = A * sens + B;
+                return s < 0.0 ? 0.0 : s;
+            }
+
+            double compute_noise_model_entry_O(int plane, int sens) {
+                static double noise_model_C[] = { %s };
+                static double noise_model_D[] = { %s };
+                double digital_gain = %s;
+                double C = noise_model_C[plane];
+                double D = noise_model_D[plane];
+                double o = C * sens * sens + D * digital_gain * digital_gain;
+                return o < 0.0 ? 0.0 : o;
+            }
+            """ % (len(A), sens_min, sens_max, A_array, B_array, C_array, D_array, digital_gain_cdef))
+        print noise_model_code
+        for i, _ in enumerate(BAYER_LIST):
+            read_noise = C[i] * sens_min * sens_min + D[i]
+            e_msg = '%s model min ISO noise < 0! C: %.4e, D: %.4e, rn: %.4e' % (
+                    BAYER_LIST[i], C[i], D[i], read_noise)
+            assert read_noise > 0, e_msg
+            assert C[i] > 0, '%s model slope is negative. slope=%.4e' % (
+                    BAYER_LIST[i], C[i])
+        text_file = open("noise_model.c", "w")
+        text_file.write("%s" % noise_model_code)
+        text_file.close()
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tools/load_scene.py b/apps/CameraITS/tools/load_scene.py
index 330b32f..e25a3f5 100644
--- a/apps/CameraITS/tools/load_scene.py
+++ b/apps/CameraITS/tools/load_scene.py
@@ -18,6 +18,7 @@
 import sys
 import time
 
+import its.cv2image
 import numpy as np
 
 
@@ -46,25 +47,28 @@
         print 'Error: need to specify screen serial'
         assert False
 
-    remote_scene_file = '/sdcard/Download/%s.pdf' % scene
-    local_scene_file = os.path.join(os.environ['CAMERA_ITS_TOP'], 'tests',
-                                    scene)
-    if np.isclose(chart_distance, 20, rtol=0.1) and camera_fov < 90:
-        local_scene_file = os.path.join(local_scene_file,
-                                        scene+'_0.67_scaled.pdf')
+    src_scene_path = os.path.join(os.environ['CAMERA_ITS_TOP'], 'tests', scene)
+    dst_scene_file = '/sdcard/Download/%s.pdf' % scene
+    chart_scaling = its.cv2image.calc_chart_scaling(chart_distance, camera_fov)
+    if np.isclose(chart_scaling, its.cv2image.SCALE_TELE_IN_WFOV_BOX, atol=0.01):
+        file_name = '%s_%s_scaled.pdf' % (
+                scene, str(its.cv2image.SCALE_TELE_IN_WFOV_BOX))
+    elif np.isclose(chart_scaling, its.cv2image.SCALE_RFOV_IN_WFOV_BOX, atol=0.01):
+        file_name = '%s_%s_scaled.pdf' % (
+                scene, str(its.cv2image.SCALE_RFOV_IN_WFOV_BOX))
     else:
-        local_scene_file = os.path.join(local_scene_file, scene+'.pdf')
-    print 'Loading %s on %s' % (local_scene_file, screen_id)
-    cmd = 'adb -s %s push %s /mnt%s' % (screen_id, local_scene_file,
-                                        remote_scene_file)
+        file_name = '%s.pdf' % scene
+    src_scene_file = os.path.join(src_scene_path, file_name)
+    print 'Loading %s on %s' % (src_scene_file, screen_id)
+    cmd = 'adb -s %s push %s /mnt%s' % (screen_id, src_scene_file,
+                                        dst_scene_file)
     subprocess.Popen(cmd.split())
     time.sleep(1)  # wait-for-device doesn't always seem to work...
     # The intent require PDF viewing app be installed on device.
     # Also the first time such app is opened it might request some permission,
     # so it's  better to grant those permissions before using this script
     cmd = ("adb -s %s wait-for-device shell am start -d 'file://%s'"
-           " -a android.intent.action.VIEW" % (screen_id,
-                                               remote_scene_file))
+           " -a android.intent.action.VIEW" % (screen_id, dst_scene_file))
     subprocess.Popen(cmd.split())
 
 if __name__ == '__main__':
diff --git a/apps/CameraITS/tools/run_all_tests.py b/apps/CameraITS/tools/run_all_tests.py
index b6fdde2..682d490 100644
--- a/apps/CameraITS/tools/run_all_tests.py
+++ b/apps/CameraITS/tools/run_all_tests.py
@@ -20,6 +20,7 @@
 import subprocess
 import sys
 import tempfile
+import threading
 import time
 
 import its.caps
@@ -30,53 +31,113 @@
 
 import numpy as np
 
+# For sanity checking the installed APK's target SDK version
+MIN_SUPPORTED_SDK_VERSION = 28  # P
+
 CHART_DELAY = 1  # seconds
 CHART_DISTANCE = 30.0  # cm
 CHART_HEIGHT = 13.5  # cm
+CHART_LEVEL = 96
 CHART_SCALE_START = 0.65
 CHART_SCALE_STOP = 1.35
 CHART_SCALE_STEP = 0.025
 FACING_EXTERNAL = 2
 NUM_TRYS = 2
-SCENE3_FILE = os.path.join(os.environ["CAMERA_ITS_TOP"], "pymodules", "its",
-                           "test_images", "ISO12233.png")
+PROC_TIMEOUT_CODE = -101  # a terminated process returns -process_id
+PROC_TIMEOUT_TIME = 900  # timeout in seconds for a process (15 minutes)
+SCENE3_FILE = os.path.join(os.environ['CAMERA_ITS_TOP'], 'pymodules', 'its',
+                           'test_images', 'ISO12233.png')
 SKIP_RET_CODE = 101  # note this must be same as tests/scene*/test_*
 VGA_HEIGHT = 480
 VGA_WIDTH = 640
 
 # Not yet mandated tests
 NOT_YET_MANDATED = {
-        "scene0": [
-                "test_jitter",
-                "test_burst_capture",
-                "test_test_patterns"
+        'scene0': [
+                'test_test_patterns',
+                'test_tonemap_curve'
         ],
-        "scene1": [
-                "test_ae_af",
-                "test_ae_precapture_trigger",
-                "test_crop_region_raw",
-                "test_ev_compensation_advanced",
-                "test_ev_compensation_basic",
-                "test_yuv_plus_jpeg"
+        'scene1': [
+                'test_ae_precapture_trigger',
+                'test_channel_saturation'
         ],
-        "scene2": [
-                "test_num_faces"
+        'scene2': [
+                'test_auto_per_frame_control'
         ],
-        "scene3": [
-                "test_flip_mirror",
-                "test_lens_movement_reporting",
-                "test_lens_position"
-        ],
-        "scene4": [],
-        "scene5": [],
-        "sensor_fusion": []
+        'scene2b': [],
+        'scene2c': [],
+        'scene3': [],
+        'scene4': [],
+        'scene5': [],
+        'sensor_fusion': []
 }
 
+# Must match mHiddenPhysicalCameraSceneIds in ItsTestActivity.java
+HIDDEN_PHYSICAL_CAMERA_TESTS = {
+        'scene0': [
+                'test_burst_capture',
+                'test_metadata',
+                'test_read_write',
+                'test_sensor_events'
+        ],
+        'scene1': [
+                'test_exposure',
+                'test_dng_noise_model',
+                'test_linearity',
+                'test_raw_exposure',
+                'test_raw_sensitivity'
+        ],
+        'scene2': [
+                'test_faces',
+                'test_num_faces'
+        ],
+        'scene2b': [],
+        'scene2c': [],
+        'scene3': [],
+        'scene4': [
+                'test_aspect_ratio_and_crop'
+        ],
+        'scene5': [],
+        'sensor_fusion': [
+                'test_sensor_fusion'
+        ]
+}
 
-def calc_camera_fov(camera_id):
+def run_subprocess_with_timeout(cmd, fout, ferr, outdir):
+    """Run subprocess with a timeout.
+
+    Args:
+        cmd:    list containing python command
+        fout:   stdout file for the test
+        ferr:   stderr file for the test
+        outdir: dir location for fout/ferr
+
+    Returns:
+        process status or PROC_TIMEOUT_CODE if timer maxes
+    """
+
+    proc = subprocess.Popen(
+            cmd, stdout=fout, stderr=ferr, cwd=outdir)
+    timer = threading.Timer(PROC_TIMEOUT_TIME, proc.kill)
+
+    try:
+        timer.start()
+        proc.communicate()
+        test_code = proc.returncode
+    finally:
+        timer.cancel()
+
+    if test_code < 0:
+        return PROC_TIMEOUT_CODE
+    else:
+        return test_code
+
+
+def calc_camera_fov(camera_id, hidden_physical_id):
     """Determine the camera field of view from internal params."""
-    with ItsSession(camera_id) as cam:
+    with ItsSession(camera_id, hidden_physical_id) as cam:
         props = cam.get_camera_properties()
+        props = cam.override_with_hidden_physical_camera_props(props)
         focal_ls = props['android.lens.info.availableFocalLengths']
         if len(focal_ls) > 1:
             print 'Doing capture to determine logical camera focal length'
@@ -148,15 +209,17 @@
         dist:    [Experimental] chart distance in cm.
     """
 
-    all_scenes = ["scene0", "scene1", "scene2", "scene3", "scene4", "scene5",
+    all_scenes = ["scene0", "scene1", "scene2", "scene2b", "scene2c", "scene3", "scene4", "scene5",
                   "sensor_fusion"]
 
-    auto_scenes = ["scene0", "scene1", "scene2", "scene3", "scene4"]
+    auto_scenes = ["scene0", "scene1", "scene2", "scene2b", "scene2c", "scene3", "scene4"]
 
     scene_req = {
         "scene0": None,
         "scene1": "A grey card covering at least the middle 30% of the scene",
         "scene2": "A picture containing human faces",
+        "scene2b": "A picture containing human faces",
+        "scene2c": "A picture containing human faces",
         "scene3": "The ISO 12233 chart",
         "scene4": "A specific test page of a circle covering at least the "
                   "middle 50% of the scene. See CameraITS.pdf section 2.3.4 "
@@ -172,7 +235,7 @@
         "scene5": ["doAF=False"]
     }
 
-    camera_ids = []
+    camera_id_combos = []
     scenes = []
     chart_host_id = None
     result_device_id = None
@@ -180,10 +243,14 @@
     tmp_dir = None
     skip_scene_validation = False
     chart_distance = CHART_DISTANCE
+    chart_level = CHART_LEVEL
+    one_camera_argv = sys.argv[1:]
 
-    for s in sys.argv[1:]:
+    for s in list(sys.argv[1:]):
         if s[:7] == "camera=" and len(s) > 7:
             camera_ids = s[7:].split(',')
+            camera_id_combos = its.device.parse_camera_ids(camera_ids)
+            one_camera_argv.remove(s)
         elif s[:7] == "scenes=" and len(s) > 7:
             scenes = s[7:].split(',')
         elif s[:6] == 'chart=' and len(s) > 6:
@@ -199,8 +266,11 @@
             skip_scene_validation = True
         elif s[:5] == 'dist=' and len(s) > 5:
             chart_distance = float(re.sub('cm', '', s[5:]))
+        elif s[:11] == 'brightness=' and len(s) > 11:
+            chart_level = s[11:]
 
     chart_dist_arg = 'dist= ' + str(chart_distance)
+    chart_level_arg = 'brightness=' + str(chart_level)
     auto_scene_switch = chart_host_id is not None
     merge_result_switch = result_device_id is not None
 
@@ -232,12 +302,6 @@
             assert False
         scenes = temp_scenes
 
-    # Initialize test results
-    results = {}
-    result_key = ItsSession.RESULT_KEY
-    for s in all_scenes:
-        results[s] = {result_key: ItsSession.RESULT_NOT_EXECUTED}
-
     # Make output directories to hold the generated files.
     topdir = tempfile.mkdtemp(dir=tmp_dir)
     subprocess.call(['chmod', 'g+rx', topdir])
@@ -247,6 +311,50 @@
     device_id_arg = "device=" + device_id
     print "Testing device " + device_id
 
+    # Sanity check CtsVerifier SDK level
+    # Here we only warn, as there is no guarantee that the pm dump output format won't change
+    # Also sometimes it's intentional to run mismatched versions
+    cmd = "adb -s %s shell pm dump com.android.cts.verifier" % (device_id)
+    dump_path = os.path.join(topdir, 'CtsVerifier.txt')
+    with open(dump_path, 'w') as fout:
+        fout.write('ITS minimum supported SDK version is %d\n--\n' % (MIN_SUPPORTED_SDK_VERSION))
+        fout.flush()
+        ret_code = subprocess.call(cmd.split(), stdout=fout)
+
+    if ret_code != 0:
+        print "Warning: cannot get CtsVerifier SDK version. Is CtsVerifier installed?"
+
+    ctsv_version = None
+    ctsv_version_name = None
+    with open(dump_path, 'r') as f:
+        target_sdk_found = False
+        version_name_found = False
+        for line in f:
+            match = re.search('targetSdk=([0-9]+)', line)
+            if match:
+                ctsv_version = int(match.group(1))
+                target_sdk_found = True
+            match = re.search('versionName=([\S]+)$', line)
+            if match:
+                ctsv_version_name = match.group(1)
+                version_name_found = True
+            if target_sdk_found and version_name_found:
+                break
+
+    if ctsv_version is None:
+        print "Warning: cannot get CtsVerifier SDK version. Is CtsVerifier installed?"
+    elif ctsv_version < MIN_SUPPORTED_SDK_VERSION:
+        print "Warning: CtsVerifier version (%d) < ITS version (%d), is this intentional?" % (
+                ctsv_version, MIN_SUPPORTED_SDK_VERSION)
+    else:
+        print "CtsVerifier targetSdk is", ctsv_version
+        if ctsv_version_name:
+            print "CtsVerifier version name is", ctsv_version_name
+
+    # Hard check on ItsService/host script version that should catch incompatible APK/script
+    with ItsSession() as cam:
+        cam.check_its_version_compatible()
+
     # Sanity Check for devices
     device_bfp = its.device.get_device_fingerprint(device_id)
     assert device_bfp is not None
@@ -262,35 +370,50 @@
         assert device_bfp == result_device_bfp, assert_err_msg
 
     # user doesn't specify camera id, run through all cameras
-    if not camera_ids:
+    if not camera_id_combos:
         with its.device.ItsSession() as cam:
             camera_ids = cam.get_camera_ids()
+            camera_id_combos = its.device.parse_camera_ids(camera_ids);
 
-    print "Running ITS on camera: %s, scene %s" % (camera_ids, scenes)
+    print "Running ITS on camera: %s, scene %s" % (camera_id_combos, scenes)
 
     if auto_scene_switch:
         # merge_result only supports run_parallel_tests
-        if merge_result_switch and camera_ids[0] == '1':
-            print 'Skip chart screen'
+        if merge_result_switch and camera_ids[0] == "1":
+            print "Skip chart screen"
             time.sleep(1)
         else:
-            print 'Waking up chart screen: ', chart_host_id
-            screen_id_arg = ('screen=%s' % chart_host_id)
-            cmd = ['python', os.path.join(os.environ['CAMERA_ITS_TOP'], 'tools',
-                                          'wake_up_screen.py'), screen_id_arg]
+            print "Waking up chart screen: ", chart_host_id
+            screen_id_arg = ("screen=%s" % chart_host_id)
+            cmd = ["python", os.path.join(os.environ["CAMERA_ITS_TOP"], "tools",
+                                          "wake_up_screen.py"), screen_id_arg,
+                   chart_level_arg]
             wake_code = subprocess.call(cmd)
             assert wake_code == 0
 
-    for camera_id in camera_ids:
-        camera_fov = calc_camera_fov(camera_id)
+    for id_combo in camera_id_combos:
+        # Initialize test results
+        results = {}
+        result_key = ItsSession.RESULT_KEY
+        for s in all_scenes:
+            results[s] = {result_key: ItsSession.RESULT_NOT_EXECUTED}
+
+        camera_fov = calc_camera_fov(id_combo.id, id_combo.sub_id)
+        id_combo_string = id_combo.id;
+        has_hidden_sub_camera = id_combo.sub_id is not None
+        if has_hidden_sub_camera:
+            id_combo_string += ":" + id_combo.sub_id
+            scenes = [scene for scene in scenes if HIDDEN_PHYSICAL_CAMERA_TESTS[scene]]
         # Loop capturing images until user confirm test scene is correct
-        camera_id_arg = "camera=" + camera_id
-        print "Preparing to run ITS on camera", camera_id
+        camera_id_arg = "camera=" + id_combo.id
+        print "Preparing to run ITS on camera", id_combo_string, "for scenes ", scenes
 
-        os.mkdir(os.path.join(topdir, camera_id))
+        os.mkdir(os.path.join(topdir, id_combo_string))
         for d in scenes:
-            os.mkdir(os.path.join(topdir, camera_id, d))
+            os.mkdir(os.path.join(topdir, id_combo_string, d))
 
+        tot_tests = []
+        tot_pass = 0
         for scene in scenes:
             # unit is millisecond for execution time record in CtsVerifier
             scene_start_time = int(round(time.time() * 1000))
@@ -299,18 +422,19 @@
                      for s in os.listdir(os.path.join("tests", scene))
                      if s[-3:] == ".py" and s[:4] == "test"]
             tests.sort()
+            tot_tests.extend(tests)
 
-            summary = "Cam" + camera_id + " " + scene + "\n"
+            summary = "Cam" + id_combo_string + " " + scene + "\n"
             numpass = 0
             numskip = 0
             num_not_mandated_fail = 0
             numfail = 0
             validate_switch = True
             if scene_req[scene] is not None:
-                out_path = os.path.join(topdir, camera_id, scene+".jpg")
+                out_path = os.path.join(topdir, id_combo_string, scene+".jpg")
                 out_arg = "out=" + out_path
                 if scene == 'sensor_fusion':
-                    skip_code = skip_sensor_fusion(camera_id)
+                    skip_code = skip_sensor_fusion(id_combo.id)
                     if rot_rig_id or skip_code == SKIP_RET_CODE:
                         validate_switch = False
                 if skip_scene_validation:
@@ -318,7 +442,7 @@
                 cmd = None
                 if auto_scene_switch:
                     if (not merge_result_switch or
-                            (merge_result_switch and camera_ids[0] == '0')):
+                            (merge_result_switch and id_combo_string == '0')):
                         scene_arg = 'scene=' + scene
                         fov_arg = 'fov=' + camera_fov
                         cmd = ['python',
@@ -339,25 +463,29 @@
                 if cmd is not None:
                     valid_scene_code = subprocess.call(cmd, cwd=topdir)
                     assert valid_scene_code == 0
-            print "Start running ITS on camera %s, %s" % (camera_id, scene)
+            print 'Start running ITS on camera %s, %s' % (id_combo_string, scene)
             # Extract chart from scene for scene3 once up front
             chart_loc_arg = ''
             chart_height = CHART_HEIGHT
             if scene == 'scene3':
-                if float(camera_fov) < 90 and np.isclose(chart_distance, 22,
-                                                         rtol=0.1):
-                    chart_height *= 0.67
+                chart_height *= its.cv2image.calc_chart_scaling(
+                        chart_distance, camera_fov)
                 chart = its.cv2image.Chart(SCENE3_FILE, chart_height,
                                            chart_distance, CHART_SCALE_START,
                                            CHART_SCALE_STOP, CHART_SCALE_STEP,
-                                           camera_id)
+                                           id_combo.id)
                 chart_loc_arg = 'chart_loc=%.2f,%.2f,%.2f,%.2f,%.3f' % (
                         chart.xnorm, chart.ynorm, chart.wnorm, chart.hnorm,
                         chart.scale)
             # Run each test, capturing stdout and stderr.
             for (testname, testpath) in tests:
+                # Only pick predefined tests for hidden physical camera
+                if has_hidden_sub_camera and \
+                        testname not in HIDDEN_PHYSICAL_CAMERA_TESTS[scene]:
+                    numskip += 1
+                    continue
                 if auto_scene_switch:
-                    if merge_result_switch and camera_ids[0] == '0':
+                    if merge_result_switch and id_combo_string == '0':
                         # Send an input event to keep the screen not dimmed.
                         # Since we are not using camera of chart screen, FOCUS event
                         # should do nothing but keep the screen from dimming.
@@ -369,7 +497,7 @@
                         subprocess.call(cmd.split())
                 t0 = time.time()
                 for num_try in range(NUM_TRYS):
-                    outdir = os.path.join(topdir, camera_id, scene)
+                    outdir = os.path.join(topdir, id_combo_string, scene)
                     outpath = os.path.join(outdir, testname+'_stdout.txt')
                     errpath = os.path.join(outdir, testname+'_stderr.txt')
                     if scene == 'sensor_fusion':
@@ -385,16 +513,16 @@
                             test_code = skip_code
                     if skip_code is not SKIP_RET_CODE:
                         cmd = ['python', os.path.join(os.getcwd(), testpath)]
-                        cmd += sys.argv[1:] + [camera_id_arg] + [chart_loc_arg]
+                        cmd += one_camera_argv + ["camera="+id_combo_string] + [chart_loc_arg]
                         cmd += [chart_dist_arg]
                         with open(outpath, 'w') as fout, open(errpath, 'w') as ferr:
-                            test_code = subprocess.call(
-                                cmd, stderr=ferr, stdout=fout, cwd=outdir)
+                            test_code = run_subprocess_with_timeout(
+                                cmd, fout, ferr, outdir)
                     if test_code == 0 or test_code == SKIP_RET_CODE:
                         break
                     else:
                         socket_fail = evaluate_socket_failure(errpath)
-                        if socket_fail:
+                        if socket_fail or test_code == PROC_TIMEOUT_CODE:
                             if num_try != NUM_TRYS-1:
                                 print ' Retry %s/%s' % (scene, testname)
                             else:
@@ -445,7 +573,11 @@
                 msg = "(*) tests are not yet mandated"
                 print msg
 
-            summary_path = os.path.join(topdir, camera_id, scene, "summary.txt")
+            tot_pass += numpass
+            print "%s compatibility score: %.f/100\n" % (
+                    scene, 100.0 * numpass / len(tests))
+
+            summary_path = os.path.join(topdir, id_combo_string, scene, "summary.txt")
             with open(summary_path, "w") as f:
                 f.write(summary)
 
@@ -456,15 +588,21 @@
             results[scene][ItsSession.START_TIME_KEY] = scene_start_time
             results[scene][ItsSession.END_TIME_KEY] = scene_end_time
 
+        if tot_tests:
+            print "Compatibility Score: %.f/100" % (100.0 * tot_pass / len(tot_tests))
+        else:
+            print "Compatibility Score: 0/100"
+
         msg = "Reporting ITS result to CtsVerifier"
         print msg
         its.device.adb_log(device_id, msg)
         if merge_result_switch:
             # results are modified by report_result
             results_backup = copy.deepcopy(results)
-            its.device.report_result(result_device_id, camera_id, results_backup)
+            its.device.report_result(result_device_id, id_combo_string, results_backup)
 
-        its.device.report_result(device_id, camera_id, results)
+        # Report hidden_physical_id results as well.
+        its.device.report_result(device_id, id_combo_string, results)
 
     if auto_scene_switch:
         if merge_result_switch:
diff --git a/apps/CameraITS/tools/run_sensor_fusion_box.py b/apps/CameraITS/tools/run_sensor_fusion_box.py
index 3c9199a..82f915d 100644
--- a/apps/CameraITS/tools/run_sensor_fusion_box.py
+++ b/apps/CameraITS/tools/run_sensor_fusion_box.py
@@ -91,7 +91,7 @@
     print 'Testing device ' + device_id
 
     # ensure camera_id is valid
-    avail_camera_ids = find_avail_camera_ids(device_id_arg, tmpdir)
+    avail_camera_ids = find_avail_camera_ids()
     if camera_id not in avail_camera_ids:
         print 'Need to specify valid camera_id in ', avail_camera_ids
         sys.exit()
@@ -220,12 +220,9 @@
                 return line
     return None
 
-def find_avail_camera_ids(device_id_arg, tmpdir):
+def find_avail_camera_ids():
     """Find the available camera IDs.
 
-    Args:
-        devices_id_arg(str):    device=###
-        tmpdir(str):            generated tmp dir for run
     Returns:
         list of available cameras
     """
diff --git a/apps/CameraITS/tools/set_charging_limits.py b/apps/CameraITS/tools/set_charging_limits.py
new file mode 100644
index 0000000..280e226
--- /dev/null
+++ b/apps/CameraITS/tools/set_charging_limits.py
@@ -0,0 +1,78 @@
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+import sys
+
+CHARGE_PERCENT_START = 40
+CHARGE_PERCENT_STOP = 60
+
+
+def set_device_charging_limits(device_id):
+    """Set the start/stop percentages for charging.
+
+    This can keep battery from overcharging.
+    Args:
+        device_id:  str; device ID to set limits
+    """
+    print 'Rooting device %s' % device_id
+    cmd = ('adb -s %s root' % device_id)
+    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+    pout, perr = process.communicate()
+    if 'cannot' in pout.lower() or perr:  # 'cannot root' returns no error
+        print ' Warning: unable to root %s and set charging limits.' % device_id
+    else:
+        print ' Setting charging limits on %s' % device_id
+        cmd = ('adb -s %s shell setprop persist.vendor.charge.start.level %d' % (
+                device_id, CHARGE_PERCENT_START))
+        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE)
+        _, perr = process.communicate()
+        if not perr:
+            print ' Min: %d%%' % CHARGE_PERCENT_START
+        else:
+            print ' Warning: unable to set charging start limit.'
+
+        cmd = ('adb -s %s shell setprop persist.vendor.charge.stop.level %d' % (
+                device_id, CHARGE_PERCENT_STOP))
+        process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,
+                                   stderr=subprocess.PIPE)
+        _, perr = process.communicate()
+        if not perr:
+            print ' Max: %d%%' % CHARGE_PERCENT_STOP
+        else:
+            print ' Warning: unable to set charging stop limit.'
+
+        print 'Unrooting device %s' % device_id
+        cmd = ('adb -s %s unroot' % device_id)
+        subprocess.call(cmd.split(), stdout=subprocess.PIPE)
+
+
+def main():
+    """Set charging limits for battery."""
+
+    device_id = None
+    for s in sys.argv[1:]:
+        if s[:7] == 'device=' and len(s) > 7:
+            device_id = s[7:]
+
+    if device_id:
+        set_device_charging_limits(device_id)
+    else:
+        print 'Usage: python %s device=$DEVICE_ID' % os.path.basename(__file__)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tools/wake_up_screen.py b/apps/CameraITS/tools/wake_up_screen.py
index b317b05..b9305cb 100644
--- a/apps/CameraITS/tools/wake_up_screen.py
+++ b/apps/CameraITS/tools/wake_up_screen.py
@@ -25,9 +25,15 @@
 def main():
     """Power up and unlock screen as needed."""
     screen_id = None
+    display_level = DISPLAY_LEVEL
     for s in sys.argv[1:]:
         if s[:7] == 'screen=' and len(s) > 7:
             screen_id = s[7:]
+        if s[:11] == 'brightness=' and len(s) > 11:
+            display_level = int(s[11:])
+            if display_level < 0 or display_level > 255:
+                print 'Invalid brightness value. Range is [0-255]'
+                display_level = DISPLAY_LEVEL
 
     if not screen_id:
         print 'Error: need to specify screen serial'
@@ -53,10 +59,14 @@
     subprocess.Popen(unlock.split())
     time.sleep(DISPLAY_CMD_WAIT)
 
-    # set brightness
-    print 'Tablet display brightness set to %d' % DISPLAY_LEVEL
+    # set to manual mode and set brightness
+    manual = ('adb -s %s shell settings put system screen_brightness_mode 0'
+              % screen_id)
+    subprocess.Popen(manual.split())
+    time.sleep(DISPLAY_CMD_WAIT)
+    print 'Tablet display brightness set to %d' % display_level
     bright = ('adb -s %s shell settings put system screen_brightness %d'
-              % (screen_id, DISPLAY_LEVEL))
+              % (screen_id, display_level))
     subprocess.Popen(bright.split())
     time.sleep(DISPLAY_CMD_WAIT)
 
diff --git a/apps/CtsVerifier/Android.mk b/apps/CtsVerifier/Android.mk
index 3208ee5..af6dc5b 100644
--- a/apps/CtsVerifier/Android.mk
+++ b/apps/CtsVerifier/Android.mk
@@ -23,7 +23,8 @@
 
 LOCAL_MULTILIB := both
 
-LOCAL_SRC_FILES := $(call all-java-files-under, src) $(call all-Iaidl-files-under, src)
+LOCAL_SRC_FILES := $(call all-java-files-under, src) $(call all-Iaidl-files-under, src) \
+                    ../ForceStopHelperApp/src/com/android/cts/forcestophelper/Constants.java
 
 LOCAL_AIDL_INCLUDES := \
     frameworks/native/aidl/gui
@@ -34,6 +35,7 @@
                                compatibility-common-util-devicesidelib \
                                cts-sensors-tests \
                                cts-location-tests \
+                               cts-camera-performance-tests \
                                ctstestrunner-axt \
                                apache-commons-math \
                                androidplot \
@@ -44,7 +46,8 @@
                                mockwebserver \
                                compatibility-device-util-axt \
                                platform-test-annotations \
-                               cts-security-test-support-library
+                               cts-security-test-support-library \
+                               cts-midi-lib
 
 LOCAL_STATIC_ANDROID_LIBRARIES := \
     androidx.legacy_legacy-support-v4
@@ -53,14 +56,15 @@
 LOCAL_JAVA_LIBRARIES += android.test.runner.stubs
 LOCAL_JAVA_LIBRARIES += android.test.base.stubs
 LOCAL_JAVA_LIBRARIES += android.test.mock.stubs
-LOCAL_JAVA_LIBRARIES += bouncycastle
 LOCAL_JAVA_LIBRARIES += voip-common
 
 LOCAL_PACKAGE_NAME := CtsVerifier
 LOCAL_PRIVATE_PLATFORM_APIS := true
 
-LOCAL_JNI_SHARED_LIBRARIES := libctsverifier_jni \
-		libaudioloopback_jni \
+LOCAL_JNI_SHARED_LIBRARIES := \
+	libctsverifier_jni \
+	libctsnativemidi_jni \
+	libaudioloopback_jni \
 
 LOCAL_PROGUARD_FLAG_FILES := proguard.flags
 
@@ -82,6 +86,7 @@
 LOCAL_MODULE := cts-verifier-framework
 LOCAL_AAPT_FLAGS := --auto-add-overlay --extra-packages android.support.v4
 LOCAL_SDK_VERSION := current
+LOCAL_MIN_SDK_VERSION := 19
 LOCAL_RESOURCE_DIR := $(LOCAL_PATH)/res
 LOCAL_SRC_FILES := \
     $(call java-files-in, src/com/android/cts/verifier) \
@@ -89,7 +94,7 @@
 
 LOCAL_STATIC_JAVA_LIBRARIES := androidx.legacy_legacy-support-v4 \
                                compatibility-common-util-devicesidelib \
-                               compatibility-device-util-axt \
+                               compatibility-device-util-axt
 
 include $(BUILD_STATIC_JAVA_LIBRARY)
 
@@ -105,8 +110,12 @@
     CtsEmptyDeviceAdmin \
     CtsEmptyDeviceOwner \
     CtsPermissionApp \
+    CtsForceStopHelper \
     NotificationBot
 
+# Apps to be installed as Instant App using adb install --instant
+pre-installed-instant-app := CtsVerifierInstantApp
+
 other-required-apps := \
     CtsVerifierUSBCompanion \
     CtsVpnFirewallAppApi23 \
@@ -115,6 +124,7 @@
 
 apps-to-include := \
     $(pre-installed-apps) \
+    $(pre-installed-instant-app) \
     $(other-required-apps)
 
 define apk-location-for
@@ -123,10 +133,11 @@
 
 # Builds and launches CTS Verifier on a device.
 .PHONY: cts-verifier
-cts-verifier: CtsVerifier adb $(pre-installed-apps)
+cts-verifier: CtsVerifier adb $(pre-installed-apps) $(pre-installed-instant-app)
 	adb install -r $(PRODUCT_OUT)/data/app/CtsVerifier/CtsVerifier.apk \
 		$(foreach app,$(pre-installed-apps), \
 		    && adb install -r -t $(call apk-location-for,$(app))) \
+		&& adb install -r --instant $(call apk-location-for,$(pre-installed-instant-app)) \
 		&& adb shell "am start -n com.android.cts.verifier/.CtsVerifierActivity"
 
 #
diff --git a/apps/CtsVerifier/AndroidManifest.xml b/apps/CtsVerifier/AndroidManifest.xml
index e95fd1e..6806c2e 100644
--- a/apps/CtsVerifier/AndroidManifest.xml
+++ b/apps/CtsVerifier/AndroidManifest.xml
@@ -18,14 +18,16 @@
 <manifest xmlns:android="http://schemas.android.com/apk/res/android"
       package="com.android.cts.verifier"
       android:versionCode="5"
-      android:versionName="9.0_r1">
+      android:versionName="10_r1">
 
-    <uses-sdk android:minSdkVersion="19" android:targetSdkVersion="28"/>
+    <uses-sdk android:minSdkVersion="19" android:targetSdkVersion="29"/>
 
+    <uses-permission android:name="android.permission.ACCESS_BACKGROUND_LOCATION" />
     <uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
     <uses-permission android:name="android.permission.ACCESS_LOCATION_EXTRA_COMMANDS"/>
     <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
     <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
+    <uses-permission android:name="android.permission.ACTIVITY_RECOGNITION" />
     <uses-permission android:name="android.permission.BLUETOOTH" />
     <uses-permission android:name="android.permission.BLUETOOTH_ADMIN" />
     <uses-permission android:name="android.permission.BODY_SENSORS"/>
@@ -38,6 +40,8 @@
     <uses-permission android:name="android.permission.NFC" />
     <uses-permission android:name="android.permission.VIBRATE" />
     <uses-permission android:name="android.permission.REQUEST_INSTALL_PACKAGES" />
+    <uses-permission android:name="android.permission.REQUEST_PASSWORD_COMPLEXITY" />
+    <uses-permission android:name="android.permission.SYSTEM_ALERT_WINDOW"/>
     <uses-feature android:name="android.hardware.camera" android:required="false"/>
     <uses-feature android:name="android.hardware.camera.flash" android:required="false"/>
     <uses-feature android:name="android.hardware.sensor.accelerometer" android:required="false" />
@@ -59,27 +63,17 @@
     <uses-permission android:name="android.permission.READ_PHONE_STATE" />
     <uses-permission android:name="android.permission.READ_CONTACTS"/>
     <uses-permission android:name="android.permission.WRITE_CONTACTS"/>
-    <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE"/>
     <uses-permission android:name="com.android.providers.tv.permission.WRITE_EPG_DATA" />
     <uses-permission android:name="android.permission.USE_FINGERPRINT"/>
     <uses-permission android:name="android.permission.USE_BIOMETRIC"/>
     <uses-permission android:name="android.permission.ACCESS_NOTIFICATION_POLICY" />
-    <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
-    <uses-permission android:name="android.permission.CHANGE_WIFI_STATE" />
     <uses-permission android:name="android.permission.ACCESS_COARSE_LOCATION" />
 
-    <uses-permission android:name="android.permission.READ_PHONE_STATE"/>
     <uses-permission android:name="android.permission.READ_SMS"/>
     <uses-permission android:name="android.permission.READ_PHONE_NUMBERS"/>
     <uses-permission android:name="android.permission.RECEIVE_SMS" />
     <uses-permission android:name="android.permission.SEND_SMS" />
 
-    <!-- Needed by UsbTest tapjacking -->
-    <uses-permission android:name="android.permission.SYSTEM_ALERT_WINDOW" />
-
-    <!-- Needed by the Audio Quality Verifier to store the sound samples that will be mailed. -->
-    <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
-
     <!-- Needed for Telecom self-managed ConnectionService tests. -->
     <uses-permission android:name="android.permission.MANAGE_OWN_CALLS" />
 
@@ -88,6 +82,7 @@
             android:icon="@drawable/icon"
             android:debuggable="true"
             android:largeHeap="true"
+            android:requestLegacyExternalStorage="true"
             android:theme="@android:style/Theme.DeviceDefault">
 
         <provider android:name="android.location.cts.MmsPduProvider"
@@ -99,7 +94,6 @@
 
         <meta-data android:name="android.telephony.HIDE_VOICEMAIL_SETTINGS_MENU"
             android:value="true"/>
-        <uses-library android:name="android.test.runner"/>
 
         <activity android:name=".TestListActivity" android:label="@string/app_name" />
 
@@ -108,15 +102,10 @@
                 android:label="@string/report_viewer" />
 
         <provider android:name=".TestResultsProvider"
-                android:authorities="com.android.cts.verifier.testresultsprovider" />
-
-        <activity android:name=".admin.tapjacking.UsbTest" android:label="@string/usb_tapjacking_test">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_device_admin" />
-        </activity>
+                android:authorities="com.android.cts.verifier.testresultsprovider"
+                android:grantUriPermissions="true"
+                android:exported="true"
+                android:enabled="true" />
 
         <activity android:name=".admin.PolicySerializationTestActivity"
                 android:label="@string/da_policy_serialization_test"
@@ -170,6 +159,16 @@
             android:theme="@style/OverlayTheme"
             android:label="Overlaying Activity"/>
 
+        <activity android:name=".forcestop.RecentTaskRemovalTestActivity"
+                  android:label="@string/remove_from_recents_test"
+                  android:configChanges="keyboardHidden|orientation|screenSize">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_required_configs" android:value="config_has_recents"/>
+        </activity>
+
         <activity android:name=".companion.CompanionDeviceTestActivity"
                   android:label="@string/companion_test"
                   android:configChanges="keyboardHidden|orientation|screenSize">
@@ -190,34 +189,6 @@
         <activity android:name=".IntentDrivenTestActivity"
                 android:stateNotNeeded="true"/>
 
-        <activity android:name=".admin.DeviceAdminKeyguardDisabledFeaturesActivity"
-                android:label="@string/da_kg_disabled_features_test"
-                android:configChanges="keyboardHidden|orientation|screenSize">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_device_admin" />
-            <meta-data android:name="test_excluded_features"
-                       android:value="android.software.lockscreen_disabled" />
-            <meta-data android:name="test_required_features"
-                    android:value="android.software.device_admin" />
-        </activity>
-
-        <activity android:name=".admin.RedactedNotificationKeyguardDisabledFeaturesActivity"
-                android:label="@string/rn_kg_disabled_features_test"
-                android:configChanges="keyboardHidden|orientation|screenSize">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_device_admin" />
-            <meta-data android:name="test_required_features"
-                    android:value="android.software.device_admin" />
-            <meta-data android:name="test_excluded_features"
-                    android:value="android.hardware.type.watch" />
-        </activity>
-
         <activity android:name=".admin.ScreenLockTestActivity"
                 android:label="@string/da_screen_lock_test"
                 android:configChanges="keyboardHidden|orientation|screenSize">
@@ -500,6 +471,11 @@
         <!-- CTS Verifier BLE Server Encrypted Test Service -->
         <service android:name=".bluetooth.BleEncryptedServerService" />
 
+        <!-- CTS Verifier BLE CoC Client Test Service -->
+        <service android:name=".bluetooth.BleCocClientService" />
+        <!-- CTS Verifier BLE CoC Server Test Service -->
+        <service android:name=".bluetooth.BleCocServerService" />
+
         <!--
              =================================================================================
              ==                     BLE Insecure Client Test Info                           ==
@@ -961,6 +937,234 @@
 
         <!--
              =================================================================================
+             ==                     BLE CoC Insecure Client Test Info                       ==
+             =================================================================================
+        -->
+        <!--
+            CTS Verifier BLE CoC Insecure Client Test Top Screen
+                test category : bt_le_coc
+                test parent : BluetoothTestActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocInsecureClientTestListActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_insecure_client_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BluetoothTestActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+             CTS Verifier BLE CoC Insecure Client Test List Screen
+                 test category : bt_le_coc
+                 test parent : BleInsecureClientTestListActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocInsecureClientStartActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_client_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BleCocInsecureClientTestListActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+              =================================================================================
+              ==                     BLE CoC Insecure Server Test Info                       ==
+              =================================================================================
+        -->
+        <!--
+             CTS Verifier BLE Coc Insecure Server Test Top Screen
+                 test category : bt_le_coc
+                 test parent : BluetoothTestActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocInsecureServerTestListActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_insecure_server_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BluetoothTestActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+              CTS Verifier BLE Coc Insecure Server Test List Screen
+                  test category : bt_le_coc
+                  test parent : BleCocInsecureServerTestListActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocInsecureServerStartActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_server_start_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BleCocInsecureServerTestListActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+             =================================================================================
+             ==                     BLE CoC Secure Client Test Info                         ==
+             =================================================================================
+        -->
+        <!--
+            CTS Verifier BLE Coc Secure Client Test Top Screen
+                test category : bt_le_coc
+                test parent : BluetoothTestActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocSecureClientTestListActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_secure_client_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BluetoothTestActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+             CTS Verifier BLE Coc Secure Client Test List Screen
+                 test category : bt_le_coc
+                 test parent : BleSecureClientTestListActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocSecureClientStartActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_client_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BleCocSecureClientTestListActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+              =================================================================================
+              ==                     BLE CoC Secure Server Test Info                         ==
+              =================================================================================
+        -->
+        <!--
+             CTS Verifier BLE Coc Secure Server Test Top Screen
+                 test category : bt_le_coc
+                 test parent : BluetoothTestActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocSecureServerTestListActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_secure_server_test_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BluetoothTestActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+              CTS Verifier BLE Coc Secure Server Test List Screen
+                  test category : bt_le_coc
+                  test parent : BleCocSecureServerTestListActivity
+        -->
+        <activity
+            android:name=".bluetooth.BleCocSecureServerStartActivity"
+            android:configChanges="keyboardHidden|orientation|screenSize"
+            android:label="@string/ble_coc_server_start_name" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/bt_le_coc" />
+            <meta-data
+                android:name="test_parent"
+                android:value="com.android.cts.verifier.bluetooth.BleCocSecureServerTestListActivity" />
+            <meta-data
+                android:name="test_required_features"
+                android:value="android.hardware.bluetooth_le" />
+        </activity>
+
+        <!--
+             =================================================================================
              ==                      BLE Scanner Test Info                            ==
              =================================================================================
         -->
@@ -1068,6 +1272,19 @@
                 android:value="com.android.cts.verifier.bluetooth.BleAdvertiserTestActivity" />
         </activity>
 
+        <activity android:name=".biometrics.BiometricTest"
+            android:label="@string/biometric_test"
+            android:configChanges="keyboardHidden|orientation|screenSize" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_security" />
+            <meta-data android:name="test_required_features" android:value="android.software.secure_lock_screen" />
+            <meta-data android:name="test_excluded_features"
+                android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.watch" />
+        </activity>
+
         <activity android:name=".security.FingerprintBoundKeysTest"
                 android:label="@string/sec_fingerprint_bound_key_test"
                 android:configChanges="keyboardHidden|orientation|screenSize" >
@@ -1078,20 +1295,21 @@
             <meta-data android:name="test_category" android:value="@string/test_category_security" />
             <meta-data android:name="test_excluded_features"
                        android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.watch" />
-            <meta-data android:name="test_required_features" android:value="android.hardware.fingerprint" />
+            <meta-data android:name="test_required_features"
+                       android:value="android.hardware.fingerprint:android.software.secure_lock_screen" />
         </activity>
 
         <activity android:name=".security.BiometricPromptBoundKeysTest"
-            android:label="@string/sec_fingerprint_dialog_bound_key_test"
+            android:label="@string/sec_biometric_prompt_bound_key_test"
             android:configChanges="keyboardHidden|orientation|screenSize" >
             <intent-filter>
                 <action android:name="android.intent.action.MAIN" />
                 <category android:name="android.cts.intent.category.MANUAL_TEST" />
             </intent-filter>
             <meta-data android:name="test_category" android:value="@string/test_category_security" />
+            <meta-data android:name="test_required_features" android:value="android.software.secure_lock_screen" />
             <meta-data android:name="test_excluded_features"
                 android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.watch" />
-            <meta-data android:name="test_required_features" android:value="android.hardware.fingerprint" />
         </activity>
 
         <activity android:name=".security.ScreenLockBoundKeysTest"
@@ -1105,7 +1323,7 @@
             <meta-data android:name="test_excluded_features"
                        android:value="android.software.lockscreen_disabled" />
             <meta-data android:name="test_required_features"
-                    android:value="android.software.device_admin" />
+                    android:value="android.software.device_admin:android.software.secure_lock_screen" />
         </activity>
 
         <activity android:name=".security.LockConfirmBypassTest"
@@ -1119,7 +1337,21 @@
             <meta-data android:name="test_excluded_features"
                        android:value="android.software.lockscreen_disabled" />
             <meta-data android:name="test_required_features"
-                    android:value="android.software.device_admin" />
+                       android:value="android.software.device_admin:android.software.secure_lock_screen" />
+        </activity>
+
+        <activity android:name=".security.SetNewPasswordComplexityTest"
+                  android:label="@string/set_complexity_test_title"
+                  android:configChanges="keyboardHidden|orientation|screenSize" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_security" />
+            <meta-data android:name="test_required_features"
+                       android:value="android.software.secure_lock_screen" />
+            <meta-data android:name="test_excluded_features"
+                       android:value="android.software.lockscreen_disabled" />
         </activity>
 
         <activity android:name=".streamquality.StreamingVideoActivity"
@@ -1149,17 +1381,6 @@
         </activity>
         -->
 
-        <activity android:name=".location.GpsTestActivity"
-                android:label="@string/location_gps_test"
-                android:configChanges="keyboardHidden|orientation|screenSize">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_hardware" />
-            <meta-data android:name="test_required_features" android:value="android.hardware.location.gps" />
-        </activity>
-
         <activity android:name=".location.LocationListenerActivity"
                 android:label="@string/location_listener_activity"
                 android:configChanges="keyboardHidden|orientation|screenSize">
@@ -1731,6 +1952,16 @@
             <meta-data android:name="test_required_features" android:value="android.hardware.camera.flash" />
         </activity>
 
+        <activity android:name=".camera.performance.CameraPerformanceActivity"
+                  android:label="@string/camera_performance_test"
+                  android:configChanges="keyboardHidden|orientation|screenSize">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_camera" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.camera.any" />
+        </activity>
         <activity android:name=".usb.accessory.UsbAccessoryTestActivity"
                 android:label="@string/usb_accessory_test"
                 android:configChanges="keyboardHidden|orientation|screenSize">
@@ -1813,6 +2044,16 @@
                 <category android:name="android.intent.category.DEFAULT"></category>
             </intent-filter>
         </activity>
+        <activity android:name=".wifi.TestListActivity"
+                  android:label="@string/wifi_test"
+                  android:configChanges="keyboardHidden|orientation|screenSize">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_networking" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.wifi" />
+        </activity>
         <activity android:name=".wifiaware.TestListActivity"
                   android:label="@string/aware_test"
                   android:configChanges="keyboardHidden|orientation|screenSize">
@@ -1863,6 +2104,24 @@
                     android:value="android.hardware.type.watch:android.software.leanback" />
         </activity>
 
+        <activity android:name=".notifications.BubblesVerifierActivity"
+                  android:label="@string/bubbles_notification_title">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_notifications" />
+            <meta-data android:name="test_excluded_features"
+                       android:value="android.hardware.type.watch:android.software.leanback" />
+        </activity>
+
+        <activity android:name=".notifications.BubbleActivity"
+                  android:label="@string/bubble_activity_title"
+                  android:allowEmbedded="true"
+                  android:documentLaunchMode="always"
+                  android:resizeableActivity="true">
+        </activity>
+
         <service android:name=".notifications.MockListener"
           android:exported="true"
           android:label="@string/nls_service_name"
@@ -1893,6 +2152,27 @@
                 android:value="android.hardware.type.watch:android.software.leanback:android.hardware.type.automotive" />
         </activity>
 
+        <activity android:name=".qstiles.TileServiceVerifierActivity"
+                  android:label="@string/tiles_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_tiles" />
+            <meta-data android:name="test_excluded_features"
+                       android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.watch" />
+        </activity>
+
+        <service android:name=".qstiles.MockTileService"
+                 android:icon="@android:drawable/ic_dialog_alert"
+                 android:label="@string/tile_service_name"
+                 android:enabled="false"
+                 android:permission="android.permission.BIND_QUICK_SETTINGS_TILE">
+            <intent-filter>
+                <action android:name="android.service.quicksettings.action.QS_TILE" />
+            </intent-filter>
+        </service>
+
         <activity android:name=".vr.VrListenerVerifierActivity"
             android:configChanges="uiMode"
             android:label="@string/vr_tests">
@@ -1983,6 +2263,30 @@
                     android:value="android.hardware.type.watch:android.hardware.type.television:android.software.leanback" />
         </activity>
 
+        <activity android:name=".wifi.NetworkRequestSpecificNetworkSpecifierTestActivity"
+                  android:label="@string/wifi_test_network_request_specific"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".wifi.NetworkRequestPatternNetworkSpecifierTestActivity"
+                  android:label="@string/wifi_test_network_request_pattern"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".wifi.NetworkRequestUnavailableNetworkSpecifierTestActivity"
+                  android:label="@string/wifi_test_network_request_unavailable"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".wifi.NetworkSuggestionSsidTestActivity"
+                  android:label="@string/wifi_test_network_suggestion_ssid"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".wifi.NetworkSuggestionSsidBssidTestActivity"
+                  android:label="@string/wifi_test_network_suggestion_ssid_bssid"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".wifi.NetworkSuggestionSsidPostConnectTestActivity"
+                  android:label="@string/wifi_test_network_suggestion_ssid_post_connect"
+                  android:configChanges="keyboardHidden|orientation|screenSize" />
+
         <activity android:name=".p2p.GoNegRequesterTestListActivity"
                 android:label="@string/p2p_go_neg_requester"
                 android:configChanges="keyboardHidden|orientation|screenSize" />
@@ -2007,6 +2311,42 @@
                 android:label="@string/p2p_accept_client"
                 android:configChanges="keyboardHidden|orientation|screenSize" />
 
+        <activity android:name=".p2p.P2pClientWithConfigTestListActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.P2pClientWithConfig2gBandTestListActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.P2pClientWithConfigFixedFrequencyTestListActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.P2pClientWithConfigTestActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.P2pClientWithConfig2gBandTestActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.P2pClientWithConfigFixedFrequencyTestActivity"
+                android:label="@string/p2p_join_go"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.GoWithConfigTestActivity"
+                android:label="@string/p2p_accept_client"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.GoWithConfig2gBandTestActivity"
+                android:label="@string/p2p_accept_client"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
+        <activity android:name=".p2p.GoWithConfigFixedFrequencyTestActivity"
+                android:label="@string/p2p_accept_client"
+                android:configChanges="keyboardHidden|orientation|screenSize" />
+
         <activity android:name=".p2p.ServiceRequesterTestListActivity"
                 android:label="@string/p2p_service_discovery_requester"
                 android:configChanges="keyboardHidden|orientation|screenSize" />
@@ -2134,6 +2474,24 @@
                        android:value="android.hardware.type.television:android.software.leanback" />
         </activity>
 -->
+
+       <activity
+            android:name="com.android.cts.verifier.sensors.StepSensorPermissionTestActivity"
+            android:label="@string/snsr_step_permission_test"
+            android:screenOrientation="nosensor" >
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+
+            <meta-data
+                android:name="test_category"
+                android:value="@string/test_category_sensors" />
+            <meta-data android:name="test_required_features"
+                       android:value="android.hardware.sensor.stepcounter:android.hardware.sensor.stepdetector" />
+        </activity>
+
         <activity
                 android:name="com.android.cts.verifier.sensors.DeviceSuspendTestActivity"
                 android:label="@string/snsr_device_suspend_test"
@@ -2340,6 +2698,10 @@
             <meta-data android:name="test_required_features" android:value="android.software.device_admin" />
         </activity>
 
+        <activity android:name=".managedprovisioning.NonMarketAppsActivity"
+                  android:label="@string/provisioning_byod_non_market_apps">
+        </activity>
+
         <activity android:name=".managedprovisioning.KeyguardDisabledFeaturesActivity"
                 android:label="@string/provisioning_byod_keyguard_disabled_features">
         </activity>
@@ -2541,6 +2903,13 @@
             <meta-data android:name="test_required_features" android:value="android.software.managed_users:android.software.device_admin" />
         </activity>
 
+        <receiver
+            android:name=".managedprovisioning.ByodFlowTestActivity$ProvisioningCompleteReceiver">
+            <intent-filter>
+                <action android:name="android.app.action.MANAGED_PROFILE_PROVISIONED" />
+            </intent-filter>
+        </receiver>
+
         <activity android:name=".managedprovisioning.CompTestActivity"
                 android:launchMode="singleTask"
                 android:label="@string/comp_test">
@@ -2563,6 +2932,8 @@
                 <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_QUERY" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_REMOVE" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_INSTALL_APK" />
+                <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_INSTALL_APK_WORK_PROFILE_GLOBAL_RESTRICTION" />
+                <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_INSTALL_APK_PRIMARY_PROFILE_GLOBAL_RESTRICTION" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.action.BYOD_CHECK_DISK_ENCRYPTION" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.action.CHECK_INTENT_FILTERS" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_CAPTURE_AND_CHECK_IMAGE" />
@@ -2581,6 +2952,7 @@
                 <action android:name="com.android.cts.verifier.managedprovisioning.LOCKSCREEN_NOTIFICATION" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.CLEAR_NOTIFICATION" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.TEST_SELECT_WORK_CHALLENGE" />
+                <action android:name="com.android.cts.verifier.managedprovisioning.TEST_PATTERN_WORK_CHALLENGE" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.LAUNCH_CONFIRM_WORK_CREDENTIALS" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.TEST_ORGANIZATION_INFO" />
                 <action android:name="com.android.cts.verifier.managedprovisioning.TEST_PARENT_PROFILE_PASSWORD" />
@@ -2588,6 +2960,13 @@
             </intent-filter>
         </activity>
 
+        <activity android:name=".managedprovisioning.ByodPrimaryHelperActivity">
+            <intent-filter>
+                <action android:name="com.android.cts.verifier.managedprovisioning.BYOD_INSTALL_APK_IN_PRIMARY" />
+                <category android:name="android.intent.category.DEFAULT" />
+            </intent-filter>
+        </activity>
+
         <activity android:name=".managedprovisioning.NfcTestActivity">
             <meta-data android:name="test_required_features" android:value="android.hardware.nfc" />
         </activity>
@@ -2736,41 +3115,6 @@
                  android:permission="android.permission.BIND_DEVICE_ADMIN">
         </service>
 
-<!-- Comment out until b/28406044 is addressed
-        <activity android:name=".jobscheduler.IdleConstraintTestActivity" android:label="@string/js_idle_test">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_jobscheduler" />
-            <meta-data android:name="test_excluded_features"
-                    android:value="android.hardware.type.television:android.software.leanback" />
-        </activity>
--->
-
-        <activity android:name=".jobscheduler.ChargingConstraintTestActivity" android:label="@string/js_charging_test">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_jobscheduler" />
-            <meta-data android:name="test_excluded_features"
-                    android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.automotive" />
-        </activity>
-
-        <activity android:name=".jobscheduler.ConnectivityConstraintTestActivity" android:label="@string/js_connectivity_test">
-            <intent-filter>
-                <action android:name="android.intent.action.MAIN" />
-                <category android:name="android.cts.intent.category.MANUAL_TEST" />
-            </intent-filter>
-            <meta-data android:name="test_category" android:value="@string/test_category_jobscheduler" />
-            <meta-data android:name="test_excluded_features"
-                    android:value="android.hardware.type.television:android.software.leanback" />
-        </activity>
-
-        <service android:name=".jobscheduler.MockJobService"
-            android:permission="android.permission.BIND_JOB_SERVICE"/>
-
         <!-- Used by the SensorTestScreenManipulator to reset the screen timeout after turn off. -->
         <activity android:name=".os.TimeoutResetActivity"/>
 
@@ -2983,6 +3327,19 @@
                 android:value="android.hardware.type.television:android.software.leanback:android.hardware.type.watch:android.hardware.type.automotive" />
         </activity>
 
+        <activity android:name=".audio.ProAudioActivity"
+                  android:label="@string/pro_audio_latency_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_audio" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.usb.host" />
+        </activity>
+
+        <!-- ProAudio test invokes the "Loopback" App -->
+        <activity android:name="org.drrickorang.loopback"/>
+
         <activity android:name=".audio.AudioLoopbackActivity"
                   android:label="@string/audio_loopback_test">
             <intent-filter>
@@ -2995,6 +3352,37 @@
                        android:value="android.hardware.type.watch:android.hardware.type.television" />
         </activity>
 
+        <activity android:name=".audio.MidiActivity"
+                  android:label="@string/midi_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_audio" />
+            <meta-data android:name="test_required_features"
+                android:value="android.hardware.usb.host:android.software.midi" />
+        </activity>
+
+        <activity android:name=".audio.NDKMidiActivity"
+                  android:label="@string/ndk_midi_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_audio" />
+            <meta-data android:name="test_required_features"
+                android:value="android.hardware.usb.host:android.software.midi" />
+        </activity>
+
+        <service android:name="com.android.midi.MidiEchoTestService"
+            android:permission="android.permission.BIND_MIDI_DEVICE_SERVICE">
+            <intent-filter>
+                <action android:name="android.media.midi.MidiDeviceService" />
+            </intent-filter>
+            <meta-data android:name="android.media.midi.MidiDeviceService"
+                android:resource="@xml/echo_device_info" />
+        </service>
+
         <activity android:name=".audio.AudioFrequencyLineActivity"
                   android:label="@string/audio_frequency_line_test">
             <intent-filter>
@@ -3035,6 +3423,26 @@
             <meta-data android:name="test_required_features" android:value="android.hardware.microphone:android.hardware.usb.host" />
         </activity>
 
+        <activity android:name=".audio.AudioFrequencyVoiceRecognitionActivity"
+                  android:label="@string/audio_frequency_voice_recognition_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_audio" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.microphone:android.hardware.usb.host" />
+        </activity>
+
+        <activity android:name=".audio.AudioAEC"
+                  android:label="@string/audio_aec_test">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_audio" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.microphone:android.hardware.audio.output" />
+        </activity>
+
         <service android:name=".tv.MockTvInputService"
             android:permission="android.permission.BIND_TV_INPUT">
             <intent-filter>
@@ -3123,10 +3531,12 @@
             </intent-filter>
             <intent-filter>
                 <action android:name="android.intent.action.DIAL" />
+                <category android:name="android.intent.category.DEFAULT" />
                 <data android:scheme="tel" />
             </intent-filter>
             <intent-filter>
                 <action android:name="android.intent.action.DIAL" />
+                <category android:name="android.intent.category.DEFAULT" />
             </intent-filter>
             <meta-data android:name="test_category" android:value="@string/test_category_telephony"/>
             <meta-data
@@ -3369,6 +3779,9 @@
                 android:value="config_voice_capable"/>
         </activity>
 
+        <activity android:name=".managedprovisioning.LockscreenMessageTestActivity"
+            android:label="@string/device_owner_customize_lockscreen_message" />
+
         <service android:name="com.android.cts.verifier.telecom.CtsConnectionService"
             android:permission="android.permission.BIND_TELECOM_CONNECTION_SERVICE" >
             <intent-filter>
@@ -3381,6 +3794,31 @@
                 <action android:name="android.telecom.ConnectionService" />
             </intent-filter>
         </service>
+
+        <activity android:name=".instantapps.NotificationTestActivity"
+                 android:label="@string/ia_notification">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_instant_apps" />
+        </activity>
+        <activity android:name=".instantapps.RecentAppsTestActivity"
+                 android:label="@string/ia_recents">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_instant_apps" />
+        </activity>
+        <activity android:name=".instantapps.AppInfoTestActivity"
+                 android:label="@string/ia_app_info">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_instant_apps" />
+        </activity>
     </application>
 
 </manifest>
diff --git a/apps/CtsVerifier/jni/midi/Android.mk b/apps/CtsVerifier/jni/midi/Android.mk
new file mode 100644
index 0000000..c76f0c7
--- /dev/null
+++ b/apps/CtsVerifier/jni/midi/Android.mk
@@ -0,0 +1,51 @@
+#
+# Copyright (C) 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := libctsnativemidi_jni
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_SRC_FILES := \
+	com_android_cts_verifier_audio_midilib_NativeMidiManager.cpp \
+	MidiTestManager.cpp
+
+LOCAL_C_INCLUDES := $(JNI_H_INCLUDE)
+
+LOCAL_C_INCLUDES += \
+    frameworks/base/media/native/midi/include \
+    frameworks/av/media/ndk/include \
+    system/core/include/cutils
+
+#LOCAL_CXX_STL := libc++_static
+#LOCAL_NDK_STL_VARIANT := libc++_static
+
+#APP_STL := stlport_static
+#APP_STL := gnustl_static
+
+LOCAL_SDK_VERSION := current
+LOCAL_NDK_STL_VARIANT := system
+
+LOCAL_SHARED_LIBRARIES := liblog libamidi \
+
+LOCAL_CFLAGS := \
+        -Wall -Werror \
+        -Wno-unused-parameter \
+        -Wno-unused-variable \
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/apps/CtsVerifier/jni/midi/MidiTestManager.cpp b/apps/CtsVerifier/jni/midi/MidiTestManager.cpp
new file mode 100644
index 0000000..beececf
--- /dev/null
+++ b/apps/CtsVerifier/jni/midi/MidiTestManager.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <cstring>
+#include <pthread.h>
+#include <unistd.h>
+
+#define TAG "MidiTestManager"
+#include <android/log.h>
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+
+#include "MidiTestManager.h"
+
+static pthread_t readThread;
+
+static const bool DEBUG = false;
+static const bool DEBUG_MIDIDATA = false;
+
+//
+// MIDI Messages
+//
+// Channel Commands
+static const uint8_t kMIDIChanCmd_KeyDown = 9;
+static const uint8_t kMIDIChanCmd_KeyUp = 8;
+static const uint8_t kMIDIChanCmd_PolyPress = 10;
+static const uint8_t kMIDIChanCmd_Control = 11;
+static const uint8_t kMIDIChanCmd_ProgramChange = 12;
+static const uint8_t kMIDIChanCmd_ChannelPress = 13;
+static const uint8_t kMIDIChanCmd_PitchWheel = 14;
+// System Commands
+static const uint8_t kMIDISysCmd_SysEx = 0xF0;
+static const uint8_t kMIDISysCmd_EndOfSysEx =  0xF7;
+static const uint8_t kMIDISysCmd_ActiveSensing = 0xFE;
+static const uint8_t kMIDISysCmd_Reset = 0xFF;
+
+static void* readThreadRoutine(void * context) {
+    MidiTestManager* testManager = (MidiTestManager*)context;
+    return reinterpret_cast<void*>(static_cast<intptr_t>(testManager->ProcessInput()));
+}
+
+/*
+ * TestMessage
+ */
+#define makeMIDICmd(cmd, channel)  (uint8_t)((cmd << 4) | (channel & 0x0F))
+
+class TestMessage {
+public:
+    uint8_t*   mMsgBytes;
+    int     mNumMsgBytes;
+
+    TestMessage()
+        : mMsgBytes(NULL), mNumMsgBytes(0)
+    {}
+
+    ~TestMessage() {
+        delete[] mMsgBytes;
+    }
+
+    bool set(uint8_t* msgBytes, int numMsgBytes) {
+        if (msgBytes == NULL || numMsgBytes <= 0) {
+            return false;
+        }
+        mNumMsgBytes = numMsgBytes;
+        mMsgBytes = new uint8_t[numMsgBytes];
+        memcpy(mMsgBytes, msgBytes, mNumMsgBytes * sizeof(uint8_t));
+        return true;
+    }
+}; /* class TestMessage */
+
+/*
+ * MidiTestManager
+ */
+MidiTestManager::MidiTestManager()
+    : mTestModuleObj(NULL),
+      mTestStream(NULL), mNumTestStreamBytes(0),
+      mReceiveStreamPos(0),
+      mMidiSendPort(NULL), mMidiReceivePort(NULL),
+      mTestMsgs(NULL), mNumTestMsgs(0)
+{}
+
+MidiTestManager::~MidiTestManager(){
+    delete[] mTestStream;
+}
+
+void MidiTestManager::jniSetup(JNIEnv* env) {
+    env->GetJavaVM(&mJvm);
+
+    jclass clsMidiTestModule =
+        env->FindClass("com/android/cts/verifier/audio/NDKMidiActivity$NDKMidiTestModule");
+    if (DEBUG) {
+        ALOGI("gClsMidiTestModule:%p", clsMidiTestModule);
+    }
+
+    // public void endTest(int endCode)
+    mMidEndTest = env->GetMethodID(clsMidiTestModule, "endTest", "(I)V");
+    if (DEBUG) {
+        ALOGI("mMidEndTestgMidEndTest:%p", mMidEndTest);
+    }
+}
+
+void MidiTestManager::buildTestStream() {
+    // add up the total space
+    mNumTestStreamBytes = 0;
+    for(int msgIndex = 0; msgIndex < mNumTestMsgs; msgIndex++) {
+        mNumTestStreamBytes += mTestMsgs[msgIndex].mNumMsgBytes;
+    }
+
+    delete[] mTestStream;
+    mTestStream = new uint8_t[mNumTestStreamBytes];
+    int streamIndex = 0;
+    for(int msgIndex = 0; msgIndex < mNumTestMsgs; msgIndex++) {
+        for(int byteIndex = 0; byteIndex < mTestMsgs[msgIndex].mNumMsgBytes; byteIndex++) {
+            mTestStream[streamIndex++] = mTestMsgs[msgIndex].mMsgBytes[byteIndex];
+        }
+    }
+
+    // Reset stream position
+    mReceiveStreamPos = 0;
+}
+
+/**
+ * Compares the supplied bytes against the sent message stream at the current postion
+ * and advances the stream position.
+ */
+bool MidiTestManager::matchStream(uint8_t* bytes, int count) {
+    if (DEBUG) {
+        ALOGI("---- matchStream() count:%d", count);
+    }
+    bool matches = true;
+
+    for (int index = 0; index < count; index++) {
+        if (bytes[index] != mTestStream[mReceiveStreamPos]) {
+            matches = false;
+            if (DEBUG) {
+                ALOGI("---- mismatch @%d [%d : %d]",
+                        index, bytes[index], mTestStream[mReceiveStreamPos]);
+            }
+        }
+        mReceiveStreamPos++;
+    }
+
+    if (DEBUG) {
+        ALOGI("  returns:%d", matches);
+    }
+    return matches;
+}
+
+/**
+ * Writes out the list of MIDI messages to the output port.
+ * Returns total number of bytes sent.
+ */
+int MidiTestManager::sendMessages() {
+    if (DEBUG) {
+        ALOGI("---- sendMessages()...");
+        for(int msgIndex = 0; msgIndex < mNumTestMsgs; msgIndex++) {
+            if (DEBUG_MIDIDATA) {
+            ALOGI("--------");
+                for(int byteIndex = 0; byteIndex < mTestMsgs[msgIndex].mNumMsgBytes; byteIndex++) {
+                    ALOGI("  0x%X", mTestMsgs[msgIndex].mMsgBytes[byteIndex]);
+                }
+            }
+        }
+    }
+
+    int totalSent = 0;
+    for(int msgIndex = 0; msgIndex < mNumTestMsgs; msgIndex++) {
+        ssize_t numSent =
+            AMidiInputPort_send(mMidiSendPort,
+                    mTestMsgs[msgIndex].mMsgBytes, mTestMsgs[msgIndex].mNumMsgBytes);
+        totalSent += numSent;
+    }
+
+    if (DEBUG) {
+        ALOGI("---- totalSent:%d", totalSent);
+    }
+
+    return totalSent;
+}
+
+int MidiTestManager::ProcessInput() {
+    uint8_t readBuffer[128];
+    size_t totalNumReceived = 0;
+
+    bool testRunning = true;
+    int testResult = TESTSTATUS_NOTRUN;
+
+    int32_t opCode;
+    size_t numBytesReceived;
+    int64_t timeStamp;
+    while (testRunning) {
+        // AMidiOutputPort_receive is non-blocking, so let's not burn up the CPU unnecessarily
+        usleep(2000);
+
+        numBytesReceived = 0;
+        ssize_t numMessagesReceived =
+            AMidiOutputPort_receive(mMidiReceivePort, &opCode, readBuffer, 128,
+                        &numBytesReceived, &timeStamp);
+
+        if (testRunning &&
+            numBytesReceived > 0 &&
+            opCode == AMIDI_OPCODE_DATA &&
+            readBuffer[0] != kMIDISysCmd_ActiveSensing &&
+            readBuffer[0] != kMIDISysCmd_Reset) {
+            if (DEBUG) {
+                ALOGI("---- msgs:%zd, bytes:%zu", numMessagesReceived, numBytesReceived);
+            }
+            // Process Here
+            if (!matchStream(readBuffer, numBytesReceived)) {
+                testResult = TESTSTATUS_FAILED_MISMATCH;
+                testRunning = false;   // bail
+            }
+            totalNumReceived += numBytesReceived;
+            if (totalNumReceived > mNumTestStreamBytes) {
+                testResult = TESTSTATUS_FAILED_OVERRUN;
+                testRunning = false;   // bail
+            }
+            if (totalNumReceived == mNumTestStreamBytes) {
+                testResult = TESTSTATUS_PASSED;
+                testRunning = false;   // done
+            }
+        }
+    }
+
+    return testResult;
+}
+
+bool MidiTestManager::StartReading(AMidiDevice* nativeReadDevice) {
+    ALOGI("StartReading()...");
+
+    media_status_t m_status =
+        AMidiOutputPort_open(nativeReadDevice, 0, &mMidiReceivePort);
+    if (m_status != 0) {
+        ALOGE("Can't open MIDI device for reading err:%d", m_status);
+        return false;
+    }
+
+    // Start read thread
+    int status = pthread_create(&readThread, NULL, readThreadRoutine, this);
+    if (status != 0) {
+        ALOGE("Can't start readThread: %s (%d)", strerror(status), status);
+    }
+    return status == 0;
+}
+
+bool MidiTestManager::StartWriting(AMidiDevice* nativeWriteDevice) {
+    ALOGI("StartWriting()...");
+
+    media_status_t status =
+        AMidiInputPort_open(nativeWriteDevice, 0, &mMidiSendPort);
+    if (status != 0) {
+        ALOGE("Can't open MIDI device for writing err:%d", status);
+        return false;
+    }
+    return true;
+}
+
+uint8_t msg0[] = {makeMIDICmd(kMIDIChanCmd_KeyDown, 0), 64, 120};
+//uint8_t msg0Alt[] = {makeMIDICmd(kMIDIChanCmd_KeyDown, 0), 65, 120};
+uint8_t msg1[] = {makeMIDICmd(kMIDIChanCmd_KeyUp, 0), 64, 35};
+
+bool MidiTestManager::RunTest(jobject testModuleObj, AMidiDevice* sendDevice,
+        AMidiDevice* receiveDevice) {
+    if (DEBUG) {
+        ALOGI("RunTest(%p, %p, %p)", testModuleObj, sendDevice, receiveDevice);
+    }
+
+    JNIEnv* env;
+    mJvm->AttachCurrentThread(&env, NULL);
+    if (env == NULL) {
+        EndTest(TESTSTATUS_FAILED_JNI);
+    }
+
+    mTestModuleObj = env->NewGlobalRef(testModuleObj);
+
+    // Call StartWriting first because StartReading starts a thread.
+    if (!StartWriting(sendDevice) || !StartReading(receiveDevice)) {
+        // Test call to EndTest will close any open devices.
+        EndTest(TESTSTATUS_FAILED_DEVICE);
+        return false; // bail
+    }
+
+    // setup messages
+    delete[] mTestMsgs;
+    mNumTestMsgs = 3;
+    mTestMsgs = new TestMessage[mNumTestMsgs];
+
+    int sysExSize = 8;
+    uint8_t* sysExMsg = new uint8_t[sysExSize];
+    sysExMsg[0] = kMIDISysCmd_SysEx;
+    for(int index = 1; index < sysExSize-1; index++) {
+        sysExMsg[index] = (uint8_t)index;
+    }
+    sysExMsg[sysExSize-1] = kMIDISysCmd_EndOfSysEx;
+
+    if (!mTestMsgs[0].set(msg0, sizeof(msg0)) ||
+        !mTestMsgs[1].set(msg1, sizeof(msg1)) ||
+        !mTestMsgs[2].set(sysExMsg, sysExSize)) {
+        return false;
+    }
+    delete[] sysExMsg;
+
+    buildTestStream();
+
+    // Inject an error
+    // mTestMsgs[0].set(msg0Alt, 3);
+
+    sendMessages();
+    void* threadRetval = (void*)TESTSTATUS_NOTRUN;
+    int status = pthread_join(readThread, &threadRetval);
+    if (status != 0) {
+        ALOGE("Failed to join readThread: %s (%d)", strerror(status), status);
+    }
+    EndTest(static_cast<int>(reinterpret_cast<intptr_t>(threadRetval)));
+    return true;
+}
+
+void MidiTestManager::EndTest(int endCode) {
+
+    JNIEnv* env;
+    mJvm->AttachCurrentThread(&env, NULL);
+    if (env == NULL) {
+        ALOGE("Error retrieving JNI Env");
+    }
+
+    env->CallVoidMethod(mTestModuleObj, mMidEndTest, endCode);
+    env->DeleteGlobalRef(mTestModuleObj);
+
+    // EndTest() will ALWAYS be called, so we can close the ports here.
+    if (mMidiSendPort != NULL) {
+        AMidiInputPort_close(mMidiSendPort);
+        mMidiSendPort = NULL;
+    }
+    if (mMidiReceivePort != NULL) {
+        AMidiOutputPort_close(mMidiReceivePort);
+        mMidiReceivePort = NULL;
+    }
+}
diff --git a/apps/CtsVerifier/jni/midi/MidiTestManager.h b/apps/CtsVerifier/jni/midi/MidiTestManager.h
new file mode 100644
index 0000000..c594efa
--- /dev/null
+++ b/apps/CtsVerifier/jni/midi/MidiTestManager.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <amidi/AMidi.h>
+
+#include <jni.h>
+
+class TestMessage;
+
+class MidiTestManager {
+public:
+    MidiTestManager();
+    ~MidiTestManager();
+
+    void jniSetup(JNIEnv* env);
+
+    bool RunTest(jobject testModuleObj, AMidiDevice* sendDevice, AMidiDevice* receiveDevice);
+    void EndTest(int testCode);
+
+    // Called by the thread routine.
+    int ProcessInput();
+
+private:
+   void buildTestStream();
+    bool matchStream(uint8_t* bytes, int count);
+
+    int sendMessages();
+
+    jobject mTestModuleObj;
+
+    // The send messages in a linear stream for matching.
+    uint8_t*   mTestStream;
+    int     mNumTestStreamBytes;
+    int     mReceiveStreamPos;
+
+    AMidiInputPort* mMidiSendPort;
+    AMidiOutputPort* mMidiReceivePort;
+
+    // The array of messages to send/receive
+    TestMessage*    mTestMsgs;
+    int             mNumTestMsgs;
+
+    // JNI
+    JavaVM* mJvm;
+    jmethodID mMidEndTest;
+
+    // Test result codes
+    static const int TESTSTATUS_NOTRUN = 0;
+    static const int TESTSTATUS_PASSED = 1;
+    static const int TESTSTATUS_FAILED_MISMATCH = 2;
+    static const int TESTSTATUS_FAILED_TIMEOUT = 3;
+    static const int TESTSTATUS_FAILED_OVERRUN = 4;
+    static const int TESTSTATUS_FAILED_DEVICE = 5;
+    static const int TESTSTATUS_FAILED_JNI = 6;
+
+    bool StartReading(AMidiDevice* nativeReadDevice);
+    bool StartWriting(AMidiDevice* nativeWriteDevice);
+};
diff --git a/apps/CtsVerifier/jni/midi/com_android_cts_verifier_audio_midilib_NativeMidiManager.cpp b/apps/CtsVerifier/jni/midi/com_android_cts_verifier_audio_midilib_NativeMidiManager.cpp
new file mode 100644
index 0000000..3c416ce
--- /dev/null
+++ b/apps/CtsVerifier/jni/midi/com_android_cts_verifier_audio_midilib_NativeMidiManager.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#define TAG "NativeMidiManager-JNI"
+
+#include <android/log.h>
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#include <amidi/AMidi.h>
+
+#include "MidiTestManager.h"
+
+static MidiTestManager sTestManager;
+
+static bool DEBUG = false;
+
+extern "C" {
+
+void Java_com_android_cts_verifier_audio_midilib_NativeMidiManager_initN(
+        JNIEnv* env, jobject midiTestModule) {
+
+    sTestManager.jniSetup(env);
+}
+
+void Java_com_android_cts_verifier_audio_midilib_NativeMidiManager_startTest(
+        JNIEnv* env, jobject thiz, jobject testModuleObj, jobject midiObj) {
+
+    (void)thiz;
+
+    if (DEBUG) {
+        ALOGI("NativeMidiManager_startTest(%p, %p)", testModuleObj, midiObj);
+    }
+
+    media_status_t status;
+
+    AMidiDevice* nativeMidiDevice = NULL;
+    status = AMidiDevice_fromJava(env, midiObj, &nativeMidiDevice);
+    if (DEBUG) {
+        ALOGI("nativeSendDevice:%p, status:%d", nativeMidiDevice, status);
+    }
+
+    sTestManager.RunTest(testModuleObj, nativeMidiDevice, nativeMidiDevice);
+
+    status = AMidiDevice_release(nativeMidiDevice);
+    if (DEBUG) {
+        ALOGI("device release status:%d", status);
+    }
+}
+
+} // extern "C"
diff --git a/apps/CtsVerifier/proguard.flags b/apps/CtsVerifier/proguard.flags
index 2be1211..85f378e 100644
--- a/apps/CtsVerifier/proguard.flags
+++ b/apps/CtsVerifier/proguard.flags
@@ -18,6 +18,15 @@
     public <methods>;
 }
 
+# ensure we keep public camera test methods, these are needed at runtime
+-keepclassmembers class * extends android.hardware.camera2.cts.testcases.Camera2AndroidTestCase {
+    public <methods>;
+}
+
+-keepclassmembers class * extends android.hardware.cts.CameraTestCase {
+    public <methods>;
+}
+
 -keepclasseswithmembers class * extends com.android.cts.verifier.location.LocationModeTestActivity
 
 -keepclasseswithmembers class * extends com.android.cts.verifier.audio.HifiUltrasoundSpeakerTestActivity
@@ -38,7 +47,6 @@
 
 # Jack seems less rigorous than proguard when it comes to warning about
 # transitive dependencies.
--dontwarn com.android.org.bouncycastle.**
 -dontwarn com.android.okhttp.**
 -dontwarn org.opencv.**
 -dontwarn androidx.test.internal.runner.hidden.ExposedInstrumentationApi
diff --git a/apps/CtsVerifier/res/drawable/ic_android.xml b/apps/CtsVerifier/res/drawable/ic_android.xml
new file mode 100644
index 0000000..fb3107b
--- /dev/null
+++ b/apps/CtsVerifier/res/drawable/ic_android.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  Copyright (C) 2019 The Android Open Source Project
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+  -->
+<vector xmlns:android="http://schemas.android.com/apk/res/android"
+        android:width="24dp"
+        android:height="24dp"
+        android:viewportWidth="24.0"
+        android:viewportHeight="24.0">
+    <path
+        android:fillColor="#FF000000"
+        android:pathData="M6,18c0,0.55 0.45,1 1,1h1v3.5c0,0.83 0.67,1.5 1.5,1.5s1.5,-0.67 1.5,-1.5L11,19h2v3.5c0,0.83 0.67,1.5 1.5,1.5s1.5,-0.67 1.5,-1.5L16,19h1c0.55,0 1,-0.45 1,-1L18,8L6,8v10zM3.5,8C2.67,8 2,8.67 2,9.5v7c0,0.83 0.67,1.5 1.5,1.5S5,17.33 5,16.5v-7C5,8.67 4.33,8 3.5,8zM20.5,8c-0.83,0 -1.5,0.67 -1.5,1.5v7c0,0.83 0.67,1.5 1.5,1.5s1.5,-0.67 1.5,-1.5v-7c0,-0.83 -0.67,-1.5 -1.5,-1.5zM15.53,2.16l1.3,-1.3c0.2,-0.2 0.2,-0.51 0,-0.71 -0.2,-0.2 -0.51,-0.2 -0.71,0l-1.48,1.48C13.85,1.23 12.95,1 12,1c-0.96,0 -1.86,0.23 -2.66,0.63L7.85,0.15c-0.2,-0.2 -0.51,-0.2 -0.71,0 -0.2,0.2 -0.2,0.51 0,0.71l1.31,1.31C6.97,3.26 6,5.01 6,7h12c0,-1.99 -0.97,-3.75 -2.47,-4.84zM10,5L9,5L9,4h1v1zM15,5h-1L14,4h1v1z"/>
+</vector>
diff --git a/apps/CtsVerifier/res/drawable/user_icon_1.png b/apps/CtsVerifier/res/drawable/user_icon_1.png
new file mode 100644
index 0000000..941458d
--- /dev/null
+++ b/apps/CtsVerifier/res/drawable/user_icon_1.png
Binary files differ
diff --git a/apps/CtsVerifier/res/drawable/user_icon_2.png b/apps/CtsVerifier/res/drawable/user_icon_2.png
new file mode 100644
index 0000000..6792671
--- /dev/null
+++ b/apps/CtsVerifier/res/drawable/user_icon_2.png
Binary files differ
diff --git a/apps/CtsVerifier/res/layout/audio_aec_activity.xml b/apps/CtsVerifier/res/layout/audio_aec_activity.xml
new file mode 100644
index 0000000..83aa9cd
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/audio_aec_activity.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<LinearLayout
+    xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content"
+    android:orientation="vertical"
+    style="@style/RootLayoutPadding">
+
+    <ScrollView
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:id="@+id/scrollView">
+
+        <LinearLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content"
+            android:orientation="vertical">
+
+            <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"
+                    android:gravity="bottom"
+                    android:id="@+id/audio_aec_mandatory_info"
+                    android:text="@string/audio_aec_mandatory_test" />
+
+            <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="horizontal" >
+
+                <Button
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:id="@+id/audio_aec_mandatory_no"
+                        android:text="@string/af_no" />
+                <Button
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:id="@+id/audio_aec_mandatory_yes"
+                        android:text="@string/af_yes" />
+            </LinearLayout>
+
+            <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="vertical"
+                    android:id="@+id/audio_aec_test_layout" >
+
+                <View
+                        android:layout_width="match_parent"
+                        android:layout_height="1dp"
+                        android:background="?android:colorAccent" />
+
+                <TextView
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:scrollbars="vertical"
+                        android:gravity="bottom"
+                        android:text="@string/audio_aec_instructions"
+                        android:id="@+id/audio_aec_instructions" />
+
+                <Button
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:soundEffectsEnabled="false"
+                        android:text="@string/af_button_test"
+                        android:id="@+id/audio_aec_button_test" />
+                <ProgressBar
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:layout_weight="1"
+                        android:id="@+id/audio_aec_test_progress_bar" />
+
+                <TextView
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:text="@string/af_test_results"
+                        android:id="@+id/audio_aec_test_result" />
+            </LinearLayout>
+
+            <include layout="@layout/pass_fail_buttons" />
+        </LinearLayout>
+    </ScrollView>
+
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/audio_frequency_voice_recognition_activity.xml b/apps/CtsVerifier/res/layout/audio_frequency_voice_recognition_activity.xml
new file mode 100644
index 0000000..c6cd0cd
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/audio_frequency_voice_recognition_activity.xml
@@ -0,0 +1,303 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<LinearLayout
+    xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content"
+    android:orientation="vertical"
+    style="@style/RootLayoutPadding">
+
+    <ScrollView
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:id="@+id/scrollView">
+
+        <LinearLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content"
+            android:orientation="vertical">
+
+            <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:orientation="vertical"
+                android:id="@+id/vr_layout_test_tone">
+
+                <View
+                    android:layout_width="match_parent"
+                    android:layout_height="1dp"
+                    android:background="?android:colorAccent" />
+
+                <TextView
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"
+                    android:gravity="bottom"
+                    android:text="@string/vr_test_tone_instructions"
+                    android:id="@+id/vr_test_tone_instructions" />
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="horizontal">
+
+                    <LinearLayout
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="2">
+
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_test"
+                            android:id="@+id/vr_button_test_tone" />
+                        <ProgressBar
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:layout_weight="1"
+                            android:id="@+id/vr_test_tone_progress_bar" />
+
+                    </LinearLayout>
+
+                    <View
+                        android:layout_width="1dp"
+                        android:layout_height="match_parent"
+                        android:background="?android:colorAccent" />
+
+                    <LinearLayout
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="1">
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_play"
+                            android:id="@+id/vr_button_play_tone" />
+                    </LinearLayout>
+                </LinearLayout>
+                <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:text="@string/af_test_results"
+                    android:id="@+id/vr_test_tone_result" />
+            </LinearLayout>
+
+            <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:orientation="vertical"
+                android:id="@+id/vr_layout_test_noise">
+
+                <View
+                    android:layout_width="match_parent"
+                    android:layout_height="1dp"
+                    android:background="?android:colorAccent" />
+
+                <TextView
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"
+                    android:gravity="bottom"
+                    android:text="@string/vr_test_noise_instructions"
+                    android:id="@+id/vr_test_noise_instructions" />
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="horizontal">
+
+                    <LinearLayout
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="2">
+
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_test"
+                            android:id="@+id/vr_button_test_noise" />
+                        <ProgressBar
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:layout_weight="1"
+                            android:id="@+id/vr_test_noise_progress_bar" />
+                    </LinearLayout>
+
+                    <View
+                        android:layout_width="1dp"
+                        android:layout_height="match_parent"
+                        android:background="?android:colorAccent" />
+
+                    <LinearLayout
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="1">
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_play"
+                            android:id="@+id/vr_button_play_noise" />
+                    </LinearLayout>
+                </LinearLayout>
+                <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:text="@string/af_test_results"
+                    android:id="@+id/vr_test_noise_result" />
+            </LinearLayout>
+
+        <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:orientation="vertical"
+                android:id="@+id/vr_layout_test_usb_background">
+
+                <View
+                    android:layout_width="match_parent"
+                    android:layout_height="1dp"
+                    android:background="?android:colorAccent" />
+
+                <TextView
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"
+                    android:gravity="bottom"
+                    android:text="@string/vr_test_usb_background_instructions"
+                    android:id="@+id/vr_test_usb_background_instructions" />
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="horizontal">
+
+                    <LinearLayout
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="2">
+
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_test"
+                            android:id="@+id/vr_button_test_usb_background" />
+                        <ProgressBar
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:layout_weight="1"
+                            android:id="@+id/vr_test_usb_background_progress_bar" />
+                    </LinearLayout>
+                </LinearLayout>
+                <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:text="@string/af_test_results"
+                    android:id="@+id/vr_test_usb_background_result" />
+            </LinearLayout>
+
+            <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:orientation="vertical"
+                android:id="@+id/vr_layout_test_usb_noise">
+
+                <View
+                    android:layout_width="match_parent"
+                    android:layout_height="1dp"
+                    android:background="?android:colorAccent" />
+
+                <TextView
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"
+                    android:gravity="bottom"
+                    android:text="@string/vr_test_usb_noise_instructions"
+                    android:id="@+id/vr_test_usb_noise_instructions" />
+
+                <LinearLayout
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:orientation="horizontal">
+
+                    <LinearLayout
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="2">
+
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_test"
+                            android:id="@+id/vr_button_test_usb_noise" />
+                        <ProgressBar
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:layout_weight="1"
+                            android:id="@+id/vr_test_usb_noise_progress_bar" />
+                    </LinearLayout>
+
+                    <View
+                        android:layout_width="1dp"
+                        android:layout_height="match_parent"
+                        android:background="?android:colorAccent" />
+
+                    <LinearLayout
+                        android:layout_width="0dp"
+                        android:layout_height="wrap_content"
+                        android:orientation="vertical"
+                        android:layout_weight="1">
+                        <Button
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:soundEffectsEnabled="false"
+                            android:text="@string/af_button_play"
+                            android:id="@+id/vr_button_play_usb_noise" />
+                    </LinearLayout>
+                </LinearLayout>
+                <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:text="@string/af_test_results"
+                    android:id="@+id/vr_test_usb_noise_result" />
+            </LinearLayout>
+            <View
+                android:layout_width="match_parent"
+                android:layout_height="1dp"
+                android:background="?android:colorAccent" />
+
+            <TextView
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:text="@string/af_global_test_results"
+                    android:id="@+id/vr_test_global_result" />
+
+            <include layout="@layout/pass_fail_buttons" />
+        </LinearLayout>
+    </ScrollView>
+
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/biometric_test_main.xml b/apps/CtsVerifier/res/layout/biometric_test_main.xml
new file mode 100644
index 0000000..73d0203
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/biometric_test_main.xml
@@ -0,0 +1,82 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+  ~ Copyright (C) 2018 The Android Open Source Project
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~      http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License
+  -->
+<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:padding="10dip"
+    >
+
+    <Button android:id="@+id/biometric_enroll_button"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_centerHorizontal="true"
+        android:text="@string/biometric_enroll"
+        android:visibility="gone"
+        />
+
+    <Button android:id="@+id/biometric_start_test_not_secured"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_centerHorizontal="true"
+        android:layout_below="@+id/biometric_enroll_button"
+        android:text="@string/biometric_start_test1"
+        />
+
+    <Button android:id="@+id/biometric_start_test_none_enrolled"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_centerHorizontal="true"
+        android:layout_below="@+id/biometric_start_test_not_secured"
+        android:text="@string/biometric_start_test2"
+        android:visibility="invisible"
+        />
+
+    <Button android:id="@+id/biometric_start_test_credential_button"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_below="@+id/biometric_start_test_none_enrolled"
+        android:layout_centerHorizontal="true"
+        android:text="@string/biometric_start_test3"
+        android:visibility="invisible"
+        />
+
+    <Button android:id="@+id/biometric_start_test_authenticate_button"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_below="@+id/biometric_start_test_credential_button"
+        android:layout_centerHorizontal="true"
+        android:text="@string/biometric_start_test4"
+        android:visibility="invisible"
+        />
+
+    <Button android:id="@+id/biometric_start_test_strings_button"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_below="@+id/biometric_start_test_authenticate_button"
+        android:layout_centerHorizontal="true"
+        android:text="@string/biometric_start_test5"
+        android:visibility="invisible"
+        />
+
+    <include android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:layout_alignParentBottom="true"
+        layout="@layout/pass_fail_buttons"
+        />
+
+</RelativeLayout>
+
diff --git a/apps/CtsVerifier/res/layout/bubble_activity.xml b/apps/CtsVerifier/res/layout/bubble_activity.xml
new file mode 100644
index 0000000..ba0cc4e
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/bubble_activity.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+     Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+              android:layout_width="match_parent"
+              android:layout_height="match_parent"
+              android:orientation="vertical"
+              android:gravity="center_horizontal"
+              style="@style/RootLayoutPadding">
+
+    <TextView
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:text="Bubble expanded state"
+        style="@style/InstructionsFont"/>
+
+</LinearLayout>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/bubbles_main.xml b/apps/CtsVerifier/res/layout/bubbles_main.xml
new file mode 100644
index 0000000..7114fda
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/bubbles_main.xml
@@ -0,0 +1,89 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+     Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+              android:layout_width="match_parent"
+              android:layout_height="match_parent"
+              android:orientation="vertical"
+              android:gravity="center_horizontal"
+              style="@style/RootLayoutPadding">
+
+    <ScrollView
+        android:layout_width="match_parent"
+        android:layout_height="0dp"
+        android:layout_weight="1"
+        android:orientation="vertical">
+
+        <LinearLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content"
+            android:gravity="center_horizontal"
+            android:orientation="vertical" >
+
+            <TextView
+                android:id="@+id/bubble_test_title"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                style="@style/InstructionsFont"/>
+
+            <!-- The padding here matches what InstructionsFont has so that these look
+                 nice together-->
+            <TextView
+                android:id="@+id/bubble_test_description"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:padding="10dp"
+                style="@style/InstructionsSmallFont"/>
+
+            <Button
+                android:id="@+id/bubble_test_button"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_margin="24dp"/>
+
+            <!-- Pass / fail buttons for the test step -->
+            <LinearLayout
+                android:id="@+id/button_layout"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:gravity="center_horizontal"
+                android:orientation="horizontal">
+
+                <Button
+                    android:id="@+id/test_step_passed"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:text="@string/pass_button_text"/>
+
+                <Button
+                    android:id="@+id/test_step_failed"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:text="@string/fail_button_text"/>
+
+            </LinearLayout>
+
+        </LinearLayout>
+
+    </ScrollView>
+
+    <include
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:layout_weight="0"
+        layout="@layout/pass_fail_buttons" />
+
+</LinearLayout>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/camera_performance.xml b/apps/CtsVerifier/res/layout/camera_performance.xml
new file mode 100644
index 0000000..bdac03f
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/camera_performance.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:layout_gravity="bottom"
+    android:orientation="vertical">
+
+    <include layout="@layout/pass_fail_buttons"/>
+
+        <ListView
+            android:id="@+id/android:list"
+            android:layout_width="match_parent"
+            android:layout_height="0dp"
+            android:layout_weight="2"
+            android:gravity="top"
+            android:scrollbars="vertical"/>
+
+        <TextView
+            android:id="@+id/test_instructions"
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content"
+            android:visibility="gone"
+            android:textSize="16dip"/>
+
+        <Button
+            android:id="@+id/prepare_test_button"
+            android:layout_width="match_parent"
+            android:visibility="gone"
+            android:layout_height="wrap_content"/>
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/ci_main.xml b/apps/CtsVerifier/res/layout/ci_main.xml
index 21235b0..b9647f3 100644
--- a/apps/CtsVerifier/res/layout/ci_main.xml
+++ b/apps/CtsVerifier/res/layout/ci_main.xml
@@ -70,6 +70,14 @@
                     android:enabled="false"
                     android:text="@string/ci_start_test_button_caption" />
 
+                <Button
+                    android:id="@+id/settings_button"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:enabled="false"
+                    android:text="@string/ci_settings_button_caption" />
+
+
                 </LinearLayout>
             </LinearLayout>
         <include layout="@layout/pass_fail_buttons" />
diff --git a/apps/CtsVerifier/res/layout/force_stop_recents_main.xml b/apps/CtsVerifier/res/layout/force_stop_recents_main.xml
new file mode 100644
index 0000000..ef5664e
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/force_stop_recents_main.xml
@@ -0,0 +1,151 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
+                style="@style/RootLayoutPadding"
+                android:layout_width="match_parent"
+                android:layout_height="match_parent">
+
+    <LinearLayout
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:orientation="vertical">
+
+        <!-- Install test app -->
+        <RelativeLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content">
+
+            <ImageView
+                android:id="@+id/fs_test_app_install_status"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_alignParentLeft="true"
+                android:layout_alignParentTop="true"
+                android:layout_marginTop="10dip"
+                android:padding="10dip"/>
+
+            <TextView
+                android:id="@+id/fs_test_app_install_instructions"
+                style="@style/InstructionsSmallFont"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_alignParentRight="true"
+                android:layout_alignParentTop="true"
+                android:layout_toRightOf="@id/fs_test_app_install_status"
+                android:layout_marginTop="10dip"
+                android:text="@string/fs_test_app_install_instructions"/>
+        </RelativeLayout>
+
+        <!-- Launch test activity -->
+        <RelativeLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content">
+
+            <ImageView
+                android:id="@+id/fs_test_app_launch_status"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_alignParentLeft="true"
+                android:layout_alignParentTop="true"
+                android:layout_marginTop="10dip"
+                android:padding="10dip"/>
+
+            <TextView
+                android:id="@+id/fs_test_app_launch_instructions"
+                style="@style/InstructionsSmallFont"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_alignParentRight="true"
+                android:layout_alignParentTop="true"
+                android:layout_toRightOf="@id/fs_test_app_launch_status"
+                android:layout_marginTop="10dip"
+                android:text="@string/fs_test_app_launch_instructions"/>
+
+            <Button
+                android:id="@+id/fs_launch_test_app_button"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_alignParentRight="true"
+                android:layout_below="@id/fs_test_app_launch_instructions"
+                android:layout_marginTop="10dip"
+                android:layout_marginLeft="20dip"
+                android:layout_marginRight="20dip"
+                android:layout_toRightOf="@id/fs_test_app_launch_status"
+                android:text="@string/fs_launch_test_app_button_text"/>
+        </RelativeLayout>
+
+        <!-- Remove test activity task from recents -->
+        <RelativeLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content">
+
+            <ImageView
+                android:id="@+id/fs_test_app_recents_status"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_alignParentLeft="true"
+                android:layout_alignParentTop="true"
+                android:layout_marginTop="10dip"
+                android:padding="10dip"
+                android:visibility="visible"/>
+
+            <TextView
+                android:id="@+id/fs_test_app_recents_instructions"
+                style="@style/InstructionsSmallFont"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_alignParentRight="true"
+                android:layout_alignParentTop="true"
+                android:layout_toRightOf="@id/fs_test_app_recents_status"
+                android:layout_marginTop="10dip"
+                android:visibility="visible"
+                android:text="@string/fs_test_app_recents_instructions"/>
+        </RelativeLayout>
+
+        <!-- Verify that app wasn't force-stopped -->
+        <RelativeLayout
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content">
+
+            <ImageView
+                android:id="@+id/fs_force_stop_status"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:layout_alignParentLeft="true"
+                android:layout_alignParentTop="true"
+                android:layout_marginTop="10dip"
+                android:visibility="gone"
+                android:padding="10dip"/>
+
+            <TextView
+                android:id="@+id/fs_force_stop_verification"
+                style="@style/InstructionsSmallFont"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_alignParentRight="true"
+                android:layout_alignParentTop="true"
+                android:layout_toRightOf="@id/fs_force_stop_status"
+                android:layout_marginTop="10dip"
+                android:visibility="gone"
+                android:text="@string/fs_force_stop_verification_pending"/>
+        </RelativeLayout>
+    </LinearLayout>
+
+    <include android:layout_width="match_parent"
+             android:layout_height="wrap_content"
+             android:layout_alignParentBottom="true"
+             layout="@layout/pass_fail_buttons"/>
+</RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/instant_apps.xml b/apps/CtsVerifier/res/layout/instant_apps.xml
new file mode 100644
index 0000000..b0bc3ff
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/instant_apps.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0" encoding="utf-8"?>
+
+<!-- Copyright (C) 2019 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<ScrollView
+    xmlns:android="http://schemas.android.com/apk/res/android"
+    android:fillViewport="true"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent">
+    <LinearLayout
+        android:layout_width="match_parent"
+        android:layout_height="match_parent"
+        android:orientation="vertical">
+        <LinearLayout
+            android:layout_width="match_parent"
+            android:layout_height="0dp"
+            android:layout_weight="1"
+            android:orientation="vertical">
+            <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="match_parent"
+                android:gravity="bottom"
+                android:orientation="vertical" >
+
+                <TextView
+                    android:id="@+id/instruction_header"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:text="@string/ia_instruction_heading_label" />
+
+                <TextView
+                    android:id="@+id/instruction_text"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:text="@string/ia_instruction_text_photo_label" />
+
+                <TextView
+                    android:id="@+id/instruction_extra_text"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content" />
+
+                <Button
+                    android:id="@+id/start_test_button"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:enabled="false"
+                    android:text="@string/ia_start_test_button_caption" />
+
+                </LinearLayout>
+            </LinearLayout>
+        <include layout="@layout/pass_fail_buttons" />
+    </LinearLayout>
+</ScrollView>
diff --git a/apps/CtsVerifier/res/layout/lockscreen_message.xml b/apps/CtsVerifier/res/layout/lockscreen_message.xml
new file mode 100644
index 0000000..dcf0315
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/lockscreen_message.xml
@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+        android:id="@+id/root_view"
+        android:orientation="vertical"
+        android:layout_width="match_parent"
+        android:layout_height="match_parent">
+
+        <TextView
+                android:layout_width="match_parent"
+                android:layout_height="0dp"
+                android:layout_weight="1"
+                android:text="@string/device_owner_customize_lockscreen_message_info"/>
+
+        <EditText
+                android:id="@+id/lockscreen_message_edit_text"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:hint="@string/device_owner_set_lockscreen_message_hint"
+                android:gravity="top|start"
+                android:windowSoftInputMode="adjustPan"
+                android:padding="16dp" />
+
+        <LinearLayout
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:orientation="horizontal">
+            <Button android:id="@+id/lockscreen_message_set_button"
+                    android:layout_width="0dp"
+                    android:layout_height="wrap_content"
+                    android:text="@string/device_owner_set_lockscreen_message_button"
+                    android:layout_weight="1"/>
+            <Button android:id="@+id/go_button"
+                    android:layout_width="0dp"
+                    android:layout_height="wrap_content"
+                    android:text="@string/go_button_text"
+                    android:layout_weight="1"/>
+        </LinearLayout>
+
+        <include layout="@layout/pass_fail_buttons" />
+
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/midi_activity.xml b/apps/CtsVerifier/res/layout/midi_activity.xml
new file mode 100644
index 0000000..d8daee1
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/midi_activity.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0" encoding="utf-8"?>
+<ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:id="@+id/scrollView"
+    style="@style/RootLayoutPadding">
+
+<LinearLayout android:orientation="vertical"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content">
+
+    <TextView
+        android:text="@string/midiHasMIDILbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="18sp"/>
+
+    <TextView
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:paddingLeft="10dp"
+        android:paddingRight="10dp"
+        android:id="@+id/midiHasMIDILbl"
+        android:textSize="18sp"/>
+
+    <!--  USB Test -->
+    <TextView
+        android:text="@string/midiUSBTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/usbMidiInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/usbMidiOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestUSBInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestUSBInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <!--  Virtual Test -->
+    <TextView
+        android:text="@string/midiVirtTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiVirtInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/midiVirtOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestVirtInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestVirtInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <!--  Bluetooth Test -->
+    <TextView
+        android:text="@string/midiBTTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiBTInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/midiBTOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestBTInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestBTInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <include layout="@layout/pass_fail_buttons"/>
+</LinearLayout>
+</ScrollView>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/ndk_midi_activity.xml b/apps/CtsVerifier/res/layout/ndk_midi_activity.xml
new file mode 100644
index 0000000..d8daee1
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/ndk_midi_activity.xml
@@ -0,0 +1,232 @@
+<?xml version="1.0" encoding="utf-8"?>
+<ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:id="@+id/scrollView"
+    style="@style/RootLayoutPadding">
+
+<LinearLayout android:orientation="vertical"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content">
+
+    <TextView
+        android:text="@string/midiHasMIDILbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="18sp"/>
+
+    <TextView
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:paddingLeft="10dp"
+        android:paddingRight="10dp"
+        android:id="@+id/midiHasMIDILbl"
+        android:textSize="18sp"/>
+
+    <!--  USB Test -->
+    <TextView
+        android:text="@string/midiUSBTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/usbMidiInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/usbMidiOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestUSBInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestUSBInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiUSBTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <!--  Virtual Test -->
+    <TextView
+        android:text="@string/midiVirtTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiVirtInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/midiVirtOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestVirtInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestVirtInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiVirtTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <!--  Bluetooth Test -->
+    <TextView
+        android:text="@string/midiBTTestLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="24sp"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiBTInputLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTInputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+                android:text="@string/midiBTOutputLbl"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTOutputLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <Button
+        android:text="@string/midiTestBTInterfaceBtn"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/midiTestBTInterfaceBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/midiStatusLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/midiBTTestStatusLbl"
+            android:text="@string/midiNotRunLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <include layout="@layout/pass_fail_buttons"/>
+</LinearLayout>
+</ScrollView>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/pass_fail_set_password_complexity.xml b/apps/CtsVerifier/res/layout/pass_fail_set_password_complexity.xml
new file mode 100644
index 0000000..e8cb0ff
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/pass_fail_set_password_complexity.xml
@@ -0,0 +1,91 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+
+<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
+                android:layout_width="match_parent"
+                android:layout_height="match_parent">
+    <LinearLayout android:layout_width="match_parent"
+                  android:layout_height="match_parent"
+                  android:layout_alignParentTop="true"
+                  android:layout_alignParentStart="true"
+                  android:orientation="vertical"
+                  android:divider="@android:color/white"
+                  android:showDividers="middle">
+
+        <LinearLayout android:layout_height="wrap_content"
+                      android:layout_width="wrap_content"
+                      android:orientation="horizontal"
+                      android:gravity="center_vertical">
+            <Button android:id="@+id/set_complexity_high_btn"
+                    android:text="@string/set_complexity_high_txt"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:id="@+id/set_complexity_high_desc"
+                      android:text="@string/set_complexity_high_desc"
+                      android:ellipsize="none"
+                      android:layout_width="wrap_content"
+                      android:layout_height="wrap_content"/>
+        </LinearLayout>
+
+        <LinearLayout android:layout_height="wrap_content"
+                      android:layout_width="wrap_content"
+                      android:orientation="horizontal"
+                      android:gravity="center_vertical">
+            <Button android:id="@+id/set_complexity_medium_btn"
+                    android:text="@string/set_complexity_medium_txt"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:text="@string/set_complexity_medium_desc"
+                      android:ellipsize="none"
+                      android:layout_width="wrap_content"
+                      android:layout_height="wrap_content"/>
+        </LinearLayout>
+
+        <LinearLayout android:layout_height="wrap_content"
+                      android:layout_width="wrap_content"
+                      android:orientation="horizontal"
+                      android:gravity="center_vertical">
+            <Button android:id="@+id/set_complexity_low_btn"
+                    android:text="@string/set_complexity_low_txt"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:text="@string/set_complexity_low_desc"
+                      android:ellipsize="none"
+                      android:layout_width="wrap_content"
+                      android:layout_height="wrap_content"/>
+        </LinearLayout>
+
+        <LinearLayout android:layout_height="wrap_content"
+                      android:layout_width="wrap_content"
+                      android:orientation="horizontal"
+                      android:gravity="center_vertical">
+            <Button android:id="@+id/set_complexity_none_btn"
+                    android:text="@string/set_complexity_none_txt"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:text="@string/set_complexity_none_desc"
+                      android:ellipsize="none"
+                      android:layout_width="wrap_content"
+                      android:layout_height="wrap_content"/>
+        </LinearLayout>
+
+    </LinearLayout>
+
+    <include android:layout_width="match_parent"
+             android:layout_height="wrap_content"
+             android:layout_alignParentBottom="true"
+             layout="@layout/pass_fail_buttons"/>
+</RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/pro_audio.xml b/apps/CtsVerifier/res/layout/pro_audio.xml
new file mode 100644
index 0000000..3182499
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/pro_audio.xml
@@ -0,0 +1,198 @@
+<?xml version="1.0" encoding="utf-8"?>
+<ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:id="@+id/scrollView"
+    style="@style/RootLayoutPadding">
+
+<LinearLayout android:orientation="vertical"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content">
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioHasProAudiolbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioHasProAudioLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioHasLLAlbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioHasLLALbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioMidiHasMIDILbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioHasMIDILbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioMidiHasUSBHostLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioMidiHasUSBHostLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioMidiHasUSBPeripheralLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioMidiHasUSBPeripheralLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <CheckBox android:id="@+id/proAudioHasHDMICheckBox"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:text="@string/proAudioHasHDMICheckBox"
+        android:onClick="onCheckboxClicked"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioHDMISupportLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioHDMISupportLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <TextView
+        android:text="@string/proAudioInputLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="18sp"/>
+
+    <TextView
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:paddingLeft="10dp"
+        android:paddingRight="10dp"
+        android:id="@+id/proAudioInputLbl"
+        android:textSize="18sp"/>
+
+    <TextView
+        android:text="@string/proAudioOutputLbl"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:textSize="18sp"/>
+
+    <TextView
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:paddingLeft="10dp"
+        android:paddingRight="10dp"
+        android:id="@+id/proAudioOutputLbl"
+        android:textSize="18sp"/>
+
+    <Button
+        android:text="@string/audio_proaudio_roundtrip"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:id="@+id/proAudio_runRoundtripBtn"/>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioRoundTripLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioRoundTripLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <LinearLayout android:orientation="horizontal"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content">
+        <TextView
+            android:text="@string/proAudioConfidenceLbl"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:textSize="18sp"/>
+
+        <TextView
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:paddingLeft="10dp"
+            android:paddingRight="10dp"
+            android:id="@+id/proAudioConfidenceLbl"
+            android:textSize="18sp"/>
+    </LinearLayout>
+
+    <include layout="@layout/pass_fail_buttons"/>
+</LinearLayout>
+</ScrollView>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/tapjacking.xml b/apps/CtsVerifier/res/layout/tapjacking.xml
deleted file mode 100644
index 998e624..0000000
--- a/apps/CtsVerifier/res/layout/tapjacking.xml
+++ /dev/null
@@ -1,49 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2017 The Android Open Source Project
-
-     Licensed under the Apache License, Version 2.0 (the "License");
-     you may not use this file except in compliance with the License.
-     You may obtain a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-     Unless required by applicable law or agreed to in writing, software
-     distributed under the License is distributed on an "AS IS" BASIS,
-     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     See the License for the specific language governing permissions and
-     limitations under the License.
--->
-<ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
-            style="@style/RootLayoutPadding"
-            android:layout_width="match_parent"
-            android:layout_height="match_parent">
-    <LinearLayout
-            android:layout_width="match_parent"
-            android:layout_height="match_parent"
-            android:orientation="vertical">
-
-        <RelativeLayout
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content" >
-            <TextView
-                    android:id="@+id/usb_tapjacking_instructions"
-                    style="@style/InstructionsSmallFont"
-                    android:layout_width="match_parent"
-                    android:layout_height="wrap_content"
-                    android:layout_alignParentRight="true"
-                    android:layout_alignParentTop="true" />
-
-            <Button android:id="@+id/tapjacking_btn"
-                    android:text="@string/usb_tapjacking_button_text"
-                    android:layout_below="@id/usb_tapjacking_instructions"
-                    android:layout_width="match_parent"
-                    android:layout_height="wrap_content"
-                    android:layout_alignParentRight="true"
-                    android:layout_marginLeft="20dip"
-                    android:layout_marginRight="20dip"/>
-        </RelativeLayout>
-
-        <include layout="@layout/pass_fail_buttons" />
-    </LinearLayout>
-</ScrollView>
-
diff --git a/apps/CtsVerifier/res/layout/telecom_self_managed_answer.xml b/apps/CtsVerifier/res/layout/telecom_self_managed_answer.xml
index 0b0d9d9..2d73ffe 100644
--- a/apps/CtsVerifier/res/layout/telecom_self_managed_answer.xml
+++ b/apps/CtsVerifier/res/layout/telecom_self_managed_answer.xml
@@ -96,8 +96,8 @@
                 android:layout_marginLeft="20dip"
                 android:layout_marginRight="20dip"
                 android:layout_toRightOf="@id/step_2_status"
-                android:id="@+id/telecom_incoming_self_mgd_show_ui_button"
-                android:text="@string/telecom_incoming_self_mgd_show_ui_button"/>
+                android:id="@+id/telecom_incoming_self_mgd_verify_call_button"
+                android:text="@string/telecom_incoming_self_mgd_verify_call_button"/>
         </RelativeLayout>
 
         <RelativeLayout
@@ -131,8 +131,8 @@
                 android:layout_marginLeft="20dip"
                 android:layout_marginRight="20dip"
                 android:layout_toRightOf="@id/step_3_status"
-                android:id="@+id/telecom_incoming_self_mgd_confirm_answer_button"
-                android:text="@string/telecom_incoming_self_mgd_confirm_answer_button"/>
+                android:id="@+id/telecom_incoming_self_mgd_place_call_button"
+                android:text="@string/telecom_incoming_self_mgd_place_call_button"/>
         </RelativeLayout>
 
         <include layout="@layout/pass_fail_buttons" />
diff --git a/apps/CtsVerifier/res/layout/test_list_footer.xml b/apps/CtsVerifier/res/layout/test_list_footer.xml
index cb73ed1..831ea2e 100644
--- a/apps/CtsVerifier/res/layout/test_list_footer.xml
+++ b/apps/CtsVerifier/res/layout/test_list_footer.xml
@@ -28,12 +28,6 @@
         android:layout_width="wrap_content"
         android:layout_height="wrap_content" />
     <Button
-        android:id="@+id/view"
-        android:text="@string/view"
-        android:layout_gravity="center"
-        android:layout_width="wrap_content"
-        android:layout_height="wrap_content" />
-    <Button
         android:id="@+id/export"
         android:text="@string/export"
         android:layout_gravity="center"
diff --git a/apps/CtsVerifier/res/layout/tiles_item.xml b/apps/CtsVerifier/res/layout/tiles_item.xml
new file mode 100644
index 0000000..f2adaa4
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/tiles_item.xml
@@ -0,0 +1,73 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+   Copyright (C) 2019 The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="wrap_content" >
+
+    <ImageView
+        android:id="@+id/tiles_status"
+        android:layout_width="wrap_content"
+        android:layout_height="wrap_content"
+        android:layout_alignParentLeft="true"
+        android:layout_alignParentTop="true"
+        android:layout_marginTop="10dip"
+        android:contentDescription="@string/pass_button_text"
+        android:padding="10dip"
+        android:src="@drawable/fs_indeterminate" />
+
+    <TextView
+        android:id="@+id/tiles_instructions"
+        style="@style/InstructionsSmallFont"
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:layout_alignParentRight="true"
+        android:layout_alignParentTop="true"
+        android:layout_toRightOf="@id/tiles_status"/>
+
+    <LinearLayout
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:layout_below="@id/tiles_instructions"
+        android:layout_toRightOf="@id/tiles_status"
+        android:layout_alignParentRight="true">
+
+        <Button
+            android:id="@+id/tiles_action_pass"
+            android:layout_width="0dp"
+            android:layout_height="wrap_content"
+            android:layout_weight="1"
+            android:layout_marginLeft="20dip"
+            android:layout_marginRight="10dip"
+            android:onClick="actionPressed"
+            android:clickable="false"
+            android:enabled="false"
+            android:text="Pass"/>
+
+        <Button
+            android:id="@+id/tiles_action_fail"
+            android:layout_width="0dp"
+            android:layout_height="wrap_content"
+            android:layout_weight="1"
+            android:layout_marginLeft="10dip"
+            android:layout_marginRight="20dip"
+            android:clickable="false"
+            android:enabled="false"
+            android:text="Fail"
+            android:onClick="actionPressed" />
+    </LinearLayout>
+
+</RelativeLayout>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/tiles_main.xml b/apps/CtsVerifier/res/layout/tiles_main.xml
new file mode 100644
index 0000000..f523b4a
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/tiles_main.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+   Copyright (C) 2019 The Android Open Source Project
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+        http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+    android:layout_width="match_parent"
+    android:layout_height="match_parent"
+    android:orientation="vertical"
+    style="@style/RootLayoutPadding">
+
+    <ScrollView
+        android:id="@+id/tiles_test_scroller"
+        android:layout_width="match_parent"
+        android:layout_height="0dp"
+        android:layout_weight="1"
+        android:orientation="vertical">
+
+        <LinearLayout
+            android:id="@+id/tiles_test_items"
+            android:layout_width="match_parent"
+            android:layout_height="wrap_content"
+            android:orientation="vertical" >
+        </LinearLayout>
+    </ScrollView>
+
+    <include
+        android:layout_width="match_parent"
+        android:layout_height="wrap_content"
+        android:layout_weight="0"
+        layout="@layout/pass_fail_buttons" />
+
+</LinearLayout>
\ No newline at end of file
diff --git a/apps/CtsVerifier/res/layout/usb_tapjacking_overlay.xml b/apps/CtsVerifier/res/layout/usb_tapjacking_overlay.xml
deleted file mode 100644
index 6a5ba58..0000000
--- a/apps/CtsVerifier/res/layout/usb_tapjacking_overlay.xml
+++ /dev/null
@@ -1,36 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<!-- Copyright (C) 2017 The Android Open Source Project
-
-     Licensed under the Apache License, Version 2.0 (the "License");
-     you may not use this file except in compliance with the License.
-     You may obtain a copy of the License at
-
-          http://www.apache.org/licenses/LICENSE-2.0
-
-     Unless required by applicable law or agreed to in writing, software
-     distributed under the License is distributed on an "AS IS" BASIS,
-     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-     See the License for the specific language governing permissions and
-     limitations under the License.
--->
-
-<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
-    android:layout_width="match_parent"
-    android:layout_height="match_parent"
-    android:orientation="vertical">
-
-    <TextView
-        android:id="@+id/textView"
-        android:layout_width="wrap_content"
-        android:layout_height="wrap_content"
-        android:layout_gravity="center"
-        android:maxWidth="350dp"
-        android:paddingLeft="20dp"
-        android:paddingRight="20dp"
-        android:paddingBottom="6dp"
-        android:paddingTop="8dp"
-        android:background="#ffffff"
-        android:text="@string/usb_tapjacking_overlay_message"
-        android:textColor="#000000"
-        android:textSize="22sp" />
-</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/wifi_main.xml b/apps/CtsVerifier/res/layout/wifi_main.xml
new file mode 100644
index 0000000..fee710d
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/wifi_main.xml
@@ -0,0 +1,51 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2017 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+              android:orientation="vertical"
+              android:layout_width="match_parent"
+              android:layout_height="match_parent"
+>
+
+    <FrameLayout
+        android:layout_width="fill_parent"
+        android:layout_height="wrap_content"
+        android:layout_weight="1.0"
+    >
+
+        <ScrollView android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_weight="1"
+        >
+            <TextView android:id="@+id/wifi_info"
+                      android:layout_width="match_parent"
+                      android:layout_height="wrap_content"
+                      style="@style/InstructionsFont"
+            />
+        </ScrollView>
+
+        <ProgressBar
+            android:id="@+id/wifi_progress"
+            android:layout_width="wrap_content"
+            android:layout_height="wrap_content"
+            android:indeterminate="true"
+            android:layout_gravity="center"
+            android:visibility="gone"
+        />
+    </FrameLayout>
+
+    <include layout="@layout/pass_fail_buttons" />
+
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/menu/test_list_menu.xml b/apps/CtsVerifier/res/menu/test_list_menu.xml
index ecc6920..20e3cc9 100644
--- a/apps/CtsVerifier/res/menu/test_list_menu.xml
+++ b/apps/CtsVerifier/res/menu/test_list_menu.xml
@@ -4,12 +4,8 @@
           android:icon="@android:drawable/ic_menu_delete" 
           android:title="@string/clear"
           android:showAsAction="ifRoom" />
-    <item android:id="@+id/view"
-          android:icon="@android:drawable/ic_menu_view"
-          android:title="@string/view"
-          android:showAsAction="ifRoom" />
     <item android:id="@+id/export"
           android:icon="@android:drawable/ic_menu_save"
           android:title="@string/export"
           android:showAsAction="ifRoom" />
-</menu>
\ No newline at end of file
+</menu>
diff --git a/apps/CtsVerifier/res/raw/speech.mp3 b/apps/CtsVerifier/res/raw/speech.mp3
new file mode 100644
index 0000000..b8ed1c8
--- /dev/null
+++ b/apps/CtsVerifier/res/raw/speech.mp3
Binary files differ
diff --git a/apps/CtsVerifier/res/values/integers.xml b/apps/CtsVerifier/res/values/integers.xml
index 2ced54b..508c52c 100644
--- a/apps/CtsVerifier/res/values/integers.xml
+++ b/apps/CtsVerifier/res/values/integers.xml
@@ -14,5 +14,5 @@
      limitations under the License.
 -->
 <resources>
-    <integer name="test_list_footer_button_count">3</integer>
+    <integer name="test_list_footer_button_count">2</integer>
 </resources>
diff --git a/apps/CtsVerifier/res/values/strings.xml b/apps/CtsVerifier/res/values/strings.xml
index 4f44c1c..22109e7 100755
--- a/apps/CtsVerifier/res/values/strings.xml
+++ b/apps/CtsVerifier/res/values/strings.xml
@@ -45,6 +45,7 @@
     <string name="test_category_telecom">Telecom</string>
     <string name="test_category_telephony">Telephony</string>
     <string name="test_category_tv">TV</string>
+    <string name="test_category_instant_apps">Instant Apps</string>
     <string name="test_category_other">Other</string>
     <string name="clear">Clear</string>
     <string name="test_results_clear_title">Remove all test results?</string>
@@ -52,8 +53,10 @@
     <string name="test_results_clear_cancel">Cancel</string>
     <string name="test_results_cleared">Test results cleared.</string>
     <string name="view">View</string>
-    <string name="test_results_error">Couldn\'t create test results report.</string>
-    <string name="runtime_permissions_error">Cannot continue. Please grant runtime permissions</string>
+    <string name="test_results_error">Couldn\'t create test results report. Try running the
+        following command if you haven\'t yet.\n\"adb shell appops set com.android.cts.verifier
+        android:read_device_identifiers allow\" </string>
+    <string name="runtime_permissions_error">Please grant runtime permissions, otherwise, tests might fail.</string>
     <string name="export">Export</string>
     <string name="no_storage">Cannot save report to external storage, see log for details.</string>
     <string name="report_saved">Report saved to: %s</string>
@@ -65,6 +68,9 @@
     <!-- Strings for ReportViewerActivity -->
     <string name="report_viewer">Report Viewer</string>
 
+    <string name="result_success">Test passed!</string>
+    <string name="result_failure">Test failed!</string>
+
     <!-- String shared between BackupTestActivity and BackupAccessibilityTestActivity -->
     <string name="bu_loading">Loading...</string>
     <string name="bu_generate_error">Error occurred while generating test data...</string>
@@ -122,8 +128,6 @@
         settings that may specify a timeout.\n\nClick the \"Force Lock\" button to lock the screen.
         Your screen should be locked and require the password to be entered.
     </string>
-    <string name="da_kg_disabled_features_test">Keyguard Disabled Features Test</string>
-    <string name="rn_kg_disabled_features_test">Redacted Notifications Keyguard Disabled Features Test</string>
     <string name="da_force_lock">Force Lock</string>
     <string name="da_lock_success">It appears the screen was locked successfully!</string>
     <string name="da_lock_error">It does not look like the screen was locked...</string>
@@ -155,6 +159,39 @@
     </string>
     <string name="da_tapjacking_button_text">Enable device admin</string>
 
+    <!-- Strings for RecentTaskRemovalTestActivity -->
+    <string name="remove_from_recents_test">Recent Task Removal Test</string>
+    <string name="remove_from_recents_test_info">
+        This test verifies that an app whose task is removed from recents is not also force-stopped
+        without explicit user consent. This test requires CtsForceStopHelper.apk to be installed.
+    </string>
+    <string name="fs_test_app_install_instructions">Please install the \'Force stop helper app\' on the device.</string>
+    <string name="fs_test_app_installed_text">\'Force stop helper app\' installed on device. Proceed to the following steps.</string>
+    <string name="fs_test_app_launch_instructions">
+        Tap the button to launch the helper activity. Then return to this screen.
+    </string>
+    <string name="fs_launch_test_app_button_text">Launch test activity</string>
+    <string name="fs_test_app_recents_instructions">
+        Open recents and remove the task of the activity started in the previous step and return to this screen.
+        Deny any dialog that is shown asking for permission to force-stop or kill the app.
+    </string>
+    <string name="fs_force_stop_verification_pending">Verifying... Please wait.</string>
+
+    <!-- Strings for BiometricTest -->
+    <string name="biometric_test">Biometric Test</string>
+    <string name="biometric_test_info">
+        This test ensures that biometrics can be authenticated when templates are enrolled.
+    </string>
+    <string name="biometric_enroll">Enroll</string>
+    <string name="biometric_start_test1">Start Test 1</string>
+    <string name="biometric_start_test2">Start Test 2</string>
+    <string name="biometric_start_test3">Start Test 3</string>
+    <string name="biometric_start_test4">Start Test 4</string>
+    <string name="biometric_start_test5">Start Test 5</string>
+    <string name="biometric_test_strings_title">Instructions</string>
+    <string name="biometric_test_strings_instructions">For the next test, please write down the numbers shown in the title, subtitle, description, and negative button fields. After authenticating, please enter the strings in the appropriate box.</string>
+    <string name="biometric_test_strings_verify_title">Please enter the numbers you recorded</string>
+
     <!-- Strings for lock bound keys test -->
     <string name="sec_lock_bound_key_test">Lock Bound Keys Test</string>
     <string name="sec_lock_bound_key_test_info">
@@ -163,6 +200,7 @@
         complete this test. If available, this test should be run by using fingerprint authentication
         as well as PIN/pattern/password authentication.
     </string>
+
     <string name="sec_fingerprint_bound_key_test">Fingerprint Bound Keys Test</string>
     <string name="sec_fingerprint_bound_key_test_info">
         This test ensures that Keystore cryptographic keys that are bound to fingerprint authentication
@@ -172,7 +210,12 @@
     <string name="sec_fp_dialog_message">Authenticate now with fingerprint</string>
     <string name="sec_fp_auth_failed">Authentication failed</string>
     <string name="sec_start_test">Start Test</string>
-    <string name="sec_fingerprint_dialog_bound_key_test">Fingerprint Bound Keys Test (System Dialog)</string>
+
+    <string name="sec_biometric_prompt_bound_key_test">Biometric Prompt Bound Keys Test</string>
+    <string name="sec_biometric_prompt_bound_key_test_info">
+        This test ensures that Keystore cryptographic keys that are bound to biometric authentication
+        are unusable without an authentication.
+    </string>
 
     <!-- Strings for BluetoothActivity -->
     <string name="bluetooth_test">Bluetooth Test</string>
@@ -187,6 +230,10 @@
         \n\n\"Bluetooth LE Insecure Server Test\" x \"Bluetooth LE Insecure Client Test\"
         \n\n\"Bluetooth LE Secure Client Test\" x \"Bluetooth LE Secure Server Test\"
         \n\n\"Bluetooth LE Secure Server Test\" x \"Bluetooth LE Secure Client Test\"
+        \n\n\"Bluetooth LE CoC Insecure Server Test\" x \"Bluetooth LE CoC Insecure Client Test\"
+        \n\n\"Bluetooth LE CoC Insecure Client Test\" x \"Bluetooth LE CoC Insecure Server Test\"
+        \n\n\"Bluetooth LE CoC Secure Server Test\" x \"Bluetooth LE CoC Secure Client Test\"
+        \n\n\"Bluetooth LE CoC Secure Client Test\" x \"Bluetooth LE CoC Secure Server Test\"
         \n\nThe Device Communication tests require two
         devices to pair and exchange messages. The two devices must be:
         \n\n1. a candidate device implementation running the software build to be tested
@@ -197,6 +244,7 @@
     <string name="bt_device_communication">Device Communication</string>
     <string name="bt_le">Bluetooth LE</string>
     <string name="bt_hid">Bluetooth HID</string>
+    <string name="bt_le_coc">Bluetooth LE CoC</string>
 
     <string name="bt_toggle_bluetooth">Toggle Bluetooth</string>
     <string name="bt_toggle_instructions">Disable and enable Bluetooth to successfully complete this test.</string>
@@ -205,6 +253,65 @@
     <string name="bt_disabling">Disabling Bluetooth...</string>
     <string name="bt_disabling_error">Could not disable Bluetooth...</string>
 
+    <string name="ble_coc_insecure_client_test_list_name">Bluetooth LE CoC Insecure Client Test</string>
+    <string name="ble_coc_insecure_client_test_list_info">
+        The Bluetooth LE CoC test must be done simultaneously on two devices. This device is the client.
+        All tests listed here must be done without pairing. Tap \"Bluetooth LE CoC Insecure Server Test\" on the other device.
+        \n\nTap \"01 Bluetooth LE CoC Client Test\" on this device, then tap \"01 Bluetooth LE CoC Server Test\" on the other device.
+        \nWhen the test is complete, move to the next item. You must complete all tests.
+    </string>
+    <string name="ble_coc_insecure_client_test_info">
+        The Bluetooth LE CoC test must be done simultaneously on two devices. This device is the client.
+        All tests listed here must be done without pairing.
+    </string>
+    <string name="ble_coc_insecure_server_test_list_name">Bluetooth LE CoC Insecure Server Test</string>
+    <string name="ble_coc_insecure_server_test_list_info">
+        This test is mostly automated, but requires some user interaction.
+        Once the list items below have check marks, the test is complete.
+        \n\nTap \"01 Bluetooth LE CoC Server Test\" on this device, then tap \"01 Bluetooth LE CoC Client Test\" on the other device.
+        \nWhen the test is complete, move to the next item. You must complete all tests.
+    </string>
+
+    <string name="ble_coc_secure_client_test_list_name">Bluetooth LE CoC Secure Client Test</string>
+    <string name="ble_coc_secure_client_test_list_info">
+        The Bluetooth LE CoC test must be done simultaneously on two devices. This device is the client.
+        All tests listed here must be done without pairing. Tap \"Bluetooth LE CoC Secure Server Test\" on the other device.
+        \n\nTap \"01 Bluetooth LE CoC Client Test\" on this device, then tap \"01 Bluetooth LE CoC Server Test\" on the other device.
+        \nWhen the test is complete, move to the next item. You must complete all tests.
+    </string>
+    <string name="ble_coc_secure_client_test_info">
+        The Bluetooth LE CoC test must be done simultaneously on two devices. This device is the client.
+        All tests listed here must be done without pairing.
+    </string>
+    <string name="ble_coc_secure_server_test_list_name">Bluetooth LE CoC Secure Server Test</string>
+    <string name="ble_coc_secure_server_test_list_info">
+        This test is mostly automated, but requires some user interaction.
+        Once the list items below have check marks, the test is complete.
+        \n\nTap \"01 Bluetooth LE CoC Server Test\" on this device, then tap \"01 Bluetooth LE CoC Client Test\" on the other device.
+        \nWhen the test is complete, move to the next item. You must complete all tests.
+    </string>
+
+    <!-- BLE CoC client side strings -->
+    <string name="ble_coc_client_test_name">01 Bluetooth LE CoC Client Test</string>
+    <string name="ble_coc_client_le_connect">Bluetooth LE Client Connect</string>
+    <string name="ble_coc_client_get_psm">Get peer PSM value</string>
+    <string name="ble_coc_client_coc_connect">LE CoC client Connect</string>
+    <string name="ble_coc_client_check_connection_type">Check connection type</string>
+    <string name="ble_coc_client_send_data_8bytes">Send 8 bytes</string>
+    <string name="ble_coc_client_receive_data_8bytes">Receive 8 bytes</string>
+    <string name="ble_coc_client_data_exchange">Send and receive large data buffer</string>
+
+    <!-- BLE CoC server side strings -->
+    <string name="ble_coc_server_start_name">01 Bluetooth LE CoC Server Test</string>
+    <string name="ble_coc_server_le_connect">Bluetooth LE Server Connect</string>
+    <string name="ble_coc_server_create_listener">Create LE CoC listener</string>
+    <string name="ble_coc_server_psm_read">Waiting on PSM to be read</string>
+    <string name="ble_coc_server_connection">Waiting on LE CoC connection</string>
+    <string name="ble_coc_server_check_connection_type">Check connection type</string>
+    <string name="ble_coc_server_receive_data_8bytes">Waiting to receive 8 bytes</string>
+    <string name="ble_coc_server_send_data_8bytes">Sending 8 bytes</string>
+    <string name="ble_coc_server_data_exchange">Send and receive large data buffer</string>
+
     <string name="bt_connection_access_server">Connection Access Server</string>
     <string name="bt_connection_access_client">Connection Access Client</string>
     <string name="bt_connection_access_server_info">
@@ -457,6 +564,11 @@
     <string name="ble_secure_server_test_name">Bluetooth LE Secure Server Test</string>
     <string name="ble_insecure_server_test_name">Bluetooth LE Insecure Server Test</string>
 
+    <string name="ble_coc_secure_client_test_name">Bluetooth LE CoC Secure Client Test</string>
+    <string name="ble_coc_insecure_client_test_name">Bluetooth LE CoC Insecure Client Test</string>
+    <string name="ble_coc_secure_server_test_name">Bluetooth LE CoC Secure Server Test</string>
+    <string name="ble_coc_insecure_server_test_name">Bluetooth LE CoC Insecure Server Test</string>
+
     <string name="ble_read_characteristic_nopermission_name">Bluetooth LE Read Characteristic Without Perrmission</string>
     <string name="ble_write_characteristic_nopermission_name">Bluetooth LE Write Characteristic Without Permission</string>
     <string name="ble_read_descriptor_nopermission_name">Bluetooth LE Read Descriptor Without Perrmission</string>
@@ -665,7 +777,7 @@
            as failed.
 
     </string>
-    <string name="multinetwork_connectivity_test_pre_requisites">Prerequisite - Setup a Wi-Fi access point with WPA PSK in which we can turn on or off internet access. </string>
+    <string name="multinetwork_connectivity_test_pre_requisites">Prerequisite - Setup a Wi-Fi access point with WPA PSK in which we can turn on or off internet access. Delete that access point from this device if it exists already. </string>
     <string name="multinetwork_connectivity_test_start">Start</string>
     <string name="multinetwork_connectivity_test_ap_name">Wi-Fi SSID</string>
     <string name="multinetwork_connectivity_test_ap_passphrase">WPA 2 passphrase</string>
@@ -678,8 +790,9 @@
     <string name="multinetwork_connectivity_test_progress_1">Waiting for Wi-Fi to lose connectivity.</string>
     <string name="multinetwork_connectivity_test_progress_2">Waiting to check connectivity.</string>
     <string name="multinetwork_connectivity_test_progress_3">Waiting to make sure Wi-Fi has connectivity.</string>
-    <string name="multinetwork_connectivity_test_1_desc">Test 1 - Connect to Wi-Fi with no internet doesnt disable current connectivity</string>
-    <string name="multinetwork_connectivity_test_2_desc">Test 2 - When connected to Wi-Fi, on losing connectivity, restores mobile connectivity</string>
+    <string name="multinetwork_connectivity_test_1_desc">Test 1 - Connect to Wi-Fi with no internet doesnt disable current connectivity (new API)</string>
+    <string name="multinetwork_connectivity_test_2_desc">Test 2 - Connect to Wi-Fi with no internet doesnt disable current connectivity (legacy API)</string>
+    <string name="multinetwork_connectivity_test_3_desc">Test 3 - When connected to Wi-Fi, on losing connectivity, restores mobile connectivity (legacy API)</string>
     <string name="multinetwork_status_wifi_connect_success">Wi-Fi connect success.</string>
     <string name="multinetwork_status_mobile_connect_success">Mobile net connect success.</string>
     <string name="multinetwork_status_wifi_connect_timed_out">Wi-Fi connect timed out.</string>
@@ -693,7 +806,9 @@
     <string name="multinetwork_connectivity_test_all_prereq_1">Looks like your device does not support telephony or mobile data. If yes, you can mark test passed and proceed.</string>
     <string name="multinetwork_connectivity_test_all_prereq_2">Need mobile data to proceed. Please insert a mobile data capable sim and repeat the test. By marking test as passed, you acknowledge that the device cannot do mobile data.</string>
     <string name="multinetwork_status_wifi_connectivity_failed">Wi-Fi connectivity failed.</string>
-
+    <string name="multinetwork_connectivity_overlay_permission_message">This test requires the CTS verifier to have the system overlay permission, please enable it in the next screen.</string>
+    <string name="multinetwork_connectivity_overlay_permission_positive">Settings</string>
+    <string name="multinetwork_connectivity_overlay_permission_negative">Cancel</string>
     <!-- Strings for NfcTestActivity -->
     <string name="nfc_test">NFC Test</string>
     <string name="nfc_test_info">The Peer-to-Peer Data Exchange tests require two devices with
@@ -764,9 +879,6 @@
     <string name="nfc_reading_tag">Reading NFC tag...</string>
     <string name="nfc_reading_tag_error">Error reading NFC tag...</string>
 
-    <string name="nfc_result_success">Test passed!</string>
-    <string name="nfc_result_failure">Test failed!</string>
-
     <string name="nfc_result_message">Written data:\n%1$s\n\nRead data:\n%2$s</string>
     <string name="nfc_ndef_content">Id: %1$s\nMime: %2$s\nPayload: %3$s</string>
 
@@ -975,6 +1087,7 @@
     <!-- Magnetic Field -->
     <string name="snsr_mag_m_test">Magnetic Field Measurement Tests</string>
     <string name="snsr_mag_verify_norm">Verifying the Norm...</string>
+    <string name="snsr_mag_verify_offset">Verifying the Offset...</string>
     <string name="snsr_mag_verify_std_dev">Verifying the Standard Deviation...</string>
     <string name="snsr_mag_verify_calibrated_uncalibrated">Verifying the relationship between
         calibrated and uncalibrated measurements...</string>
@@ -1010,6 +1123,12 @@
     <string name="snsr_step_counter_event">%1$d | Step Counter event. count=%2$d.</string>
     <string name="snsr_step_detector_event">%1$d | Step Detector event.</string>
 
+    <!-- Step Counter and Detector Permission -->
+    <string name="snsr_step_permission_test">Step Permission Test</string>
+    <string name="snsr_step_permission_disable">Please change the \'Physical Activity\' permission for CtsVerifier to \'Deny\'</string>
+    <string name="snsr_step_permission_enable">Please change the \'Physical Activity\' permission for CtsVerifier to \'Allow\'</string>
+    <string name="snsr_step_permission_walk">Please begin walking while holding the device. A sound will play when you may stop walking</string>
+
     <!-- Device suspend tests -->
     <string name="snsr_device_suspend_test">Device Suspend Tests</string>
     <string name="snsr_device_did_not_go_into_suspend">Device did not go into suspend mode during test execution </string>
@@ -1131,8 +1250,12 @@
     This test verifies that the default camera app is firing intents
     after pictures/videos are taken. It also verifies that when the
     default camera app is invoked via intents, the launch intents work,
-    and the broadcast intents are received when appropriate per the SDK
-    documentation.\n\n
+    ,broadcast intents are received when appropriate per the SDK
+    documentation and also, that the intent results do not have location
+    information in them. Before starting with the tests, please go to the
+    Settings app and deny location permissions to the CtsVerifier app, and
+    after finishing with the camera intent tests, please go to the Settings app and
+    restore location permissions, otherwise, other tests may fail.\n\n
     - Read the message above the \"Start Test\" button for
     step-by-step instructions.
     </string>
@@ -1142,17 +1265,21 @@
     <string name="ci_intents_label">Intents Test</string>
     <string name="ci_intents_direction_label">clockwise</string>
     <string name="ci_instruction_heading_label">Instructions:</string>
+    <string name="ci_location_permissions_error">Please give CTS Verifier location permissions before clicking on the pass / fail button</string>
+    <string name="ci_location_permissions_fail_error">Please give CTS Verifier location permissions if the fail button needs to be clicked</string>
     <string name="ci_directory_creation_error">CTS Verifier debug directory could not be created, please try again</string>
     <string name="ci_instruction_text_photo_label">READ BEFORE STARTING TEST</string>
     <string name="ci_instruction_text_passfail_label">Choose \"Pass\" if the right intent is fired after taking a photo from the camera app. Otherwise, choose \"Fail\".</string>
     <string name="ci_instruction_text_app_picture_label">\n
-    1. Click Start Test. \n
-    2. Go to home screen (HOME key). \n
-    3. Launch Camera application. \n
-    4. Capture photo within 1 minute. \n
-    5. Return to CTS verifier app. \n
-    6. Pass button will light up if URI trigger was fired.\n
-    7. Click "Pass" if possible.
+
+    1. Click Open Settings and deny location permissions to CTS Verifier and return. \n
+    2. Click Start Test. \n
+    3. Go to home screen (HOME key). \n
+    4. Launch Camera application. \n
+    5. Capture photo within 1 minute. \n
+    6. Return to CTS verifier app. \n
+    7. Pass button will light up if intent and URI triggers were fired.\n
+    8. Click "Pass" if possible, otherwise open settings app, allow location again and click "Fail".
     </string>
     <string name="ci_instruction_text_app_video_label">\n
     1. Click Start Test. \n
@@ -1160,24 +1287,26 @@
     3. Launch Camera application. \n
     4. Capture video within 1 minute. \n
     5. Return to CTS verifier app. \n
-    6. Pass button will light up if URI trigger was fired.\n
-    7. Click "Pass" if possible.
+    6. Pass button will light up if intent and URI triggers were fired.\n
+    7. Click "Pass" if possible, otherwise open settings app, allow location again and click "Fail".
     </string>
     <string name="ci_instruction_text_intent_picture_label">\n
     1. Click Start Test.\n
     2. Camera app will launch, prompting to take photo.\n
     3. Capture/confirm photo using camera app controls within 1 minute.\n
-    4. Pass button will light up if URI trigger was NOT received.\n
-    5. Click "Pass" if possible.
+    4. Pass button will light up if intent and URI trigger were NOT received.\n
+    5. Click "Pass" if possible, otherwise open settings app, allow location again and click "Fail".
     </string>
     <string name="ci_instruction_text_intent_video_label">\n
     1. Click Start Test.\n
     2. Camera app will launch, prompting to take video.\n
     3. Capture/confirm video using camera app controls within 1 minute.\n
-    4. Pass button will light up if URI trigger was received.\n
+    4. Return to the CTS Verifier app. Click Open Settings and give back CTS Verifier location permissions \n
+    (Note this must be done before clicking on Pass / Fail).\n
     5. Click "Pass" if possible.
     </string>
     <string name="ci_start_test_button_caption">Start Test</string>
+    <string name="ci_settings_button_caption">Open Settings</string>
 
     <!-- Strings for Camera Formats -->
     <string name="camera_format">Camera Formats</string>
@@ -1255,9 +1384,9 @@
         1. Install the Cts Verifier USB Companion app on a separate helper device.
         \n\n2. Start the device test companion in the Cts Verifier USB Companion.
         \n\n3. Connect the two devices. If using a OTG adapter make sure the adapter is directly connected to this device. If using an Type-C cable make sure that this device is set as "supply power to the attached device".
-        \n\n4. Confirm access to the USB device on this device.
+        \n\n4. Confirm access to the USB device on this device. Do <u>not</u> make this app the default app for the device.
         \n\n5. Confirm access to the USB accessory on the helper device.
-        \n\n6. Confirm access to the USB device on this device again.
+        \n\n6. Confirm access to the USB device on this device again. Do <u>not</u> make this app the default app for the device.
         \n\n7. Test will run and complete automatically in less than 30 seconds.
         \n\n8. Cancel all further dialogs on the helper device.
     </string>
@@ -1269,12 +1398,12 @@
         \n\nResult: A dialog should show up on this device asking for access to a USB device.
     </string>
     <string name="usb_device_test_step2">
-        Confirm access to the USB device on this device.
+        Confirm access to the USB device on this device. Do <u>not</u> make this app the default app for the device.
         \n\nResult: Dialogs should show up on this device and on the helper device asking for access to a USB device/accessory.
     </string>
     <string name="usb_device_test_step3">
         1. Confirm access to the USB accessory on the helper device.
-        \n2. Confirm access to the USB device on this device again.
+        \n2. Confirm access to the USB device on this device again. Do <u>not</u> make this app the default app for the device.
         \n\nResult: A progress indicator should appear or test will finish.
     </string>
     <string name="usb_device_test_step4">
@@ -1338,6 +1467,16 @@
     <string name="camera_flashlight_passed_text">All tests passed. Press Done or Pass button.
     </string>
 
+    <!-- Strings for the Camera Performance test activity -->
+    <string name="camera_performance_test">Camera Performance</string>
+    <string name="camera_performance_test_info">
+        This activity will run performance test cases. For optimal and consistent results please
+        make sure that all camera sensors are pointing in a direction with sufficiently bright
+        light source.
+    </string>
+    <string name="camera_performance_spinner_text">Running CTS performance test case...</string>
+    <string name="camera_performance_result_title">Test Result</string>
+
     <!-- Strings for StreamingVideoActivity -->
     <string name="streaming_video">Streaming Video Quality Verifier</string>
     <string name="streaming_video_info">This is a test for assessing the quality of streaming videos.  Play each stream and verify that the video is smooth and in sync with the audio, and that there are no quality problems.</string>
@@ -1345,6 +1484,67 @@
     <string name="sv_failed_title">Test Failed</string>
     <string name="sv_failed_message">Unable to play stream.  See log for details.</string>
 
+    <!-- Strings for TestListActivity -->
+    <string name="wifi_test">Wi-Fi Test</string>
+    <string name="wifi_test_info">
+        The Wi-Fi tests require an open (no security) access point in the environment along with the DUT.
+        \nPlease perform a network settings reset between each test to reset platform\'s internal state which
+        might interfere with the test flow.\nNavigate to \"Settings -> System -> Reset Options -> Reset Wi-Fi,
+        mobile &amp; Bluetooth\" to perform a network settings reset.
+    </string>
+    <string name="wifi_location_not_enabled">Wi-Fi / Location Mode is not enabled</string>
+    <string name="wifi_location_not_enabled_message">These tests require Wi-Fi and Location Mode to be enabled.
+        Click the button below to go to system settings and enable Wi-Fi and Location Mode.</string>
+    <string name="wifi_settings">Wi-Fi Settings</string>
+    <string name="location_settings">Location Settings</string>
+    <string name="wifi_setup_error">
+        Test failed.\n\nSet up error. Check whether Wi-Fi is enabled.</string>
+    <string name="wifi_unexpected_error">
+        Test failed.\n\nUnexpected error. Check logcat.</string>
+
+    <string name="wifi_status_initiating_scan">Initiating scan.</string>
+    <string name="wifi_status_scan_failure">Unable to initiate scan or find any open network in scan results.</string>
+    <string name="wifi_status_connected_to_other_network">Connected to some other network on the device. Please ensure that there are no saved networks on the device.</string>
+    <string name="wifi_status_initiating_network_request">Initiating network request.</string>
+    <string name="wifi_status_network_wait_for_available">Waiting for network connection. Please click the network in the dialog that pops up for approving the request.</string>
+    <string name="wifi_status_network_available">"Connected to network."</string>
+    <string name="wifi_status_network_wait_for_unavailable">"Ensuring device does not connect to any network. You should see an empty dialog that pops up for approving the request."</string>
+    <string name="wifi_status_network_unavailable">"Did not connect to any network."</string>
+    <string name="wifi_status_network_wait_for_lost">Ensuring device does not disconnect from the network until the request is released.</string>
+    <string name="wifi_status_network_lost">Disconnected from the network.</string>
+    <string name="wifi_status_network_cb_timeout">Network callback timed out.</string>
+
+    <string name="wifi_status_suggestion_add">Adding suggestions to the device.</string>
+    <string name="wifi_status_suggestion_add_failure">Failed to add suggestions.</string>
+    <string name="wifi_status_suggestion_remove">Removing suggestions from the device.</string>
+    <string name="wifi_status_suggestion_remove_failure">Failed to remove suggestions.</string>
+    <string name="wifi_status_suggestion_wait_for_connect">Waiting for network connection. Please click \"Yes\" in the notification that pops up for approving the request.</string>
+    <string name="wifi_status_suggestion_connect">Connected to the network.</string>
+    <string name="wifi_status_suggestion_wait_for_post_connect_bcast">Waiting for post connection broadcast.</string>
+    <string name="wifi_status_suggestion_post_connect_bcast">Received post connection broadcast.</string>
+    <string name="wifi_status_suggestion_post_connect_bcast_failure">Failed to receive post connection broadcast.</string>
+    <string name="wifi_status_suggestion_wait_for_disconnect">Ensuring device does not disconnect from the network after removing suggestions.</string>
+    <string name="wifi_status_suggestion_disconnected">Disconnected from the network.</string>
+
+    <string name="wifi_status_test_success">Test completed successfully!</string>
+    <string name="wifi_status_test_failed">Test failed!</string>
+
+    <string name="wifi_test_network_request">Network Request tests</string>
+    <string name="wifi_test_network_request_specific">Network Request with a specific SSID and BSSID.</string>
+    <string name="wifi_test_network_request_specific_info">Tests whether the API can be used to connect to a network with a specific SSID and BSSID specified in the request.</string>
+    <string name="wifi_test_network_request_pattern">Network Request with a SSID and BSSID pattern.</string>
+    <string name="wifi_test_network_request_pattern_info">Tests whether the API can be used to connect to a network with a SSID and BSSID pattern specified in the request.</string>
+    <string name="wifi_test_network_request_unavailable">Network Request with a specific network that is unavailable.</string>
+    <string name="wifi_test_network_request_unavailable_info">Tests that the API fails to connect when an unavailable network is specified in the request.</string>
+
+    <string name="wifi_test_network_suggestion">Network Suggestion tests</string>
+    <string name="wifi_test_network_suggestion_ssid">Network suggestion with SSID.</string>
+    <string name="wifi_test_network_suggestion_ssid_info">Tests whether the API can be used to suggest a network with SSID to the device and the device connects to it.</string>
+    <string name="wifi_test_network_suggestion_ssid_bssid">Network suggestion with SSID and BSSID.</string>
+    <string name="wifi_test_network_suggestion_ssid_bssid_info">Tests whether the API can be used to suggest a network with SSID and specific BSSID to the device and the device connects to it.</string>
+    <string name="wifi_test_network_suggestion_ssid_post_connect">Network suggestion with SSID and post connection broadcast.</string>
+    <string name="wifi_test_network_suggestion_ssid_post_connect_info">Tests whether the API can be used to suggest a network with SSID to the device and the device connects to it and sends the post connect broadcast back to the app.</string>
+
     <!-- Strings for P2pTestActivity -->
     <string name="p2p_test">Wi-Fi Direct Test</string>
     <string name="p2p_test_info">
@@ -1362,7 +1562,21 @@
     <string name="p2p_go_neg_responder_test">GO Negotiation Responder Test</string>
     <string name="p2p_go_neg_requester_test">GO Negotiation Requester Test</string>
     <string name="p2p_group_owner_test">Group Owner Test</string>
+    <string name="p2p_join_with_config">Group Join with Config</string>
+    <string name="p2p_join_with_config_2g_band">Group Join with Config 2G Band</string>
+    <string name="p2p_join_with_config_fixed_frequency">
+        Group Join with Config Fixed Frequency</string>
+    <string name="p2p_group_owner_with_config_test">Group Owner With Config Test</string>
+    <string name="p2p_group_owner_with_config_2g_band_test">
+        Group Owner With Config 2G Band Test</string>
+    <string name="p2p_group_owner_with_config_fixed_frequency_test">
+        Group Owner With Config Fixed Frequency Test</string>
     <string name="p2p_group_client_test">Group Client Test</string>
+    <string name="p2p_group_client_with_config_test">Group Client With Config Test</string>
+    <string name="p2p_group_client_with_config_2g_band_test">
+        Group Client With Config 2G Band Test</string>
+    <string name="p2p_group_client_with_config_fixed_frequency_test">
+        Group Client With Config Fixed Frequency Test</string>
     <string name="p2p_service_discovery_responder_test">
         Service Discovery Responder Test</string>
     <string name="p2p_service_discovery_requester_test">
@@ -1554,6 +1768,7 @@
     <string name="aware_status_network_requested">Network requested ...</string>
     <string name="aware_status_network_success">Network formed ...</string>
     <string name="aware_status_network_failed">Network request failure - timed out!</string>
+    <string name="aware_status_network_failed_leak">Failure: Network request success - but leaked information!</string>
    <string name="aware_status_sleeping_wait_for_responder">Pausing to give the Responder time to set up ...</string>
     <string name="aware_status_ranging_peer_failure">Ranging to PeerHandle failure: %1$d failures of %2$d attempts!</string>
     <string name="aware_status_ranging_mac_failure">Ranging to MAC address failure: %1$d failures of %2$d attempts!</string>
@@ -1562,6 +1777,11 @@
     <string name="aware_status_lifecycle_failed">Discovery lifecycle FAILURE!</string>
     <string name="aware_status_lifecycle_ok">Discovery lifecycle validated!</string>
 
+    <string name="aware_status_socket_failure">Failure on socket connection setup!</string>
+    <string name="aware_status_socket_server_socket_started">ServerSocket started on port %1$d!</string>
+    <string name="aware_status_socket_server_info_rx">Peer server info: IPv6=%1$s @ port=%2$d!</string>
+    <string name="aware_status_socket_server_message_from_peer">Message from peer: \'%1$s\'</string>
+
     <string name="aware_data_path_open_unsolicited_publish">Data Path: Open: Unsolicited Publish</string>
     <string name="aware_data_path_open_unsolicited_publish_info">The publisher is now ready.\n\nOn the other device: start the \'Data Path: Open: Unsolicited/Passive\' / \'Subscribe\' test.</string>
     <string name="aware_data_path_open_passive_subscribe">Data Path: Open: Passive Subscribe</string>
@@ -1625,6 +1845,7 @@
         itself according to the current rotation of the device.</string>
 
     <string name="test_category_notifications">Notifications</string>
+    <string name="test_category_tiles">Tiles</string>
     <string name="package_priority_test">Notification Package Priority Test</string>
     <string name="package_priority_info">This test checks that the NotificationManagerService respects
         user preferences about relative package priorities.
@@ -1701,6 +1922,18 @@
         dismiss them.
     </string>
     <string name="msg_extras_preserved">Check that Message extras Bundle was preserved.</string>
+    <string name="tile_service_name">Tile Service for CTS Verifier</string>
+    <string name="tiles_test">Tile Service Test</string>
+    <string name="tiles_info">This test checks that a Tile Service added by a third party
+        application is not immediately added to the current Quick Settings tiles but can be added
+        by the user.
+    </string>
+    <string name="tiles_adding_tile">Check that Tile Service is enabled</string>
+    <string name="tiles_not_added">Open Quick Settings and check that the Tile Service for CTS
+        Verifier is not visible in any page</string>
+    <string name="tiles_in_customizer">Open Quick Settings and click the button to customize Quick
+        Settings. Check that the Tile Service for CTS Verifier is available to be added</string>
+    <string name="tiles_removing_tile">Check that Tile Service is disabled</string>
     <string name="vr_tests">VR Tests</string>
     <string name="test_category_vr">VR</string>
     <string name="vr_test_title">VR Listener Test</string>
@@ -1737,6 +1970,7 @@
     <string name="nas_note_enqueued_received">Check that notification was enqueued.</string>
     <string name="nls_note_received">Check that notification was received.</string>
     <string name="nls_payload_intact">Check that notification payload was intact.</string>
+    <string name="nls_audibly_alerted">Check that notification audibly alerting was reported correctly.</string>
     <string name="nas_adjustment_payload_intact">Check that the Assistant can adjust notifications.</string>
     <string name="nas_adjustment_enqueue_payload_intact">Check that the Assistant can adjust notifications on enqueue.</string>
     <string name="nas_create_channel">Check that the Assistant can create a Notification Channel for another app.</string>
@@ -1769,7 +2003,9 @@
     <string name="cp_disable_service">Please disable \"CTS Verifier\" under Do Not Disturb access and return here.</string>
     <string name="cp_start_settings">Launch Settings</string>
     <string name="cp_create_rule">Creating Automatic Zen Rule</string>
+    <string name="cp_create_rule_with_zen_policy">Creating Automatic Zen Rule with Zen Policy</string>
     <string name="cp_update_rule">Updating Automatic Zen Rule</string>
+    <string name="cp_update_rule_use_zen_policy">Updating Automatic Rule to Use Zen Policy</string>
     <string name="cp_subscribe_rule">Subscribing to Automatic Zen Rule</string>
     <string name="cp_service_started">Service should start once enabled.</string>
     <string name="cp_service_stopped">Service should stop once disabled.</string>
@@ -1814,9 +2050,9 @@
 In the case that these credentials were already installed, you may skip this step.</string>
     <string name="keychain_https_desc">The last test involves setting up an HTTPS connection using credentials from the KeyChain.\n\n
 You should be prompted to select credentials; choose the ones you just installed in the previous step.</string>
-    <string name="keychain_reset_desc">Before marking this test as passed, tap \'Next\' to open security settings and reset the following items:\n
- 1. Clear device credentials.\n
- 2. Change the lock screen type to \'None\'.</string>
+    <string name="keychain_reset_desc">Before marking this test as passed, tap \'Next\' to open security settings to clear credentials:\n
+ 1. Open Encryption and credentials.\n
+ 2. Tap Clear credentials.</string>
 
     <!-- Strings for Widget -->
     <string name="widget_framework_test">Widget Framework Test</string>
@@ -1827,6 +2063,12 @@
     <string name="widget_pass">Pass</string>
     <string name="widget_fail">Fail</string>
 
+    <string name="provisioning_byod_non_market_apps">Non-market app installation restrictions</string>
+    <string name="provisioning_byod_non_market_apps_info">
+        This test exercises user restrictions on installation of non-market apps. Follow
+        instructions in each test.
+    </string>
+
     <string name="provisioning_byod_nonmarket_allow">Enable non-market apps</string>
     <string name="provisioning_byod_nonmarket_allow_info">
         This test verifies that non-market apps can be installed if permitted.\n
@@ -1844,6 +2086,56 @@
         2. Verify that the installation of the package is refused.
     </string>
 
+    <string name="provisioning_byod_nonmarket_allow_global">Enable non-market apps (global restriction)</string>
+    <string name="provisioning_byod_nonmarket_allow_global_info">
+        This test verifies that non-market apps can be installed if permitted by device-wide block.\n
+        1. A package installation UI should appear.\n
+        2. If \'Cts Verifier\' is not allowed to install apps, a warning dialog will appear
+        blocking the install. In this case go to step 3, else skip to step 4.\n
+        3. Allow \'Cts Verifier\' to install apps. Return to package installer.\n
+        4. Accept the installation and verify that it succeeds (no error message is displayed).
+    </string>
+
+    <string name="provisioning_byod_nonmarket_deny_global">Disable non-market apps (global restriction)</string>
+    <string name="provisioning_byod_nonmarket_deny_global_info">
+        This test verifies that non-market apps cannot be installed unless permitted by device-wide block.\n
+        1. A package installation UI should appear.\n
+        2. Verify that the installation of the package is refused.
+    </string>
+
+    <string name="provisioning_byod_nonmarket_allow_global_primary">Enable primary user non-market apps (global restriction)</string>
+    <string name="provisioning_byod_nonmarket_allow_global_primary_info">
+        This test verifies that non-market apps from the primary user can be installed if permitted.\n
+        1. You should have received NotificationBot.apk together with the CTS verifier. If you built
+        the CTS verifier yourself, build the NotificationBot.apk by issuing the following command on
+        the host:\n
+        make NotificationBot\n
+        2. Upload the NotificationBot.apk to your device by issuing the following command on the
+        host:\n
+        adb push /path/to/NotificationBot.apk /data/local/tmp/\n
+        3. Press \"Go\" to install NotificationBot.apk in your personal profile. A package
+        installation UI should appear.\n
+        4. If \'Cts Verifier\' is not allowed to install apps, a warning dialog will appear
+        blocking the install. In this case go to step 5, else skip to step 6.\n
+        5. Allow \'Cts Verifier\' to install apps. Return to package installer.\n
+        6. Accept the installation and verify that it succeeds (no error message is displayed).
+    </string>
+
+    <string name="provisioning_byod_nonmarket_deny_global_primary">Disable primary user non-market apps (global restriction)</string>
+    <string name="provisioning_byod_nonmarket_deny_global_primary_info">
+        This test verifies that non-market apps from the primary user cannot be installed unless permitted.\n
+        1. You should have received NotificationBot.apk together with the CTS verifier. If you built
+        the CTS verifier yourself, build the NotificationBot.apk by issuing the following command on
+        the host:\n
+        make NotificationBot\n
+        2. Upload the NotificationBot.apk to your device by issuing the following command on the
+        host:\n
+        adb push /path/to/NotificationBot.apk /data/local/tmp/\n
+        3. Press \"Go\" to install NotificationBot.apk in your personal profile. A package
+        installation UI should appear.\n
+        4. Verify that the installation of the package is refused.
+    </string>
+
     <string name="provisioning_byod_capture_image_support">Camera support cross profile image capture</string>
     <string name="provisioning_byod_capture_image_support_info">
         This test verifies that images can be captured from the managed profile using the primary profile camera.\n
@@ -1980,13 +2272,24 @@
         3. Verify that the background image contains a suitcase.\n
         4. Verify that the background color of the remaining image is blue.\n
         5. Verify that the header text says \"CtsVerifier\".\n
-        6. Confirm your credentials and verify that the credentials you entered previously work.
+        6. Confirm your credentials and verify that the credentials you entered previously work.\n
         7. The work app should be launched.
     </string>
     <string name="provisioning_byod_confirm_work_credentials_header">
         CtsVerifier
     </string>
+    <string name="provisioning_byod_pattern_work_challenge">Confirm pattern lock test</string>
+    <string name="provisioning_byod_pattern_work_challenge_description">
+        This test verifies that when a work pattern lock is set, a work app can open correctly.
 
+        1. Verify that you get sent to the page for Choosing a new work lock.\n
+        2. Set a pattern lock.\n
+        3. Press the power button to turn the screen off and then back on and swipe to unlock.\n
+        4. Open a work app.\n
+        5. Verify that a screen asking you for your work credentials is shown.\n
+        6. Confirm your credentials and verify that the credentials you entered previously work.\n
+        7. The work app should be launched.
+    </string>
     <string name="provisioning_byod_recents">Recents redaction test</string>
     <string name="provisioning_byod_recents_info">
         This test verifies that if a work profile is locked with a separate password, Recents views
@@ -2016,14 +2319,16 @@
         Verify recents are not redacted when unlocked.
     </string>
     <string name="provisioning_byod_recents_verify_not_redacted_instruction">
-        1) Follow the instructions on-screen to remove the work password.\n
-        2) Open Recents.\n
-        3) Confirm that this "CTS Verifier" activity is shown in Recents.\n
-        4) Confirm that the contents of the activity <b>are not</b> hidden.\n
-        5) Return to this page and pass the test.
+        1) Press the Go button to go to security settings.\n
+        2) Make work profile use one lock with personal profile.\n
+        3) Open Recents.\n
+        4) Confirm that this "CTS Verifier" activity is shown in Recents.\n
+        5) Confirm that the contents of the activity <b>are not</b> hidden.\n
+        6) Return to this page and pass the test.
     </string>
     <string name="provisioning_byod_recents_remove_password">
-        The work profile still has a separate password. Please remove this before continuing.
+        The work profile still has a separate password. Please make it use one lock with the
+        personal profile.
     </string>
 
     <string name="provisioning_byod_keychain">KeyChain test</string>
@@ -2035,9 +2340,6 @@
         2) Testing that a generated key can be hidden from users.\n
         \n
         Tap \"Prepare Test\" button below to begin.\n
-        \n
-        NOTE: A screen lock must be configured for this test. Otherwise, test preparation
-        will fail to generate a key for use by the test.
     </string>
     <string name="provisioning_byod_keychain_info_first_test">
         Once you press \'Go\', a prompt titled \"Choose certificate\" should appear.\n
@@ -2048,10 +2350,8 @@
         Press \'Go\'.\n
     </string>
     <string name="provisioning_byod_keychain_info_second_test">
-        Once you press \'Run 2nd test\', the same prompt should appear again.\n
-        This time, verify that the title is \"No certificates found\" and the list is empty,
-        then press \'Cancel\'.\n
-        \n
+        Once you press \'Run 2nd test\', the prompt should NOT appear.\n
+        Verify that the prompt does not appear at all.\n
         Mark the test as passed if the text at the bottom shows \"PASSED (2/2)\"\n
     </string>
 
@@ -2212,7 +2512,7 @@
     <string name="provisioning_tests_byod_custom_color"> Custom provisioning color </string>
     <string name="provisioning_tests_byod_custom_color_info">
         Please press the Go button to start the provisioning.
-        Check that the top status bar and "Accept and continue" button are colorized in green.
+        Check that the top status bar is colorized in green.
         Then hit back and stop the provisioning.
     </string>
     <string name="provisioning_tests_byod_custom_image"> Custom provisioning image </string>
@@ -2220,7 +2520,7 @@
         1. Please press the Go button to start the provisioning.\n
         2. Press \"Accept and continue\" button to start work profile provisioning\n
         3. Check that the CtsVerifier logo is displayed during provisioning\n
-        4. After successful provisioning, you should be automatically redirected back to this page
+        4. After successful provisioning, come back to this page. You might need to press a button on the final provisioning screen.
     </string>
     <string name="provisioning_tests_byod_custom_terms">Custom terms</string>
     <string name="provisioning_tests_byod_custom_terms_instructions">
@@ -2246,7 +2546,7 @@
         1. Press the button below to start the managed provisioning flow.
         If your device has not been encrypted before, it will reboot to apply encryption.
         After reboot follow instructions in the notification area to complete the provisioning.\n
-        2. After successful provisioning, you should be automatically redirected back to this page.
+        2. After successful provisioning, come back to this page. You might need to press a button on the final provisioning screen.
         Please press through the following verification steps.
         Allow a few seconds after returning from provisioning, as the profile owner test should automatically pass.\n
         \n
@@ -2467,6 +2767,7 @@
         \n
         - A new set of work apps including CTS Verifier appear in the list.\n
         - Work badge overlay appears on work app\'s icon (see example icon below, color and style may vary).\n
+        - The work badge overlay has the same size and position on each work app\'s icon.
         \n
         Then navigate back to this screen using Recents button.
     </string>
@@ -2499,7 +2800,7 @@
         Please press the Go button to open the Settings page.
         (If this device has a separate app for work settings, ignore the Go button and navigate to that app manually).\n
         \n
-        Navigate to \"Data usage\" page and then into the \"Wi-Fi data usage\" category.\n
+        Navigate to the \"Network &amp; Internet\" page and then click on \"Wi-Fi\" and then \"Wi-Fi data usage\".\n
         Confirm that \"All work apps\" section is present and that it is possible to see the data usage for work (badged) apps.\n
         (If the section is not present, this might be because work apps have not used Wi-Fi data yet. Ensure that you have used Wi-Fi