am b087665b: Clean up ManifestTestListAdapter's filterTests loop.

* commit 'b087665b3a86cf44d1f6bb7b34c9ba6c0e6bcb4b':
  Clean up ManifestTestListAdapter's filterTests loop.
diff --git a/apps/CameraITS/.gitignore b/apps/CameraITS/.gitignore
new file mode 100644
index 0000000..259969b
--- /dev/null
+++ b/apps/CameraITS/.gitignore
@@ -0,0 +1,11 @@
+# Ignore files that are created as a result of running the ITS tests.
+
+*.json
+*.yuv
+*.jpg
+*.jpeg
+*.png
+*.pyc
+its.target.cfg
+.DS_Store
+
diff --git a/apps/CameraITS/Android.mk b/apps/CameraITS/Android.mk
new file mode 100644
index 0000000..8f7ed7c
--- /dev/null
+++ b/apps/CameraITS/Android.mk
@@ -0,0 +1,31 @@
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+its-dir-name := CameraITS
+its-dir := $(HOST_OUT)/$(its-dir-name)
+its-build-stamp := $(its-dir)/build_stamp
+
+camera-its: $(its-build-stamp)
+
+.PHONY: camera-its
+
+$(its-dir): $(its-build-stamp)
+
+$(its-build-stamp): $(ACP)
+	echo $(its-dir)
+	mkdir -p $(its-dir)
+	$(ACP) -rfp cts/apps/$(its-dir-name)/* $(its-dir)
+	rm $(its-dir)/Android.mk
+	touch $@
diff --git a/apps/CameraITS/CameraITS.pdf b/apps/CameraITS/CameraITS.pdf
new file mode 100644
index 0000000..5e5fd29
--- /dev/null
+++ b/apps/CameraITS/CameraITS.pdf
Binary files differ
diff --git a/apps/CameraITS/build/envsetup.sh b/apps/CameraITS/build/envsetup.sh
new file mode 100644
index 0000000..a95c445
--- /dev/null
+++ b/apps/CameraITS/build/envsetup.sh
@@ -0,0 +1,45 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This file should be sourced from bash. It sets environment variables for
+# running tests, and also checks that a number of dependencies are present
+# and that the unit tests for the modules pass (indicating that the setup
+# is correct).
+
+[[ "${BASH_SOURCE[0]}" != "${0}" ]] || \
+    { echo ">> Script must be sourced with 'source $0'" >&2; exit 1; }
+
+command -v adb >/dev/null 2>&1 || \
+    echo ">> Require adb executable to be in path" >&2
+
+command -v python >/dev/null 2>&1 || \
+    echo ">> Require python executable to be in path" >&2
+
+python -V 2>&1 | grep -q "Python 2.7" || \
+    echo ">> Require python 2.7" >&2
+
+for M in numpy PIL Image matplotlib pylab
+do
+    python -c "import $M" >/dev/null 2>&1 || \
+        echo ">> Require Python $M module" >&2
+done
+
+export PYTHONPATH="$PWD/pymodules:$PYTHONPATH"
+
+for M in device objects image caps dng target error
+do
+    python "pymodules/its/$M.py" 2>&1 | grep -q "OK" || \
+        echo ">> Unit test for $M failed" >&2
+done
+
diff --git a/apps/CameraITS/pymodules/its/__init__.py b/apps/CameraITS/pymodules/its/__init__.py
new file mode 100644
index 0000000..59058be
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
new file mode 100644
index 0000000..b713db9
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -0,0 +1,221 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+import its.objects
+import sys
+
+
+def skip_unless(cond):
+    """Skips the test if the condition is false.
+
+    If a test is skipped, it exits and returns the special code 101 to the
+    calling shell, which an external test harness can use to differentiate
+    a skip from a pass or fail.
+
+    Args:
+        cond: Boolean, which must be true for the test not to be skipped.
+
+    Returns:
+        Nothing.
+    """
+    SKIP_RET_CODE = 101
+
+    if not cond:
+        print "Test skipped"
+        sys.exit(SKIP_RET_CODE)
+
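+# Example (sketch): gate a test on a capability; `props` is assumed to come
+# from its.device.ItsSession.get_camera_properties():
+#     its.caps.skip_unless(its.caps.manual_sensor(props))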
+
+def full(props):
+    """Returns whether a device is a FULL capability camera2 device.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.info.supportedHardwareLevel") and \
+           props["android.info.supportedHardwareLevel"] == 1
+
+def limited(props):
+    """Returns whether a device is a LIMITED capability camera2 device.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.info.supportedHardwareLevel") and \
+           props["android.info.supportedHardwareLevel"] == 0
+
+def legacy(props):
+    """Returns whether a device is a LEGACY capability camera2 device.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.info.supportedHardwareLevel") and \
+           props["android.info.supportedHardwareLevel"] == 2
+
+def manual_sensor(props):
+    """Returns whether a device supports MANUAL_SENSOR capabilities.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return (props.has_key("android.request.availableCapabilities") and
+            1 in props["android.request.availableCapabilities"]
+            or full(props))
+
+def manual_post_proc(props):
+    """Returns whether a device supports MANUAL_POST_PROCESSING capabilities.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return (props.has_key("android.request.availableCapabilities") and
+            2 in props["android.request.availableCapabilities"]
+            or full(props))
+
+def raw(props):
+    """Returns whether a device supports RAW capabilities.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.request.availableCapabilities") and \
+           3 in props["android.request.availableCapabilities"]
+
+def raw16(props):
+    """Returns whether a device supports RAW16 output.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return len(its.objects.get_available_output_sizes("raw", props)) > 0
+
+def raw10(props):
+    """Returns whether a device supports RAW10 output.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return len(its.objects.get_available_output_sizes("raw10", props)) > 0
+
+def sensor_fusion(props):
+    """Returns whether the camera and motion sensor timestamps for the device
+    are in the same time domain and can be compared directly.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.sensor.info.timestampSource") and \
+           props["android.sensor.info.timestampSource"] == 1
+
+def read_3a(props):
+    """Return whether a device supports reading out the following 3A settings:
+        sensitivity
+        exposure time
+        awb gain
+        awb cct
+        focus distance
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    # TODO: check available result keys explicitly
+    return manual_sensor(props) and manual_post_proc(props)
+
+def compute_target_exposure(props):
+    """Return whether a device supports target exposure computation in its.target module.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return manual_sensor(props) and manual_post_proc(props)
+
+def freeform_crop(props):
+    """Returns whether a device supports freefrom cropping.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.scaler.croppingType") and \
+           props["android.scaler.croppingType"] == 1
+
+def flash(props):
+    """Returns whether a device supports flash control.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.flash.info.available") and \
+           props["android.flash.info.available"] == 1
+
+
+def per_frame_control(props):
+    """Returns whether a device supports per frame control
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        Boolean.
+    """
+    return props.has_key("android.sync.maxLatency") and \
+           props["android.sync.maxLatency"] == 0
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+    # TODO: Add more unit tests.
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
new file mode 100644
index 0000000..6f42051
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -0,0 +1,541 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.error
+import os
+import os.path
+import sys
+import re
+import json
+import time
+import unittest
+import socket
+import subprocess
+import hashlib
+import numpy
+
+class ItsSession(object):
+    """Controls a device over adb to run ITS scripts.
+
+    The script importing this module (on the host machine) prepares JSON
+    objects encoding CaptureRequests, specifying sets of parameters to use
+    when capturing an image using the Camera2 APIs. This class encapsulates
+    sending the requests to the device, monitoring the device's progress, and
+    copying the resultant captures back to the host machine when done. TCP
+    forwarded over adb is the transport mechanism used.
+
+    The device must have CtsVerifier.apk installed.
+
+    Attributes:
+        sock: The open socket.
+    """
+
+    # Open a connection to localhost:6000, forwarded to port 6000 on the device.
+    # TODO: Support multiple devices running over different TCP ports.
+    IPADDR = '127.0.0.1'
+    PORT = 6000
+    BUFFER_SIZE = 4096
+
+    # Timeout (in seconds) on each socket operation.
+    SOCK_TIMEOUT = 10.0
+
+    PACKAGE = 'com.android.cts.verifier.camera.its'
+    INTENT_START = 'com.android.cts.verifier.camera.its.START'
+    ACTION_ITS_RESULT = 'com.android.cts.verifier.camera.its.ACTION_ITS_RESULT'
+    EXTRA_SUCCESS = 'camera.its.extra.SUCCESS'
+
+    # TODO: Handle multiple connected devices.
+    ADB = "adb -d"
+
+    # Definitions for some of the common output format options for do_capture().
+    # Each gets images of full resolution for each requested format.
+    CAP_RAW = {"format":"raw"}
+    CAP_DNG = {"format":"dng"}
+    CAP_YUV = {"format":"yuv"}
+    CAP_JPEG = {"format":"jpeg"}
+    CAP_RAW_YUV = [{"format":"raw"}, {"format":"yuv"}]
+    CAP_DNG_YUV = [{"format":"dng"}, {"format":"yuv"}]
+    CAP_RAW_JPEG = [{"format":"raw"}, {"format":"jpeg"}]
+    CAP_DNG_JPEG = [{"format":"dng"}, {"format":"jpeg"}]
+    CAP_YUV_JPEG = [{"format":"yuv"}, {"format":"jpeg"}]
+    CAP_RAW_YUV_JPEG = [{"format":"raw"}, {"format":"yuv"}, {"format":"jpeg"}]
+    CAP_DNG_YUV_JPEG = [{"format":"dng"}, {"format":"yuv"}, {"format":"jpeg"}]
+
+    # Method to handle the case where the service isn't already running.
+    # This occurs when a test is invoked directly from the command line, rather
+    # than as a part of a separate test harness which is setting up the device
+    # and the TCP forwarding.
+    def __pre_init(self):
+
+        # This also includes the optional reboot handling: if the user
+        # provides a "reboot" or "reboot=N" arg, then reboot the device,
+        # waiting for N seconds (default 30) before returning.
+        for s in sys.argv[1:]:
+            if s[:6] == "reboot":
+                duration = 30
+                if len(s) > 7 and s[6] == "=":
+                    duration = int(s[7:])
+                print "Rebooting device"
+                _run("%s reboot" % (ItsSession.ADB));
+                _run("%s wait-for-device" % (ItsSession.ADB))
+                time.sleep(duration)
+                print "Reboot complete"
+
+        # TODO: Figure out why "--user 0" is needed, and fix the problem.
+        _run('%s shell am force-stop --user 0 %s' % (ItsSession.ADB, self.PACKAGE))
+        _run(('%s shell am startservice --user 0 -t text/plain '
+              '-a %s') % (ItsSession.ADB, self.INTENT_START))
+
+        # Wait until the socket is ready to accept a connection.
+        proc = subprocess.Popen(
+                ItsSession.ADB.split() + ["logcat"],
+                stdout=subprocess.PIPE)
+        logcat = proc.stdout
+        while True:
+            line = logcat.readline().strip()
+            if line.find('ItsService ready') >= 0:
+                break
+        proc.kill()
+
+        # Set up the TCP-over-ADB forwarding.
+        _run('%s forward tcp:%d tcp:%d' % (ItsSession.ADB,self.PORT,self.PORT))
+
+    def __init__(self):
+        if "noinit" not in sys.argv:
+            self.__pre_init()
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((self.IPADDR, self.PORT))
+        self.sock.settimeout(self.SOCK_TIMEOUT)
+        self.__close_camera()
+        self.__open_camera()
+
+    def __del__(self):
+        if hasattr(self, 'sock') and self.sock:
+            self.__close_camera()
+            self.sock.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        return False
+
+    def __read_response_from_socket(self):
+        # Read a newline-terminated string serialization of a JSON object.
+        chars = []
+        while len(chars) == 0 or chars[-1] != '\n':
+            ch = self.sock.recv(1)
+            if len(ch) == 0:
+                # Socket was probably closed; otherwise we wouldn't get empty strings.
+                raise its.error.Error('Problem with socket on device side')
+            chars.append(ch)
+        line = ''.join(chars)
+        jobj = json.loads(line)
+        # Optionally read a binary buffer of a fixed size.
+        buf = None
+        if jobj.has_key("bufValueSize"):
+            n = jobj["bufValueSize"]
+            buf = bytearray(n)
+            view = memoryview(buf)
+            while n > 0:
+                nbytes = self.sock.recv_into(view, n)
+                view = view[nbytes:]
+                n -= nbytes
+            buf = numpy.frombuffer(buf, dtype=numpy.uint8)
+        return jobj, buf
+
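+    # Wire-format sketch (illustrative): each response is a newline-terminated
+    # JSON object, optionally followed by a raw binary payload, e.g.:
+    #     {"tag":"jpegImage","bufValueSize":1024}\n   <1024 bytes of JPEG data>
+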
+    def __open_camera(self):
+        # Get the camera ID to open as an argument.
+        camera_id = 0
+        for s in sys.argv[1:]:
+            if s[:7] == "camera=" and len(s) > 7:
+                camera_id = int(s[7:])
+        cmd = {"cmdName":"open", "cameraId":camera_id}
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'cameraOpened':
+            raise its.error.Error('Invalid command response')
+
+    def __close_camera(self):
+        cmd = {"cmdName":"close"}
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'cameraClosed':
+            raise its.error.Error('Invalid command response')
+
+    def do_vibrate(self, pattern):
+        """Cause the device to vibrate to a specific pattern.
+
+        Args:
+            pattern: Durations (ms) for which to turn on or off the vibrator.
+                The first value indicates the number of milliseconds to wait
+                before turning the vibrator on. The next value indicates the
+                number of milliseconds for which to keep the vibrator on
+                before turning it off. Subsequent values alternate between
+                durations in milliseconds to turn the vibrator off or to turn
+                the vibrator on.
+
+        Returns:
+            Nothing.
+        """
+        cmd = {}
+        cmd["cmdName"] = "doVibrate"
+        cmd["pattern"] = pattern
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'vibrationStarted':
+            raise its.error.Error('Invalid command response')
+
+    def start_sensor_events(self):
+        """Start collecting sensor events on the device.
+
+        See get_sensor_events for more info.
+
+        Returns:
+            Nothing.
+        """
+        cmd = {}
+        cmd["cmdName"] = "startSensorEvents"
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'sensorEventsStarted':
+            raise its.error.Error('Invalid command response')
+
+    def get_sensor_events(self):
+        """Get a trace of all sensor events on the device.
+
+        The trace starts when the start_sensor_events function is called. If
+        the test runs for a long time after this call, then the device's
+        internal memory can fill up. Calling get_sensor_events gets all events
+        from the device, and then stops the device from collecting events and
+        clears the internal buffer; to start again, the start_sensor_events
+        call must be used again.
+
+        Events from the accelerometer, compass, and gyro are returned; each
+        has a timestamp and x,y,z values.
+
+        Note that sensor events are only produced if the device isn't in its
+        standby mode (i.e. if the screen is on).
+
+        Returns:
+            A Python dictionary with three keys ("accel", "mag", "gyro") each
+            of which maps to a list of objects containing "time","x","y","z"
+            keys.
+        """
+        cmd = {}
+        cmd["cmdName"] = "getSensorEvents"
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'sensorEvents':
+            raise its.error.Error('Invalid command response')
+        return data['objValue']
+
+    def get_camera_properties(self):
+        """Get the camera properties object for the device.
+
+        Returns:
+            The Python dictionary object for the CameraProperties object.
+        """
+        cmd = {}
+        cmd["cmdName"] = "getCameraProperties"
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'cameraProperties':
+            raise its.error.Error('Invalid command response')
+        return data['objValue']['cameraProperties']
+
+    def do_3a(self, regions_ae=[[0,0,1,1,1]],
+                    regions_awb=[[0,0,1,1,1]],
+                    regions_af=[[0,0,1,1,1]],
+                    do_ae=True, do_awb=True, do_af=True,
+                    lock_ae=False, lock_awb=False,
+                    get_results=False):
+        """Perform a 3A operation on the device.
+
+        Triggers some or all of AE, AWB, and AF, and returns once they have
+        converged. Uses the vendor 3A that is implemented inside the HAL.
+
+        Throws an assertion if 3A fails to converge.
+
+        Args:
+            regions_ae: List of weighted AE regions.
+            regions_awb: List of weighted AWB regions.
+            regions_af: List of weighted AF regions.
+            do_ae: Trigger AE and wait for it to converge.
+            do_awb: Wait for AWB to converge.
+            do_af: Trigger AF and wait for it to converge.
+            lock_ae: Request AE lock after convergence, and wait for it.
+            lock_awb: Request AWB lock after convergence, and wait for it.
+            get_results: Return the 3A results from this function.
+
+        Region format in args:
+            Arguments are lists of weighted regions; each weighted region is a
+            list of 5 values, [x,y,w,h, wgt], and each argument is a list of
+            these 5-value lists. The coordinates are given as normalized
+            rectangles (x,y,w,h) specifying the region. For example:
+                [[0.0, 0.0, 1.0, 0.5, 5], [0.0, 0.5, 1.0, 0.5, 10]].
+            Weights are non-negative integers.
+
+        Returns:
+            Five values are returned if get_results is true:
+            * AE sensitivity; None if do_ae is False
+            * AE exposure time; None if do_ae is False
+            * AWB gains (list); None if do_awb is False
+            * AWB transform (list); None if do_awb is False
+            * AF focus position; None if do_af is False
+            Otherwise, it returns five None values.
+        """
+        print "Running vendor 3A on device"
+        cmd = {}
+        cmd["cmdName"] = "do3A"
+        cmd["regions"] = {"ae": sum(regions_ae, []),
+                          "awb": sum(regions_awb, []),
+                          "af": sum(regions_af, [])}
+        cmd["triggers"] = {"ae": do_ae, "af": do_af}
+        if lock_ae:
+            cmd["aeLock"] = True
+        if lock_awb:
+            cmd["awbLock"] = True
+        self.sock.send(json.dumps(cmd) + "\n")
+
+        # Wait for each specified 3A to converge.
+        ae_sens = None
+        ae_exp = None
+        awb_gains = None
+        awb_transform = None
+        af_dist = None
+        converged = False
+        while True:
+            data,_ = self.__read_response_from_socket()
+            vals = data['strValue'].split()
+            if data['tag'] == 'aeResult':
+                ae_sens, ae_exp = [int(i) for i in vals]
+            elif data['tag'] == 'afResult':
+                af_dist = float(vals[0])
+            elif data['tag'] == 'awbResult':
+                awb_gains = [float(f) for f in vals[:4]]
+                awb_transform = [float(f) for f in vals[4:]]
+            elif data['tag'] == '3aConverged':
+                converged = True
+            elif data['tag'] == '3aDone':
+                break
+            else:
+                raise its.error.Error('Invalid command response')
+        if converged and not get_results:
+            return None,None,None,None,None
+        if (do_ae and ae_sens == None or do_awb and awb_gains == None
+                or do_af and af_dist == None or not converged):
+            raise its.error.Error('3A failed to converge')
+        return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
+
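+    # Example call (sketch; `cam` is an ItsSession instance): run 3A and read
+    # back the converged values:
+    #     sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+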
+    def do_capture(self, cap_request, out_surfaces=None):
+        """Issue capture request(s), and read back the image(s) and metadata.
+
+        The main top-level function for capturing one or more images using the
+        device. Captures a single image if cap_request is a single object, and
+        captures a burst if it is a list of objects.
+
+        The out_surfaces field can specify the width(s), height(s), and
+        format(s) of the captured image. The formats may be "yuv", "jpeg",
+        "dng", "raw", or "raw10". The default is a YUV420 frame ("yuv")
+        corresponding to a full sensor frame.
+
+        Note that one or more surfaces can be specified, allowing a capture to
+        request images back in multiple formats (e.g. raw+yuv, raw+jpeg,
+        yuv+jpeg, raw+yuv+jpeg). If the size is omitted for a surface, the
+        default is the largest resolution available for the format of that
+        surface. At most one output surface can be specified for a given format,
+        and raw+dng, raw10+dng, and raw+raw10 are not supported as combinations.
+
+        Example of a single capture request:
+
+            {
+                "android.sensor.exposureTime": 100*1000*1000,
+                "android.sensor.sensitivity": 100
+            }
+
+        Example of a list of capture requests:
+
+            [
+                {
+                    "android.sensor.exposureTime": 100*1000*1000,
+                    "android.sensor.sensitivity": 100
+                },
+                {
+                    "android.sensor.exposureTime": 100*1000*1000,
+                    "android.sensor.sensitivity": 200
+                }
+            ]
+
+        Examples of output surface specifications:
+
+            {
+                "width": 640,
+                "height": 480,
+                "format": "yuv"
+            }
+
+            [
+                {
+                    "format": "jpeg"
+                },
+                {
+                    "format": "raw"
+                }
+            ]
+
+        The following variables defined in this class are shortcuts for
+        specifying one or more formats where each output is the full size for
+        that format; they can be used as values for the out_surfaces arguments:
+
+            CAP_RAW
+            CAP_DNG
+            CAP_YUV
+            CAP_JPEG
+            CAP_RAW_YUV
+            CAP_DNG_YUV
+            CAP_RAW_JPEG
+            CAP_DNG_JPEG
+            CAP_YUV_JPEG
+            CAP_RAW_YUV_JPEG
+            CAP_DNG_YUV_JPEG
+
+        If multiple formats are specified, then this function returns multiple
+        capture objects, one for each requested format. If multiple formats and
+        multiple captures (i.e. a burst) are specified, then this function
+        returns multiple lists of capture objects. In both cases, the order of
+        the returned objects matches the order of the requested formats in the
+        out_surfaces parameter. For example:
+
+            yuv_cap            = do_capture( req1                           )
+            yuv_cap            = do_capture( req1,        yuv_fmt           )
+            yuv_cap,  raw_cap  = do_capture( req1,        [yuv_fmt,raw_fmt] )
+            yuv_caps           = do_capture( [req1,req2], yuv_fmt           )
+            yuv_caps, raw_caps = do_capture( [req1,req2], [yuv_fmt,raw_fmt] )
+
+        Args:
+            cap_request: The Python dict/list specifying the capture(s), which
+                will be converted to JSON and sent to the device.
+            out_surfaces: (Optional) specifications of the output image formats
+                and sizes to use for each capture.
+
+        Returns:
+            An object, list of objects, or list of lists of objects, where each
+            object contains the following fields:
+            * data: the image data as a numpy array of bytes.
+            * width: the width of the captured image.
+            * height: the height of the captured image.
+            * format: the image format, in ["yuv","jpeg","raw","raw10","dng"].
+            * metadata: the capture result object (Python dictionary).
+        """
+        cmd = {}
+        cmd["cmdName"] = "doCapture"
+        if not isinstance(cap_request, list):
+            cmd["captureRequests"] = [cap_request]
+        else:
+            cmd["captureRequests"] = cap_request
+        if out_surfaces is not None:
+            if not isinstance(out_surfaces, list):
+                cmd["outputSurfaces"] = [out_surfaces]
+            else:
+                cmd["outputSurfaces"] = out_surfaces
+            formats = [c["format"] if c.has_key("format") else "yuv"
+                       for c in cmd["outputSurfaces"]]
+            formats = [s if s != "jpg" else "jpeg" for s in formats]
+        else:
+            formats = ['yuv']
+        ncap = len(cmd["captureRequests"])
+        nsurf = 1 if out_surfaces is None else len(cmd["outputSurfaces"])
+        if len(formats) > len(set(formats)):
+            raise its.error.Error('Duplicate format requested')
+        if "dng" in formats and "raw" in formats or \
+                "dng" in formats and "raw10" in formats or \
+                "raw" in formats and "raw10" in formats:
+            raise its.error.Error('Different raw formats not supported')
+        print "Capturing %d frame%s with %d format%s [%s]" % (
+                  ncap, "s" if ncap>1 else "", nsurf, "s" if nsurf>1 else "",
+                  ",".join(formats))
+        self.sock.send(json.dumps(cmd) + "\n")
+
+        # Wait for ncap*nsurf images and ncap metadata responses.
+        # Assume that captures come out in the same order as requested in
+        # the burst, however individual images of different formats can come
+        # out in any order for that capture.
+        nbufs = 0
+        bufs = {"yuv":[], "raw":[], "raw10":[], "dng":[], "jpeg":[]}
+        mds = []
+        widths = None
+        heights = None
+        while nbufs < ncap*nsurf or len(mds) < ncap:
+            jsonObj,buf = self.__read_response_from_socket()
+            if jsonObj['tag'] in ['jpegImage', 'yuvImage', 'rawImage', \
+                    'raw10Image', 'dngImage'] and buf is not None:
+                fmt = jsonObj['tag'][:-5]
+                bufs[fmt].append(buf)
+                nbufs += 1
+            elif jsonObj['tag'] == 'captureResults':
+                mds.append(jsonObj['objValue']['captureResult'])
+                outputs = jsonObj['objValue']['outputs']
+                widths = [out['width'] for out in outputs]
+                heights = [out['height'] for out in outputs]
+            else:
+                # Just ignore other tags
+                pass
+        rets = []
+        for j,fmt in enumerate(formats):
+            objs = []
+            for i in range(ncap):
+                obj = {}
+                obj["data"] = bufs[fmt][i]
+                obj["width"] = widths[j]
+                obj["height"] = heights[j]
+                obj["format"] = fmt
+                obj["metadata"] = mds[i]
+                objs.append(obj)
+            rets.append(objs if ncap>1 else objs[0])
+        return rets if len(rets)>1 else rets[0]
+
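+# Example of a minimal capture (sketch; the request dict mirrors the examples
+# in the do_capture docstring, and assumes manual sensor control):
+#     with ItsSession() as cam:
+#         cap = cam.do_capture({"android.sensor.exposureTime": 100*1000*1000,
+#                               "android.sensor.sensitivity": 100})
+#         print cap["width"], cap["height"], cap["format"]
+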
+def report_result(camera_id, success):
+    """Send a pass/fail result to the device, via an intent.
+
+    Args:
+        camera_id: The ID string of the camera for which to report pass/fail.
+        success: Boolean, indicating if the result was pass or fail.
+
+    Returns:
+        Nothing.
+    """
+    resultstr = "%s=%s" % (camera_id, 'True' if success else 'False')
+    _run(('%s shell am broadcast '
+          '-a %s --es %s %s') % (ItsSession.ADB, ItsSession.ACTION_ITS_RESULT,
+          ItsSession.EXTRA_SUCCESS, resultstr))
+
+
+def _run(cmd):
+    """Replacement for os.system, with hiding of stdout+stderr messages.
+    """
+    with open(os.devnull, 'wb') as devnull:
+        subprocess.check_call(
+                cmd.split(), stdout=devnull, stderr=subprocess.STDOUT)
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+
+    # TODO: Add some unit tests.
+    pass
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/dng.py b/apps/CameraITS/pymodules/its/dng.py
new file mode 100644
index 0000000..f331d02
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/dng.py
@@ -0,0 +1,174 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy
+import numpy.linalg
+import unittest
+
+# Illuminant IDs
+A = 0
+D65 = 1
+
+def compute_cm_fm(illuminant, gains, ccm, cal):
+    """Compute the ColorMatrix (CM) and ForwardMatrix (FM).
+
+    Given a captured shot of a grey chart illuminated by either a D65 or a
+    standard A illuminant, the HAL will produce the WB gains and transform,
+    in the android.colorCorrection.gains and android.colorCorrection.transform
+    tags respectively. These values have both golden module and per-unit
+    calibration baked in.
+
+    This function is used to take the per-unit gains, ccm, and calibration
+    matrix, and compute the values that the DNG ColorMatrix and ForwardMatrix
+    for the specified illuminant should be. These CM and FM values should be
+    the same for all DNG files captured by all units of the same model (e.g.
+    all Nexus 5 units). The calibration matrix should be the same for all DNGs
+    saved by the same unit, but will differ unit-to-unit.
+
+    Args:
+        illuminant: 0 (A) or 1 (D65).
+        gains: White balance gains, as a list of 4 floats.
+        ccm: White balance transform matrix, as a list of 9 floats.
+        cal: Per-unit calibration matrix, as a list of 9 floats.
+
+    Returns:
+        CM: The 3x3 ColorMatrix for the specified illuminant, as a numpy array
+        FM: The 3x3 ForwardMatrix for the specified illuminant, as a numpy array
+    """
+
+    ###########################################################################
+    # Standard matrices.
+
+    # W is the matrix that maps sRGB to XYZ.
+    # See: http://www.brucelindbloom.com/
+    W = numpy.array([
+        [ 0.4124564,  0.3575761,  0.1804375],
+        [ 0.2126729,  0.7151522,  0.0721750],
+        [ 0.0193339,  0.1191920,  0.9503041]])
+
+    # HH is the chromatic adaptation matrix from D65 (since sRGB's ref white is
+    # D65) to D50 (since CIE XYZ's ref white is D50).
+    HH = numpy.array([
+        [ 1.0478112,  0.0228866, -0.0501270],
+        [ 0.0295424,  0.9904844, -0.0170491],
+        [-0.0092345,  0.0150436,  0.7521316]])
+
+    # H is a chromatic adaptation matrix from D65 (because sRGB's reference
+    # white is D65) to the calibration illuminant (which is a standard matrix
+    # depending on the illuminant). For a D65 illuminant, the matrix is the
+    # identity. For the A illuminant, the matrix uses the linear Bradford
+    # adaptation method to map from D65 to A.
+    # See: http://www.brucelindbloom.com/
+    H_D65 = numpy.array([
+        [ 1.0,        0.0,        0.0],
+        [ 0.0,        1.0,        0.0],
+        [ 0.0,        0.0,        1.0]])
+    H_A = numpy.array([
+        [ 1.2164557,  0.1109905, -0.1549325],
+        [ 0.1533326,  0.9152313, -0.0559953],
+        [-0.0239469,  0.0358984,  0.3147529]])
+    H = [H_A, H_D65][illuminant]
+
+    ###########################################################################
+    # Per-model matrices (these should be the same for all units of a
+    # particular phone/camera). These are static values in the HAL camera
+    # properties.
+
+    # G is formed by taking the r,g,b gains and putting them into a
+    # diagonal matrix.
+    G = numpy.array([[gains[0],0,0], [0,gains[1],0], [0,0,gains[3]]])
+
+    # S is just the CCM.
+    S = numpy.array([ccm[0:3], ccm[3:6], ccm[6:9]])
+
+    ###########################################################################
+    # Per-unit matrices.
+
+    # The per-unit calibration matrix for the given illuminant.
+    CC = numpy.array([cal[0:3],cal[3:6],cal[6:9]])
+
+    ###########################################################################
+    # Derived matrices. These should match up with DNG-related matrices
+    # provided by the HAL.
+
+    # The color matrix and forward matrix are computed as follows:
+    #   CM = inv(H * W * S * G * CC)
+    #   FM = HH * W * S
+    CM = numpy.linalg.inv(
+            numpy.dot(numpy.dot(numpy.dot(numpy.dot(H, W), S), G), CC))
+    FM = numpy.dot(numpy.dot(HH, W), S)
+
+    # The color matrix is normalized so that it maps the D50 (PCS) white
+    # point to a maximum component value of 1.
+    CM = CM / max(numpy.dot(CM, (0.9642957, 1.0, 0.8251046)))
+
+    return CM, FM
+
+def compute_asn(illuminant, cal, CM):
+    """Compute the AsShotNeutral DNG value.
+
+    This value is the only dynamic DNG value; the ForwardMatrix, ColorMatrix,
+    and CalibrationMatrix values should be the same for every DNG saved by
+    a given unit. The AsShotNeutral depends on the scene white balance
+    estimate.
+
+    This function computes what the DNG AsShotNeutral values should be, for
+    a given ColorMatrix (which is computed from the WB gains and CCM for a
+    shot taken of a grey chart under either A or D65 illuminants) and the
+    per-unit calibration matrix.
+
+    Args:
+        illuminant: 0 (A) or 1 (D65).
+        cal: Per-unit calibration matrix, as a list of 9 floats.
+        CM: The computed 3x3 ColorMatrix for the illuminant, as a numpy array.
+
+    Returns:
+        ASN: The AsShotNeutral value, as a length-3 numpy array.
+    """
+
+    ###########################################################################
+    # Standard matrices.
+
+    # XYZCAL is the XYZ coordinate of the calibration illuminant (A or D65).
+    # See: Wyszecki & Stiles, "Color Science", second edition.
+    XYZCAL_A = numpy.array([1.098675, 1.0, 0.355916])
+    XYZCAL_D65 = numpy.array([0.950456, 1.0, 1.089058])
+    XYZCAL = [XYZCAL_A, XYZCAL_D65][illuminant]
+
+    ###########################################################################
+    # Per-unit matrices.
+
+    # The per-unit calibration matrix for the given illuminant.
+    CC = numpy.array([cal[0:3],cal[3:6],cal[6:9]])
+
+    ###########################################################################
+    # Derived matrices.
+
+    # The AsShotNeutral value is then the product of this final color matrix
+    # with the XYZ coordinate of the calibration illuminant.
+    #   ASN = CC * CM * XYZCAL
+    ASN = numpy.dot(numpy.dot(CC, CM), XYZCAL)
+
+    # Normalize so the max vector element is 1.0.
+    ASN = ASN / max(ASN)
+
+    return ASN
+
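+# End-to-end sketch (illustrative; `gains`, `ccm`, and `cal` would come from
+# a real capture result and the camera's static metadata):
+#     CM, FM = compute_cm_fm(D65, gains, ccm, cal)
+#     ASN = compute_asn(D65, cal, CM)
+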
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+    # TODO: Add more unit tests.
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/error.py b/apps/CameraITS/pymodules/its/error.py
new file mode 100644
index 0000000..884389b
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/error.py
@@ -0,0 +1,26 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import unittest
+
+class Error(Exception):
+    pass
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/image.py b/apps/CameraITS/pymodules/its/image.py
new file mode 100644
index 0000000..f2425e1
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/image.py
@@ -0,0 +1,745 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import matplotlib
+matplotlib.use('Agg')
+
+import its.error
+import pylab
+import sys
+import Image
+import numpy
+import math
+import unittest
+import cStringIO
+import scipy.stats
+import copy
+
+DEFAULT_YUV_TO_RGB_CCM = numpy.matrix([
+                                [1.000,  0.000,  1.402],
+                                [1.000, -0.344, -0.714],
+                                [1.000,  1.772,  0.000]])
+
+DEFAULT_YUV_OFFSETS = numpy.array([0, 128, 128])
+
+DEFAULT_GAMMA_LUT = numpy.array(
+        [math.floor(65535 * math.pow(i/65535.0, 1/2.2) + 0.5)
+         for i in xrange(65536)])
+
+DEFAULT_INVGAMMA_LUT = numpy.array(
+        [math.floor(65535 * math.pow(i/65535.0, 2.2) + 0.5)
+         for i in xrange(65536)])
+
+MAX_LUT_SIZE = 65536
+
+def convert_capture_to_rgb_image(cap,
+                                 ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
+                                 yuv_off=DEFAULT_YUV_OFFSETS,
+                                 props=None):
+    """Convert a captured image object to a RGB image.
+
+    Args:
+        cap: A capture object as returned by its.device.do_capture.
+        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
+        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.
+        props: (Optional) camera properties object (of static values);
+            required for processing raw images.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    w = cap["width"]
+    h = cap["height"]
+    if cap["format"] == "raw10":
+        assert(props is not None)
+        cap = unpack_raw10_capture(cap, props)
+    if cap["format"] == "yuv":
+        y = cap["data"][0:w*h]
+        u = cap["data"][w*h:w*h*5/4]
+        v = cap["data"][w*h*5/4:w*h*6/4]
+        return convert_yuv420_to_rgb_image(y, u, v, w, h)
+    elif cap["format"] == "jpeg":
+        return decompress_jpeg_to_rgb_image(cap["data"])
+    elif cap["format"] == "raw":
+        assert(props is not None)
+        r,gr,gb,b = convert_capture_to_planes(cap, props)
+        return convert_raw_to_rgb_image(r,gr,gb,b, props, cap["metadata"])
+    else:
+        raise its.error.Error('Invalid format %s' % (cap["format"]))
+
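+# Typical use (sketch; `cap` is a capture object from its.device.do_capture):
+#     rgb = convert_capture_to_rgb_image(cap, props=props)
+#     write_image(rgb, "scene.png")
+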
+def unpack_raw10_capture(cap, props):
+    """Unpack a raw-10 capture to a raw-16 capture.
+
+    Args:
+        cap: A raw-10 capture object.
+        props: Camera properties object.
+
+    Returns:
+        New capture object with raw-16 data.
+    """
+    # Data is packed as 4x10b pixels in 5 bytes, with the first 4 bytes holding
+    # the MSBs of the pixels, and the 5th byte holding 4x2b LSBs.
+    w,h = cap["width"], cap["height"]
+    if w % 4 != 0:
+        raise its.error.Error('Invalid raw-10 buffer width')
+    cap = copy.deepcopy(cap)
+    cap["data"] = unpack_raw10_image(cap["data"].reshape(h,w*5/4))
+    cap["format"] = "raw"
+    return cap
+
+def unpack_raw10_image(img):
+    """Unpack a raw-10 image to a raw-16 image.
+
+    Output image will have the 10 LSBs filled in each 16b word, and the 6 MSBs
+    will be set to zero.
+
+    Args:
+        img: A raw-10 image, as a uint8 numpy array.
+
+    Returns:
+        Image as a uint16 numpy array, with all row padding stripped.
+    """
+    if img.shape[1] % 5 != 0:
+        raise its.error.Error('Invalid raw-10 buffer width')
+    w = img.shape[1]*4/5
+    h = img.shape[0]
+    # Cut out the 4x8b MSBs and shift to bits [9:2] in 16b words.
+    msbs = numpy.delete(img, numpy.s_[4::5], 1)
+    msbs = msbs.astype(numpy.uint16)
+    msbs = numpy.left_shift(msbs, 2)
+    msbs = msbs.reshape(h,w)
+    # Cut out the 4x2b LSBs and put each in bits [1:0] of their own 8b words.
+    lsbs = img[::, 4::5].reshape(h,w/4)
+    lsbs = numpy.right_shift(
+            numpy.packbits(numpy.unpackbits(lsbs).reshape(h,w/4,4,2),3), 6)
+    lsbs = lsbs.reshape(h,w)
+    # Fuse the MSBs and LSBs back together
+    img16 = numpy.bitwise_or(msbs, lsbs).reshape(h,w)
+    return img16
+
+def convert_capture_to_planes(cap, props=None):
+    """Convert a captured image object to separate image planes.
+
+    Decompose an image into multiple images, corresponding to different planes.
+
+    For YUV420 captures ("yuv"):
+        Returns Y,U,V planes, where the Y plane is full-res and the U,V planes
+        are each 1/2 x 1/2 of the full res.
+
+    For Bayer captures ("raw" or "raw10"):
+        Returns planes in the order R,Gr,Gb,B, regardless of the Bayer pattern
+        layout. Each plane is 1/2 x 1/2 of the full res.
+
+    For JPEG captures ("jpeg"):
+        Returns R,G,B full-res planes.
+
+    Args:
+        cap: A capture object as returned by its.device.do_capture.
+        props: (Optional) camera properties object (of static values);
+            required for processing raw images.
+
+    Returns:
+        A tuple of float numpy arrays (one per plane), consisting of pixel
+            values in the range [0.0, 1.0].
+    """
+    w = cap["width"]
+    h = cap["height"]
+    if cap["format"] == "raw10":
+        assert(props is not None)
+        cap = unpack_raw10_capture(cap, props)
+    if cap["format"] == "yuv":
+        y = cap["data"][0:w*h]
+        u = cap["data"][w*h:w*h*5/4]
+        v = cap["data"][w*h*5/4:w*h*6/4]
+        return ((y.astype(numpy.float32) / 255.0).reshape(h, w, 1),
+                (u.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1),
+                (v.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1))
+    elif cap["format"] == "jpeg":
+        rgb = decompress_jpeg_to_rgb_image(cap["data"]).reshape(w*h*3)
+        return (rgb[::3].reshape(h,w,1),
+                rgb[1::3].reshape(h,w,1),
+                rgb[2::3].reshape(h,w,1))
+    elif cap["format"] == "raw":
+        assert(props is not None)
+        white_level = float(props['android.sensor.info.whiteLevel'])
+        img = numpy.ndarray(shape=(h*w,), dtype='<u2',
+                            buffer=cap["data"][0:w*h*2])
+        img = img.astype(numpy.float32).reshape(h,w) / white_level
+        imgs = [img[::2].reshape(w*h/2)[::2].reshape(h/2,w/2,1),
+                img[::2].reshape(w*h/2)[1::2].reshape(h/2,w/2,1),
+                img[1::2].reshape(w*h/2)[::2].reshape(h/2,w/2,1),
+                img[1::2].reshape(w*h/2)[1::2].reshape(h/2,w/2,1)]
+        idxs = get_canonical_cfa_order(props)
+        return [imgs[i] for i in idxs]
+    else:
+        raise its.error.Error('Invalid format %s' % (cap["format"]))
+
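+# Example (sketch; `raw_cap` is a hypothetical raw capture object):
+#     r, gr, gb, b = convert_capture_to_planes(raw_cap, props)
+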
+def get_canonical_cfa_order(props):
+    """Returns a mapping from the Bayer 2x2 top-left grid in the CFA to
+    the standard order R,Gr,Gb,B.
+
+    Args:
+        props: Camera properties object.
+
+    Returns:
+        List of 4 integers, corresponding to the positions in the 2x2 top-
+            left Bayer grid of R,Gr,Gb,B, where the 2x2 grid is labeled as
+            0,1,2,3 in row major order.
+    """
+    # Note that raw streams aren't croppable, so the cropRegion doesn't need
+    # to be considered when determining the top-left pixel color.
+    cfa_pat = props['android.sensor.info.colorFilterArrangement']
+    if cfa_pat == 0:
+        # RGGB
+        return [0,1,2,3]
+    elif cfa_pat == 1:
+        # GRBG
+        return [1,0,3,2]
+    elif cfa_pat == 2:
+        # GBRG
+        return [2,3,0,1]
+    elif cfa_pat == 3:
+        # BGGR
+        return [3,2,1,0]
+    else:
+        raise its.error.Error("Not supported")
+
+def get_gains_in_canonical_order(props, gains):
+    """Reorders the gains tuple to the canonical R,Gr,Gb,B order.
+
+    Args:
+        props: Camera properties object.
+        gains: List of 4 values, in R,G_even,G_odd,B order.
+
+    Returns:
+        List of gains values, in R,Gr,Gb,B order.
+    """
+    cfa_pat = props['android.sensor.info.colorFilterArrangement']
+    if cfa_pat in [0,1]:
+        # RGGB or GRBG, so G_even is Gr
+        return gains
+    elif cfa_pat in [2,3]:
+        # GBRG or BGGR, so G_even is Gb
+        return [gains[0], gains[2], gains[1], gains[3]]
+    else:
+        raise its.error.Error("Not supported")
+
+def convert_raw_to_rgb_image(r_plane, gr_plane, gb_plane, b_plane,
+                             props, cap_res):
+    """Convert a Bayer raw-16 image to an RGB image.
+
+    Includes some extremely rudimentary demosaicking and color processing
+    operations; the output of this function shouldn't be used for any image
+    quality analysis.
+
+    Args:
+        r_plane,gr_plane,gb_plane,b_plane: Numpy arrays for each color plane
+            in the Bayer image, with pixels in the [0.0, 1.0] range.
+        props: Camera properties object.
+        cap_res: Capture result (metadata) object.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0]
+    """
+    # Values required for the RAW to RGB conversion.
+    assert(props is not None)
+    white_level = float(props['android.sensor.info.whiteLevel'])
+    black_levels = props['android.sensor.blackLevelPattern']
+    gains = cap_res['android.colorCorrection.gains']
+    ccm = cap_res['android.colorCorrection.transform']
+
+    # Reorder black levels and gains to R,Gr,Gb,B, to match the order
+    # of the planes.
+    idxs = get_canonical_cfa_order(props)
+    black_levels = [black_levels[i] for i in idxs]
+    gains = get_gains_in_canonical_order(props, gains)
+
+    # Convert CCM from rational to float, as numpy arrays.
+    ccm = numpy.array(its.objects.rational_to_float(ccm)).reshape(3,3)
+
+    # Need to scale the image back to the full [0,1] range after subtracting
+    # the black level from each pixel.
+    scale = white_level / (white_level - max(black_levels))
+
+    # Three-channel black levels, normalized to [0,1] by white_level.
+    black_levels = numpy.array([b/white_level for b in [
+            black_levels[i] for i in [0,1,3]]])
+
+    # Three-channel gains.
+    gains = numpy.array([gains[i] for i in [0,1,3]])
+
+    h,w = r_plane.shape[:2]
+    img = numpy.dstack([r_plane,(gr_plane+gb_plane)/2.0,b_plane])
+    img = (((img.reshape(h,w,3) - black_levels) * scale) * gains).clip(0.0,1.0)
+    img = numpy.dot(img.reshape(w*h,3), ccm.T).reshape(h,w,3).clip(0.0,1.0)
+    return img
+
+def convert_yuv420_to_rgb_image(y_plane, u_plane, v_plane,
+                                w, h,
+                                ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
+                                yuv_off=DEFAULT_YUV_OFFSETS):
+    """Convert a YUV420 8-bit planar image to an RGB image.
+
+    Args:
+        y_plane: The packed 8-bit Y plane.
+        u_plane: The packed 8-bit U plane.
+        v_plane: The packed 8-bit V plane.
+        w: The width of the image.
+        h: The height of the image.
+        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
+        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    y = numpy.subtract(y_plane, yuv_off[0])
+    u = numpy.subtract(u_plane, yuv_off[1]).view(numpy.int8)
+    v = numpy.subtract(v_plane, yuv_off[2]).view(numpy.int8)
+    u = u.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
+    v = v.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
+    yuv = numpy.dstack([y, u.reshape(w*h), v.reshape(w*h)])
+    flt = numpy.empty([h, w, 3], dtype=numpy.float32)
+    flt.reshape(w*h*3)[:] = yuv.reshape(h*w*3)[:]
+    flt = numpy.dot(flt.reshape(w*h,3), ccm_yuv_to_rgb.T).clip(0, 255)
+    rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
+    rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
+    return rgb.astype(numpy.float32) / 255.0
+
+def load_yuv420_to_rgb_image(yuv_fname,
+                             w, h,
+                             ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
+                             yuv_off=DEFAULT_YUV_OFFSETS):
+    """Load a YUV420 image file, and return as an RGB image.
+
+    Args:
+        yuv_fname: The path of the YUV420 file.
+        w: The width of the image.
+        h: The height of the image.
+        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
+        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    with open(yuv_fname, "rb") as f:
+        y = numpy.fromfile(f, numpy.uint8, w*h, "")
+        v = numpy.fromfile(f, numpy.uint8, w*h/4, "")
+        u = numpy.fromfile(f, numpy.uint8, w*h/4, "")
+        return convert_yuv420_to_rgb_image(y,u,v,w,h,ccm_yuv_to_rgb,yuv_off)
+
+def load_yuv420_to_yuv_planes(yuv_fname, w, h):
+    """Load a YUV420 image file, and return separate Y, U, and V plane images.
+
+    Args:
+        yuv_fname: The path of the YUV420 file.
+        w: The width of the image.
+        h: The height of the image.
+
+    Returns:
+        Separate Y, U, and V images as float-1 Numpy arrays, pixels in [0,1].
+        Note that a pixel value of (0,0,0) is not black, since U,V values are
+        centered at 0.5, and also that the Y and U,V plane images returned
+        have different sizes (due to chroma subsampling in the YUV420 format).
+    """
+    with open(yuv_fname, "rb") as f:
+        y = numpy.fromfile(f, numpy.uint8, w*h, "")
+        v = numpy.fromfile(f, numpy.uint8, w*h/4, "")
+        u = numpy.fromfile(f, numpy.uint8, w*h/4, "")
+        return ((y.astype(numpy.float32) / 255.0).reshape(h, w, 1),
+                (u.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1),
+                (v.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1))
+
+def decompress_jpeg_to_rgb_image(jpeg_buffer):
+    """Decompress a JPEG-compressed image, returning as an RGB image.
+
+    Args:
+        jpeg_buffer: The JPEG stream.
+
+    Returns:
+        A numpy array for the RGB image, with pixels in [0,1].
+    """
+    img = Image.open(cStringIO.StringIO(jpeg_buffer))
+    w = img.size[0]
+    h = img.size[1]
+    return numpy.array(img).reshape(h,w,3) / 255.0
+
+def apply_lut_to_image(img, lut):
+    """Applies a LUT to every pixel in a float image array.
+
+    Internally converts to a 16b integer image, since the LUT can work with up
+    to 16b->16b mappings (i.e. values in the range [0,65535]). The lut can also
+    have fewer than 65536 entries, however it must be sized as a power of 2
+    (and for smaller luts, the scale must match the bitdepth).
+
+    For a 16b lut of 65536 entries, the operation performed is:
+
+        lut[r * 65535] / 65535 -> r'
+        lut[g * 65535] / 65535 -> g'
+        lut[b * 65535] / 65535 -> b'
+
+    For a 10b lut of 1024 entries, the operation becomes:
+
+        lut[r * 1023] / 1023 -> r'
+        lut[g * 1023] / 1023 -> g'
+        lut[b * 1023] / 1023 -> b'
+
+    Args:
+        img: Numpy float image array, with pixel values in [0,1].
+        lut: Numpy table encoding a LUT, mapping 16b integer values.
+
+    Returns:
+        Float image array after applying LUT to each pixel.
+    """
+    n = len(lut)
+    if n <= 0 or n > MAX_LUT_SIZE or (n & (n - 1)) != 0:
+        raise its.error.Error('Invalid arg LUT size: %d' % (n))
+    m = float(n-1)
+    return (lut[(img * m).astype(numpy.uint16)] / m).astype(numpy.float32)
+
+def apply_matrix_to_image(img, mat):
+    """Multiplies a 3x3 matrix with each float-3 image pixel.
+
+    Each pixel is considered a column vector, and is left-multiplied by
+    the given matrix:
+
+        [     ]   r    r'
+        [ mat ] * g -> g'
+        [     ]   b    b'
+
+    Args:
+        img: Numpy float image array, with pixel values in [0,1].
+        mat: Numpy 3x3 matrix.
+
+    Returns:
+        The numpy float-3 image array resulting from the matrix mult.
+    """
+    h = img.shape[0]
+    w = img.shape[1]
+    img2 = numpy.empty([h, w, 3], dtype=numpy.float32)
+    img2.reshape(w*h*3)[:] = (numpy.dot(img.reshape(h*w, 3), mat.T)
+                             ).reshape(w*h*3)[:]
+    return img2
+
+def get_image_patch(img, xnorm, ynorm, wnorm, hnorm):
+    """Get a patch (tile) of an image.
+
+    Args:
+        img: Numpy float image array, with pixel values in [0,1].
+        xnorm,ynorm,wnorm,hnorm: Normalized (in [0,1]) coords for the tile.
+
+    Returns:
+        Float image array of the patch.
+    """
+    hfull = img.shape[0]
+    wfull = img.shape[1]
+    xtile = int(math.ceil(xnorm * wfull))
+    ytile = int(math.ceil(ynorm * hfull))
+    wtile = int(math.floor(wnorm * wfull))
+    htile = int(math.floor(hnorm * hfull))
+    return img[ytile:ytile+htile,xtile:xtile+wtile,:].copy()
+
+def compute_image_means(img):
+    """Calculate the mean of each color channel in the image.
+
+    Args:
+        img: Numpy float image array, with pixel values in [0,1].
+
+    Returns:
+        A list of mean values, one per color channel in the image.
+    """
+    means = []
+    chans = img.shape[2]
+    for i in xrange(chans):
+        means.append(numpy.mean(img[:,:,i], dtype=numpy.float64))
+    return means
+
+def compute_image_variances(img):
+    """Calculate the variance of each color channel in the image.
+
+    Args:
+        img: Numpy float image array, with pixel values in [0,1].
+
+    Returns:
+        A list of variance values, one per color channel in the image.
+    """
+    variances = []
+    chans = img.shape[2]
+    for i in xrange(chans):
+        variances.append(numpy.var(img[:,:,i], dtype=numpy.float64))
+    return variances
+
+def write_image(img, fname, apply_gamma=False):
+    """Save a float-3 numpy array image to a file.
+
+    Supported formats: PNG, JPEG, and others; see PIL docs for more.
+
+    Image can be 3-channel, which is interpreted as RGB, or can be 1-channel,
+    which is greyscale.
+
+    Can optionally specify that the image should be gamma-encoded prior to
+    writing it out; this should be done if the image contains linear pixel
+    values, to make the image look "normal".
+
+    Args:
+        img: Numpy image array data.
+        fname: Path of file to save to; the extension specifies the format.
+        apply_gamma: (Optional) apply gamma to the image prior to writing it.
+    """
+    if apply_gamma:
+        img = apply_lut_to_image(img, DEFAULT_GAMMA_LUT)
+    (h, w, chans) = img.shape
+    if chans == 3:
+        Image.fromarray((img * 255.0).astype(numpy.uint8), "RGB").save(fname)
+    elif chans == 1:
+        img3 = (img * 255.0).astype(numpy.uint8).repeat(3).reshape(h,w,3)
+        Image.fromarray(img3, "RGB").save(fname)
+    else:
+        raise its.error.Error('Unsupported image type')
+
+def downscale_image(img, f):
+    """Shrink an image by a given integer factor.
+
+    This function computes output pixel values by averaging over rectangular
+    regions of the input image; it doesn't skip or sample pixels, and all input
+    image pixels are evenly weighted.
+
+    If the downscaling factor doesn't cleanly divide the width and/or height,
+    then the remaining pixels on the right or bottom edge are discarded prior
+    to the downscaling.
+
+    Args:
+        img: The input image as an ndarray.
+        f: The downscaling factor, which should be an integer.
+
+    Returns:
+        The new (downscaled) image, as an ndarray.
+    """
+    h,w,chans = img.shape
+    f = int(f)
+    assert(f >= 1)
+    h = (h/f)*f
+    w = (w/f)*f
+    img = img[0:h:,0:w:,::]
+    chs = []
+    for i in xrange(chans):
+        ch = img.reshape(h*w*chans)[i::chans].reshape(h,w)
+        ch = ch.reshape(h,w/f,f).mean(2).reshape(h,w/f)
+        ch = ch.T.reshape(w/f,h/f,f).mean(2).T.reshape(h/f,w/f)
+        chs.append(ch.reshape(h*w/(f*f)))
+    img = numpy.vstack(chs).T.reshape(h/f,w/f,chans)
+    return img
+
+def __measure_color_checker_patch(img, xc,yc, patch_size):
+    r = patch_size/2
+    tile = img[yc-r:yc+r+1:, xc-r:xc+r+1:, ::]
+    means = tile.mean(1).mean(0)
+    return means
+
+def get_color_checker_chart_patches(img, debug_fname_prefix=None):
+    """Return the center coords of each patch in a color checker chart.
+
+    Assumptions:
+    * Chart is vertical or horizontal w.r.t. camera, but not diagonal.
+    * Chart is (roughly) planar-parallel to the camera.
+    * Chart is centered in frame (roughly).
+    * Around/behind chart is white/grey background.
+    * The only black pixels in the image are from the chart.
+    * Chart is 100% visible and contained within image.
+    * No other objects within image.
+    * Image is well-exposed.
+    * Standard color checker chart with standard-sized black borders.
+
+    The values returned are in the coordinate system of the chart; that is,
+    the "origin" patch is the brown patch that is in the chart's top-left
+    corner when it is in the normal upright/horizontal orientation. (The chart
+    may be any of the four main orientations in the image.)
+
+    The chart is 6x4 patches in the normal upright orientation. Internally,
+    this function finds the center coordinate of the top-left patch, and the
+    displacement vectors from it to the adjacent patches to the right and
+    below; the center of any patch (xi,yi) is then origin + xi*vec_across +
+    yi*vec_down, and the full set of patch centers is what gets returned.
+
+    Args:
+        img: Input image, as a numpy array with pixels in [0,1].
+        debug_fname_prefix: If not None, the (string) name of a file prefix to
+            use to save a number of debug images for visualizing the output of
+            this function; can be used to see if the patches are being found
+            successfully.
+
+    Returns:
+        6x4 list of lists of integer (x,y) coords of the center of each patch,
+        ordered in the "chart order" (6x4 row major).
+    """
+
+    # Shrink the original image.
+    DOWNSCALE_FACTOR = 4
+    img_small = downscale_image(img, DOWNSCALE_FACTOR)
+
+    # Make a threshold image, which is 1.0 where the image is black,
+    # and 0.0 elsewhere.
+    BLACK_PIXEL_THRESH = 0.2
+    mask_img = scipy.stats.threshold(
+                img_small.max(2), BLACK_PIXEL_THRESH, 1.1, 0.0)
+    mask_img = 1.0 - scipy.stats.threshold(mask_img, -0.1, 0.1, 1.0)
+
+    if debug_fname_prefix is not None:
+        h,w = mask_img.shape
+        write_image(img, debug_fname_prefix+"_0.jpg")
+        write_image(mask_img.repeat(3).reshape(h,w,3),
+                debug_fname_prefix+"_1.jpg")
+
+    # Mask image flattened to a single row or column (by averaging).
+    # Also apply a threshold to these arrays.
+    FLAT_PIXEL_THRESH = 0.05
+    flat_row = mask_img.mean(0)
+    flat_col = mask_img.mean(1)
+    flat_row = [0 if v < FLAT_PIXEL_THRESH else 1 for v in flat_row]
+    flat_col = [0 if v < FLAT_PIXEL_THRESH else 1 for v in flat_col]
+
+    # Start and end of the non-zero region of the flattened row/column.
+    flat_row_nonzero = [i for i in range(len(flat_row)) if flat_row[i]>0]
+    flat_col_nonzero = [i for i in range(len(flat_col)) if flat_col[i]>0]
+    flat_row_min, flat_row_max = min(flat_row_nonzero), max(flat_row_nonzero)
+    flat_col_min, flat_col_max = min(flat_col_nonzero), max(flat_col_nonzero)
+
+    # Orientation of chart, and number of grid cells horz. and vertically.
+    orient = "h" if flat_row_max-flat_row_min>flat_col_max-flat_col_min else "v"
+    xgrids = 6 if orient=="h" else 4
+    ygrids = 6 if orient=="v" else 4
+
+    # Get better bounds on the patches region, lopping off some of the excess
+    # black border.
+    HRZ_BORDER_PAD_FRAC = 0.0138
+    VERT_BORDER_PAD_FRAC = 0.0395
+    xpad = HRZ_BORDER_PAD_FRAC if orient=="h" else VERT_BORDER_PAD_FRAC
+    ypad = HRZ_BORDER_PAD_FRAC if orient=="v" else VERT_BORDER_PAD_FRAC
+    xchart = flat_row_min + (flat_row_max - flat_row_min) * xpad
+    ychart = flat_col_min + (flat_col_max - flat_col_min) * ypad
+    wchart = (flat_row_max - flat_row_min) * (1 - 2*xpad)
+    hchart = (flat_col_max - flat_col_min) * (1 - 2*ypad)
+
+    # Get the colors of the 4 corner patches, in clockwise order, by measuring
+    # the average value of a small patch at each of the 4 patch centers.
+    colors = []
+    centers = []
+    for (x,y) in [(0,0), (xgrids-1,0), (xgrids-1,ygrids-1), (0,ygrids-1)]:
+        xc = xchart + (x + 0.5)*wchart/xgrids
+        yc = ychart + (y + 0.5)*hchart/ygrids
+        xc = int(xc * DOWNSCALE_FACTOR + 0.5)
+        yc = int(yc * DOWNSCALE_FACTOR + 0.5)
+        centers.append((xc,yc))
+        chan_means = __measure_color_checker_patch(img, xc,yc, 32)
+        colors.append(sum(chan_means) / len(chan_means))
+
+    # The brightest corner is the white patch, the darkest is the black patch.
+    # The black patch should be counter-clockwise from the white patch.
+    white_patch_index = None
+    for i in range(4):
+        if colors[i] == max(colors) and \
+                colors[(i-1+4)%4] == min(colors):
+            white_patch_index = i%4
+    assert(white_patch_index is not None)
+
+    # Return the coords of the origin (top-left when the chart is in the normal
+    # upright orientation) patch's center, and the vector displacement to the
+    # center of the second patch on the first row of the chart (when in the
+    # normal upright orientation).
+    origin_index = (white_patch_index+1)%4
+    prev_index = (origin_index-1+4)%4
+    next_index = (origin_index+1)%4
+    origin_center = centers[origin_index]
+    prev_center = centers[prev_index]
+    next_center = centers[next_index]
+    vec_across = tuple([(next_center[i]-origin_center[i])/5.0 for i in [0,1]])
+    vec_down = tuple([(prev_center[i]-origin_center[i])/3.0 for i in [0,1]])
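+    # (The chart is 6 patches across and 4 down, so these corner-to-corner
+    # displacements span 5 and 3 inter-patch steps, respectively.)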
+
+    # Compute the center of each patch.
+    patches = [[],[],[],[]]
+    for yi in range(4):
+        for xi in range(6):
+            x0,y0 = origin_center
+            dxh,dyh = vec_across
+            dxv,dyv = vec_down
+            xc = int(x0 + dxh*xi + dxv*yi)
+            yc = int(y0 + dyh*xi + dyv*yi)
+            patches[yi].append((xc,yc))
+
+    # Sanity check: test that the R,G,B,black,white patches are correct.
+    patch_info = [(2,2,[0]), # Red
+                  (2,1,[1]), # Green
+                  (2,0,[2]), # Blue
+                  (3,0,[0,1,2]), # White
+                  (3,5,[])] # Black
+    for i in range(len(patch_info)):
+        yi,xi,high_chans = patch_info[i]
+        # Use a distinct variable in the comprehensions below; in Python 2 a
+        # list comprehension's variable leaks into the enclosing scope and
+        # would otherwise clobber the patch index i.
+        low_chans = [c for c in [0,1,2] if c not in high_chans]
+        xc,yc = patches[yi][xi]
+        means = __measure_color_checker_patch(img, xc,yc, 64)
+        if (min([means[c] for c in high_chans]+[1]) < \
+                max([means[c] for c in low_chans]+[0])):
+            print "Color patch sanity check failed: patch", i
+            # If the debug info is requested, then don't assert that the patches
+            # are matched, to allow the caller to see the output.
+            if debug_fname_prefix is None:
+                assert(0)
+
+    if debug_fname_prefix is not None:
+        for (xc,yc) in sum(patches,[]):
+            img[yc,xc] = 1.0
+        write_image(img, debug_fname_prefix+"_2.jpg")
+
+    return patches
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+
+    # TODO: Add more unit tests.
+
+    def test_apply_matrix_to_image(self):
+        """Unit test for apply_matrix_to_image.
+
+        Test by using a canned set of values on a 1x1 pixel image.
+
+            [ 1 2 3 ]   [ 0.1 ]   [ 1.4 ]
+            [ 4 5 6 ] * [ 0.2 ] = [ 3.2 ]
+            [ 7 8 9 ]   [ 0.3 ]   [ 5.0 ]
+               mat         x         y
+        """
+        mat = numpy.array([[1,2,3],[4,5,6],[7,8,9]])
+        x = numpy.array([0.1,0.2,0.3]).reshape(1,1,3)
+        y = apply_matrix_to_image(x, mat).reshape(3).tolist()
+        y_ref = [1.4,3.2,5.0]
+        passed = all([math.fabs(y[i] - y_ref[i]) < 0.001 for i in xrange(3)])
+        self.assertTrue(passed)
+
+    def test_apply_lut_to_image(self):
+        """ Unit test for apply_lut_to_image.
+
+        Test by using a canned set of values on a 1x1 pixel image. The LUT will
+        simply double the value of the index:
+
+            lut[x] = 2*x
+        """
+        lut = numpy.array([2*i for i in xrange(65536)])
+        x = numpy.array([0.1,0.2,0.3]).reshape(1,1,3)
+        y = apply_lut_to_image(x, lut).reshape(3).tolist()
+        y_ref = [0.2,0.4,0.6]
+        passed = all([math.fabs(y[i] - y_ref[i]) < 0.001 for i in xrange(3)])
+        self.assertTrue(passed)
+
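+    # Two illustrative additions (per the TODO above), using tiny synthetic
+    # images with hand-computed expected values.
+    def test_get_image_patch(self):
+        """Unit test for get_image_patch.
+
+        Crops the top-left quadrant of a 2x2 image; the result should be
+        the single top-left pixel.
+        """
+        img = numpy.array([1,2,3,4]).repeat(3).reshape(2,2,3) / 4.0
+        tile = get_image_patch(img, 0.0, 0.0, 0.5, 0.5)
+        self.assertEqual(tile.shape, (1,1,3))
+        self.assertAlmostEqual(tile[0,0,0], 0.25)
+
+    def test_downscale_image(self):
+        """Unit test for downscale_image.
+
+        Averaging a 2x2 image down by a factor of 2 should yield a 1x1
+        image holding the mean of the four input pixels.
+        """
+        img = numpy.array([0.0,0.25,0.5,0.25]).repeat(3).reshape(2,2,3)
+        img2 = downscale_image(img, 2)
+        self.assertEqual(img2.shape, (1,1,3))
+        self.assertAlmostEqual(img2[0,0,0], 0.25)
+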
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/objects.py b/apps/CameraITS/pymodules/its/objects.py
new file mode 100644
index 0000000..809a98a
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/objects.py
@@ -0,0 +1,209 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path
+import sys
+import re
+import json
+import tempfile
+import time
+import unittest
+import subprocess
+import math
+
+def int_to_rational(i):
+    """Function to convert Python integers to Camera2 rationals.
+
+    Args:
+        i: Python integer or list of integers.
+
+    Returns:
+        Python dictionary or list of dictionaries representing the given int(s)
+        as rationals with denominator=1.
+    """
+    if isinstance(i, list):
+        return [{"numerator":val, "denominator":1} for val in i]
+    else:
+        return {"numerator":i, "denominator":1}
+
+def float_to_rational(f, denom=128):
+    """Function to convert Python floats to Camera2 rationals.
+
+    Args:
+        f: Python float or list of floats.
+        denom: (Optional) the denominator to use in the output rationals.
+
+    Returns:
+        Python dictionary or list of dictionaries representing the given
+        float(s) as rationals.
+    """
+    if isinstance(f, list):
+        return [{"numerator":math.floor(val*denom+0.5), "denominator":denom}
+                for val in f]
+    else:
+        return {"numerator":math.floor(f*denom+0.5), "denominator":denom}
+
+def rational_to_float(r):
+    """Function to convert Camera2 rational objects to Python floats.
+
+    Args:
+        r: Rational or list of rationals, as Python dictionaries.
+
+    Returns:
+        Float or list of floats.
+    """
+    if isinstance(r, list):
+        return [float(val["numerator"]) / float(val["denominator"])
+                for val in r]
+    else:
+        return float(r["numerator"]) / float(r["denominator"])
+
+def manual_capture_request(sensitivity, exp_time, linear_tonemap=False):
+    """Return a capture request with everything set to manual.
+
+    Uses identity/unit color correction, and the default tonemap curve.
+    Optionally, the tonemap can be specified as being linear.
+
+    Args:
+        sensitivity: The sensitivity value to populate the request with.
+        exp_time: The exposure time, in nanoseconds, to populate the request
+            with.
+        linear_tonemap: [Optional] whether a linear tonemap should be used
+            in this request.
+
+    Returns:
+        The default manual capture request, ready to be passed to the
+        its.device.do_capture function.
+    """
+    req = {
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.control.effectMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.sensitivity": sensitivity,
+        "android.sensor.exposureTime": exp_time,
+        "android.colorCorrection.mode": 0,
+        "android.colorCorrection.transform":
+                int_to_rational([1,0,0, 0,1,0, 0,0,1]),
+        "android.colorCorrection.gains": [1,1,1,1],
+        "android.tonemap.mode": 1,
+        }
+    if linear_tonemap:
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = [0.0,0.0, 1.0,1.0]
+        req["android.tonemap.curveGreen"] = [0.0,0.0, 1.0,1.0]
+        req["android.tonemap.curveBlue"] = [0.0,0.0, 1.0,1.0]
+    return req
+
+def auto_capture_request():
+    """Return a capture request with everything set to auto.
+    """
+    return {
+        "android.control.mode": 1,
+        "android.control.aeMode": 1,
+        "android.control.awbMode": 1,
+        "android.control.afMode": 1,
+        "android.colorCorrection.mode": 1,
+        "android.tonemap.mode": 1,
+        }
+
+def get_available_output_sizes(fmt, props):
+    """Return a sorted list of available output sizes for a given format.
+
+    Args:
+        fmt: the output format, as a string in ["jpg", "yuv", "raw"].
+        props: the object returned from its.device.get_camera_properties().
+
+    Returns:
+        A sorted list of (w,h) tuples (sorted large-to-small).
+    """
+    fmt_codes = {"raw":0x20, "raw10":0x25, "yuv":0x23, "jpg":0x100, "jpeg":0x100}
+    configs = props['android.scaler.streamConfigurationMap']\
+                   ['availableStreamConfigurations']
+    fmt_configs = [cfg for cfg in configs if cfg['format'] == fmt_codes[fmt]]
+    out_configs = [cfg for cfg in fmt_configs if cfg['input'] == False]
+    out_sizes = [(cfg['width'],cfg['height']) for cfg in out_configs]
+    out_sizes.sort(reverse=True)
+    return out_sizes
+
+def get_fastest_manual_capture_settings(props):
+    """Return a capture request and format spec for the fastest capture.
+
+    Args:
+        props: the object returned from its.device.get_camera_properties().
+
+    Returns:
+        Two values, the first is a capture request, and the second is an output
+        format specification, for the fastest possible (legal) capture that
+        can be performed on this device (with the smallest output size).
+    """
+    fmt = "yuv"
+    size = get_available_output_sizes(fmt, props)[-1]
+    out_spec = {"format":fmt, "width":size[0], "height":size[1]}
+    s = min(props['android.sensor.info.sensitivityRange'])
+    e = min(props['android.sensor.info.exposureTimeRange'])
+    req = manual_capture_request(s,e)
+    return req, out_spec
+
+def get_max_digital_zoom(props):
+    """Returns the maximum amount of zooming possible by the camera device.
+
+    Args:
+        props: the object returned from its.device.get_camera_properties().
+
+    Returns:
+        A float indicating the maximum amount of zooming possible by the
+        camera device.
+    """
+
+    maxz = 1.0
+
+    if props.has_key("android.scaler.availableMaxDigitalZoom"):
+        maxz = props["android.scaler.availableMaxDigitalZoom"]
+
+    return maxz
+
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+
+    def test_int_to_rational(self):
+        """Unit test for int_to_rational.
+        """
+        self.assertEqual(int_to_rational(10),
+                         {"numerator":10,"denominator":1})
+        self.assertEqual(int_to_rational([1,2]),
+                         [{"numerator":1,"denominator":1},
+                          {"numerator":2,"denominator":1}])
+
+    def test_float_to_rational(self):
+        """Unit test for float_to_rational.
+        """
+        self.assertEqual(float_to_rational(0.5001, 64),
+                        {"numerator":32, "denominator":64})
+
+    def test_rational_to_float(self):
+        """Unit test for rational_to_float.
+        """
+        self.assertTrue(
+                abs(rational_to_float({"numerator":32,"denominator":64})-0.5)
+                < 0.0001)
+
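+    # Illustrative additions: exercise manual_capture_request's linear
+    # tonemap option, and get_available_output_sizes on a hand-built (fake)
+    # properties object.
+    def test_manual_capture_request(self):
+        """Unit test for manual_capture_request with linear_tonemap."""
+        req = manual_capture_request(100, 10*1000*1000, linear_tonemap=True)
+        self.assertEqual(req["android.sensor.sensitivity"], 100)
+        self.assertEqual(req["android.tonemap.mode"], 0)
+        self.assertEqual(req["android.tonemap.curveRed"], [0.0,0.0, 1.0,1.0])
+
+    def test_get_available_output_sizes(self):
+        """Unit test for get_available_output_sizes, using fake properties."""
+        props = {"android.scaler.streamConfigurationMap":
+                    {"availableStreamConfigurations": [
+                        {"format":0x23, "input":False,
+                         "width":640, "height":480},
+                        {"format":0x100, "input":False,
+                         "width":1280, "height":720},
+                        {"format":0x23, "input":False,
+                         "width":1920, "height":1080}]}}
+        self.assertEqual(get_available_output_sizes("yuv", props),
+                         [(1920,1080), (640,480)])
+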
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/pymodules/its/target.py b/apps/CameraITS/pymodules/its/target.py
new file mode 100644
index 0000000..3715f34
--- /dev/null
+++ b/apps/CameraITS/pymodules/its/target.py
@@ -0,0 +1,266 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.image
+import its.objects
+import os
+import os.path
+import sys
+import json
+import unittest
+
+CACHE_FILENAME = "its.target.cfg"
+
+def __do_target_exposure_measurement(its_session):
+    """Use device 3A and captured shots to determine scene exposure.
+
+    Creates a new ITS device session (so this function should not be called
+    while another session to the device is open).
+
+    Assumes that the camera is pointed at a scene that is reasonably uniform
+    and reasonably lit -- that is, an appropriate target for running the ITS
+    tests that assume such uniformity.
+
+    Measures the scene using device 3A and then by taking a shot, to home in
+    on the exact exposure level that will result in a center 10% by 10% patch
+    of the scene having an intensity level of 0.5 (in the pixel range of
+    [0,1]) when a linear tonemap is used. That is, the pixels coming off the
+    sensor should be at approximately 50% intensity (note, however, that it
+    is actually the luma value in the YUV image that is targeted to 50%).
+
+    The computed exposure value is the product of the sensitivity (ISO) and
+    exposure time (ns) to achieve that sensor exposure level.
+
+    Args:
+        its_session: Holds an open device session.
+
+    Returns:
+        The measured product of sensitivity and exposure time that results in
+            the luma channel of captured shots having an intensity of 0.5.
+    """
+    print "Measuring target exposure"
+
+    # Get AE+AWB lock first, so the auto values in the capture result are
+    # populated properly.
+    r = [[0.45, 0.45, 0.1, 0.1, 1]]
+    sens, exp_time, gains, xform, _ \
+            = its_session.do_3a(r,r,r,do_af=False,get_results=True)
+
+    # Convert the transform to rational.
+    xform_rat = [{"numerator":int(100*x),"denominator":100} for x in xform]
+
+    # Linear tonemap
+    tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])
+
+    # Capture a manual shot with this exposure, using a linear tonemap.
+    # Use the gains+transform returned by the AWB pass.
+    req = its.objects.manual_capture_request(sens, exp_time)
+    req["android.tonemap.mode"] = 0
+    req["android.tonemap.curveRed"] = tmap
+    req["android.tonemap.curveGreen"] = tmap
+    req["android.tonemap.curveBlue"] = tmap
+    req["android.colorCorrection.transform"] = xform_rat
+    req["android.colorCorrection.gains"] = gains
+    cap = its_session.do_capture(req)
+
+    # Compute the mean luma of a center patch.
+    yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+    tile = its.image.get_image_patch(yimg, 0.45, 0.45, 0.1, 0.1)
+    luma_mean = its.image.compute_image_means(tile)
+
+    # Compute the exposure value that would result in a luma of 0.5.
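+    # Since a linear tonemap was used, luma scales in proportion to the
+    # sensitivity * exposure-time product; e.g. if the measured luma is 0.25,
+    # the exposure product must be doubled to reach 0.5.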
+    return sens * exp_time * 0.5 / luma_mean[0]
+
+def __set_cached_target_exposure(exposure):
+    """Saves the given exposure value to a cached location.
+
+    Once a value is cached, a call to __get_cached_target_exposure will return
+    the value, even from a subsequent test/script run. That is, the value is
+    persisted.
+
+    The value is persisted in a JSON file in the current directory (from which
+    the script calling this function is run).
+
+    Args:
+        exposure: The value to cache.
+    """
+    print "Setting cached target exposure"
+    with open(CACHE_FILENAME, "w") as f:
+        f.write(json.dumps({"exposure":exposure}))
+
+def __get_cached_target_exposure():
+    """Get the cached exposure value.
+
+    Returns:
+        The cached exposure value, or None if there is no valid cached value.
+    """
+    try:
+        with open(CACHE_FILENAME, "r") as f:
+            o = json.load(f)
+            return o["exposure"]
+    except (IOError, ValueError, KeyError):
+        return None
+
+def clear_cached_target_exposure():
+    """If there is a cached exposure value, clear it.
+    """
+    if os.path.isfile(CACHE_FILENAME):
+        os.remove(CACHE_FILENAME)
+
+def set_hardcoded_exposure(exposure):
+    """Set a hard-coded exposure value, rather than relying on measurements.
+
+    The exposure value is the product of sensitivity (ISO) and exposure time
+    (ns) that will result in a center-patch luma value of 0.5 (using a linear
+    tonemap) for the scene that the camera is pointing at.
+
+    If bringing up a new HAL implementation and the ability to use the
+    device to measure the scene isn't there yet (e.g. device 3A doesn't
+    work), then a cache file of the appropriate name can be manually created
+    and populated with a hard-coded value using this function.
+
+    Args:
+        exposure: The hard-coded exposure value to set.
+    """
+    __set_cached_target_exposure(exposure)
+
+def get_target_exposure(its_session=None):
+    """Get the target exposure to use.
+
+    If there is a cached value and if the "target" command line parameter is
+    present, then return the cached value. Otherwise, measure a new value from
+    the scene, cache it, then return it.
+
+    Args:
+        its_session: Optional, holding an open device session.
+
+    Returns:
+        The target exposure value.
+    """
+    cached_exposure = None
+    for s in sys.argv[1:]:
+        if s == "target":
+            cached_exposure = __get_cached_target_exposure()
+    if cached_exposure is not None:
+        print "Using cached target exposure"
+        return cached_exposure
+    if its_session is None:
+        with its.device.ItsSession() as cam:
+            measured_exposure = __do_target_exposure_measurement(cam)
+    else:
+        measured_exposure = __do_target_exposure_measurement(its_session)
+    __set_cached_target_exposure(measured_exposure)
+    return measured_exposure
+
+def get_target_exposure_combos(its_session=None):
+    """Get a set of legal combinations of target (exposure time, sensitivity).
+
+    Gets the target exposure value, which is a product of sensitivity (ISO) and
+    exposure time, and returns equivalent tuples of (exposure time,sensitivity)
+    that are all legal and that correspond to the four extrema in this 2D param
+    space, as well as to two "middle" points.
+
+    Will open a device session if its_session is None.
+
+    Args:
+        its_session: Optional, holding an open device session.
+
+    Returns:
+        Object containing six legal (exposure time, sensitivity) tuples, keyed
+        by the following strings:
+            "minExposureTime"
+            "midExposureTime"
+            "maxExposureTime"
+            "minSensitivity"
+            "midSensitivity"
+            "maxSensitivity
+    """
+    if its_session is None:
+        with its.device.ItsSession() as cam:
+            exposure = get_target_exposure(cam)
+            props = cam.get_camera_properties()
+    else:
+        exposure = get_target_exposure(its_session)
+        props = its_session.get_camera_properties()
+
+    sens_range = props['android.sensor.info.sensitivityRange']
+    exp_time_range = props['android.sensor.info.exposureTimeRange']
+
+    # Combo 1: smallest legal exposure time.
+    e1_expt = exp_time_range[0]
+    e1_sens = exposure / e1_expt
+    if e1_sens > sens_range[1]:
+        e1_sens = sens_range[1]
+        e1_expt = exposure / e1_sens
+
+    # Combo 2: largest legal exposure time.
+    e2_expt = exp_time_range[1]
+    e2_sens = exposure / e2_expt
+    if e2_sens < sens_range[0]:
+        e2_sens = sens_range[0]
+        e2_expt = exposure / e2_sens
+
+    # Combo 3: smallest legal sensitivity.
+    e3_sens = sens_range[0]
+    e3_expt = exposure / e3_sens
+    if e3_expt > exp_time_range[1]:
+        e3_expt = exp_time_range[1]
+        e3_sens = exposure / e3_expt
+
+    # Combo 4: largest legal sensitivity.
+    e4_sens = sens_range[1]
+    e4_expt = exposure / e4_sens
+    if e4_expt < exp_time_range[0]:
+        e4_expt = exp_time_range[0]
+        e4_sens = exposure / e4_expt
+
+    # Combo 5: middle exposure time.
+    e5_expt = (exp_time_range[0] + exp_time_range[1]) / 2.0
+    e5_sens = exposure / e5_expt
+    if e5_sens > sens_range[1]:
+        e5_sens = sens_range[1]
+        e5_expt = exposure / e5_sens
+    if e5_sens < sens_range[0]:
+        e5_sens = sens_range[0]
+        e5_expt = exposure / e5_sens
+
+    # Combo 6: middle sensitivity.
+    e6_sens = (sens_range[0] + sens_range[1]) / 2.0
+    e6_expt = exposure / e6_sens
+    if e6_expt > exp_time_range[1]:
+        e6_expt = exp_time_range[1]
+        e6_sens = exposure / e6_expt
+    if e6_expt < exp_time_range[0]:
+        e6_expt = exp_time_range[0]
+        e6_sens = exposure / e6_expt
+
+    return {
+        "minExposureTime" : (int(e1_expt), int(e1_sens)),
+        "maxExposureTime" : (int(e2_expt), int(e2_sens)),
+        "minSensitivity" : (int(e3_expt), int(e3_sens)),
+        "maxSensitivity" : (int(e4_expt), int(e4_sens)),
+        "midExposureTime" : (int(e5_expt), int(e5_sens)),
+        "midSensitivity" : (int(e6_expt), int(e6_sens))
+        }
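+
+# Worked example (illustrative numbers): with a target exposure product of
+# 8e9 (ISO * ns), a sensitivity range of [100, 800], and an exposure time
+# range of [1e5, 1e9] ns, the "minSensitivity" combo is (8e7, 100), since
+# 100 * 8e7 == 8e9 and 8e7 ns lies inside the legal exposure time range.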
+
+class __UnitTest(unittest.TestCase):
+    """Run a suite of unit tests on this module.
+    """
+    # TODO: Add some unit tests.
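+
+    # One illustrative test of the pieces that don't need a device: round-
+    # trip the exposure cache. Note that this writes (and then removes) the
+    # real cache file in the current directory.
+    def test_exposure_cache(self):
+        """Unit test for the cached target exposure set/get/clear path."""
+        set_hardcoded_exposure(12345)
+        with open(CACHE_FILENAME, "r") as f:
+            self.assertEqual(json.load(f)["exposure"], 12345)
+        clear_cached_target_exposure()
+        self.assertFalse(os.path.isfile(CACHE_FILENAME))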
+
+if __name__ == '__main__':
+    unittest.main()
+
diff --git a/apps/CameraITS/tests/inprog/scene2/README b/apps/CameraITS/tests/inprog/scene2/README
new file mode 100644
index 0000000..3a0953f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/README
@@ -0,0 +1,8 @@
+Scene 2 requires a camera lab with controlled illuminants, for example
+light sources capable of producing D65, D50, A, TL84, etc. illumination.
+Specific charts may also be required, for example grey cards, color
+checker charts, and resolution charts. The individual tests will specify
+the setup that they require.
+
+If a test requires that the camera be in any particular orientation, it will
+specify this too. Otherwise, the camera can be in either portrait or landscape.
diff --git a/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
new file mode 100644
index 0000000..0c96ca7
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/scene2/test_dng_tags.py
@@ -0,0 +1,94 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.dng
+import its.objects
+import numpy
+import os.path
+
+def main():
+    """Test that the DNG tags are internally self-consistent.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Assumes that illuminant 1 is D65, and illuminant 2 is standard A.
+        # TODO: Generalize DNG tags check for any provided illuminants.
+        illum_code = [21, 17] # D65, A
+        illum_str = ['D65', 'A']
+        ref_str = ['android.sensor.referenceIlluminant%d'%(i) for i in [1,2]]
+        cm_str = ['android.sensor.colorTransform%d'%(i) for i in [1,2]]
+        fm_str = ['android.sensor.forwardMatrix%d'%(i) for i in [1,2]]
+        cal_str = ['android.sensor.calibrationTransform%d'%(i) for i in [1,2]]
+        dng_illum = [its.dng.D65, its.dng.A]
+
+        for i in [0,1]:
+            assert(props[ref_str[i]] == illum_code[i])
+            raw_input("\n[Point camera at grey card under %s and press ENTER]"%(
+                    illum_str[i]))
+
+            cam.do_3a(do_af=False)
+            cap = cam.do_capture(its.objects.auto_capture_request())
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            ccm = its.objects.rational_to_float(
+                    cap["metadata"]["android.colorCorrection.transform"])
+            cal = its.objects.rational_to_float(props[cal_str[i]])
+            print "HAL reported gains:\n", numpy.array(gains)
+            print "HAL reported ccm:\n", numpy.array(ccm).reshape(3,3)
+            print "HAL reported cal:\n", numpy.array(cal).reshape(3,3)
+
+            # Dump the image.
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_%s.jpg" % (NAME, illum_str[i]))
+
+            # Compute the matrices that are expected under this illuminant from
+            # the HAL-reported WB gains, CCM, and calibration matrix.
+            cm, fm = its.dng.compute_cm_fm(dng_illum[i], gains, ccm, cal)
+            asn = its.dng.compute_asn(dng_illum[i], cal, cm)
+            print "Expected ColorMatrix:\n", cm
+            print "Expected ForwardMatrix:\n", fm
+            print "Expected AsShotNeutral:\n", asn
+
+            # Get the matrices that are reported by the HAL for this
+            # illuminant.
+            cm_ref = numpy.array(its.objects.rational_to_float(
+                    props[cm_str[i]])).reshape(3,3)
+            fm_ref = numpy.array(its.objects.rational_to_float(
+                    props[fm_str[i]])).reshape(3,3)
+            asn_ref = numpy.array(its.objects.rational_to_float(
+                    cap['metadata']['android.sensor.neutralColorPoint']))
+            print "Reported ColorMatrix:\n", cm_ref
+            print "Reported ForwardMatrix:\n", fm_ref
+            print "Reported AsShotNeutral:\n", asn_ref
+
+            # The color matrix may be scaled (between the reported and
+            # expected values).
+            cm_scale = cm.mean(1).mean(0) / cm_ref.mean(1).mean(0)
+            print "ColorMatrix scale factor:", cm_scale
+
+            # Compute the deltas between reported and expected.
+            print "Ratios in ColorMatrix:\n", cm / cm_ref
+            print "Deltas in ColorMatrix (after normalizing):\n", cm/cm_scale - cm_ref
+            print "Deltas in ForwardMatrix:\n", fm - fm_ref
+            print "Deltas in AsShotNeutral:\n", asn - asn_ref
+
+            # TODO: Add pass/fail test on DNG matrices.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_3a_remote.py b/apps/CameraITS/tests/inprog/test_3a_remote.py
new file mode 100644
index 0000000..c76ff6d
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_3a_remote.py
@@ -0,0 +1,70 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pprint
+import math
+import numpy
+import matplotlib.pyplot
+import mpl_toolkits.mplot3d
+
+def main():
+    """Run 3A remotely (from this script).
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        w_map = props["android.lens.info.shadingMapSize"]["width"]
+        h_map = props["android.lens.info.shadingMapSize"]["height"]
+
+        # TODO: Test for 3A convergence, and exit this test once converged.
+
+        triggered = False
+        while True:
+            req = its.objects.auto_capture_request()
+            req["android.statistics.lensShadingMapMode"] = 1
+            req['android.control.aePrecaptureTrigger'] = (0 if triggered else 1)
+            req['android.control.afTrigger'] = (0 if triggered else 1)
+            triggered = True
+
+            cap = cam.do_capture(req)
+
+            ae_state = cap["metadata"]["android.control.aeState"]
+            awb_state = cap["metadata"]["android.control.awbState"]
+            af_state = cap["metadata"]["android.control.afState"]
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            transform = cap["metadata"]["android.colorCorrection.transform"]
+            exp_time = cap["metadata"]['android.sensor.exposureTime']
+            lsc_map = cap["metadata"]["android.statistics.lensShadingMap"]
+            foc_dist = cap["metadata"]['android.lens.focusDistance']
+            foc_range = cap["metadata"]['android.lens.focusRange']
+
+            print "States (AE,AWB,AF):", ae_state, awb_state, af_state
+            print "Gains:", gains
+            print "Transform:", [its.objects.rational_to_float(t)
+                                 for t in transform]
+            print "AE region:", cap["metadata"]['android.control.aeRegions']
+            print "AF region:", cap["metadata"]['android.control.afRegions']
+            print "AWB region:", cap["metadata"]['android.control.awbRegions']
+            print "LSC map:", w_map, h_map, lsc_map[:8]
+            print "Focus (dist,range):", foc_dist, foc_range
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_black_level.py b/apps/CameraITS/tests/inprog/test_black_level.py
new file mode 100644
index 0000000..37dab94
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_black_level.py
@@ -0,0 +1,99 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Black level consistence test.
+
+    Test: capture dark frames and check if black level correction is done
+    correctly.
+    1. Black level should be roughly consistent for repeating shots.
+    2. Noise distribution should be roughly centered at black level.
+
+    Shoot with the camera covered (i.e., dark/black). The test varies the
+    sensitivity parameter.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_REPEAT = 3
+    NUM_STEPS = 3
+
+    # Only check the center part where LSC has little effects.
+    R = 200
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    ymodes = []
+    umodes = []
+    vmodes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
+        sensitivities = [sens_range[0] + i*sens_step for i in range(NUM_STEPS)]
+        print "Sensitivities:", sensitivities
+
+        for si, s in enumerate(sensitivities):
+            for rep in xrange(NUM_REPEAT):
+                req = its.objects.manual_capture_request(100, 1*1000*1000)
+                req["android.blackLevel.lock"] = True
+                req["android.sensor.sensitivity"] = s
+                cap = cam.do_capture(req)
+                yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+                w = cap["width"]
+                h = cap["height"]
+
+                # Magnify the noise in saved images to help visualize.
+                its.image.write_image(yimg * 2,
+                                      "%s_s=%05d_y.jpg" % (NAME, s), True)
+                its.image.write_image(numpy.absolute(uimg - 0.5) * 2,
+                                      "%s_s=%05d_u.jpg" % (NAME, s), True)
+
+                # Crop the center region; the planes are indexed [y,x], and
+                # the U,V planes are half-size due to 4:2:0 subsampling.
+                yimg = yimg[h/2-R:h/2+R, w/2-R:w/2+R]
+                uimg = uimg[h/4-R/2:h/4+R/2, w/4-R/2:w/4+R/2]
+                vimg = vimg[h/4-R/2:h/4+R/2, w/4-R/2:w/4+R/2]
+                yhist,_ = numpy.histogram(yimg*255, 256, (0,256))
+                ymodes.append(numpy.argmax(yhist))
+                uhist,_ = numpy.histogram(uimg*255, 256, (0,256))
+                umodes.append(numpy.argmax(uhist))
+                vhist,_ = numpy.histogram(vimg*255, 256, (0,256))
+                vmodes.append(numpy.argmax(vhist))
+
+                # Take 32 bins from Y, U, and V.
+                # Histograms of U and V are cropped at the center of 128.
+                pylab.plot(range(32), yhist.tolist()[0:32], 'rgb'[si])
+                pylab.plot(range(32), uhist.tolist()[112:144], 'rgb'[si]+'--')
+                pylab.plot(range(32), vhist.tolist()[112:144], 'rgb'[si]+'--')
+
+    pylab.xlabel("DN: Y[0:32], U[112:144], V[112:144]")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    print "Y black levels:", ymodes
+    print "U black levels:", umodes
+    print "V black levels:", vmodes
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_blc_lsc.py b/apps/CameraITS/tests/inprog/test_blc_lsc.py
new file mode 100644
index 0000000..ce120a2
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_blc_lsc.py
@@ -0,0 +1,106 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that BLC and LSC look reasonable.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    r_means_center = []
+    g_means_center = []
+    b_means_center = []
+    r_means_corner = []
+    g_means_corner = []
+    b_means_corner = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        expt_range = props['android.sensor.info.exposureTimeRange']
+
+        # Get AE+AWB lock first, so the auto values in the capture result are
+        # populated properly.
+        r = [[0,0,1,1,1]]
+        ae_sen,ae_exp,awb_gains,awb_transform,_ \
+                = cam.do_3a(r,r,r,do_af=False,get_results=True)
+        print "AE:", ae_sen, ae_exp / 1000000.0
+        print "AWB:", awb_gains, awb_transform
+
+        # Set analog gain (sensitivity) to 800
+        ae_exp = ae_exp * ae_sen / 800
+        ae_sen = 800
+
+        # Capture range of exposures from 1/100x to 4x of AE estimate.
+        exposures = [ae_exp*x/100.0 for x in [1]+range(10,401,40)]
+        exposures = [e for e in exposures
+                     if e >= expt_range[0] and e <= expt_range[1]]
+
+        # Convert the transform back to rational.
+        awb_transform_rat = its.objects.float_to_rational(awb_transform)
+
+        # Linear tonemap
+        tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])
+
+        reqs = []
+        for e in exposures:
+            req = its.objects.manual_capture_request(ae_sen,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = tmap
+            req["android.tonemap.curveGreen"] = tmap
+            req["android.tonemap.curveBlue"] = tmap
+            req["android.colorCorrection.transform"] = awb_transform_rat
+            req["android.colorCorrection.gains"] = awb_gains
+            reqs.append(req)
+
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg"%(NAME, i))
+
+            tile_center = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_center)
+            r_means_center.append(rgb_means[0])
+            g_means_center.append(rgb_means[1])
+            b_means_center.append(rgb_means[2])
+
+            tile_corner = its.image.get_image_patch(img, 0.0, 0.0, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile_corner)
+            r_means_corner.append(rgb_means[0])
+            g_means_corner.append(rgb_means[1])
+            b_means_corner.append(rgb_means[2])
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_center, 'r')
+    pylab.plot(exposures, g_means_center, 'g')
+    pylab.plot(exposures, b_means_center, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_center.png" % (NAME))
+
+    fig = matplotlib.pyplot.figure()
+    pylab.plot(exposures, r_means_corner, 'r')
+    pylab.plot(exposures, g_means_corner, 'g')
+    pylab.plot(exposures, b_means_corner, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means_corner.png" % (NAME))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
new file mode 100644
index 0000000..fdf72be
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_auto.py
@@ -0,0 +1,91 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 50
+    BURSTS = 5
+    FRAMES = BURST_LEN * BURSTS
+
+    SPREAD_THRESH = 0.03
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at the smallest resolution.
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        w,h = fmt["width"], fmt["height"]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        # Also build a 4D array, which is an array of all RGB images.
+        r_means = []
+        g_means = []
+        b_means = []
+        imgs = numpy.empty([FRAMES,h,w,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN, [fmt])
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.convert_capture_to_rgb_image(cap)
+                tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
+                means = its.image.compute_image_means(tile)
+                r_means.append(means[0])
+                g_means.append(means[1])
+                b_means.append(means[2])
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print spread
+            assert(spread < SPREAD_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
new file mode 100644
index 0000000..a8d1d45
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_burst_sameness_fullres_auto.py
@@ -0,0 +1,91 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import numpy
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify if there are sporadic
+    frames that are processed differently or have artifacts, or if 3A isn't
+    stable, since this test converges 3A at the start but doesn't lock 3A
+    throughout capture.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 6
+    BURSTS = 2
+    FRAMES = BURST_LEN * BURSTS
+
+    DELTA_THRESH = 0.1
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at full resolution.
+        props = cam.get_camera_properties()
+        w,h = its.objects.get_available_output_sizes("yuv", props)[0]
+
+        # Converge 3A prior to capture.
+        cam.do_3a(lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.blackLevel.lock"] = True
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Build a 4D array, which is an array of all RGB images after down-
+        # scaling them by a factor of 4x4.
+        imgs = numpy.empty([FRAMES,h/4,w/4,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN)
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.downscale_image(
+                        its.image.convert_capture_to_rgb_image(cap), 4)
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Compute the deltas of each image from the mean image; this test
+        # passes if none of the deltas are large.
+        print "Computing frame differences"
+        delta_maxes = []
+        for i in range(FRAMES):
+            deltas = (imgs[i] - img_mean).reshape(h*w*3/16)
+            delta_max_pos = numpy.max(deltas)
+            delta_max_neg = numpy.min(deltas)
+            delta_maxes.append(max(abs(delta_max_pos), abs(delta_max_neg)))
+        max_delta_max = max(delta_maxes)
+        print "Frame %d has largest diff %f" % (
+                delta_maxes.index(max_delta_max), max_delta_max)
+        assert(max_delta_max < DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_crop_region.py b/apps/CameraITS/tests/inprog/test_crop_region.py
new file mode 100644
index 0000000..396603f
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_crop_region.py
@@ -0,0 +1,67 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import its.image
+import its.device
+import its.objects
+
+
+def main():
+    """Takes shots with different sensor crop regions.
+    """
+    name = os.path.basename(__file__).split(".")[0]
+
+    # Regions specified here in x,y,w,h normalized form.
+    regions = [[0.0, 0.0, 0.5, 0.5], # top left
+               [0.0, 0.5, 0.5, 0.5], # bottom left
+               [0.5, 0.0, 0.5, 1.0]] # right side (top + bottom)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        r = props['android.sensor.info.pixelArraySize']
+        w = r['width']
+        h = r['height']
+
+        # Capture a full frame first.
+        reqs = [its.objects.auto_capture_request()]
+        print "Capturing img0 with the full sensor region"
+
+        # Capture a frame for each of the regions.
+        for i,region in enumerate(regions):
+            req = its.objects.auto_capture_request()
+            req['android.scaler.cropRegion'] = {
+                    "left": int(region[0] * w),
+                    "top": int(region[1] * h),
+                    "right": int((region[0]+region[2])*w),
+                    "bottom": int((region[1]+region[3])*h)}
+            reqs.append(req)
+            crop = req['android.scaler.cropRegion']
+            print "Capturing img%d with crop: %d,%d %dx%d"%(i+1,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+        cam.do_3a()
+        caps = cam.do_capture(reqs)
+
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            crop = cap["metadata"]['android.scaler.cropRegion']
+            its.image.write_image(img, "%s_img%d.jpg"%(name,i))
+            print "Captured img%d with crop: %d,%d %dx%d"%(i,
+                    crop["left"],crop["top"],
+                    crop["right"]-crop["left"],crop["bottom"]-crop["top"])
+
+if __name__ == '__main__':
+    main()
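The crop rectangles above are derived from normalized [x,y,w,h] regions. A
small standalone sketch of that conversion (the sensor dimensions are made
up):

    def region_to_crop_rect(region, w, h):
        """Convert a normalized [x,y,w,h] region to a pixel crop rect."""
        x, y, rw, rh = region
        return {"left": int(x * w), "top": int(y * h),
                "right": int((x + rw) * w), "bottom": int((y + rh) * h)}

    r = region_to_crop_rect([0.0, 0.5, 0.5, 0.5], 4000, 3000)
    print "crop: %d,%d %dx%d" % (r["left"], r["top"],
            r["right"] - r["left"], r["bottom"] - r["top"])
    # crop: 0,1500 2000x1500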
diff --git a/apps/CameraITS/tests/inprog/test_ev_compensation.py b/apps/CameraITS/tests/inprog/test_ev_compensation.py
new file mode 100644
index 0000000..f9b0cd3
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_ev_compensation.py
@@ -0,0 +1,71 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Tests that EV compensation is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    MAX_LUMA_DELTA_THRESH = 0.01
+    AVG_LUMA_DELTA_THRESH = 0.001
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        cam.do_3a()
+
+        # Capture auto shots, but with a linear tonemap.
+        req = its.objects.auto_capture_request()
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveGreen"] = (0.0, 0.0, 1.0, 1.0)
+        req["android.tonemap.curveBlue"] = (0.0, 0.0, 1.0, 1.0)
+
+        evs = range(-4,5)
+        lumas = []
+        for ev in evs:
+            req['android.control.aeExposureCompensation'] = ev
+            cap = cam.do_capture(req)
+            y = its.image.convert_capture_to_planes(cap)[0]
+            tile = its.image.get_image_patch(y, 0.45,0.45,0.1,0.1)
+            lumas.append(its.image.compute_image_means(tile)[0])
+
+        ev_step_size_in_stops = its.objects.rational_to_float(
+                props['android.control.aeCompensationStep'])
+        luma_increase_per_step = pow(2, ev_step_size_in_stops)
+        expected_lumas = [lumas[0] * pow(luma_increase_per_step, i) \
+                for i in range(len(evs))]
+
+        pylab.plot(evs, lumas, 'r')
+        pylab.plot(evs, expected_lumas, 'b')
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        luma_diffs = [abs(expected_lumas[i] - lumas[i])
+                      for i in range(len(evs))]
+        max_diff = max(luma_diffs)
+        avg_diff = sum(luma_diffs) / len(luma_diffs)
+        print "Max delta between modeled and measured lumas:", max_diff
+        print "Avg delta between modeled and measured lumas:", avg_diff
+        assert(max_diff < MAX_LUMA_DELTA_THRESH)
+        assert(avg_diff < AVG_LUMA_DELTA_THRESH)
+
+if __name__ == '__main__':
+    main()
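The model being fit above is exponential in the EV setting: each EV step
multiplies scene luma by 2^step_size. A toy recomputation of the expected
curve, assuming a hypothetical aeCompensationStep of 1/3 stop and a made-up
base luma:

    step_size = 1.0 / 3                      # assumed 1/3-stop EV steps
    luma_per_step = pow(2, step_size)
    evs = range(-4, 5)
    luma0 = 0.05                             # luma measured at EV -4 (made up)
    expected_lumas = [luma0 * pow(luma_per_step, i) for i in range(len(evs))]
    print ["%.3f" % l for l in expected_lumas]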
diff --git a/apps/CameraITS/tests/inprog/test_faces.py b/apps/CameraITS/tests/inprog/test_faces.py
new file mode 100644
index 0000000..228dac8
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_faces.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test face detection.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        cam.do_3a()
+        req = its.objects.auto_capture_request()
+        req['android.statistics.faceDetectMode'] = 2
+        caps = cam.do_capture([req]*5)
+        for i,cap in enumerate(caps):
+            md = cap['metadata']
+            print "Frame %d face metadata:" % i
+            print "  Ids:", md['android.statistics.faceIds']
+            print "  Landmarks:", md['android.statistics.faceLandmarks']
+            print "  Rectangles:", md['android.statistics.faceRectangles']
+            print "  Scores:", md['android.statistics.faceScores']
+            print ""
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_param_black_level_lock.py b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
new file mode 100644
index 0000000..7d0be92
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_black_level_lock.py
@@ -0,0 +1,76 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Test that when the black level is locked, it doesn't change.
+
+    Shoot with the camera covered (i.e. dark/black). The test varies the
+    sensitivity parameter and checks whether the black level changes.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_STEPS = 5
+
+    req = {
+        "android.blackLevel.lock": True,
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.exposureTime": 10*1000*1000
+        }
+
+    # The most frequent pixel value in each image; assume this is the black
+    # level, since the images are all dark (shot with the lens covered).
+    modes = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sensitivities = range(sens_range[0],
+                              sens_range[1]+1,
+                              int((sens_range[1] - sens_range[0]) / NUM_STEPS))
+        for si, s in enumerate(sensitivities):
+            req["android.sensor.sensitivity"] = s
+            cap = cam.do_capture(req)
+            yimg,_,_ = its.image.convert_capture_to_planes(cap)
+            hist,_ = numpy.histogram(yimg*255, 256, (0,256))
+            modes.append(numpy.argmax(hist))
+
+            # Add the low end of this histogram to the plot, one line per
+            # sensitivity setting.
+            pylab.plot(range(16), hist.tolist()[:16])
+
+    pylab.xlabel("Luma DN, showing [0:16] out of full [0:256] range")
+    pylab.ylabel("Pixel count")
+    pylab.title("Histograms for different sensitivities")
+    matplotlib.pyplot.savefig("%s_plot_histograms.png" % (NAME))
+
+    # Check that the black levels are all the same.
+    print "Black levels:", modes
+    assert(all([modes[i] == modes[0] for i in range(len(modes))]))
+
+if __name__ == '__main__':
+    main()
+
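Estimating the black level as the histogram peak works because a covered
sensor produces pixels tightly clustered around the pedestal value. A
self-contained illustration on a synthetic dark frame (the pedestal DN and
noise level are arbitrary):

    import numpy

    # Fake dark frame: luma in [0,1], clustered around DN 8 out of 255.
    yimg = numpy.clip(
            numpy.random.normal(8/255.0, 1/255.0, (120, 160)), 0, 1)
    hist, _ = numpy.histogram(yimg * 255, 256, (0, 256))
    print "Estimated black level DN:", numpy.argmax(hist)   # ~8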
diff --git a/apps/CameraITS/tests/inprog/test_param_edge_mode.py b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
new file mode 100644
index 0000000..e928f21
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_param_edge_mode.py
@@ -0,0 +1,48 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.edge.mode parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    req = {
+        "android.control.mode": 0,
+        "android.control.aeMode": 0,
+        "android.control.awbMode": 0,
+        "android.control.afMode": 0,
+        "android.sensor.frameDuration": 0,
+        "android.sensor.exposureTime": 30*1000*1000,
+        "android.sensor.sensitivity": 100
+        }
+
+    with its.device.ItsSession() as cam:
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        for e in [0,1,2]:
+            req["android.edge.mode"] = e
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, e))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/inprog/test_test_patterns.py b/apps/CameraITS/tests/inprog/test_test_patterns.py
new file mode 100644
index 0000000..f75b141
--- /dev/null
+++ b/apps/CameraITS/tests/inprog/test_test_patterns.py
@@ -0,0 +1,41 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test sensor test patterns.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        caps = []
+        for i in range(1,6):
+            req = its.objects.manual_capture_request(100, 10*1000*1000)
+            req['android.sensor.testPatternData'] = [40, 100, 160, 220]
+            req['android.sensor.testPatternMode'] = i
+
+            # Capture the shot twice, and use the second one, so the pattern
+            # will have stabilized.
+            caps = cam.do_capture([req]*2)
+
+            img = its.image.convert_capture_to_rgb_image(caps[1])
+            its.image.write_image(img, "%s_pattern=%d.jpg" % (NAME, i))
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_camera_properties.py b/apps/CameraITS/tests/scene0/test_camera_properties.py
new file mode 100644
index 0000000..eb638f0
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_camera_properties.py
@@ -0,0 +1,43 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.caps
+import its.device
+import its.objects
+import pprint
+
+def main():
+    """Basic test to query and print out camera properties.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        pprint.pprint(props)
+
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+
+        # Test that a handful of required keys are present.
+        assert(props.has_key('android.sensor.info.sensitivityRange'))
+        assert(props.has_key('android.sensor.orientation'))
+        assert(props.has_key('android.scaler.streamConfigurationMap'))
+        assert(props.has_key('android.lens.facing'))
+
+        print "JPG sizes:", its.objects.get_available_output_sizes("jpg", props)
+        print "RAW sizes:", its.objects.get_available_output_sizes("raw", props)
+        print "YUV sizes:", its.objects.get_available_output_sizes("yuv", props)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_capture_result_dump.py b/apps/CameraITS/tests/scene0/test_capture_result_dump.py
new file mode 100644
index 0000000..66465573
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_capture_result_dump.py
@@ -0,0 +1,40 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.caps
+import its.image
+import its.device
+import its.objects
+import its.target
+import pprint
+
+def main():
+    """Test that a capture result is returned from a manual capture; dump it.
+    """
+
+    with its.device.ItsSession() as cam:
+        # Arbitrary capture request exposure values; image content is not
+        # important for this test, only the metadata.
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+
+        req,fmt = its.objects.get_fastest_manual_capture_settings(props)
+        cap = cam.do_capture(req, fmt)
+        pprint.pprint(cap["metadata"])
+
+        # No pass/fail check; test passes if it completes.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_gyro_bias.py b/apps/CameraITS/tests/scene0/test_gyro_bias.py
new file mode 100644
index 0000000..7ea90c3
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_gyro_bias.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import time
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+
+def main():
+    """Test if the gyro has stable output when device is stationary.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Number of samples averaged together, in the plot.
+    N = 20
+
+    # Pass/fail thresholds for gyro drift
+    MEAN_THRESH = 0.01
+    VAR_THRESH = 0.001
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Only run test if the appropriate caps are claimed.
+        its.caps.skip_unless(its.caps.sensor_fusion(props))
+
+        print "Collecting gyro events"
+        cam.start_sensor_events()
+        time.sleep(5)
+        gyro_events = cam.get_sensor_events()["gyro"]
+
+    nevents = (len(gyro_events) / N) * N
+    gyro_events = gyro_events[:nevents]
+    times = numpy.array([(e["time"] - gyro_events[0]["time"])/1000000000.0
+                         for e in gyro_events])
+    xs = numpy.array([e["x"] for e in gyro_events])
+    ys = numpy.array([e["y"] for e in gyro_events])
+    zs = numpy.array([e["z"] for e in gyro_events])
+
+    # Group samples into size-N groups and average each together, to get rid
+    # of individual random spikes in the data.
+    times = times[N/2::N]
+    xs = xs.reshape(nevents/N, N).mean(1)
+    ys = ys.reshape(nevents/N, N).mean(1)
+    zs = zs.reshape(nevents/N, N).mean(1)
+
+    pylab.plot(times, xs, 'r', label="x")
+    pylab.plot(times, ys, 'g', label="y")
+    pylab.plot(times, zs, 'b', label="z")
+    pylab.xlabel("Time (seconds)")
+    pylab.ylabel("Gyro readings (mean of %d samples)"%(N))
+    pylab.legend()
+    matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
+
+    for samples in [xs,ys,zs]:
+        assert(abs(samples.mean()) < MEAN_THRESH)
+        assert(numpy.var(samples) < VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
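The reshape trick above averages every N consecutive samples in one
vectorized step, which suppresses isolated spikes before the drift check.
The same idea on a synthetic stream (length and N are arbitrary):

    import numpy

    N = 20
    samples = numpy.random.normal(0.0, 0.01, 207)  # fake gyro axis readings
    nevents = (len(samples) / N) * N               # truncate to multiple of N
    group_means = samples[:nevents].reshape(nevents / N, N).mean(1)
    print "%d raw samples -> %d group means" % (len(samples), len(group_means))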
diff --git a/apps/CameraITS/tests/scene0/test_jitter.py b/apps/CameraITS/tests/scene0/test_jitter.py
new file mode 100644
index 0000000..82e8e38
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_jitter.py
@@ -0,0 +1,65 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Measure jitter in camera timestamps.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Pass/fail thresholds
+    MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames
+    MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas
+    MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props))
+
+        req, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        caps = cam.do_capture([req]*50, [fmt])
+
+        # Print out the millisecond delta between the start of each exposure
+        tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]
+        deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]
+        deltas_ms = [d/1000000.0 for d in deltas]
+        avg = sum(deltas_ms) / len(deltas_ms)
+        var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg
+        range0 = min(deltas_ms) - avg
+        range1 = max(deltas_ms) - avg
+        print "Average:", avg
+        print "Variance:", var
+        print "Jitter range:", range0, "to", range1
+
+        # Draw a plot.
+        pylab.plot(range(len(deltas_ms)), deltas_ms)
+        matplotlib.pyplot.savefig("%s_deltas.png" % (NAME))
+
+        # Test for pass/fail.
+        assert(avg > MIN_AVG_FRAME_DELTA)
+        assert(var < MAX_VAR_FRAME_DELTA)
+        assert(abs(range0) < MAX_FRAME_DELTA_JITTER)
+        assert(abs(range1) < MAX_FRAME_DELTA_JITTER)
+
+if __name__ == '__main__':
+    main()
+
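The variance above uses the identity Var(X) = E[X^2] - E[X]^2, and the
jitter bound is the largest deviation of any frame delta from the average.
Recomputing those statistics on a made-up list of frame deltas:

    deltas_ms = [33.3, 33.4, 33.2, 33.3, 33.5, 33.3]  # synthetic deltas (ms)
    avg = sum(deltas_ms) / len(deltas_ms)
    var = sum([d * d for d in deltas_ms]) / len(deltas_ms) - avg * avg
    jitter = max(avg - min(deltas_ms), max(deltas_ms) - avg)
    print "avg %.2fms, var %.4f, jitter %.2fms" % (avg, var, jitter)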
diff --git a/apps/CameraITS/tests/scene0/test_metadata.py b/apps/CameraITS/tests/scene0/test_metadata.py
new file mode 100644
index 0000000..b4ca4cb
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_metadata.py
@@ -0,0 +1,98 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import its.target
+import its.caps
+
+def main():
+    """Test the validity of some metadata entries.
+
+    Looks at capture results and at the camera characteristics objects.
+    """
+    global md, props, failed
+
+    with its.device.ItsSession() as cam:
+        # Arbitrary capture request exposure values; image content is not
+        # important for this test, only the metadata.
+        props = cam.get_camera_properties()
+        auto_req = its.objects.auto_capture_request()
+        cap = cam.do_capture(auto_req)
+        md = cap["metadata"]
+
+    print "Hardware level"
+    print "  Legacy:", its.caps.legacy(props)
+    print "  Limited:", its.caps.limited(props)
+    print "  Full:", its.caps.full(props)
+    print "Capabilities"
+    print "  Manual sensor:", its.caps.manual_sensor(props)
+    print "  Manual post-proc:", its.caps.manual_post_proc(props)
+    print "  Raw:", its.caps.raw(props)
+    print "  Sensor fusion:", its.caps.sensor_fusion(props)
+
+    # Test: hardware level should be a valid value.
+    check('props.has_key("android.info.supportedHardwareLevel")')
+    check('props["android.info.supportedHardwareLevel"] is not None')
+    check('props["android.info.supportedHardwareLevel"] in [0,1,2]')
+    full = getval('props["android.info.supportedHardwareLevel"]') == 1
+
+    # Test: the rollingShutterSkew and frameDuration tags must both be
+    # present, and rollingShutterSkew must be greater than zero and smaller
+    # than the reported frame duration.
+    check('md.has_key("android.sensor.frameDuration")')
+    check('md["android.sensor.frameDuration"] is not None')
+    check('md.has_key("android.sensor.rollingShutterSkew")')
+    check('md["android.sensor.rollingShutterSkew"] is not None')
+    check('md["android.sensor.frameDuration"] > '
+          'md["android.sensor.rollingShutterSkew"] > 0')
+
+    # Test: timestampSource must be a valid value.
+    check('props.has_key("android.sensor.info.timestampSource")')
+    check('props["android.sensor.info.timestampSource"] is not None')
+    check('props["android.sensor.info.timestampSource"] in [0,1]')
+
+    # Test: croppingType must be a valid value, and for full devices, it
+    # must be FREEFORM=1.
+    check('props.has_key("android.scaler.croppingType")')
+    check('props["android.scaler.croppingType"] is not None')
+    check('props["android.scaler.croppingType"] in [0,1]')
+    if full:
+        check('props["android.scaler.croppingType"] == 1')
+
+    assert(not failed)
+
+def getval(expr, default=None):
+    try:
+        return eval(expr)
+    except:
+        return default
+
+failed = False
+def check(expr):
+    global md, props, failed
+    try:
+        if eval(expr):
+            print "Passed>", expr
+        else:
+            print "Failed>>", expr
+            failed = True
+    except:
+        print "Failed>>", expr
+        failed = True
+
+if __name__ == '__main__':
+    main()
+
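The check() helper keeps the test running past a failing condition so that
one run reports every broken metadata entry rather than stopping at the
first assert. A condensed restatement of the pattern, with a fake metadata
dict standing in for a real capture result:

    failed = False

    def check(expr):
        global failed
        try:
            ok = eval(expr)
        except Exception:
            ok = False
        print ("Passed>" if ok else "Failed>>"), expr
        failed = failed or not ok

    md = {"android.sensor.frameDuration": 33333333}
    check('md["android.sensor.frameDuration"] > 0')           # passes
    check('md["android.sensor.rollingShutterSkew"] > 0')      # fails (absent)
    print "Any failures:", failed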
diff --git a/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
new file mode 100644
index 0000000..a6a5214
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_param_sensitivity_burst.py
@@ -0,0 +1,48 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+
+def main():
+    """Test that the android.sensor.sensitivity parameter is applied properly
+    within a burst. Inspects the output metadata only (not the image data).
+    """
+
+    NUM_STEPS = 3
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.per_frame_control(props))
+
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / NUM_STEPS
+        sens_list = range(sens_range[0], sens_range[1], sens_step)
+        e = min(props['android.sensor.info.exposureTimeRange'])
+        reqs = [its.objects.manual_capture_request(s,e) for s in sens_list]
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+
+        caps = cam.do_capture(reqs, fmt)
+        for i,cap in enumerate(caps):
+            s_req = sens_list[i]
+            s_res = cap["metadata"]["android.sensor.sensitivity"]
+            assert(s_req == s_res)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_sensor_events.py b/apps/CameraITS/tests/scene0/test_sensor_events.py
new file mode 100644
index 0000000..5973de2
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_sensor_events.py
@@ -0,0 +1,42 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import time
+
+def main():
+    """Basic test to query and print out sensor events.
+
+    The test only works if the screen is on (i.e. the device isn't in
+    standby). Passes if at least one event of each type is received.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        # Only run test if the appropriate caps are claimed.
+        its.caps.skip_unless(its.caps.sensor_fusion(props))
+
+        cam.start_sensor_events()
+        time.sleep(1)
+        events = cam.get_sensor_events()
+        print "Events over 1s: %d gyro, %d accel, %d mag"%(
+                len(events["gyro"]), len(events["accel"]), len(events["mag"]))
+        assert(len(events["gyro"]) > 0)
+        assert(len(events["accel"]) > 0)
+        assert(len(events["mag"]) > 0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene0/test_unified_timestamps.py b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
new file mode 100644
index 0000000..019e6c5
--- /dev/null
+++ b/apps/CameraITS/tests/scene0/test_unified_timestamps.py
@@ -0,0 +1,65 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.objects
+import its.caps
+import time
+
+def main():
+    """Test if image and motion sensor events are in the same time domain.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Only run test if the appropriate caps are claimed.
+        its.caps.skip_unless(its.caps.sensor_fusion(props))
+
+        # Get the timestamp of a captured image.
+        req, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        cap = cam.do_capture(req, fmt)
+        ts_image0 = cap['metadata']['android.sensor.timestamp']
+
+        # Get the timestamps of motion events.
+        print "Reading sensor measurements"
+        cam.start_sensor_events()
+        time.sleep(0.5)
+        events = cam.get_sensor_events()
+        assert(len(events["gyro"]) > 0)
+        assert(len(events["accel"]) > 0)
+        assert(len(events["mag"]) > 0)
+        ts_gyro0 = events["gyro"][0]["time"]
+        ts_gyro1 = events["gyro"][-1]["time"]
+        ts_accel0 = events["accel"][0]["time"]
+        ts_accel1 = events["accel"][-1]["time"]
+        ts_mag0 = events["mag"][0]["time"]
+        ts_mag1 = events["mag"][-1]["time"]
+
+        # Get the timestamp of another image.
+        cap = cam.do_capture(req, fmt)
+        ts_image1 = cap['metadata']['android.sensor.timestamp']
+
+        print "Image timestamps:", ts_image0, ts_image1
+        print "Gyro timestamps:", ts_gyro0, ts_gyro1
+        print "Accel timestamps:", ts_accel0, ts_accel1
+        print "Mag timestamps:", ts_mag0, ts_mag1
+
+        # The motion timestamps must be between the two image timestamps.
+        assert ts_image0 < min(ts_gyro0, ts_accel0, ts_mag0) < ts_image1
+        assert ts_image0 < max(ts_gyro1, ts_accel1, ts_mag1) < ts_image1
+
+if __name__ == '__main__':
+    main()
+
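The pass condition relies on every motion event timestamp landing strictly
between the two image timestamps, which can only happen if both sources
report on the same clock. Reduced to its essence with fake nanosecond
values:

    ts_image0, ts_image1 = 1000000000, 9000000000   # fake image timestamps
    event_times = [2500000000, 4000000000, 7300000000]
    assert ts_image0 < min(event_times) and max(event_times) < ts_image1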
diff --git a/apps/CameraITS/tests/scene1/test_3a.py b/apps/CameraITS/tests/scene1/test_3a.py
new file mode 100644
index 0000000..08cd747
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_3a.py
@@ -0,0 +1,40 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+
+def main():
+    """Basic test for bring-up of 3A.
+
+    To pass, 3A must converge. Check that the returned 3A values are legal.
+    """
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.read_3a(props))
+
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        print "AE: sensitivity %d, exposure %dms" % (sens, exp/1000000)
+        print "AWB: gains", gains, "transform", xform
+        print "AF: distance", focus
+        assert(sens > 0)
+        assert(exp > 0)
+        assert(len(gains) == 4)
+        assert(len(xform) == 9)
+        assert(focus >= 0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py b/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py
new file mode 100644
index 0000000..b1f51f3
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_ae_precapture_trigger.py
@@ -0,0 +1,77 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.target
+
+def main():
+    """Test the AE state machine when using the precapture trigger.
+    """
+
+    INACTIVE = 0
+    SEARCHING = 1
+    CONVERGED = 2
+    LOCKED = 3
+    FLASHREQUIRED = 4
+    PRECAPTURE = 5
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+
+        # Capture 5 manual requests with AE disabled; the last request has
+        # an AE precapture trigger (which should be ignored since AE is
+        # disabled).
+        manual_reqs = []
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        manual_req = its.objects.manual_capture_request(s,e)
+        manual_req['android.control.aeMode'] = 0 # Off
+        manual_reqs += [manual_req]*4
+        precap_req = its.objects.manual_capture_request(s,e)
+        precap_req['android.control.aeMode'] = 0 # Off
+        precap_req['android.control.aePrecaptureTrigger'] = 1 # Start
+        manual_reqs.append(precap_req)
+        caps = cam.do_capture(manual_reqs, fmt)
+        for cap in caps:
+            assert(cap['metadata']['android.control.aeState'] == INACTIVE)
+
+        # Capture an auto request and verify the AE state; no trigger.
+        auto_req = its.objects.auto_capture_request()
+        auto_req['android.control.aeMode'] = 1  # On
+        cap = cam.do_capture(auto_req, fmt)
+        state = cap['metadata']['android.control.aeState']
+        print "AE state after auto request:", state
+        assert(state in [SEARCHING, CONVERGED])
+
+        # Capture with auto request with a precapture trigger.
+        auto_req['android.control.aePrecaptureTrigger'] = 1  # Start
+        cap = cam.do_capture(auto_req, fmt)
+        state = cap['metadata']['android.control.aeState']
+        print "AE state after auto request with precapture trigger:", state
+        assert(state in [SEARCHING, CONVERGED, PRECAPTURE])
+
+        # Capture some more auto requests, and AE should converge.
+        auto_req['android.control.aePrecaptureTrigger'] = 0
+        caps = cam.do_capture([auto_req]*5, fmt)
+        state = caps[-1]['metadata']['android.control.aeState']
+        print "AE state after auto request:", state
+        assert(state == CONVERGED)
+
+if __name__ == '__main__':
+    main()
diff --git a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
new file mode 100644
index 0000000..a9efa0b
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
@@ -0,0 +1,94 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import math
+
+def main():
+    """Capture auto and manual shots that should look the same.
+
+    Manual shots are taken with just manual WB, and also with WB+tonemap.
+
+    In all cases, the general color/look of the shots should be the same;
+    however, there can be variations in brightness/contrast due to "auto"
+    ISP blocks that may be disabled in the manual flows.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.manual_post_proc(props) and
+                             its.caps.per_frame_control(props))
+
+        # Converge 3A and get the estimates.
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        xform_rat = its.objects.float_to_rational(xform)
+        print "AE sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
+        print "AWB gains", gains
+        print "AWB transform", xform
+        print "AF distance", focus
+
+        # Auto capture.
+        req = its.objects.auto_capture_request()
+        cap_auto = cam.do_capture(req)
+        img_auto = its.image.convert_capture_to_rgb_image(cap_auto)
+        its.image.write_image(img_auto, "%s_auto.jpg" % (NAME))
+        xform_a = its.objects.rational_to_float(
+                cap_auto["metadata"]["android.colorCorrection.transform"])
+        gains_a = cap_auto["metadata"]["android.colorCorrection.gains"]
+        print "Auto gains:", gains_a
+        print "Auto transform:", xform_a
+
+        # Manual capture 1: WB
+        req = its.objects.manual_capture_request(sens, exp)
+        req["android.colorCorrection.transform"] = xform_rat
+        req["android.colorCorrection.gains"] = gains
+        cap_man1 = cam.do_capture(req)
+        img_man1 = its.image.convert_capture_to_rgb_image(cap_man1)
+        its.image.write_image(img_man1, "%s_manual_wb.jpg" % (NAME))
+        xform_m1 = its.objects.rational_to_float(
+                cap_man1["metadata"]["android.colorCorrection.transform"])
+        gains_m1 = cap_man1["metadata"]["android.colorCorrection.gains"]
+        print "Manual wb gains:", gains_m1
+        print "Manual wb transform:", xform_m1
+
+        # Manual capture 2: WB + tonemap
+        gamma = sum([[i/63.0,math.pow(i/63.0,1/2.2)] for i in xrange(64)],[])
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = gamma
+        req["android.tonemap.curveGreen"] = gamma
+        req["android.tonemap.curveBlue"] = gamma
+        cap_man2 = cam.do_capture(req)
+        img_man2 = its.image.convert_capture_to_rgb_image(cap_man2)
+        its.image.write_image(img_man2, "%s_manual_wb_tm.jpg" % (NAME))
+        xform_m2 = its.objects.rational_to_float(
+                cap_man2["metadata"]["android.colorCorrection.transform"])
+        gains_m2 = cap_man2["metadata"]["android.colorCorrection.gains"]
+        print "Manual wb+tm gains:", gains_m2
+        print "Manual wb+tm transform:", xform_m2
+
+        # Check that the WB gains and transform reported in each capture
+        # result match with the original AWB estimate from do_3a.
+        for g,x in [(gains_a,xform_a),(gains_m1,xform_m1),(gains_m2,xform_m2)]:
+            assert(all([abs(xform[i] - x[i]) < 0.05 for i in range(9)]))
+            assert(all([abs(gains[i] - g[i]) < 0.05 for i in range(4)]))
+
+if __name__ == '__main__':
+    main()
+
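The tonemap curves passed above are flattened (input, output) pairs. A
standalone construction of the same 64-point gamma 1/2.2 curve used in the
manual WB+tonemap shot:

    import math

    gamma = sum([[i / 63.0, math.pow(i / 63.0, 1 / 2.2)]
                 for i in xrange(64)], [])
    print len(gamma)    # 128 values, i.e. 64 (input, output) pairs
    print gamma[:2]     # first pair is (0.0, 0.0)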
diff --git a/apps/CameraITS/tests/scene1/test_black_white.py b/apps/CameraITS/tests/scene1/test_black_white.py
new file mode 100644
index 0000000..68d7de6
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_black_white.py
@@ -0,0 +1,85 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the device will produce full black+white images.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.per_frame_control(props))
+
+        expt_range = props['android.sensor.info.exposureTimeRange']
+        sens_range = props['android.sensor.info.sensitivityRange']
+
+        # Take a shot with very low ISO and exposure time. Expect it to
+        # be black.
+        print "Black shot: sens = %d, exp time = %.4fms" % (
+                sens_range[0], expt_range[0]/1000000.0)
+        req = its.objects.manual_capture_request(sens_range[0], expt_range[0])
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_black.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        black_means = its.image.compute_image_means(tile)
+        r_means.append(black_means[0])
+        g_means.append(black_means[1])
+        b_means.append(black_means[2])
+        print "Dark pixel means:", black_means
+
+        # Take a shot with very high ISO and exposure time. Expect it to
+        # be white.
+        print "White shot: sens = %d, exp time = %.2fms" % (
+                sens_range[1], expt_range[1]/1000000.0)
+        req = its.objects.manual_capture_request(sens_range[1], expt_range[1])
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_white.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        white_means = its.image.compute_image_means(tile)
+        r_means.append(white_means[0])
+        g_means.append(white_means[1])
+        b_means.append(white_means[2])
+        print "Bright pixel means:", white_means
+
+        # Draw a plot.
+        pylab.plot([0,1], r_means, 'r')
+        pylab.plot([0,1], g_means, 'g')
+        pylab.plot([0,1], b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        for val in black_means:
+            assert(val < 0.025)
+        for val in white_means:
+            assert(val > 0.975)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
new file mode 100644
index 0000000..edb8995
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_burst_sameness_manual.py
@@ -0,0 +1,85 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import numpy
+
+def main():
+    """Take long bursts of images and check that they're all identical.
+
+    Assumes a static scene. Can be used to identify whether there are sporadic
+    frames that are processed differently or have artifacts. Uses manual
+    capture settings.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 50
+    BURSTS = 5
+    FRAMES = BURST_LEN * BURSTS
+
+    SPREAD_THRESH = 0.03
+
+    with its.device.ItsSession() as cam:
+
+        # Capture at the smallest resolution.
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.per_frame_control(props))
+
+        _, fmt = its.objects.get_fastest_manual_capture_settings(props)
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        req = its.objects.manual_capture_request(s, e)
+        w,h = fmt["width"], fmt["height"]
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        # Also build a 4D array, which is an array of all RGB images.
+        r_means = []
+        g_means = []
+        b_means = []
+        imgs = numpy.empty([FRAMES,h,w,3])
+        for j in range(BURSTS):
+            caps = cam.do_capture([req]*BURST_LEN, [fmt])
+            for i,cap in enumerate(caps):
+                n = j*BURST_LEN + i
+                imgs[n] = its.image.convert_capture_to_rgb_image(cap)
+                tile = its.image.get_image_patch(imgs[n], 0.45, 0.45, 0.1, 0.1)
+                means = its.image.compute_image_means(tile)
+                r_means.append(means[0])
+                g_means.append(means[1])
+                b_means.append(means[2])
+
+        # Dump all images.
+        print "Dumping images"
+        for i in range(FRAMES):
+            its.image.write_image(imgs[i], "%s_frame%03d.jpg"%(NAME,i))
+
+        # The mean image.
+        img_mean = imgs.mean(0)
+        its.image.write_image(img_mean, "%s_mean.jpg"%(NAME))
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print "Spread:", spread
+            assert(spread < SPREAD_THRESH)
+
+if __name__ == '__main__':
+    main()
+
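The sameness criterion here is simply the spread of the per-frame
center-patch means in each color channel. In isolation (values invented):

    SPREAD_THRESH = 0.03
    r_means = [0.502, 0.498, 0.501, 0.499]  # fake per-frame patch means
    spread = max(r_means) - min(r_means)
    print "Spread:", spread
    assert spread < SPREAD_THRESH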
diff --git a/apps/CameraITS/tests/scene1/test_capture_result.py b/apps/CameraITS/tests/scene1/test_capture_result.py
new file mode 100644
index 0000000..331d1cd
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_capture_result.py
@@ -0,0 +1,213 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+import matplotlib.pyplot
+
+# Required for 3d plot to work
+import mpl_toolkits.mplot3d
+
+def main():
+    """Test that valid data comes back in CaptureResult objects.
+    """
+    global NAME, auto_req, manual_req, w_map, h_map
+    global manual_tonemap, manual_transform, manual_gains, manual_region
+    global manual_exp_time, manual_sensitivity, manual_gains_ok
+
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.manual_post_proc(props) and
+                             its.caps.per_frame_control(props))
+
+        manual_tonemap = [0,0, 1,1] # Linear
+        manual_transform = its.objects.int_to_rational([1,2,3, 4,5,6, 7,8,9])
+        manual_gains = [1,2,3,4]
+        manual_region = [{"x":8,"y":8,"width":128,"height":128,"weight":1}]
+        manual_exp_time = min(props['android.sensor.info.exposureTimeRange'])
+        manual_sensitivity = min(props['android.sensor.info.sensitivityRange'])
+
+        # The camera HAL may not support different gains for the two G
+        # channels.
+        manual_gains_ok = [[1,2,3,4],[1,2,2,4],[1,3,3,4]]
+
+        auto_req = its.objects.auto_capture_request()
+        auto_req["android.statistics.lensShadingMapMode"] = 1
+
+        manual_req = {
+            "android.control.mode": 0,
+            "android.control.aeMode": 0,
+            "android.control.awbMode": 0,
+            "android.control.afMode": 0,
+            "android.sensor.frameDuration": 0,
+            "android.sensor.sensitivity": manual_sensitivity,
+            "android.sensor.exposureTime": manual_exp_time,
+            "android.colorCorrection.mode": 0,
+            "android.colorCorrection.transform": manual_transform,
+            "android.colorCorrection.gains": manual_gains,
+            "android.tonemap.mode": 0,
+            "android.tonemap.curveRed": manual_tonemap,
+            "android.tonemap.curveGreen": manual_tonemap,
+            "android.tonemap.curveBlue": manual_tonemap,
+            "android.control.aeRegions": manual_region,
+            "android.control.afRegions": manual_region,
+            "android.control.awbRegions": manual_region,
+            "android.statistics.lensShadingMapMode":1
+            }
+
+        w_map = props["android.lens.info.shadingMapSize"]["width"]
+        h_map = props["android.lens.info.shadingMapSize"]["height"]
+
+        print "Testing auto capture results"
+        lsc_map_auto = test_auto(cam, w_map, h_map)
+        print "Testing manual capture results"
+        test_manual(cam, w_map, h_map, lsc_map_auto)
+        print "Testing auto capture results again"
+        test_auto(cam, w_map, h_map)
+
+# A very loose definition for two floats being close to each other;
+# there may be different interpolation and rounding used to get the
+# two values, and all this test is looking at is whether there is
+# something obviously broken; it's not looking for a perfect match.
+def is_close_float(n1, n2):
+    return abs(n1 - n2) < 0.05
+
+def is_close_rational(n1, n2):
+    return is_close_float(its.objects.rational_to_float(n1),
+                          its.objects.rational_to_float(n2))
+
+def draw_lsc_plot(w_map, h_map, lsc_map, name):
+    for ch in range(4):
+        fig = matplotlib.pyplot.figure()
+        ax = fig.gca(projection='3d')
+        xs = numpy.array([range(w_map)] * h_map).reshape(h_map, w_map)
+        ys = numpy.array([[i]*w_map for i in range(h_map)]).reshape(
+                h_map, w_map)
+        zs = numpy.array(lsc_map[ch::4]).reshape(h_map, w_map)
+        ax.plot_wireframe(xs, ys, zs)
+        matplotlib.pyplot.savefig("%s_plot_lsc_%s_ch%d.png"%(NAME,name,ch))
+
+def test_auto(cam, w_map, h_map):
+    # Get 3A lock first, so the auto values in the capture result are
+    # populated properly.
+    rect = [[0,0,1,1,1]]
+    cam.do_3a(rect, rect, rect, do_af=False)
+
+    cap = cam.do_capture(auto_req)
+    cap_res = cap["metadata"]
+
+    gains = cap_res["android.colorCorrection.gains"]
+    transform = cap_res["android.colorCorrection.transform"]
+    exp_time = cap_res['android.sensor.exposureTime']
+    lsc_map = cap_res["android.statistics.lensShadingMap"]
+    ctrl_mode = cap_res["android.control.mode"]
+
+    print "Control mode:", ctrl_mode
+    print "Gains:", gains
+    print "Transform:", [its.objects.rational_to_float(t)
+                         for t in transform]
+    print "AE region:", cap_res['android.control.aeRegions']
+    print "AF region:", cap_res['android.control.afRegions']
+    print "AWB region:", cap_res['android.control.awbRegions']
+    print "LSC map:", w_map, h_map, lsc_map[:8]
+
+    assert(ctrl_mode == 1)
+
+    # Color correction gain and transform must be valid.
+    assert(len(gains) == 4)
+    assert(len(transform) == 9)
+    assert(all([g > 0 for g in gains]))
+    assert(all([t["denominator"] != 0 for t in transform]))
+
+    # Color correction should not match the manual settings.
+    assert(any([not is_close_float(gains[i], manual_gains[i])
+                for i in xrange(4)]))
+    assert(any([not is_close_rational(transform[i], manual_transform[i])
+                for i in xrange(9)]))
+
+    # Exposure time must be valid.
+    assert(exp_time > 0)
+
+    # Lens shading map must be valid.
+    assert(w_map > 0 and h_map > 0 and w_map * h_map * 4 == len(lsc_map))
+    assert(all([m >= 1 for m in lsc_map]))
+
+    draw_lsc_plot(w_map, h_map, lsc_map, "auto")
+
+    return lsc_map
+
+def test_manual(cam, w_map, h_map, lsc_map_auto):
+    cap = cam.do_capture(manual_req)
+    cap_res = cap["metadata"]
+
+    gains = cap_res["android.colorCorrection.gains"]
+    transform = cap_res["android.colorCorrection.transform"]
+    curves = [cap_res["android.tonemap.curveRed"],
+              cap_res["android.tonemap.curveGreen"],
+              cap_res["android.tonemap.curveBlue"]]
+    exp_time = cap_res['android.sensor.exposureTime']
+    lsc_map = cap_res["android.statistics.lensShadingMap"]
+    ctrl_mode = cap_res["android.control.mode"]
+
+    print "Control mode:", ctrl_mode
+    print "Gains:", gains
+    print "Transform:", [its.objects.rational_to_float(t)
+                         for t in transform]
+    print "Tonemap:", curves[0][1::16]
+    print "AE region:", cap_res['android.control.aeRegions']
+    print "AF region:", cap_res['android.control.afRegions']
+    print "AWB region:", cap_res['android.control.awbRegions']
+    print "LSC map:", w_map, h_map, lsc_map[:8]
+
+    assert(ctrl_mode == 0)
+
+    # Color correction gain and transform must be valid.
+    # Color correction gains and transform should be the same size and
+    # values as the manually set values.
+    assert(len(gains) == 4)
+    assert(len(transform) == 9)
+    assert( all([is_close_float(gains[i], manual_gains_ok[0][i])
+                 for i in xrange(4)]) or
+            all([is_close_float(gains[i], manual_gains_ok[1][i])
+                 for i in xrange(4)]) or
+            all([is_close_float(gains[i], manual_gains_ok[2][i])
+                 for i in xrange(4)]))
+    assert(all([is_close_rational(transform[i], manual_transform[i])
+                for i in xrange(9)]))
+
+    # Tonemap must be valid.
+    # The returned tonemap must be linear.
+    for c in curves:
+        assert(len(c) > 0)
+        assert(all([is_close_float(c[i], c[i+1])
+                    for i in xrange(0,len(c),2)]))
+
+    # Exposure time must be close to the requested exposure time.
+    assert(is_close_float(exp_time/1000000.0, manual_exp_time/1000000.0))
+
+    # Lens shading map must be valid.
+    assert(w_map > 0 and h_map > 0 and w_map * h_map * 4 == len(lsc_map))
+    assert(all([m >= 1 for m in lsc_map]))
+
+    draw_lsc_plot(w_map, h_map, lsc_map, "manual")
+
+if __name__ == '__main__':
+    main()
+
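The lens shading map arrives as a flat array of w_map*h_map*4 gain samples
with the four color channels interleaved, which is why draw_lsc_plot slices
with lsc_map[ch::4] before reshaping to the map grid. A shape-only sketch
(dimensions invented):

    import numpy

    w_map, h_map = 4, 3
    lsc_map = [1.0] * (w_map * h_map * 4)        # flat interleaved gains
    ch0 = numpy.array(lsc_map[0::4]).reshape(h_map, w_map)
    print ch0.shape                              # (3, 4)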
diff --git a/apps/CameraITS/tests/scene1/test_crop_region_raw.py b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
new file mode 100644
index 0000000..9fc52cb
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_crop_region_raw.py
@@ -0,0 +1,153 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import numpy
+import os.path
+
+
+def check_crop_region(expected, reported, active, err_threshold):
+    """Check if the reported region is within the tolerance.
+
+    Args:
+        expected: expected crop region
+        reported: reported crop region
+        active: active array size
+        err_threshold: allowed error, as a fraction of the active array
+            width/height
+    """
+
+    ex = (active["right"] - active["left"]) * err_threshold
+    ey = (active["bottom"] - active["top"]) * err_threshold
+
+    assert ((abs(expected["left"] - reported["left"]) <= ex) and
+            (abs(expected["right"] - reported["right"]) <= ex) and
+            (abs(expected["top"] - reported["top"]) <= ey) and
+            (abs(expected["bottom"] - reported["bottom"]) <= ey))
+
+def main():
+    """Test that raw streams are not croppable.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    DIFF_THRESH = 0.05
+    CROP_REGION_ERROR_THRESHOLD = 0.01
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.raw16(props) and
+                             its.caps.per_frame_control(props))
+
+        a = props['android.sensor.info.activeArraySize']
+        ax, ay = a["left"], a["top"]
+        aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+        print "Active sensor region: (%d,%d %dx%d)" % (ax, ay, aw, ah)
+
+        full_region = {
+            "left": 0,
+            "top": 0,
+            "right": aw,
+            "bottom": ah
+        }
+
+        # Capture without a crop region.
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        req = its.objects.manual_capture_request(s, e, True)
+        cap1_raw, cap1_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        # Calculate a center crop region.
+        zoom = min(3.0, its.objects.get_max_digital_zoom(props))
+        assert(zoom >= 1)
+        cropw = aw / zoom
+        croph = ah / zoom
+
+        req["android.scaler.cropRegion"] = {
+            "left": aw / 2 - cropw / 2,
+            "top": ah / 2 - croph / 2,
+            "right": aw / 2 + cropw / 2,
+            "bottom": ah / 2 + croph / 2
+        }
+
+        # When both YUV and RAW are requested, the crop region that is
+        # applied to the YUV stream should be reported.
+        crop_region = req["android.scaler.cropRegion"]
+        if crop_region == full_region:
+            crop_region_err_thresh = 0.0
+        else:
+            crop_region_err_thresh = CROP_REGION_ERROR_THRESHOLD
+
+        cap2_raw, cap2_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        imgs = {}
+        for s, cap, cr, err_delta in [
+                ("yuv_full", cap1_yuv, full_region, 0),
+                ("raw_full", cap1_raw, full_region, 0),
+                ("yuv_crop", cap2_yuv, crop_region, crop_region_err_thresh),
+                ("raw_crop", cap2_raw, crop_region, crop_region_err_thresh)]:
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_%s.jpg" % (NAME, s))
+            r = cap["metadata"]["android.scaler.cropRegion"]
+            x, y = r["left"], r["top"]
+            w, h = r["right"] - r["left"], r["bottom"] - r["top"]
+            imgs[s] = img
+            print "Crop on %s: (%d,%d %dx%d)" % (s, x, y, w, h)
+            check_crop_region(cr, r, a, err_delta)
+
+        # Also check the image content; 3 of the 4 shots should match.
+        # Note that all the shots are RGB below; the variable names correspond
+        # to what was captured.
+        # Average the images down 4x4 -> 1 prior to comparison to smooth out
+        # noise.
+        # Shrink the YUV images an additional 2x2 -> 1 to account for the size
+        # reduction that the raw images went through in the RGB conversion.
+        imgs2 = {}
+        for s,img in imgs.iteritems():
+            h,w,ch = img.shape
+            m = 4
+            if s in ["yuv_full", "yuv_crop"]:
+                m = 8
+            img = img.reshape(h/m,m,w/m,m,3).mean(3).mean(1).reshape(h/m,w/m,3)
+            imgs2[s] = img
+            print s, img.shape
+
+        # Strip any border pixels from the raw shots (since the raw images may
+        # be larger than the YUV images). Assume a symmetric padded border.
+        xpad = (imgs2["raw_full"].shape[1] - imgs2["yuv_full"].shape[1]) / 2
+        ypad = (imgs2["raw_full"].shape[0] - imgs2["yuv_full"].shape[0]) / 2
+        wyuv = imgs2["yuv_full"].shape[1]
+        hyuv = imgs2["yuv_full"].shape[0]
+        imgs2["raw_full"]=imgs2["raw_full"][ypad:ypad+hyuv:,xpad:xpad+wyuv:,::]
+        imgs2["raw_crop"]=imgs2["raw_crop"][ypad:ypad+hyuv:,xpad:xpad+wyuv:,::]
+        print "Stripping padding before comparison:", xpad, ypad
+
+        for s,img in imgs2.iteritems():
+            its.image.write_image(img, "%s_comp_%s.jpg" % (NAME, s))
+
+        # Compute image diffs.
+        diff_yuv = numpy.fabs((imgs2["yuv_full"] - imgs2["yuv_crop"])).mean()
+        diff_raw = numpy.fabs((imgs2["raw_full"] - imgs2["raw_crop"])).mean()
+        print "YUV diff (crop vs. non-crop):", diff_yuv
+        print "RAW diff (crop vs. non-crop):", diff_raw
+
+        assert(diff_yuv > DIFF_THRESH)
+        assert(diff_raw < DIFF_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_crop_regions.py b/apps/CameraITS/tests/scene1/test_crop_regions.py
new file mode 100644
index 0000000..6d3dad1
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_crop_regions.py
@@ -0,0 +1,106 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import numpy
+
+def main():
+    """Test that crop regions work.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # A list of 5 regions, specified in normalized (x,y,w,h) coords.
+    # The regions correspond to: top-left, top-right, bottom-left,
+    # bottom-right, and center.
+    REGIONS = [(0.0, 0.0, 0.5, 0.5),
+               (0.5, 0.0, 0.5, 0.5),
+               (0.0, 0.5, 0.5, 0.5),
+               (0.5, 0.5, 0.5, 0.5),
+               (0.25, 0.25, 0.5, 0.5)]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.freeform_crop(props) and
+                             its.caps.per_frame_control(props))
+
+        a = props['android.sensor.info.activeArraySize']
+        ax, ay = a["left"], a["top"]
+        aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        print "Active sensor region (%d,%d %dx%d)" % (ax, ay, aw, ah)
+
+        # The test uses 2x digital zoom, so the device must support it.
+        assert(its.objects.get_max_digital_zoom(props) >= 2)
+
+        # Capture a full frame.
+        req = its.objects.manual_capture_request(s,e)
+        cap_full = cam.do_capture(req)
+        img_full = its.image.convert_capture_to_rgb_image(cap_full)
+        its.image.write_image(img_full, "%s_full.jpg" % (NAME))
+        wfull, hfull = cap_full["width"], cap_full["height"]
+
+        # Capture a burst of crop region frames.
+        # Note that each region is 1/2x1/2 of the full frame, and is digitally
+        # zoomed into the full size output image, so must be downscaled (below)
+        # by 2x when compared to a tile of the full image.
+        reqs = []
+        for x,y,w,h in REGIONS:
+            req = its.objects.manual_capture_request(s,e)
+            req["android.scaler.cropRegion"] = {
+                    "top": int(ah * y),
+                    "left": int(aw * x),
+                    "right": int(aw * (x + w)),
+                    "bottom": int(ah * (y + h))}
+            reqs.append(req)
+        caps_regions = cam.do_capture(reqs)
+        match_failed = False
+        for i,cap in enumerate(caps_regions):
+            a = cap["metadata"]["android.scaler.cropRegion"]
+            ax, ay = a["left"], a["top"]
+            aw, ah = a["right"] - a["left"], a["bottom"] - a["top"]
+
+            # Match this crop image against each of the five regions of
+            # the full image, to find the best match (which should be
+            # the region that corresponds to this crop image).
+            img_crop = its.image.convert_capture_to_rgb_image(cap)
+            img_crop = its.image.downscale_image(img_crop, 2)
+            its.image.write_image(img_crop, "%s_crop%d.jpg" % (NAME, i))
+            min_diff = None
+            min_diff_region = None
+            for j,(x,y,w,h) in enumerate(REGIONS):
+                tile_full = its.image.get_image_patch(img_full, x,y,w,h)
+                wtest = min(tile_full.shape[1], aw)
+                htest = min(tile_full.shape[0], ah)
+                tile_full = tile_full[0:htest:, 0:wtest:, ::]
+                tile_crop = img_crop[0:htest:, 0:wtest:, ::]
+                its.image.write_image(tile_full, "%s_fullregion%d.jpg"%(NAME,j))
+                diff = numpy.fabs(tile_full - tile_crop).mean()
+                if min_diff is None or diff < min_diff:
+                    min_diff = diff
+                    min_diff_region = j
+            if i != min_diff_region:
+                match_failed = True
+            print "Crop image %d (%d,%d %dx%d) best match with region %d"%(
+                    i, ax, ay, aw, ah, min_diff_region)
+
+        assert(not match_failed)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_exposure.py b/apps/CameraITS/tests/scene1/test_exposure.py
new file mode 100644
index 0000000..c55e7ad
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_exposure.py
@@ -0,0 +1,91 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import numpy
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that a constant exposure is seen as ISO and exposure time vary.
+
+    Take a series of shots that have ISO and exposure time chosen to balance
+    each other; the result should be the same brightness, but over the
+    sequence the images should get noisier.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_OUTLIER_DIFF = 0.1
+    THRESHOLD_MIN_LEVEL = 0.1
+    THRESHOLD_MAX_LEVEL = 0.9
+    THRESHOLD_MAX_ABS_GRAD = 0.001
+
+    mults = []
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        e,s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        expt_range = props['android.sensor.info.exposureTimeRange']
+        sens_range = props['android.sensor.info.sensitivityRange']
+
+        m = 1
+        while s*m < sens_range[1] and e/m > expt_range[0]:
+            mults.append(m)
+            req = its.objects.manual_capture_request(s*m, e/m)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mult=%02d.jpg" % (NAME, m))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+            m = m + 4
+
+    # Draw a plot.
+    pylab.plot(mults, r_means, 'r')
+    pylab.plot(mults, g_means, 'g')
+    pylab.plot(mults, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Check for linearity. For each R,G,B channel, fit a line y=mx+b, and
+    # assert that the gradient is close to 0 (flat) and that there are no
+    # large outliers. Also ensure that the images aren't clamped to 0 or 1
+    # (which would make them look like flat lines).
+    for chan in xrange(3):
+        values = [r_means, g_means, b_means][chan]
+        m, b = numpy.polyfit(mults, values, 1).tolist()
+        print "Channel %d line fit (y = mx+b): m = %f, b = %f" % (chan, m, b)
+        assert(abs(m) < THRESHOLD_MAX_ABS_GRAD)
+        assert(b > THRESHOLD_MIN_LEVEL and b < THRESHOLD_MAX_LEVEL)
+        for v in values:
+            assert(v > THRESHOLD_MIN_LEVEL and v < THRESHOLD_MAX_LEVEL)
+            assert(abs(v - b) < THRESHOLD_MAX_OUTLIER_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_format_combos.py b/apps/CameraITS/tests/scene1/test_format_combos.py
new file mode 100644
index 0000000..1b40826
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_format_combos.py
@@ -0,0 +1,124 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.error
+import its.target
+import sys
+import os
+import os.path
+
+# Change this to True to have the test break at the first failure.
+stop_at_first_failure = False
+
+def main():
+    """Test different combinations of output formats.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.raw16(props))
+
+        successes = []
+        failures = []
+
+        # Two different requests: auto, and manual.
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req_aut = its.objects.auto_capture_request()
+        req_man = its.objects.manual_capture_request(s, e)
+        reqs = [req_aut, # R0
+                req_man] # R1
+
+        # 10 different combos of output formats; some are single surfaces, and
+        # some are multiple surfaces.
+        wyuv,hyuv = its.objects.get_available_output_sizes("yuv", props)[-1]
+        wjpg,hjpg = its.objects.get_available_output_sizes("jpg", props)[-1]
+        fmt_yuv_prev = {"format":"yuv", "width":wyuv, "height":hyuv}
+        fmt_yuv_full = {"format":"yuv"}
+        fmt_jpg_prev = {"format":"jpeg","width":wjpg, "height":hjpg}
+        fmt_jpg_full = {"format":"jpeg"}
+        fmt_raw_full = {"format":"raw"}
+        fmt_combos =[
+                [fmt_yuv_prev],                             # F0
+                [fmt_yuv_full],                             # F1
+                [fmt_jpg_prev],                             # F2
+                [fmt_jpg_full],                             # F3
+                [fmt_raw_full],                             # F4
+                [fmt_yuv_prev, fmt_jpg_prev],               # F5
+                [fmt_yuv_prev, fmt_jpg_full],               # F6
+                [fmt_yuv_prev, fmt_raw_full],               # F7
+                [fmt_yuv_prev, fmt_jpg_prev, fmt_raw_full], # F8
+                [fmt_yuv_prev, fmt_jpg_full, fmt_raw_full]] # F9
+
+        # Two different burst lengths: single frame, and 3 frames.
+        burst_lens = [1, # B0
+                      3] # B1
+
+        # There are 2x10x2=40 different combinations. Run through them all.
+        n = 0
+        for r,req in enumerate(reqs):
+            for f,fmt_combo in enumerate(fmt_combos):
+                for b,burst_len in enumerate(burst_lens):
+                    try:
+                        caps = cam.do_capture([req]*burst_len, fmt_combo)
+                        successes.append((n,r,f,b))
+                        print "==> Success[%02d]: R%d F%d B%d" % (n,r,f,b)
+
+                        # Dump the captures out to jpegs.
+                        if not isinstance(caps, list):
+                            caps = [caps]
+                        elif isinstance(caps[0], list):
+                            caps = sum(caps, [])
+                        for c,cap in enumerate(caps):
+                            img = its.image.convert_capture_to_rgb_image(cap,
+                                    props=props)
+                            its.image.write_image(img,
+                                    "%s_n%02d_r%d_f%d_b%d_c%d.jpg"%(NAME,n,r,f,b,c))
+
+                    except Exception as e:
+                        print e
+                        print "==> Failure[%02d]: R%d F%d B%d" % (n,r,f,b)
+                        failures.append((n,r,f,b))
+                        if stop_at_first_failure:
+                            sys.exit(0)
+                    n += 1
+
+        num_fail = len(failures)
+        num_success = len(successes)
+        num_total = len(reqs)*len(fmt_combos)*len(burst_lens)
+        num_not_run = num_total - num_success - num_fail
+
+        print "\nFailures (%d / %d):" % (num_fail, num_total)
+        for (n,r,f,b) in failures:
+            print "  %02d: R%d F%d B%d" % (n,r,f,b)
+        print "\nSuccesses (%d / %d):" % (num_success, num_total)
+        for (n,r,f,b) in successes:
+            print "  %02d: R%d F%d B%d" % (n,r,f,b)
+        if num_not_run > 0:
+            print "\nNumber of tests not run: %d / %d" % (num_not_run, num_total)
+        print ""
+
+        # The test passes if all the combinations successfully capture.
+        assert(num_fail == 0)
+        assert(num_success == num_total)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_jpeg.py b/apps/CameraITS/tests/scene1/test_jpeg.py
new file mode 100644
index 0000000..25c2038
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_jpeg.py
@@ -0,0 +1,63 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test that converted YUV images and device JPEG images look the same.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.01
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        # YUV
+        size = its.objects.get_available_output_sizes("yuv", props)[0]
+        out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(img, "%s_fmt=yuv.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # JPEG
+        size = its.objects.get_available_output_sizes("jpg", props)[0]
+        out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+        its.image.write_image(img, "%s_fmt=jpg.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_latching.py b/apps/CameraITS/tests/scene1/test_latching.py
new file mode 100644
index 0000000..3bc4356
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_latching.py
@@ -0,0 +1,90 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that settings latch on the right frame.
+
+    Takes a bunch of shots using back-to-back requests, varying the capture
+    request parameters between shots. Checks that the images that come back
+    have the expected properties.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.full(props) and
+                             its.caps.per_frame_control(props))
+
+        _,fmt = its.objects.get_fastest_manual_capture_settings(props)
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 2.0
+
+        r_means = []
+        g_means = []
+        b_means = []
+
+        reqs = [
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s*2,e,   True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e,   True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            its.objects.manual_capture_request(s,  e*2, True),
+            ]
+
+        caps = cam.do_capture(reqs, fmt)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+        # Draw a plot.
+        idxs = range(len(r_means))
+        pylab.plot(idxs, r_means, 'r')
+        pylab.plot(idxs, g_means, 'g')
+        pylab.plot(idxs, b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        g_avg = sum(g_means) / len(g_means)
+        g_ratios = [g / g_avg for g in g_means]
+        g_hilo = [g>1.0 for g in g_ratios]
+        assert(g_hilo == [False, False, True, True, False, False, True,
+                          False, True, False, True, False, True, True])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_linearity.py b/apps/CameraITS/tests/scene1/test_linearity.py
new file mode 100644
index 0000000..a9063a9
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_linearity.py
@@ -0,0 +1,98 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import numpy
+import math
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that device processing can be inverted to linear pixels.
+
+    Captures a sequence of shots with the device pointed at a uniform
+    target. Attempts to invert all the ISP processing to get back to
+    linear R,G,B pixel data.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    RESIDUAL_THRESHOLD = 0.00005
+
+    # The HAL3.2 spec requires that curves of up to 64 control points be
+    # supported.
+    L = 64
+    LM1 = float(L-1)
+
+    gamma_lut = numpy.array(
+            sum([[i/LM1, math.pow(i/LM1, 1/2.2)] for i in xrange(L)], []))
+    inv_gamma_lut = numpy.array(
+            sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        s /= 2
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sensitivities = [s*1.0/3.0, s*2.0/3.0, s, s*4.0/3.0, s*5.0/3.0]
+        sensitivities = [s for s in sensitivities
+                         if s > sens_range[0] and s < sens_range[1]]
+
+        req = its.objects.manual_capture_request(0, e)
+        req["android.blackLevel.lock"] = True
+        req["android.tonemap.mode"] = 0
+        req["android.tonemap.curveRed"] = gamma_lut.tolist()
+        req["android.tonemap.curveGreen"] = gamma_lut.tolist()
+        req["android.tonemap.curveBlue"] = gamma_lut.tolist()
+
+        r_means = []
+        g_means = []
+        b_means = []
+
+        for sens in sensitivities:
+            req["android.sensor.sensitivity"] = sens
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_sens=%04d.jpg" % (NAME, sens))
+            img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+        pylab.plot(sensitivities, r_means, 'r')
+        pylab.plot(sensitivities, g_means, 'g')
+        pylab.plot(sensitivities, b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        # Check that each plot is actually linear.
+        for means in [r_means, g_means, b_means]:
+            line,residuals,_,_,_  = numpy.polyfit(range(5),means,1,full=True)
+            print "Line: m=%f, b=%f, resid=%f"%(line[0], line[1], residuals[0])
+            assert(residuals[0] < RESIDUAL_THRESHOLD)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_locked_burst.py b/apps/CameraITS/tests/scene1/test_locked_burst.py
new file mode 100644
index 0000000..5cea30c
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_locked_burst.py
@@ -0,0 +1,92 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.device
+import its.objects
+import os.path
+import numpy
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test 3A lock + YUV burst (using auto settings).
+
+    This is a test that is designed to pass even on limited devices that
+    don't have MANUAL_SENSOR or PER_FRAME_CONTROLS. (They must be able to
+    capture bursts with full res @ full frame rate to pass, however).
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    BURST_LEN = 8
+    SPREAD_THRESH = 0.005
+    FPS_MAX_DIFF = 2.0
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+
+        # Converge 3A prior to capture.
+        cam.do_3a(do_af=True, lock_ae=True, lock_awb=True)
+
+        # After 3A has converged, lock AE+AWB for the duration of the test.
+        req = its.objects.auto_capture_request()
+        req["android.control.awbLock"] = True
+        req["android.control.aeLock"] = True
+
+        # Capture bursts of YUV shots.
+        # Get the mean values of a center patch for each.
+        r_means = []
+        g_means = []
+        b_means = []
+        caps = cam.do_capture([req]*BURST_LEN)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_frame%d.jpg"%(NAME,i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            means = its.image.compute_image_means(tile)
+            r_means.append(means[0])
+            g_means.append(means[1])
+            b_means.append(means[2])
+
+        # Pass/fail based on center patch similarity.
+        for means in [r_means, g_means, b_means]:
+            spread = max(means) - min(means)
+            print "Patch mean spread", spread, \
+                   " (min/max: ",  min(means), "/", max(means), ")"
+            assert(spread < SPREAD_THRESH)
+
+        # Also ensure that the burst was at full frame rate.
+        fmt_code = 0x23
+        configs = props['android.scaler.streamConfigurationMap']\
+                       ['availableStreamConfigurations']
+        min_duration = None
+        for cfg in configs:
+            if cfg['format'] == fmt_code and not cfg['input'] and \
+                    cfg['width'] == caps[0]["width"] and \
+                    cfg['height'] == caps[0]["height"]:
+                min_duration = cfg["minFrameDuration"]
+        assert(min_duration is not None)
+        tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]
+        deltas = [tstamps[i]-tstamps[i-1] for i in range(1,len(tstamps))]
+        actual_fps = 1.0 / (max(deltas) / 1000000000.0)
+        actual_fps_max = 1.0 / (min(deltas) / 1000000000.0)
+        max_fps = 1.0 / (min_duration / 1000000000.0)
+        print "Measure FPS min/max", actual_fps, "/", actual_fps_max
+        print "FPS measured %.1f, max advertized %.1f" %(actual_fps, max_fps)
+        assert(max_fps - FPS_MAX_DIFF <= actual_fps <= max_fps + FPS_MAX_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_color_correction.py b/apps/CameraITS/tests/scene1/test_param_color_correction.py
new file mode 100644
index 0000000..b7fdc7b
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_color_correction.py
@@ -0,0 +1,104 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.colorCorrection.* params are applied when set.
+
+    Takes shots with different transform and gains values, and tests that
+    they look correspondingly different. The transform and gains are chosen
+    to make the output go redder or bluer.
+
+    Uses a linear tonemap.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_DIFF = 0.1
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        # Baseline request
+        e, s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        req = its.objects.manual_capture_request(s, e, True)
+        req["android.colorCorrection.mode"] = 0
+
+        # Transforms:
+        # 1. Identity
+        # 2. Identity
+        # 3. Boost blue
+        transforms = [its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
+                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]),
+                      its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,2])]
+
+        # Gains:
+        # 1. Unit
+        # 2. Boost red
+        # 3. Unit
+        gains = [[1,1,1,1], [2,1,1,1], [1,1,1,1]]
+
+        r_means = []
+        g_means = []
+        b_means = []
+
+        # Capture requests:
+        # 1. With unit gains, and identity transform.
+        # 2. With a higher red gain, and identity transform.
+        # 3. With unit gains, and a transform that boosts blue.
+        for i in range(len(transforms)):
+            req["android.colorCorrection.transform"] = transforms[i]
+            req["android.colorCorrection.gains"] = gains[i]
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+            ratios = [rgb_means[0] / rgb_means[1], rgb_means[2] / rgb_means[1]]
+            print "Means = ", rgb_means, "   Ratios =", ratios
+
+        # Draw a plot.
+        domain = range(len(transforms))
+        pylab.plot(domain, r_means, 'r')
+        pylab.plot(domain, g_means, 'g')
+        pylab.plot(domain, b_means, 'b')
+        pylab.ylim([0,1])
+        matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+        # Expect G0 == G1 == G2, R0 == 0.5*R1 == R2, B0 == B1 == 0.5*B2
+        # Also need to ensure that the image is not clamped to white/black.
+        assert(all(g_means[i] > 0.2 and g_means[i] < 0.8 for i in xrange(3)))
+        assert(abs(g_means[1] - g_means[0]) < THRESHOLD_MAX_DIFF)
+        assert(abs(g_means[2] - g_means[1]) < THRESHOLD_MAX_DIFF)
+        assert(abs(r_means[2] - r_means[0]) < THRESHOLD_MAX_DIFF)
+        assert(abs(r_means[1] - 2.0 * r_means[0]) < THRESHOLD_MAX_DIFF)
+        assert(abs(b_means[1] - b_means[0]) < THRESHOLD_MAX_DIFF)
+        assert(abs(b_means[2] - 2.0 * b_means[0]) < THRESHOLD_MAX_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_exposure_time.py b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
new file mode 100644
index 0000000..e6078d9
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
@@ -0,0 +1,68 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.sensor.exposureTime parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    exp_times = []
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        e,s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        for i,e_mult in enumerate([0.8, 0.9, 1.0, 1.1, 1.2]):
+            req = its.objects.manual_capture_request(s, e * e_mult, True)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_frame%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            exp_times.append(e * e_mult)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+    # Draw a plot.
+    pylab.plot(exp_times, r_means, 'r')
+    pylab.plot(exp_times, g_means, 'g')
+    pylab.plot(exp_times, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Test for pass/fail: check that each shot is brighter than the previous.
+    for means in [r_means, g_means, b_means]:
+        for i in range(len(means)-1):
+            assert(means[i+1] > means[i])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_flash_mode.py b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
new file mode 100644
index 0000000..aae56aa
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
@@ -0,0 +1,66 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+
+def main():
+    """Test that the android.flash.mode parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.flash(props) and
+                             its.caps.per_frame_control(props))
+
+        flash_modes_reported = []
+        flash_states_reported = []
+        g_means = []
+
+        # Manually set the exposure to be a little on the dark side, so that
+        # it should be obvious whether the flash fired or not, and use a
+        # linear tonemap.
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 4
+        req = its.objects.manual_capture_request(s, e, True)
+
+        for f in [0,1,2]:
+            req["android.flash.mode"] = f
+            cap = cam.do_capture(req)
+            flash_modes_reported.append(cap["metadata"]["android.flash.mode"])
+            flash_states_reported.append(cap["metadata"]["android.flash.state"])
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, f))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            g_means.append(rgb[1])
+
+        assert(flash_modes_reported == [0,1,2])
+        assert(flash_states_reported[0] not in [3,4])
+        assert(flash_states_reported[1] in [3,4])
+        assert(flash_states_reported[2] in [3,4])
+
+        print "G brightnesses:", g_means
+        assert(g_means[1] > g_means[0])
+        assert(g_means[2] > g_means[0])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_noise_reduction.py b/apps/CameraITS/tests/scene1/test_param_noise_reduction.py
new file mode 100644
index 0000000..f5176a7
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_noise_reduction.py
@@ -0,0 +1,99 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.noiseReduction.mode param is applied when set.
+
+    Capture images with the camera dimly lit. Uses a high analog gain to
+    ensure the captured image is noisy.
+
+    Captures three images, for NR off, "fast", and "high quality".
+    Also captures an image with low gain and NR off, and uses the variance
+    of this as the baseline.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # List of variances for Y,U,V.
+    variances = [[],[],[]]
+
+    # Reference (baseline) variance for each of Y,U,V.
+    ref_variance = []
+
+    nr_modes_reported = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        # NR mode 0 with low gain
+        e, s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
+        req = its.objects.manual_capture_request(s, e)
+        req["android.noiseReduction.mode"] = 0
+        cap = cam.do_capture(req)
+        its.image.write_image(
+                its.image.convert_capture_to_rgb_image(cap),
+                "%s_low_gain.jpg" % (NAME))
+        planes = its.image.convert_capture_to_planes(cap)
+        for j in range(3):
+            img = planes[j]
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            ref_variance.append(its.image.compute_image_variances(tile)[0])
+        print "Ref variances:", ref_variance
+
+        for i in range(3):
+            # NR modes 0, 1, 2 with high gain
+            e, s = its.target.get_target_exposure_combos(cam)["maxSensitivity"]
+            req = its.objects.manual_capture_request(s, e)
+            req["android.noiseReduction.mode"] = i
+            cap = cam.do_capture(req)
+            nr_modes_reported.append(
+                    cap["metadata"]["android.noiseReduction.mode"])
+            its.image.write_image(
+                    its.image.convert_capture_to_rgb_image(cap),
+                    "%s_high_gain_nr=%d.jpg" % (NAME, i))
+            planes = its.image.convert_capture_to_planes(cap)
+            for j in range(3):
+                img = planes[j]
+                tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+                variance = its.image.compute_image_variances(tile)[0]
+                variances[j].append(variance / ref_variance[j])
+        print "Variances with NR mode [0,1,2]:", variances
+
+    # Draw a plot.
+    for j in range(3):
+        pylab.plot(range(3), variances[j], "rgb"[j])
+    matplotlib.pyplot.savefig("%s_plot_variances.png" % (NAME))
+
+    assert(nr_modes_reported == [0,1,2])
+
+    # Check that the variance of the NR=0 image is higher than for the
+    # NR=1 and NR=2 images.
+    for j in range(3):
+        for i in range(1,3):
+            assert(variances[j][i] < variances[j][0])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_sensitivity.py b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
new file mode 100644
index 0000000..d6b44a2
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
@@ -0,0 +1,73 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Test that the android.sensor.sensitivity parameter is applied.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    NUM_STEPS = 5
+
+    sensitivities = None
+    r_means = []
+    g_means = []
+    b_means = []
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
+        sens_range = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
+        sensitivities = [sens_range[0] + i * sens_step for i in range(NUM_STEPS)]
+
+        for s in sensitivities:
+            req = its.objects.manual_capture_request(s, expt)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_iso=%04d.jpg" % (NAME, s))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means = its.image.compute_image_means(tile)
+            r_means.append(rgb_means[0])
+            g_means.append(rgb_means[1])
+            b_means.append(rgb_means[2])
+
+    # Draw a plot.
+    pylab.plot(sensitivities, r_means, 'r')
+    pylab.plot(sensitivities, g_means, 'g')
+    pylab.plot(sensitivities, b_means, 'b')
+    pylab.ylim([0,1])
+    matplotlib.pyplot.savefig("%s_plot_means.png" % (NAME))
+
+    # Test for pass/fail: check that each shot is brighter than the previous.
+    for means in [r_means, g_means, b_means]:
+        for i in range(len(means)-1):
+            assert(means[i+1] > means[i])
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
new file mode 100644
index 0000000..8c8e626
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
@@ -0,0 +1,103 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os
+import os.path
+
+def main():
+    """Test that the android.tonemap.mode param is applied.
+
+    Applies different tonemap curves to each R,G,B channel, and checks
+    that the output images are modified as expected.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_RATIO_MIN_DIFF = 0.1
+    THRESHOLD_DIFF_MAX_DIFF = 0.05
+
+    # The HAL3.2 spec requires that curves of up to 64 control points be
+    # supported; Test 1 below uses 32 points, and Test 2 uses 32 and 64.
+    L = 32
+    LM1 = float(L-1)
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        e /= 2
+
+        # Test 1: check that the tonemap curves have the expected effect.
+        # Take two shots, with n in [0,1], where each has a linear tonemap
+        # and the n=1 shot has a steeper gradient. The slope increases from
+        # the R to the G to the B channel, so R[n=1] should be brighter than
+        # R[n=0], and G[n=1] brighter than G[n=0] by a larger margin, etc.
+        rgb_means = []
+
+        for n in [0,1]:
+            req = its.objects.manual_capture_request(s,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = (
+                    sum([[i/LM1, min(1.0,(1+0.5*n)*i/LM1)] for i in range(L)], []))
+            req["android.tonemap.curveGreen"] = (
+                    sum([[i/LM1, min(1.0,(1+1.0*n)*i/LM1)] for i in range(L)], []))
+            req["android.tonemap.curveBlue"] = (
+                    sum([[i/LM1, min(1.0,(1+1.5*n)*i/LM1)] for i in range(L)], []))
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_n=%d.jpg" %(NAME, n))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means.append(its.image.compute_image_means(tile))
+
+        rgb_ratios = [rgb_means[1][i] / rgb_means[0][i] for i in xrange(3)]
+        print "Test 1: RGB ratios:", rgb_ratios
+        assert(rgb_ratios[0] + THRESHOLD_RATIO_MIN_DIFF < rgb_ratios[1])
+        assert(rgb_ratios[1] + THRESHOLD_RATIO_MIN_DIFF < rgb_ratios[2])
+
+
+        # Test 2: that the length of the tonemap curve (i.e. number of control
+        # points) doesn't affect the output.
+        rgb_means = []
+
+        for size in [32,64]:
+            m = float(size-1)
+            curve = sum([[i/m, i/m] for i in range(size)], [])
+            req = its.objects.manual_capture_request(s,e)
+            req["android.tonemap.mode"] = 0
+            req["android.tonemap.curveRed"] = curve
+            req["android.tonemap.curveGreen"] = curve
+            req["android.tonemap.curveBlue"] = curve
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(
+                    img, "%s_size=%02d.jpg" %(NAME, size))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb_means.append(its.image.compute_image_means(tile))
+
+        rgb_diffs = [rgb_means[1][i] - rgb_means[0][i] for i in xrange(3)]
+        print "Test 2: RGB diffs:", rgb_diffs
+        assert(abs(rgb_diffs[0]) < THRESHOLD_DIFF_MAX_DIFF)
+        assert(abs(rgb_diffs[1]) < THRESHOLD_DIFF_MAX_DIFF)
+        assert(abs(rgb_diffs[2]) < THRESHOLD_DIFF_MAX_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
new file mode 100644
index 0000000..6c2b5c1
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_raw_burst_sensitivity.py
@@ -0,0 +1,85 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.image
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Capture a set of raw images with increasing gains and measure the noise.
+
+    Capture raw-only, in a burst.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Each shot must be at least 1% noisier (by the variance metric) than
+    # the previous one.
+    VAR_THRESH = 1.01
+
+    NUM_STEPS = 5
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.raw16(props) and
+                             its.caps.manual_sensor(props) and
+                             its.caps.read_3a(props) and
+                             its.caps.per_frame_control(props))
+
+        # Expose for the scene with min sensitivity
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_max - sens_min) / NUM_STEPS
+        s_ae,e_ae,_,_,_  = cam.do_3a(get_results=True)
+        s_e_prod = s_ae * e_ae
+
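+        # Hold the sensitivity*exposure product constant across the burst so
+        # that scene brightness stays fixed while the analog gain (and hence
+        # the noise) increases from shot to shot.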
+        reqs = []
+        settings = []
+        for s in range(sens_min, sens_max, sens_step):
+            e = int(s_e_prod / float(s))
+            req = its.objects.manual_capture_request(s, e)
+            reqs.append(req)
+            settings.append((s,e))
+
+        caps = cam.do_capture(reqs, cam.CAP_RAW)
+
+        variances = []
+        for i,cap in enumerate(caps):
+            (s,e) = settings[i]
+
+            # Measure the variance. Each shot should be noisier than the
+            # previous shot (as the gain is increasing).
+            plane = its.image.convert_capture_to_planes(cap, props)[1]
+            tile = its.image.get_image_patch(plane, 0.45,0.45,0.1,0.1)
+            var = its.image.compute_image_variances(tile)[0]
+            variances.append(var)
+
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_s=%05d_var=%f.jpg" % (NAME,s,var))
+            print "s=%d, e=%d, var=%e"%(s,e,var)
+
+        pylab.plot(range(len(variances)), variances)
+        matplotlib.pyplot.savefig("%s_variances.png" % (NAME))
+
+        # Test that each shot is noisier than the previous one.
+        for i in range(len(variances) - 1):
+            assert(variances[i] < variances[i+1] / VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_raw_sensitivity.py b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
new file mode 100644
index 0000000..14c5eb0
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_raw_sensitivity.py
@@ -0,0 +1,78 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.caps
+import its.objects
+import its.image
+import os.path
+import pylab
+import matplotlib
+import matplotlib.pyplot
+
+def main():
+    """Capture a set of raw images with increasing gains and measure the noise.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # Each shot must be 1% noisier (by the variance metric) than the previous
+    # one.
+    VAR_THRESH = 1.01
+
+    NUM_STEPS = 5
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.raw16(props) and
+                             its.caps.manual_sensor(props) and
+                             its.caps.read_3a(props) and
+                             its.caps.per_frame_control(props))
+
+        # Expose for the scene with min sensitivity
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        sens_step = (sens_max - sens_min) / NUM_STEPS
+        s_ae,e_ae,_,_,_  = cam.do_3a(get_results=True)
+        s_e_prod = s_ae * e_ae
+
+        variances = []
+        for s in range(sens_min, sens_max, sens_step):
+
+            e = int(s_e_prod / float(s))
+            req = its.objects.manual_capture_request(s, e)
+
+            # Capture raw+yuv, but only look at the raw.
+            cap,_ = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+            # Measure the variance. Each shot should be noisier than the
+            # previous shot (as the gain is increasing).
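+            # Plane index 1 is taken to be one of the two green Bayer
+            # channels (assuming the canonical R,Gr,Gb,B plane ordering).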
+            plane = its.image.convert_capture_to_planes(cap, props)[1]
+            tile = its.image.get_image_patch(plane, 0.45,0.45,0.1,0.1)
+            var = its.image.compute_image_variances(tile)[0]
+            variances.append(var)
+
+            img = its.image.convert_capture_to_rgb_image(cap, props=props)
+            its.image.write_image(img, "%s_s=%05d_var=%f.jpg" % (NAME,s,var))
+            print "s=%d, e=%d, var=%e"%(s,e,var)
+
+        pylab.plot(range(len(variances)), variances)
+        matplotlib.pyplot.savefig("%s_variances.png" % (NAME))
+
+        # Test that each shot is noisier than the previous one.
+        for i in range(len(variances) - 1):
+            assert(variances[i] < variances[i+1] / VAR_THRESH)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
new file mode 100644
index 0000000..18ca506
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
@@ -0,0 +1,70 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+import numpy
+
+def main():
+    """Test a sequence of shots with different tonemap curves.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # There should be 3 identical frames followed by a different set of
+    # 3 identical frames.
+    MAX_SAME_DELTA = 0.01
+    MIN_DIFF_DELTA = 0.10
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.manual_sensor(props) and
+                             its.caps.manual_post_proc(props) and
+                             its.caps.per_frame_control(props))
+
+        sens, exp_time, _,_,_ = cam.do_3a(do_af=False,get_results=True)
+
+        means = []
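+        # Each entry is a length-3 vector of R,G,B channel means of the
+        # center tile.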
+
+        # Capture 3 manual shots with a linear tonemap.
+        req = its.objects.manual_capture_request(sens, exp_time, True)
+        for i in [0,1,2]:
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            means.append(tile.mean(0).mean(0))
+
+        # Capture 3 manual shots with the default tonemap.
+        req = its.objects.manual_capture_request(sens, exp_time, False)
+        for i in [3,4,5]:
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            means.append(tile.mean(0).mean(0))
+
+        # Compute the delta between each consecutive frame pair.
+        deltas = [numpy.max(numpy.fabs(means[i+1]-means[i])) \
+                  for i in range(len(means)-1)]
+        print "Deltas between consecutive frames:", deltas
+
+        assert(all([abs(deltas[i]) < MAX_SAME_DELTA for i in [0,1,3,4]]))
+        assert(abs(deltas[2]) > MIN_DIFF_DELTA)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
new file mode 100644
index 0000000..1b278ef
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_jpeg_all.py
@@ -0,0 +1,84 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test that the reported sizes and formats for image capture work.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.03
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.per_frame_control(props))
+
+        # Use a manual request with a linear tonemap so that the YUV and JPEG
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        rgbs = []
+
+        for size in its.objects.get_available_output_sizes("yuv", props):
+            out_surface = {"width":size[0], "height":size[1], "format":"yuv"}
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "yuv")
+            assert(cap["width"] == size[0])
+            assert(cap["height"] == size[1])
+            print "Captured YUV %dx%d" % (cap["width"], cap["height"])
+            img = its.image.convert_capture_to_rgb_image(cap)
+            its.image.write_image(img, "%s_yuv_w%d_h%d.jpg"%(
+                    NAME,size[0],size[1]))
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            rgbs.append(rgb)
+
+        for size in its.objects.get_available_output_sizes("jpg", props):
+            out_surface = {"width":size[0], "height":size[1], "format":"jpg"}
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "jpeg")
+            assert(cap["width"] == size[0])
+            assert(cap["height"] == size[1])
+            img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+            its.image.write_image(img, "%s_jpg_w%d_h%d.jpg"%(
+                    NAME,size[0], size[1]))
+            assert(img.shape[0] == size[1])
+            assert(img.shape[1] == size[0])
+            assert(img.shape[2] == 3)
+            print "Captured JPEG %dx%d" % (cap["width"], cap["height"])
+            tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+            rgb = its.image.compute_image_means(tile)
+            rgbs.append(rgb)
+
+        max_diff = 0
+        rgb0 = rgbs[0]
+        for rgb1 in rgbs[1:]:
+            rms_diff = math.sqrt(
+                    sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+            max_diff = max(max_diff, rms_diff)
+        print "Max RMS difference:", max_diff
+        assert(max_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
new file mode 100644
index 0000000..33e7763
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_dng.py
@@ -0,0 +1,47 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import os.path
+
+def main():
+    """Test capturing a single frame as both DNG and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.raw(props) and
+                             its.caps.read_3a(props))
+
+        cam.do_3a()
+
+        req = its.objects.auto_capture_request()
+        cap_dng, cap_yuv = cam.do_capture(req, cam.CAP_DNG_YUV)
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s.jpg" % (NAME))
+
+        with open("%s.dng"%(NAME), "wb") as f:
+            f.write(cap_dng["data"])
+
+        # No specific pass/fail check; test is assumed to have succeeded if
+        # it completes.
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
new file mode 100644
index 0000000..6daa243
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_jpeg.py
@@ -0,0 +1,61 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both YUV and JPEG outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.01
+
+    fmt_yuv =  {"format":"yuv"}
+    fmt_jpeg = {"format":"jpeg"}
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props))
+
+        # Use a manual request with a linear tonemap so that the YUV and JPEG
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_yuv, cap_jpeg = cam.do_capture(req, [fmt_yuv, fmt_jpeg])
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv, True)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        img = its.image.convert_capture_to_rgb_image(cap_jpeg, True)
+        its.image.write_image(img, "%s_jpeg.jpg" % (NAME))
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
new file mode 100644
index 0000000..eb01c1a
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw.py
@@ -0,0 +1,62 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both RAW and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.035
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.raw16(props) and
+                             its.caps.per_frame_control(props))
+
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_raw, cap_yuv = cam.do_capture(req, cam.CAP_RAW_YUV)
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # Shots converted from raw are 1/2 x 1/2 the size of the YUV shots,
+        # so scale the tile appropriately.
+        img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
+        its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
new file mode 100644
index 0000000..910a8ea
--- /dev/null
+++ b/apps/CameraITS/tests/scene1/test_yuv_plus_raw10.py
@@ -0,0 +1,63 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.image
+import its.caps
+import its.device
+import its.objects
+import its.target
+import os.path
+import math
+
+def main():
+    """Test capturing a single frame as both RAW10 and YUV outputs.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    THRESHOLD_MAX_RMS_DIFF = 0.035
+
+    with its.device.ItsSession() as cam:
+        props = cam.get_camera_properties()
+        its.caps.skip_unless(its.caps.compute_target_exposure(props) and
+                             its.caps.raw10(props) and
+                             its.caps.per_frame_control(props))
+
+        # Use a manual request with a linear tonemap so that the YUV and RAW
+        # should look the same (once converted by the its.image module).
+        e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
+        req = its.objects.manual_capture_request(s, e, True)
+
+        cap_raw, cap_yuv = cam.do_capture(req,
+                [{"format":"raw10"}, {"format":"yuv"}])
+
+        img = its.image.convert_capture_to_rgb_image(cap_yuv)
+        its.image.write_image(img, "%s_yuv.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
+        rgb0 = its.image.compute_image_means(tile)
+
+        # Shots converted from raw are 1/2 x 1/2 the size of the YUV shots,
+        # so scale the tile appropriately.
+        img = its.image.convert_capture_to_rgb_image(cap_raw, props=props)
+        its.image.write_image(img, "%s_raw.jpg" % (NAME), True)
+        tile = its.image.get_image_patch(img, 0.475, 0.475, 0.05, 0.05)
+        rgb1 = its.image.compute_image_means(tile)
+
+        rms_diff = math.sqrt(
+                sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
+        print "RMS difference:", rms_diff
+        assert(rms_diff < THRESHOLD_MAX_RMS_DIFF)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tests/tutorial.py b/apps/CameraITS/tests/tutorial.py
new file mode 100644
index 0000000..c266d14
--- /dev/null
+++ b/apps/CameraITS/tests/tutorial.py
@@ -0,0 +1,188 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# --------------------------------------------------------------------------- #
+# The Google Python style guide should be used for scripts:                   #
+# http://google-styleguide.googlecode.com/svn/trunk/pyguide.html              #
+# --------------------------------------------------------------------------- #
+
+# The ITS modules that are in the pymodules/its/ directory. To see formatted
+# docs, use the "pydoc" command:
+#
+# > pydoc its.image
+#
+import its.image
+import its.device
+import its.objects
+import its.target
+
+# Standard Python modules.
+import os.path
+import pprint
+import math
+
+# Modules from the numpy, scipy, and matplotlib libraries. These are used for
+# the image processing code, and images are represented as numpy arrays.
+import pylab
+import numpy
+import matplotlib
+import matplotlib.pyplot
+
+# Each script has a "main" function.
+def main():
+
+    # Each script has a string description of what it does. This is the first
+    # entry inside the main function.
+    """Tutorial script to show how to use the ITS infrastructure.
+    """
+
+    # A convention in each script is to use the filename (without the extension)
+    # as the name of the test, when printing results to the screen or dumping
+    # files.
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    # The standard way to open a session with a connected camera device. This
+    # creates a cam object which encapsulates the session and which is active
+    # within the scope of the "with" block; when the block exits, the camera
+    # session is closed.
+    with its.device.ItsSession() as cam:
+
+        # Get the static properties of the camera device. Returns a Python
+        # associative array object; print it to the console.
+        props = cam.get_camera_properties()
+        pprint.pprint(props)
+
+        # Grab a YUV frame with manual exposure of sensitivity = 200, exposure
+        # duration = 50ms.
+        req = its.objects.manual_capture_request(200, 50*1000*1000)
+        cap = cam.do_capture(req)
+
+        # Print the properties of the captured frame; width and height are
+        # integers, and the metadata is a Python associative array object.
+        print "Captured image width:", cap["width"]
+        print "Captured image height:", cap["height"]
+        pprint.pprint(cap["metadata"])
+
+        # The captured image is YUV420. Convert to RGB, and save as a file.
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_rgb_1.jpg" % (NAME))
+
+        # Can also get the Y,U,V planes separately; save these to greyscale
+        # files.
+        yimg,uimg,vimg = its.image.convert_capture_to_planes(cap)
+        its.image.write_image(yimg, "%s_y_plane_1.jpg" % (NAME))
+        its.image.write_image(uimg, "%s_u_plane_1.jpg" % (NAME))
+        its.image.write_image(vimg, "%s_v_plane_1.jpg" % (NAME))
+
+        # Run 3A on the device. In this case, just use the entire image as the
+        # 3A region, and run each of AWB,AE,AF. Can also change the region and
+        # specify independently for each of AE,AWB,AF whether it should run.
+        #
+        # NOTE: This may fail, if the camera isn't pointed at a reasonable
+        # target scene. If it fails, the script will end. The logcat messages
+        # can be inspected to see the status of 3A running on the device.
+        #
+        # > adb logcat -s 'ItsService:v'
+        #
+        # If this keeps on failing, try also rebooting the device before
+        # running the test.
+        sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
+        print "AE: sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
+        print "AWB: gains", gains, "transform", xform
+        print "AF: distance", focus
+
+        # Grab a new manual frame, using the 3A values, and convert it to RGB
+        # and save it to a file too. Note that the "req" object is just a
+        # Python dictionary that is pre-populated by the its.objects module
+        # functions (in this case a default manual capture), and the key/value
+        # pairs in the object can be used to set any field of the capture
+        # request. Here, the AWB gains and transform (CCM) are being used.
+        # Note that the CCM transform is in a rational format in capture
+        # requests, meaning it is an object with integer numerators and
+        # denominators. The 3A routine returns simple floats instead, however,
+        # so a conversion from float to rational must be performed.
+        req = its.objects.manual_capture_request(sens, exp)
+        xform_rat = its.objects.float_to_rational(xform)
+
+        req["android.colorCorrection.transform"] = xform_rat
+        req["android.colorCorrection.gains"] = gains
+        cap = cam.do_capture(req)
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_rgb_2.jpg" % (NAME))
+
+        # Print out the actual capture request object that was used.
+        pprint.pprint(req)
+
+        # Images are numpy arrays. The dimensions are (h,w,3) when indexing,
+        # in the case of RGB images. Greyscale images are (h,w,1). Pixels are
+        # generally float32 values in the [0,1] range, however some of the
+        # helper functions in its.image deal with the packed YUV420 and other
+        # formats of images that come from the device (and convert them to
+        # float32).
+        # Print the dimensions of the image, and the top-left pixel value,
+        # which is an array of 3 floats.
+        print "RGB image dimensions:", rgbimg.shape
+        print "RGB image top-left pixel:", rgbimg[0,0]
+
+        # Grab a center tile from the image; this returns a new image. Save
+        # this tile image. In this case, the tile is the middle 10% x 10%
+        # rectangle.
+        tile = its.image.get_image_patch(rgbimg, 0.45, 0.45, 0.1, 0.1)
+        its.image.write_image(tile, "%s_rgb_2_tile.jpg" % (NAME))
+
+        # Compute the mean values of the center tile image.
+        rgb_means = its.image.compute_image_means(tile)
+        print "RGB means:", rgb_means
+
+        # Apply a lookup table to the image, and save the new version. The LUT
+        # is basically a tonemap, and can be used to implement a gamma curve.
+        # In this case, the LUT is used to double the value of each pixel.
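+        # (Values that overflow the valid range are assumed to be clamped
+        # by apply_lut_to_image.)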
+        lut = numpy.array([2*i for i in xrange(65536)])
+        rgbimg_lut = its.image.apply_lut_to_image(rgbimg, lut)
+        its.image.write_image(rgbimg_lut, "%s_rgb_2_lut.jpg" % (NAME))
+
+        # Apply a 3x3 matrix to the image, and save the new version. The matrix
+        # is a numpy array, in row major order, and the pixel values are right-
+        # multiplied to it (when considered as column vectors). The example
+        # matrix here just boosts the blue channel by 10%.
+        mat = numpy.array([[1, 0, 0  ],
+                           [0, 1, 0  ],
+                           [0, 0, 1.1]])
+        rgbimg_mat = its.image.apply_matrix_to_image(rgbimg, mat)
+        its.image.write_image(rgbimg_mat, "%s_rgb_2_mat.jpg" % (NAME))
+
+        # Compute a histogram of the luma image, in 256 buckets.
+        yimg,_,_ = its.image.convert_capture_to_planes(cap)
+        hist,_ = numpy.histogram(yimg*255, 256, (0,256))
+
+        # Plot the histogram using matplotlib, and save as a PNG image.
+        pylab.plot(range(256), hist.tolist())
+        pylab.xlabel("Luma DN")
+        pylab.ylabel("Pixel count")
+        pylab.title("Histogram of luma channel of captured image")
+        matplotlib.pyplot.savefig("%s_histogram.png" % (NAME))
+
+        # Capture a frame to be returned as a JPEG. Load it as an RGB image,
+        # then save it back as a JPEG.
+        cap = cam.do_capture(req, cam.CAP_JPEG)
+        rgbimg = its.image.convert_capture_to_rgb_image(cap)
+        its.image.write_image(rgbimg, "%s_jpg.jpg" % (NAME))
+        r,g,b = its.image.convert_capture_to_planes(cap)
+        its.image.write_image(r, "%s_r.jpg" % (NAME))
+
+# This is the standard boilerplate in each test that allows the script to both
+# be executed directly and imported as a module.
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tools/compute_dng_noise_model.py b/apps/CameraITS/tools/compute_dng_noise_model.py
new file mode 100644
index 0000000..1b57754
--- /dev/null
+++ b/apps/CameraITS/tools/compute_dng_noise_model.py
@@ -0,0 +1,175 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.objects
+import its.image
+import pprint
+import pylab
+import os.path
+import matplotlib
+import matplotlib.pyplot
+import numpy
+import math
+
+def main():
+    """Compute the DNG noise model from a color checker chart.
+
+    TODO: Make this more robust; some manual futzing may be needed.
+    """
+    NAME = os.path.basename(__file__).split(".")[0]
+
+    with its.device.ItsSession() as cam:
+
+        props = cam.get_camera_properties()
+
+        white_level = float(props['android.sensor.info.whiteLevel'])
+        black_levels = props['android.sensor.blackLevelPattern']
+        idxs = its.image.get_canonical_cfa_order(props)
+        black_levels = [black_levels[i] for i in idxs]
+
+        # Expose for the scene with min sensitivity
+        sens_min, sens_max = props['android.sensor.info.sensitivityRange']
+        s_ae,e_ae,awb_gains,awb_ccm,_  = cam.do_3a(get_results=True)
+        s_e_prod = s_ae * e_ae
+
+        # Make the image brighter since the script looks at linear Bayer
+        # raw patches rather than gamma-encoded YUV patches (and the AE
+        # probably under-exposes a little for this use-case).
+        s_e_prod *= 2
+
+        # Capture raw frames across the full sensitivity range.
+        NUM_SENS_STEPS = 15
+        sens_step = int((sens_max - sens_min - 1) / float(NUM_SENS_STEPS))
+        reqs = []
+        sens = []
+        for s in range(sens_min, sens_max, sens_step):
+            e = int(s_e_prod / float(s))
+            req = its.objects.manual_capture_request(s, e)
+            req["android.colorCorrection.transform"] = \
+                    its.objects.float_to_rational(awb_ccm)
+            req["android.colorCorrection.gains"] = awb_gains
+            reqs.append(req)
+            sens.append(s)
+
+        caps = cam.do_capture(reqs, cam.CAP_RAW)
+
+        # A list of the (x,y) coords of the center pixel of a collection of
+        # patches of a color checker chart. Each patch should be uniform;
+        # the actual color doesn't matter. Note that the coords are
+        # relative to the *converted* RGB image, which is 1/2 x 1/2 of the
+        # full size; convert back to full.
+        img = its.image.convert_capture_to_rgb_image(caps[0], props=props)
+        patches = its.image.get_color_checker_chart_patches(img, NAME+"_debug")
+        patches = [(2*x,2*y) for (x,y) in sum(patches,[])]
+
+        lines = []
+        for (s,cap) in zip(sens,caps):
+            # For each capture, compute the mean value in each patch, for each
+            # Bayer plane; discard patches where pixels are close to clamped.
+            # Also compute the variance.
+            CLAMP_THRESH = 0.2
+            planes = its.image.convert_capture_to_planes(cap, props)
+            points = []
+            for i,plane in enumerate(planes):
+                plane = (plane * white_level - black_levels[i]) / (
+                        white_level - black_levels[i])
+                for j,(x,y) in enumerate(patches):
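+                    # The Bayer planes are 1/2 x 1/2 of the full-size image,
+                    # so halve the patch coords and take a 32x32 tile around
+                    # the center of each patch.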
+                    tile = plane[y/2-16:y/2+16:,x/2-16:x/2+16:,::]
+                    mean = its.image.compute_image_means(tile)[0]
+                    var = its.image.compute_image_variances(tile)[0]
+                    if (mean > CLAMP_THRESH and mean < 1.0-CLAMP_THRESH):
+                        # Each point is a (mean,variance) tuple for a patch;
+                        # for a given ISO, there should be a linear
+                        # relationship between these values.
+                        points.append((mean,var))
+
+            # Fit a line to the points, with line equation y = mx + b. This
+            # line captures the relationship between mean and variance (i.e.
+            # between signal level and noise) for this particular sensor.
+            # In the DNG noise model, the gradient (m) is "S", and the offset
+            # (b) is "O".
+            points.sort()
+            xs = [x for (x,y) in points]
+            ys = [y for (x,y) in points]
+            m,b = numpy.polyfit(xs, ys, 1)
+            lines.append((s,m,b))
+            print s, "->", m, b
+
+            # TODO: Clean up these checks (which currently fail in some cases).
+            # Some sanity checks:
+            # * Noise levels should increase with brightness.
+            # * Extrapolating to a black image, the noise should be positive.
+            # Basically, the "b" value should correspond to the read noise,
+            # which is the noise level if the sensor were operating in zero
+            # light.
+            #assert(m > 0)
+            #assert(b >= 0)
+
+            # Draw a plot.
+            pylab.plot(xs, ys, 'r')
+            pylab.plot([0,xs[-1]],[b,m*xs[-1]+b],'b')
+            matplotlib.pyplot.savefig("%s_plot_mean_vs_variance.png" % (NAME))
+
+        # Now fit a line across the (m,b) line parameters for each sensitivity.
+        # The gradient (m) params are fit to the "S" line, and the offset (b)
+        # params are fit to the "O" line, both as a function of sensitivity.
+        gains = [d[0] for d in lines]
+        Ss = [d[1] for d in lines]
+        Os = [d[2] for d in lines]
+        mS,bS = numpy.polyfit(gains, Ss, 1)
+        mO,bO = numpy.polyfit(gains, Os, 1)
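+        # The fitted model is then var = S(sens)*mean + O(sens), where
+        # S(sens) = mS*sens + bS and O(sens) = mO*sens + bO, matching the
+        # generated C functions printed below.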
+
+        # Plot curve "O" as 10x, so it fits in the same scale as curve "S".
+        pylab.plot(gains, [10*o for o in Os], 'r')
+        pylab.plot([gains[0],gains[-1]],
+                [10*mO*gains[0]+10*bO, 10*mO*gains[-1]+10*bO], 'b')
+        pylab.plot(gains, Ss, 'r')
+        pylab.plot([gains[0],gains[-1]], [mS*gains[0]+bS, mS*gains[-1]+bS], 'b')
+        matplotlib.pyplot.savefig("%s_plot_S_O.png" % (NAME))
+
+        print """
+        /* Generated test code to dump a table of data for external validation
+         * of the noise model parameters.
+         */
+        #include <stdio.h>
+        #include <assert.h>
+        double compute_noise_model_entry_S(int sens);
+        double compute_noise_model_entry_O(int sens);
+        int main(void) {
+            int sens;
+            for (sens = %d; sens <= %d; sens += 100) {
+                double o = compute_noise_model_entry_O(sens);
+                double s = compute_noise_model_entry_S(sens);
+                printf("%%d,%%lf,%%lf\\n", sens, o, s);
+            }
+            return 0;
+        }
+
+        /* Generated functions to map a given sensitivity to the O and S noise
+         * model parameters in the DNG noise model.
+         */
+        double compute_noise_model_entry_S(int sens) {
+            double s = %e * sens + %e;
+            return s < 0.0 ? 0.0 : s;
+        }
+        double compute_noise_model_entry_O(int sens) {
+            double o = %e * sens + %e;
+            return o < 0.0 ? 0.0 : o;
+        }
+        """%(sens_min,sens_max,mS,bS,mO,bO)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tools/config.py b/apps/CameraITS/tools/config.py
new file mode 100644
index 0000000..6e83412
--- /dev/null
+++ b/apps/CameraITS/tools/config.py
@@ -0,0 +1,66 @@
+# Copyright 2013 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import its.device
+import its.target
+import sys
+
+def main():
+    """Set the target exposure.
+
+    This program is just a wrapper around the its.target module, to allow the
+    functions in it to be invoked from the command line.
+
+    Usage:
+        python config.py        - Measure the target exposure, and cache it.
+        python config.py EXP    - Hard-code (and cache) the target exposure.
+
+    The "reboot" or "reboot=<N>" and "camera=<N>" arguments may also be
+    provided, just as with all the test scripts. The "target" argument may
+    also be provided, but it has no effect on this script since the cached
+    exposure value is cleared regardless.
+
+    If no exposure value is provided, the camera will be used to measure
+    the scene and set a level that will result in the luma (with linear
+    tonemap) being at the 0.5 level. This requires camera 3A and capture
+    to be functioning.
+
+    For bring-up purposes, the exposure value may be manually set to a hard-
+    coded value, without the camera having to be able to perform 3A (or even
+    capture a shot reliably).
+    """
+
+    # Command line args, ignoring any args that will be passed down to the
+    # ItsSession constructor.
+    args = [s for s in sys.argv if s[:6] not in \
+            ["reboot", "camera", "target", "noinit"]]
+
+    if len(args) == 1:
+        with its.device.ItsSession() as cam:
+            # Automatically measure target exposure.
+            its.target.clear_cached_target_exposure()
+            exposure = its.target.get_target_exposure(cam)
+    elif len(args) == 2:
+        # Hard-code the target exposure.
+        exposure = int(args[1])
+        its.target.set_hardcoded_exposure(exposure)
+    else:
+        print "Usage: python %s [EXPOSURE]"
+        sys.exit(0)
+    print "New target exposure set to", exposure
+    print "This corresponds to %dms at ISO 100" % int(exposure/100/1000000.0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CameraITS/tools/run_all_tests.py b/apps/CameraITS/tools/run_all_tests.py
new file mode 100644
index 0000000..2202d5b
--- /dev/null
+++ b/apps/CameraITS/tools/run_all_tests.py
@@ -0,0 +1,116 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import os.path
+import tempfile
+import subprocess
+import time
+import sys
+import its.device
+
+def main():
+    """Run all the automated tests, saving intermediate files, and producing
+    a summary/report of the results.
+
+    Script should be run from the top-level CameraITS directory.
+    """
+
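+    # Exit code presumably used by its.caps.skip_unless when a required
+    # capability is missing.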
+    SKIP_RET_CODE = 101
+
+    # Not yet mandated tests
+    NOT_YET_MANDATED = {
+        "scene0":[
+            "test_jitter"
+        ],
+        "scene1":[
+            "test_ae_precapture_trigger",
+            "test_black_white",
+            "test_crop_region_raw",
+            "test_locked_burst",
+            "test_yuv_plus_jpeg"
+        ]
+    }
+
+    # Get all the scene0 and scene1 tests, which can be run using the same
+    # physical setup.
+    scenes = ["scene0", "scene1"]
+    tests = []
+    for d in scenes:
+        tests += [(d,s[:-3],os.path.join("tests", d, s))
+                  for s in os.listdir(os.path.join("tests",d))
+                  if s[-3:] == ".py"]
+    tests.sort()
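+    # Each entry is a (scene, testname, path) tuple, e.g.
+    # ("scene1", "test_yuv_plus_jpeg", "tests/scene1/test_yuv_plus_jpeg.py").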
+
+    # Make output directories to hold the generated files.
+    topdir = tempfile.mkdtemp()
+    for d in scenes:
+        os.mkdir(os.path.join(topdir, d))
+    print "Saving output files to:", topdir, "\n"
+
+    # Determine the camera ID.
+    camera_id = 0
+    for s in sys.argv[1:]:
+        if s[:7] == "camera=" and len(s) > 7:
+            camera_id = s[7:]
+
+    # Run each test, capturing stdout and stderr.
+    numpass = 0
+    numskip = 0
+    numnotmandatedfail = 0
+    numfail = 0
+    for (scene,testname,testpath) in tests:
+        cmd = ['python', os.path.join(os.getcwd(),testpath)] + sys.argv[1:]
+        outdir = os.path.join(topdir,scene)
+        outpath = os.path.join(outdir,testname+"_stdout.txt")
+        errpath = os.path.join(outdir,testname+"_stderr.txt")
+        t0 = time.time()
+        with open(outpath,"w") as fout, open(errpath,"w") as ferr:
+            retcode = subprocess.call(cmd,stderr=ferr,stdout=fout,cwd=outdir)
+        t1 = time.time()
+
+        if retcode == 0:
+            retstr = "PASS "
+            numpass += 1
+        elif retcode == SKIP_RET_CODE:
+            retstr = "SKIP "
+            numskip += 1
+        elif retcode != 0 and testname in NOT_YET_MANDATED[scene]:
+            retstr = "FAIL*"
+            numnotmandatedfail += 1
+        else:
+            retstr = "FAIL "
+            numfail += 1
+
+        print "%s %s/%s [%.1fs]" % (retstr, scene, testname, t1-t0)
+
+    if numskip > 0:
+        skipstr = ", %d test%s skipped" % (numskip, "s" if numskip > 1 else "")
+    else:
+        skipstr = ""
+
+    print "\n%d / %d tests passed (%.1f%%)%s" % (
+            numpass + numnotmandatedfail, len(tests) - numskip,
+            100.0 * float(numpass + numnotmandatedfail) / (len(tests) - numskip)
+                if len(tests) != numskip else 100.0,
+            skipstr)
+
+    if numnotmandatedfail > 0:
+        print "(*) tests are not yet mandated"
+
+    its.device.report_result(camera_id, numfail == 0)
+
+if __name__ == '__main__':
+    main()
+
diff --git a/apps/CtsVerifier/Android.mk b/apps/CtsVerifier/Android.mk
index 460b88a..e370c81 100644
--- a/apps/CtsVerifier/Android.mk
+++ b/apps/CtsVerifier/Android.mk
@@ -25,7 +25,7 @@
 
 LOCAL_SRC_FILES := $(call all-java-files-under, src) $(call all-Iaidl-files-under, src)
 
-LOCAL_STATIC_JAVA_LIBRARIES := cts-sensors-tests ctstestrunner
+LOCAL_STATIC_JAVA_LIBRARIES := cts-sensors-tests ctstestrunner android-ex-camera2
 
 LOCAL_PACKAGE_NAME := CtsVerifier
 
@@ -78,12 +78,15 @@
 ifeq ($(HOST_OS),linux)
 $(verifier-zip) : $(HOST_OUT)/bin/cts-usb-accessory
 endif
+$(verifier-zip) : $(HOST_OUT)/CameraITS
+
 $(verifier-zip) : $(call intermediates-dir-for,APPS,CtsVerifier)/package.apk | $(ACP)
 		$(hide) mkdir -p $(verifier-dir)
 		$(hide) $(ACP) -fp $< $(verifier-dir)/CtsVerifier.apk
 ifeq ($(HOST_OS),linux)
 		$(hide) $(ACP) -fp $(HOST_OUT)/bin/cts-usb-accessory $(verifier-dir)/cts-usb-accessory
 endif
+		$(hide) $(ACP) -fpr $(HOST_OUT)/CameraITS $(verifier-dir)
 		$(hide) cd $(cts-dir) && zip -rq $(verifier-dir-name) $(verifier-dir-name)
 
 ifneq ($(filter cts, $(MAKECMDGOALS)),)
diff --git a/apps/CtsVerifier/AndroidManifest.xml b/apps/CtsVerifier/AndroidManifest.xml
index b6abe30..d6d655a 100644
--- a/apps/CtsVerifier/AndroidManifest.xml
+++ b/apps/CtsVerifier/AndroidManifest.xml
@@ -23,6 +23,7 @@
     <uses-sdk android:minSdkVersion="19" android:targetSdkVersion="21"/>
 
     <uses-permission android:name="android.permission.ACCESS_FINE_LOCATION" />
+    <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
     <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
     <uses-permission android:name="android.permission.BLUETOOTH" />
     <uses-permission android:name="android.permission.BLUETOOTH_ADMIN" />
@@ -34,6 +35,11 @@
     <uses-permission android:name="android.permission.INTERNET" />
     <uses-permission android:name="android.permission.NFC" />
     <uses-permission android:name="android.permission.VIBRATE" />
+    <uses-feature android:name="android.hardware.camera" android:required="false"/>
+    <uses-feature android:name="android.hardware.camera.flash" android:required="false"/>
+    <uses-feature android:name="android.hardware.sensor.accelerometer" android:required="false" />
+    <uses-feature android:name="android.hardware.sensor.compass" android:required="false" />
+    <uses-feature android:name="android.hardware.sensor.gyroscope" android:required="false" />
     <uses-feature android:name="android.hardware.camera.front"
                   android:required="false" />
     <uses-feature android:name="android.hardware.camera.autofocus"
@@ -717,6 +723,15 @@
             <meta-data android:name="android.nfc.cardemulation.host_apdu_service" android:resource="@xml/access_prefix_aid_list"/>
         </service>
 
+        <!-- Service used for Camera ITS tests -->
+        <service android:name=".camera.its.ItsService" >
+            <intent-filter>
+                <action android:name="com.android.cts.verifier.camera.its.START"/>
+                <category android:name="android.intent.category.DEFAULT" />
+                <data android:mimeType="text/plain" />
+            </intent-filter>
+        </service>
+
         <!--
             A DeviceAdmin receiver for sensor tests, it allows sensor tests to turn off the screen.
         -->
@@ -1040,6 +1055,17 @@
                     android:value="android.hardware.camera.any"/>
         </activity>
 
+        <activity android:name=".camera.its.ItsTestActivity"
+                  android:label="@string/camera_its_test"
+                  android:configChanges="keyboardHidden|orientation|screenSize">
+            <intent-filter>
+                <action android:name="android.intent.action.MAIN" />
+                <category android:name="android.cts.intent.category.MANUAL_TEST" />
+            </intent-filter>
+            <meta-data android:name="test_category" android:value="@string/test_category_camera" />
+            <meta-data android:name="test_required_features" android:value="android.hardware.camera.any" />
+        </activity>
+
         <activity android:name=".usb.UsbAccessoryTestActivity"
                 android:label="@string/usb_accessory_test"
                 android:configChanges="keyboardHidden|orientation|screenSize">
diff --git a/apps/CtsVerifier/res/layout/ble_advertiser_hardware_scan_filter.xml b/apps/CtsVerifier/res/layout/ble_advertiser_hardware_scan_filter.xml
index ce3e1e1..a545727 100644
--- a/apps/CtsVerifier/res/layout/ble_advertiser_hardware_scan_filter.xml
+++ b/apps/CtsVerifier/res/layout/ble_advertiser_hardware_scan_filter.xml
@@ -19,63 +19,56 @@
         android:orientation="vertical"
         android:padding="10dip"
         >
-
-    <LinearLayout android:orientation="vertical"
+    <ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
             android:layout_width="match_parent"
             android:layout_height="wrap_content"
-            android:layout_centerInParent="true"
-            >
-        <TextView android:text="@string/ble_advertiser_scannable"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-        />
-        <TextView android:text="@string/ble_advertiser_scannable_instruction"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-        />
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                >
-            <Button android:id="@+id/ble_advertiser_scannable_start"
-                    android:layout_width="wrap_content"
-                    android:layout_height="wrap_content"
-                    android:text="@string/ble_advertiser_start"
-                    />
-            <Button android:id="@+id/ble_advertiser_scannable_stop"
-                    android:layout_width="wrap_content"
-                    android:layout_height="wrap_content"
-                    android:text="@string/ble_advertiser_stop"
-                    />
-        </LinearLayout>
-        <TextView android:text="@string/ble_advertiser_unscannable"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-        />
-        <TextView android:text="@string/ble_advertiser_unscannable_instruction"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-        />
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                >
-            <Button android:id="@+id/ble_advertiser_unscannable_start"
-                    android:layout_width="wrap_content"
-                    android:layout_height="wrap_content"
-                    android:text="@string/ble_advertiser_start"
-                    />
-            <Button android:id="@+id/ble_advertiser_unscannable_stop"
-                    android:layout_width="wrap_content"
-                    android:layout_height="wrap_content"
-                    android:text="@string/ble_advertiser_stop"
-                    />
-        </LinearLayout>
-    </LinearLayout>
+            android:scrollbars="vertical">
 
-    <include android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_alignParentBottom="true"
-            layout="@layout/pass_fail_buttons"
-            />
+        <LinearLayout android:orientation="vertical"
+                android:layout_width="match_parent"
+                android:layout_height="wrap_content"
+                android:layout_centerInParent="true">
+            <TextView android:text="@string/ble_advertiser_scannable"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:text="@string/ble_advertiser_scannable_instruction"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <LinearLayout android:orientation="horizontal"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content">
+                <Button android:id="@+id/ble_advertiser_scannable_start"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_start"/>
+                <Button android:id="@+id/ble_advertiser_scannable_stop"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_stop"/>
+            </LinearLayout>
+            <TextView android:text="@string/ble_advertiser_unscannable"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <TextView android:text="@string/ble_advertiser_unscannable_instruction"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <LinearLayout android:orientation="horizontal"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content">
+                <Button android:id="@+id/ble_advertiser_unscannable_start"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_start"/>
+                <Button android:id="@+id/ble_advertiser_unscannable_stop"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_stop"/>
+            </LinearLayout>
+
+            <include android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    android:layout_alignParentBottom="true"
+                    layout="@layout/pass_fail_buttons"/>
+        </LinearLayout>
+    </ScrollView>
 </RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_advertiser_power_level.xml b/apps/CtsVerifier/res/layout/ble_advertiser_power_level.xml
index ec3284d..c8e0133 100644
--- a/apps/CtsVerifier/res/layout/ble_advertiser_power_level.xml
+++ b/apps/CtsVerifier/res/layout/ble_advertiser_power_level.xml
@@ -19,31 +19,34 @@
         android:orientation="vertical"
         android:padding="10dip"
         >
-
-    <TextView android:text="@string/ble_advertiser_power_level_instruction"
-            android:layout_width="wrap_content"
-            android:layout_height="wrap_content"
-    />
-    <LinearLayout android:orientation="horizontal"
+    <ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
+            android:layout_width="fill_parent"
+            android:layout_height="wrap_content">
+        <LinearLayout android:orientation="vertical"
             android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_centerInParent="true"
-            >
-        <Button android:id="@+id/ble_power_level_start"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-                android:text="@string/ble_advertiser_start"
-                />
-        <Button android:id="@+id/ble_power_level_stop"
-                android:layout_width="wrap_content"
-                android:layout_height="wrap_content"
-                android:text="@string/ble_advertiser_stop"
-                />
-    </LinearLayout>
+            android:layout_height="wrap_content">
+            <TextView android:text="@string/ble_advertiser_power_level_instruction"
+                    android:id="@+id/ble_advertiser_power_level_instruction"
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"
+                    android:scrollbars="vertical"/>
+            <LinearLayout android:orientation="horizontal"
+                    android:layout_below="@+id/ble_advertiser_power_level_instruction"
+                    android:layout_width="match_parent"
+                    android:layout_height="wrap_content">
+                <Button android:id="@+id/ble_power_level_start"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_start"/>
+                <Button android:id="@+id/ble_power_level_stop"
+                        android:layout_width="wrap_content"
+                        android:layout_height="wrap_content"
+                        android:text="@string/ble_advertiser_stop"/>
+            </LinearLayout>
 
-    <include android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_alignParentBottom="true"
-            layout="@layout/pass_fail_buttons"
-            />
+            <include android:layout_width="match_parent"
+                    android:layout_height="wrap_content"
+                    layout="@layout/pass_fail_buttons"/>
+        </LinearLayout>
+    </ScrollView>
 </RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_client_connect.xml b/apps/CtsVerifier/res/layout/ble_client_connect.xml
index 54a0a99..30b4edb 100644
--- a/apps/CtsVerifier/res/layout/ble_client_connect.xml
+++ b/apps/CtsVerifier/res/layout/ble_client_connect.xml
@@ -20,22 +20,19 @@
         android:padding="10dip"
         >
 
-    <LinearLayout android:orientation="horizontal"
+    <LinearLayout android:orientation="vertical"
             android:layout_width="match_parent"
             android:layout_height="wrap_content"
             android:layout_centerInParent="true"
             >
-        <EditText android:id="@+id/ble_address"
-                android:layout_weight="1"
-                android:layout_width="0dp"
-                android:layout_height="wrap_content"
-                android:hint="@string/ble_address"
-                />
-        <Button android:id="@+id/ble_connect"
+        <Button android:id="@+id/ble_scan_start"
                 android:layout_width="wrap_content"
                 android:layout_height="wrap_content"
-                android:text="@string/ble_connect"
-                />
+                android:text="@string/ble_scan_start"/>
+        <Button android:id="@+id/ble_scan_stop"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content"
+                android:text="@string/ble_scan_stop"/>
     </LinearLayout>
 
     <include android:layout_width="match_parent"
@@ -43,4 +40,4 @@
             android:layout_alignParentBottom="true"
             layout="@layout/pass_fail_buttons"
             />
-</RelativeLayout>
\ No newline at end of file
+</RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_client_read_write.xml b/apps/CtsVerifier/res/layout/ble_client_read_write.xml
index 7edba62..a263916 100644
--- a/apps/CtsVerifier/res/layout/ble_client_read_write.xml
+++ b/apps/CtsVerifier/res/layout/ble_client_read_write.xml
@@ -32,6 +32,7 @@
                     android:layout_width="0dp"
                     android:layout_weight="1"
                     android:layout_height="wrap_content"
+                    android:text="@string/ble_test_text"
                     android:hint="@string/ble_write_hint"
                     android:padding="10dip"
                     />
@@ -67,4 +68,4 @@
             android:layout_alignParentBottom="true"
             layout="@layout/pass_fail_buttons"
             />
-</RelativeLayout>
\ No newline at end of file
+</RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_reliable_write.xml b/apps/CtsVerifier/res/layout/ble_reliable_write.xml
index 7db78ff..05b1812 100644
--- a/apps/CtsVerifier/res/layout/ble_reliable_write.xml
+++ b/apps/CtsVerifier/res/layout/ble_reliable_write.xml
@@ -27,6 +27,7 @@
         <EditText android:id="@+id/write_text"
                 android:layout_width="match_parent"
                 android:layout_height="wrap_content"
+                android:text="@string/ble_test_text"
                 android:hint="@string/ble_write_hint"
                 android:padding="5dip"
                 />
@@ -60,4 +61,4 @@
             android:layout_alignParentBottom="true"
             layout="@layout/pass_fail_buttons"
             />
-</RelativeLayout>
\ No newline at end of file
+</RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_scanner_hardware_scan_filter.xml b/apps/CtsVerifier/res/layout/ble_scanner_hardware_scan_filter.xml
index f356ded..dabd640 100644
--- a/apps/CtsVerifier/res/layout/ble_scanner_hardware_scan_filter.xml
+++ b/apps/CtsVerifier/res/layout/ble_scanner_hardware_scan_filter.xml
@@ -17,40 +17,41 @@
         android:layout_width="match_parent"
         android:layout_height="match_parent"
         android:orientation="vertical"
-        android:padding="10dip"
-        >
-    <TextView android:text="@string/ble_scanner_scan_filter_instruction"
+        android:padding="10dip">
+    <ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
             android:layout_width="wrap_content"
-            android:layout_height="wrap_content"
-    />
-    <LinearLayout android:orientation="vertical"
-            android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_centerInParent="true"
-            >
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                >
-            <Button android:id="@+id/ble_scan_with_filter"
+            android:layout_height="wrap_content">
+        <LinearLayout android:orientation="vertical"
+                android:layout_width="wrap_content"
+                android:layout_height="wrap_content">
+            <TextView android:text="@string/ble_scanner_scan_filter_instruction"
                     android:layout_width="wrap_content"
+                    android:layout_height="wrap_content"/>
+            <LinearLayout android:orientation="vertical"
+                    android:layout_width="match_parent"
                     android:layout_height="wrap_content"
-                    android:text="@string/ble_scan_with_filter"
-                    />
-            <Button android:id="@+id/ble_scan_without_filter"
-                    android:layout_width="wrap_content"
+                    android:layout_centerInParent="true">
+                <LinearLayout android:orientation="vertical"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content">
+                    <Button android:id="@+id/ble_scan_with_filter"
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:text="@string/ble_scan_with_filter"/>
+                    <Button android:id="@+id/ble_scan_without_filter"
+                            android:layout_width="wrap_content"
+                            android:layout_height="wrap_content"
+                            android:text="@string/ble_scan_without_filter"/>
+                </LinearLayout>
+                <ListView android:id="@+id/ble_scan_result_list"
+                        android:layout_height="wrap_content"
+                        android:layout_width="match_parent">
+                </ListView>
+            </LinearLayout>
+            <include android:layout_width="match_parent"
                     android:layout_height="wrap_content"
-                    android:text="@string/ble_scan_without_filter"
-                    />
+                    android:layout_alignParentBottom="true"
+                    layout="@layout/pass_fail_buttons"/>
         </LinearLayout>
-        <ListView android:id="@+id/ble_scan_result_list"
-                android:layout_height="wrap_content"
-                android:layout_width="match_parent">
-        </ListView>
-    </LinearLayout>
-    <include android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_alignParentBottom="true"
-            layout="@layout/pass_fail_buttons"
-    />
+    </ScrollView>
 </RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/ble_scanner_power_level.xml b/apps/CtsVerifier/res/layout/ble_scanner_power_level.xml
index b240db6..c24dbb4 100644
--- a/apps/CtsVerifier/res/layout/ble_scanner_power_level.xml
+++ b/apps/CtsVerifier/res/layout/ble_scanner_power_level.xml
@@ -19,154 +19,139 @@
         android:orientation="vertical"
         android:padding="10dip"
         >
-    <TextView android:text="@string/ble_scanner_power_level_instruction"
+    <ScrollView xmlns:android="http://schemas.android.com/apk/res/android"
             android:layout_width="wrap_content"
-            android:layout_height="wrap_content"
-            android:id="@+id/ble_scanner_power_level_instruction"
-    />
-    <LinearLayout android:orientation="vertical"
-            android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_below="@+id/ble_scanner_power_level_instruction"
-            android:layout_centerInParent="true"
-            android:padding="10dp"
-            >
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                android:layout_centerInParent="true"
-                >
-            <TextView android:text="@string/ble_ultra_low"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_ultra_low_mac"
-                  android:layout_width="200dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_ultra_low_rssi"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
+            android:layout_height="wrap_content">
+        <LinearLayout android:orientation="vertical"
                 android:layout_width="match_parent"
                 android:layout_height="wrap_content">
-            <TextView android:layout_width="100dp"
-                  android:layout_height="wrap_content"/>
-            <TextView android:id="@+id/ble_ultra_low_count"
-                    android:layout_width="100dp"
+            <TextView android:text="@string/ble_scanner_power_level_instruction"
+                    android:layout_width="wrap_content"
                     android:layout_height="wrap_content"
+                    android:id="@+id/ble_scanner_power_level_instruction"
             />
-            <TextView android:id="@+id/ble_ultra_low_set_power"
-                    android:layout_width="100dp"
+            <HorizontalScrollView
+                    android:layout_width="wrap_content"
+                    android:layout_height="wrap_content">
+                <LinearLayout android:orientation="vertical"
+                        android:layout_width="match_parent"
+                        android:layout_height="wrap_content"
+                        android:layout_below="@+id/ble_scanner_power_level_instruction"
+                        android:layout_centerInParent="true"
+                        android:padding="10dp">
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content"
+                            android:layout_centerInParent="true">
+                        <TextView android:text="@string/ble_ultra_low"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_ultra_low_mac"
+                                android:layout_width="200dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_ultra_low_rssi"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content">
+                        <TextView android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_ultra_low_count"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_ultra_low_set_power"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content"
+                            android:layout_centerInParent="true">
+                        <TextView android:text="@string/ble_low"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_low_mac"
+                                android:layout_width="200dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_low_rssi"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content">
+                        <TextView android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_low_count"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_low_set_power"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content"
+                            android:layout_centerInParent="true">
+                        <TextView android:text="@string/ble_medium"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_medium_mac"
+                                android:layout_width="200dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_medium_rssi"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content">
+                        <TextView android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_medium_count"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_medium_set_power"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content"
+                            android:layout_centerInParent="true">
+                        <TextView android:text="@string/ble_high"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_high_mac"
+                                android:layout_width="200dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_high_rssi"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                    <LinearLayout android:orientation="horizontal"
+                            android:layout_width="match_parent"
+                            android:layout_height="wrap_content">
+                        <TextView android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_high_count"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                        <TextView android:id="@+id/ble_high_set_power"
+                                android:layout_width="100dp"
+                                android:layout_height="wrap_content"/>
+                    </LinearLayout>
+                </LinearLayout>
+            </HorizontalScrollView>
+            <TextView android:id="@+id/ble_timer"
+                    android:layout_width="fill_parent"
+                    android:layout_height="wrap_content" />
+            <include android:layout_width="match_parent"
                     android:layout_height="wrap_content"
-            />
+                    android:layout_alignParentBottom="true"
+                    layout="@layout/pass_fail_buttons"/>
         </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                android:layout_centerInParent="true"
-                >
-            <TextView android:text="@string/ble_low"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_low_mac"
-                  android:layout_width="200dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_low_rssi"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content">
-            <TextView android:layout_width="100dp"
-                  android:layout_height="wrap_content"/>
-            <TextView android:id="@+id/ble_low_count"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_low_set_power"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                android:layout_centerInParent="true"
-                >
-            <TextView android:text="@string/ble_medium"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_medium_mac"
-                  android:layout_width="200dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_medium_rssi"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content">
-            <TextView android:layout_width="100dp"
-                  android:layout_height="wrap_content"/>
-            <TextView android:id="@+id/ble_medium_count"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_medium_set_power"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content"
-                android:layout_centerInParent="true"
-                >
-            <TextView android:text="@string/ble_high"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_high_mac"
-                  android:layout_width="200dp"
-                  android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_high_rssi"
-                  android:layout_width="100dp"
-                  android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <LinearLayout android:orientation="horizontal"
-                android:layout_width="match_parent"
-                android:layout_height="wrap_content">
-            <TextView android:layout_width="100dp"
-                  android:layout_height="wrap_content"/>
-            <TextView android:id="@+id/ble_high_count"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-            <TextView android:id="@+id/ble_high_set_power"
-                    android:layout_width="100dp"
-                    android:layout_height="wrap_content"
-            />
-        </LinearLayout>
-        <TextView android:id="@+id/ble_timer"
-                android:layout_width="fill_parent"
-                android:layout_height="wrap_content" />
-    </LinearLayout>
-
-    <include android:layout_width="match_parent"
-            android:layout_height="wrap_content"
-            android:layout_alignParentBottom="true"
-            layout="@layout/pass_fail_buttons"
-            />
+    </ScrollView>
 </RelativeLayout>
diff --git a/apps/CtsVerifier/res/layout/bt_device_picker.xml b/apps/CtsVerifier/res/layout/bt_device_picker.xml
index ecca0e5..48a4b43 100644
--- a/apps/CtsVerifier/res/layout/bt_device_picker.xml
+++ b/apps/CtsVerifier/res/layout/bt_device_picker.xml
@@ -19,6 +19,13 @@
         android:orientation="vertical"
         >
 
+    <ProgressBar android:id="@+id/bt_progress_bar"
+            android:indeterminate="true"
+            android:layout_height="4dp"
+            android:layout_width="match_parent"
+            style="@android:style/Widget.DeviceDefault.ProgressBar.Horizontal"
+            />
+
     <TextView android:layout_width="match_parent"
             android:layout_height="wrap_content"
             android:text="@string/bt_paired_devices"
diff --git a/apps/CtsVerifier/res/layout/bt_messages.xml b/apps/CtsVerifier/res/layout/bt_messages.xml
index cb46811..1504431 100644
--- a/apps/CtsVerifier/res/layout/bt_messages.xml
+++ b/apps/CtsVerifier/res/layout/bt_messages.xml
@@ -18,6 +18,14 @@
         android:layout_width="match_parent"
         android:layout_height="match_parent"
         >
+
+    <ProgressBar android:id="@+id/bt_progress_bar"
+        android:indeterminate="true"
+        android:layout_height="4dp"
+        android:layout_width="match_parent"
+        style="@android:style/Widget.DeviceDefault.ProgressBar.Horizontal"
+        />
+
     <TextView android:layout_width="match_parent"
             android:layout_height="wrap_content"
             android:text="@string/bt_sent_messages"
diff --git a/apps/CtsVerifier/res/layout/its_main.xml b/apps/CtsVerifier/res/layout/its_main.xml
new file mode 100644
index 0000000..2f5eade
--- /dev/null
+++ b/apps/CtsVerifier/res/layout/its_main.xml
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2014 The Android Open Source Project
+
+     Licensed under the Apache License, Version 2.0 (the "License");
+     you may not use this file except in compliance with the License.
+     You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+     Unless required by applicable law or agreed to in writing, software
+     distributed under the License is distributed on an "AS IS" BASIS,
+     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+     See the License for the specific language governing permissions and
+     limitations under the License.
+-->
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
+              android:orientation="vertical"
+              android:layout_width="match_parent"
+              android:layout_height="match_parent"
+    >
+
+    <include layout="@layout/pass_fail_buttons" />
+
+</LinearLayout>
diff --git a/apps/CtsVerifier/res/layout/pass_fail_buttons.xml b/apps/CtsVerifier/res/layout/pass_fail_buttons.xml
index 5eec539..b269dcd 100644
--- a/apps/CtsVerifier/res/layout/pass_fail_buttons.xml
+++ b/apps/CtsVerifier/res/layout/pass_fail_buttons.xml
@@ -13,28 +13,31 @@
      See the License for the specific language governing permissions and
      limitations under the License.
 -->
-<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android" 
-        android:orientation="horizontal"
+<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
         android:layout_width="match_parent"
-        android:layout_height="wrap_content">
+        android:layout_height="wrap_content"
+        android:orientation="horizontal">
 
     <ImageButton android:id="@+id/pass_button"
             android:layout_width="wrap_content"
             android:layout_height="wrap_content"
-            android:layout_weight="1"            
+            android:layout_weight="1"
+            android:contentDescription="@string/pass_button_text"
             android:src="@drawable/fs_good"/>
-            
+
     <ImageButton android:id="@+id/info_button"
             android:layout_width="wrap_content"
             android:layout_height="wrap_content"
             android:layout_weight="1"
+            android:contentDescription="@string/info_button_text"
             android:src="@drawable/fs_indeterminate"
             android:visibility="gone"/>
 
     <ImageButton android:id="@+id/fail_button"
             android:layout_width="wrap_content"
             android:layout_height="wrap_content"
-            android:layout_weight="1"            
+            android:layout_weight="1"
+            android:contentDescription="@string/fail_button_text"
             android:src="@drawable/fs_error"/>
-            
+
 </LinearLayout>
diff --git a/apps/CtsVerifier/res/values/strings.xml b/apps/CtsVerifier/res/values/strings.xml
index cb35c3d..014191b 100644
--- a/apps/CtsVerifier/res/values/strings.xml
+++ b/apps/CtsVerifier/res/values/strings.xml
@@ -214,6 +214,7 @@
     <string name="ble_waiting_notification">Waiting on notification</string>
     <string name="ble_read_rssi">Read RSSI</string>
     <string name="ble_disconnect">Disconnect</string>
+    <string name="ble_test_text">TEST</string>
 
     <!-- BLE server side strings -->
     <string name="ble_server_service_name">Bluetooth LE GATT Server Handler Service</string>
@@ -263,6 +264,8 @@
     <string name="ble_scanner_scan_filter_instruction">Scan filter is to scan data with service UUID = 0x6666 only. If you scan without scan filter, data with service UUID = 0x5555 and 0x6666 will show up on screen.\nFor monsoon test:\n\tClick scan with filter, lock the screen, connect to monsoon. It will not wake up when advertiser is advertising unscannable data packets, but will show a peak in power usage when advertiser is advertising scannable data.\nFor logcat test:\n\tClick scan with filter, logcat the scanner. No data will be received by GattService when advertiser is advertising unscannable data.</string>
     <string name="ble_scan_with_filter">Scan with filter</string>
     <string name="ble_scan_without_filter">Scan without filter</string>
+    <string name="ble_scan_start">Start scan</string>
+    <string name="ble_scan_stop">Stop scan</string>
 
     <!-- Strings for FeatureSummaryActivity -->
     <string name="feature_summary">Hardware/Software Feature Summary</string>
@@ -766,6 +769,30 @@
     <string name="usb_test_passed">Received all expected messages. Pass button enabled!</string>
     <string name="usb_file_descriptor_error">Could not open file descriptor for USB accessory... try reconnecting and restarting the accessory?</string>
 
+    <!-- Strings for the Camera ITS test activity -->
+    <string name="camera_its_test">Camera ITS Test</string>
+    <string name="camera_its_test_info">
+        1. Connect your Android device via a USB cable to a computer with adb installed.
+        \n\n2. Set up the CameraITS test environment by following the setup instructions in the
+        README file found in the CameraITS directory included in the CTS Verifier bundle
+        (cd CameraITS; source build/envsetup.sh;).
+        \n\n3. Set up the test scene described in the CameraITS README file, and aim the camera
+        at it.
+        \n\n4. Run the full ITS test suite on all possible camera IDs
+        (cd CameraITS; python tools/run_all_tests.py camera=[cameraId]).  Once all
+        of the tests have run, the \'PASS\' button will be enabled if every test has
+        succeeded.  Please note that these tests can take 20+ minutes to run.
+    </string>
+    <string name="no_camera_manager">
+        No camera manager exists!  This test device is in a bad state.
+    </string>
+    <string name="all_legacy_devices">
+        All cameras on this device are LEGACY mode only; ITS tests apply only to LIMITED
+        or better devices.  \'PASS\' button enabled.
+    </string>
+    <string name="its_test_passed">All Camera ITS tests passed.  Pass button enabled!</string>
+    <string name="its_test_failed">Some Camera ITS tests failed.</string>
+
     <!-- Strings for StreamingVideoActivity -->
     <string name="streaming_video">Streaming Video Quality Verifier</string>
     <string name="streaming_video_info">This is a test for assessing the quality of streaming videos.  Play each stream and verify that the video is smooth and in sync with the audio, and that there are no quality problems.</string>
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/PassFailButtons.java b/apps/CtsVerifier/src/com/android/cts/verifier/PassFailButtons.java
index 444a250..ab119bd 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/PassFailButtons.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/PassFailButtons.java
@@ -32,6 +32,7 @@
 import android.view.View;
 import android.view.View.OnClickListener;
 import android.widget.ImageButton;
+import android.widget.Toast;
 
 /**
  * {@link Activity}s to handle clicks to the pass and fail buttons of the pass fail buttons layout.
@@ -242,8 +243,25 @@
             }
         };
 
-        activity.findViewById(R.id.pass_button).setOnClickListener(clickListener);
-        activity.findViewById(R.id.fail_button).setOnClickListener(clickListener);
+        View passButton = activity.findViewById(R.id.pass_button);
+        passButton.setOnClickListener(clickListener);
+        passButton.setOnLongClickListener(new View.OnLongClickListener() {
+            @Override
+            public boolean onLongClick(View view) {
+                Toast.makeText(activity, R.string.pass_button_text, Toast.LENGTH_SHORT).show();
+                return true;
+            }
+        });
+
+        View failButton = activity.findViewById(R.id.fail_button);
+        failButton.setOnClickListener(clickListener);
+        failButton.setOnLongClickListener(new View.OnLongClickListener() {
+            @Override
+            public boolean onLongClick(View view) {
+                Toast.makeText(activity, R.string.fail_button_text, Toast.LENGTH_SHORT).show();
+                return true;
+            }
+        });
     }
 
     private static void setInfo(final android.app.Activity activity, final int titleId,
@@ -257,6 +275,13 @@
                 showInfoDialog(activity, titleId, messageId, viewId);
             }
         });
+        infoButton.setOnLongClickListener(new View.OnLongClickListener() {
+            @Override
+            public boolean onLongClick(View view) {
+                Toast.makeText(activity, R.string.info_button_text, Toast.LENGTH_SHORT).show();
+                return true;
+            }
+        });
 
         // Show the info dialog if the user has never seen it before.
         if (!hasSeenInfoDialog(activity)) {
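
The three listeners above repeat the same long-press pattern. A minimal sketch of how it could be factored into a shared helper (the helper name is hypothetical, not part of this change):

    // Hypothetical helper: long-pressing an icon-only ImageButton toasts its
    // label, matching the contentDescription added in pass_fail_buttons.xml.
    private static void setToastOnLongClick(
            final android.app.Activity activity, View button, final int labelId) {
        button.setOnLongClickListener(new View.OnLongClickListener() {
            @Override
            public boolean onLongClick(View view) {
                Toast.makeText(activity, labelId, Toast.LENGTH_SHORT).show();
                return true; // consume the event so no other long-click handler runs
            }
        });
    }
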
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientConnectActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientConnectActivity.java
index fb351b1..4e1c268 100755
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientConnectActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientConnectActivity.java
@@ -45,22 +45,25 @@
                          R.string.ble_client_send_connect_info, -1);
         getPassButton().setEnabled(false);
 
-        mEditText = (EditText) findViewById(R.id.ble_address);
-
-        ((Button) findViewById(R.id.ble_connect)).setOnClickListener(new OnClickListener() {
+        ((Button) findViewById(R.id.ble_scan_start)).setOnClickListener(new OnClickListener() {
             @Override
             public void onClick(View v) {
-                String address = mEditText.getText().toString();
-                if (!BluetoothAdapter.checkBluetoothAddress(address)) {
-                    showMessage("Invalid bluetooth address.");
-                } else {
-                    Intent intent = new Intent(BleClientConnectActivity.this,
-                                               BleClientService.class);
-                    intent.putExtra(BleClientService.EXTRA_COMMAND,
-                                    BleClientService.COMMAND_CONNECT);
-                    intent.putExtra(BluetoothDevice.EXTRA_DEVICE, address);
-                    startService(intent);
-                }
+                Intent intent = new Intent(BleClientConnectActivity.this,
+                        BleClientService.class);
+                intent.putExtra(BleClientService.EXTRA_COMMAND,
+                        BleClientService.COMMAND_SCAN_START);
+                startService(intent);
+            }
+        });
+
+        ((Button) findViewById(R.id.ble_scan_stop)).setOnClickListener(new OnClickListener() {
+            @Override
+            public void onClick(View v) {
+                Intent intent = new Intent(BleClientConnectActivity.this,
+                        BleClientService.class);
+                intent.putExtra(BleClientService.EXTRA_COMMAND,
+                        BleClientService.COMMAND_SCAN_STOP);
+                startService(intent);
             }
         });
 
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientService.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientService.java
index 556ad06..6765362 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientService.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleClientService.java
@@ -16,6 +16,7 @@
 
 package com.android.cts.verifier.bluetooth;
 
+import java.util.Arrays;
 import java.util.UUID;
 import java.util.List;
 
@@ -29,10 +30,16 @@
 import android.bluetooth.BluetoothGattService;
 import android.bluetooth.BluetoothManager;
 import android.bluetooth.BluetoothProfile;
+import android.bluetooth.le.BluetoothLeScanner;
+import android.bluetooth.le.ScanCallback;
+import android.bluetooth.le.ScanFilter;
+import android.bluetooth.le.ScanResult;
+import android.bluetooth.le.ScanSettings;
 import android.content.Context;
 import android.content.Intent;
 import android.os.Handler;
 import android.os.IBinder;
+import android.os.ParcelUuid;
 import android.util.Log;
 import android.widget.Toast;
 
@@ -53,6 +60,8 @@
     public static final int COMMAND_BEGIN_WRITE = 9;
     public static final int COMMAND_EXECUTE_WRITE = 10;
     public static final int COMMAND_ABORT_RELIABLE = 11;
+    public static final int COMMAND_SCAN_START = 12;
+    public static final int COMMAND_SCAN_STOP = 13;
 
     public static final String BLE_BLUETOOTH_CONNECTED =
             "com.android.cts.verifier.bluetooth.BLE_BLUETOOTH_CONNECTED";
@@ -102,6 +111,8 @@
     private BluetoothDevice mDevice;
     private BluetoothGatt mBluetoothGatt;
     private Handler mHandler;
+    private Context mContext;
+    private BluetoothLeScanner mScanner;
 
     @Override
     public void onCreate() {
@@ -110,6 +121,8 @@
         mBluetoothManager = (BluetoothManager) getSystemService(Context.BLUETOOTH_SERVICE);
         mBluetoothAdapter = mBluetoothManager.getAdapter();
         mHandler = new Handler();
+        mContext = this;
+        mScanner = mBluetoothAdapter.getBluetoothLeScanner();
     }
 
     @Override
@@ -128,6 +141,7 @@
         super.onDestroy();
         mBluetoothGatt.disconnect();
         mBluetoothGatt.close();
+        stopScan();
     }
 
     private void handleIntent(Intent intent) {
@@ -177,6 +191,12 @@
             case COMMAND_ABORT_RELIABLE:
                 if (mBluetoothGatt != null) mBluetoothGatt.abortReliableWrite(mDevice);
                 break;
+            case COMMAND_SCAN_START:
+                startScan();
+                break;
+            case COMMAND_SCAN_STOP:
+                stopScan();
+                break;
             default:
                 showMessage("Unrecognized command: " + command);
                 break;
@@ -343,8 +363,8 @@
         @Override
         public void onCharacteristicWrite(BluetoothGatt gatt,
                                           BluetoothGattCharacteristic characteristic, int status) {
-            if (DEBUG) Log.d(TAG, "onCharacteristicWrite: characteristic.val=" + characteristic.getStringValue(0)
-                                  + " status=" + status);
+            if (DEBUG) Log.d(TAG, "onCharacteristicWrite: characteristic.val="
+                    + characteristic.getStringValue(0) + " status=" + status);
             BluetoothGattCharacteristic mCharacteristic = getCharacteristic(CHARACTERISTIC_UUID);
             if ((status == BluetoothGatt.GATT_SUCCESS) &&
                 (characteristic.getStringValue(0).equals(mCharacteristic.getStringValue(0)))) {
@@ -387,4 +407,25 @@
             if (status == BluetoothGatt.GATT_SUCCESS) notifyReadRemoteRssi(rssi);
         }
     };
-}
\ No newline at end of file
+
+    private final ScanCallback mScanCallback = new ScanCallback() {
+        @Override
+        public void onScanResult(int callbackType, ScanResult result) {
+            mBluetoothGatt = result.getDevice().connectGatt(mContext, false, mGattCallbacks);
+        }
+    };
+
+    private void startScan() {
+        if (DEBUG) Log.d(TAG, "startScan");
+        List<ScanFilter> filter = Arrays.asList(new ScanFilter.Builder().setServiceUuid(
+                new ParcelUuid(BleServerService.ADV_SERVICE_UUID)).build());
+        ScanSettings setting = new ScanSettings.Builder()
+                .setScanMode(ScanSettings.SCAN_MODE_LOW_POWER).build();
+        mScanner.startScan(filters, setting, mScanCallback);
+    }
+
+    private void stopScan() {
+        if (DEBUG) Log.d(TAG, "stopScan");
+        mScanner.stopScan(mScanCallback);
+    }
+}
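
For reference, the filtered-scan calls used in startScan() above follow the standard android.bluetooth.le pattern; a standalone sketch (the UUID below is a placeholder, not the one used by this test):

    // Minimal sketch, assuming Bluetooth is enabled and the caller holds the
    // needed permissions; 'adapter' is an initialized BluetoothAdapter and
    // 'scanCallback' a ScanCallback kept for the matching stopScan() call.
    private void startFilteredScan(BluetoothAdapter adapter, ScanCallback scanCallback) {
        BluetoothLeScanner scanner = adapter.getBluetoothLeScanner();
        List<ScanFilter> filters = Arrays.asList(new ScanFilter.Builder()
                .setServiceUuid(ParcelUuid.fromString("0000aaaa-0000-1000-8000-00805f9b34fb"))
                .build());
        ScanSettings settings = new ScanSettings.Builder()
                .setScanMode(ScanSettings.SCAN_MODE_LOW_POWER) // lowest duty cycle
                .build();
        scanner.startScan(filters, settings, scanCallback);
        // Stopping later must pass the same callback instance:
        // scanner.stopScan(scanCallback);
    }
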
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReadWriteActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReadWriteActivity.java
index 22233ef..8041ce0 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReadWriteActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReadWriteActivity.java
@@ -124,4 +124,4 @@
             }
         }
     };
-}
\ No newline at end of file
+}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReliableWriteActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReliableWriteActivity.java
index c7460b5..9b65bb4 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReliableWriteActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleReliableWriteActivity.java
@@ -114,4 +114,4 @@
             }
         }
     };
-}
\ No newline at end of file
+}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleServerService.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleServerService.java
index 91b3a6c..8718f57 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleServerService.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/BleServerService.java
@@ -33,10 +33,15 @@
 import android.bluetooth.BluetoothGattService;
 import android.bluetooth.BluetoothManager;
 import android.bluetooth.BluetoothProfile;
+import android.bluetooth.le.AdvertiseCallback;
+import android.bluetooth.le.AdvertiseData;
+import android.bluetooth.le.AdvertiseSettings;
+import android.bluetooth.le.BluetoothLeAdvertiser;
 import android.content.Context;
 import android.content.Intent;
 import android.os.Handler;
 import android.os.IBinder;
+import android.os.ParcelUuid;
 import android.util.Log;
 import android.widget.Toast;
 
@@ -76,6 +81,8 @@
             UUID.fromString("00009997-0000-1000-8000-00805f9b34fb");
     private static final UUID DESCRIPTOR_UUID =
             UUID.fromString("00009996-0000-1000-8000-00805f9b34fb");
+    public static final UUID ADV_SERVICE_UUID =
+            UUID.fromString("00003333-0000-1000-8000-00805f9b34fb");
 
     private BluetoothManager mBluetoothManager;
     private BluetoothGattServer mGattServer;
@@ -84,12 +91,14 @@
     private Timer mNotificationTimer;
     private Handler mHandler;
     private String mReliableWriteValue;
+    private BluetoothLeAdvertiser mAdvertiser;
 
     @Override
     public void onCreate() {
         super.onCreate();
 
         mBluetoothManager = (BluetoothManager) getSystemService(Context.BLUETOOTH_SERVICE);
+        mAdvertiser = mBluetoothManager.getAdapter().getBluetoothLeAdvertiser();
         mGattServer = mBluetoothManager.openGattServer(this, mCallbacks);
         mService = createService();
         if (mGattServer != null) {
@@ -106,6 +115,7 @@
 
     @Override
     public int onStartCommand(Intent intent, int flags, int startId) {
+        startAdvertise();
         return START_NOT_STICKY;
     }
 
@@ -117,6 +127,7 @@
     @Override
     public void onDestroy() {
         super.onDestroy();
+        stopAdvertise();
         if (mGattServer == null) {
            return;
         }
@@ -366,5 +377,26 @@
             }
         }
     };
+
+    private void startAdvertise() {
+        if (DEBUG) Log.d(TAG, "startAdvertise");
+        AdvertiseData data = new AdvertiseData.Builder()
+            .addServiceData(new ParcelUuid(ADV_SERVICE_UUID), new byte[] {1, 2, 3})
+            .addServiceUuid(new ParcelUuid(ADV_SERVICE_UUID))
+            .build();
+        AdvertiseSettings setting = new AdvertiseSettings.Builder()
+            .setAdvertiseMode(AdvertiseSettings.ADVERTISE_MODE_LOW_LATENCY)
+            .setTxPowerLevel(AdvertiseSettings.ADVERTISE_TX_POWER_MEDIUM)
+            .setConnectable(true)
+            .build();
+        mAdvertiser.startAdvertising(setting, data, mAdvertiseCallback);
+    }
+
+    private void stopAdvertise() {
+        if (DEBUG) Log.d(TAG, "stopAdvertise");
+        mAdvertiser.stopAdvertising(mAdvertiseCallback);
+    }
+
+    private final AdvertiseCallback mAdvertiseCallback = new AdvertiseCallback() {};
 }
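
The empty AdvertiseCallback above never reports errors; a sketch of the same advertise/stop lifecycle with failure reporting (names here are illustrative, not part of this change):

    // Minimal sketch: overriding onStartFailure surfaces errors such as
    // ADVERTISE_FAILED_DATA_TOO_LARGE that an empty callback silently drops.
    private final AdvertiseCallback mCallback = new AdvertiseCallback() {
        @Override
        public void onStartFailure(int errorCode) {
            Log.e(TAG, "Advertise failed: " + errorCode);
        }
    };

    private void startAdvertise(BluetoothLeAdvertiser advertiser, ParcelUuid uuid) {
        AdvertiseData data = new AdvertiseData.Builder().addServiceUuid(uuid).build();
        AdvertiseSettings settings = new AdvertiseSettings.Builder()
                .setAdvertiseMode(AdvertiseSettings.ADVERTISE_MODE_LOW_LATENCY)
                .setConnectable(true)
                .build();
        advertiser.startAdvertising(settings, data, mCallback);
    }

    private void stopAdvertise(BluetoothLeAdvertiser advertiser) {
        advertiser.stopAdvertising(mCallback); // must be the instance passed at start
    }
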
 
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/DevicePickerActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/DevicePickerActivity.java
index be71f66..a5dea4b 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/DevicePickerActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/DevicePickerActivity.java
@@ -29,12 +29,12 @@
 import android.content.IntentFilter;
 import android.os.Bundle;
 import android.view.View;
-import android.view.Window;
 import android.view.View.OnClickListener;
 import android.widget.AdapterView;
 import android.widget.ArrayAdapter;
 import android.widget.Button;
 import android.widget.ListView;
+import android.widget.ProgressBar;
 import android.widget.TextView;
 import android.widget.AdapterView.OnItemClickListener;
 
@@ -61,12 +61,15 @@
 
     private TextView mEmptyNewView;
 
+    private ProgressBar mProgressBar;
+
     @Override
     protected void onCreate(Bundle savedInstanceState) {
         super.onCreate(savedInstanceState);
-        requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
         setContentView(R.layout.bt_device_picker);
 
+        mProgressBar = (ProgressBar) findViewById(R.id.bt_progress_bar);
+
         mPairedDevicesAdapter = new ArrayAdapter<Device>(this, R.layout.bt_device_name);
         ListView pairedDevicesListView = (ListView) findViewById(R.id.bt_paired_devices);
         pairedDevicesListView.setAdapter(mPairedDevicesAdapter);
@@ -182,10 +185,10 @@
         public void onReceive(Context context, Intent intent) {
             if (BluetoothAdapter.ACTION_DISCOVERY_STARTED.equals(intent.getAction())) {
                 mEmptyNewView.setText(R.string.bt_scanning);
-                setProgressBarIndeterminateVisibility(true);
+                mProgressBar.setVisibility(View.VISIBLE);
             } else if (BluetoothAdapter.ACTION_DISCOVERY_FINISHED.equals(intent.getAction())) {
                 mEmptyNewView.setText(R.string.bt_no_devices);
-                setProgressBarIndeterminateVisibility(false);
+                mProgressBar.setVisibility(View.INVISIBLE);
             } else if (BluetoothDevice.ACTION_FOUND.equals(intent.getAction())) {
                 BluetoothDevice device = intent.getParcelableExtra(BluetoothDevice.EXTRA_DEVICE);
                 if (device.getBondState() != BluetoothDevice.BOND_BONDED) {
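
Window.FEATURE_INDETERMINATE_PROGRESS is no longer supported on newer themes, which is why the indicator moves into the layout here; the replacement pattern in a nutshell (a sketch, assuming the bt_progress_bar id added above):

    // Toggle the in-layout indeterminate bar instead of the window feature.
    // INVISIBLE (not GONE) keeps the 4dp strip reserved, so the content below
    // does not shift when discovery starts or stops.
    private void setDiscovering(boolean discovering) {
        ProgressBar bar = (ProgressBar) findViewById(R.id.bt_progress_bar);
        bar.setVisibility(discovering ? View.VISIBLE : View.INVISIBLE);
    }
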
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/MessageTestActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/MessageTestActivity.java
index 2c6324b..4e0b78f 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/MessageTestActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/bluetooth/MessageTestActivity.java
@@ -33,10 +33,10 @@
 import android.os.Message;
 import android.view.View;
 import android.view.View.OnClickListener;
-import android.view.Window;
 import android.widget.ArrayAdapter;
 import android.widget.Button;
 import android.widget.ListView;
+import android.widget.ProgressBar;
 import android.widget.TextView;
 import android.widget.Toast;
 
@@ -71,6 +71,8 @@
 
     private AlertDialog mInstructionsDialog;
 
+    private ProgressBar mProgressBar;
+
     private String mDeviceAddress;
 
     private final boolean mSecure;
@@ -89,10 +91,11 @@
     @Override
     protected void onCreate(Bundle savedInstanceState) {
         super.onCreate(savedInstanceState);
-        requestWindowFeature(Window.FEATURE_INDETERMINATE_PROGRESS);
         setContentView(R.layout.bt_messages);
         setPassFailButtonClickListeners();
 
+        mProgressBar = (ProgressBar) findViewById(R.id.bt_progress_bar);
+
         if (mServer) {
             setTitle(mSecure ? R.string.bt_secure_server : R.string.bt_insecure_server);
         } else {
@@ -217,18 +220,18 @@
         switch (state) {
             case BluetoothChatService.STATE_LISTEN:
                 setEmptyViewText(R.string.bt_waiting);
-                setProgressBarIndeterminateVisibility(true);
+                mProgressBar.setVisibility(View.VISIBLE);
                 showInstructionsDialog();
                 break;
 
             case BluetoothChatService.STATE_CONNECTING:
                 setEmptyViewText(R.string.bt_connecting);
-                setProgressBarIndeterminateVisibility(true);
+                mProgressBar.setVisibility(View.VISIBLE);
                 break;
 
             case BluetoothChatService.STATE_CONNECTED:
                 setEmptyViewText(R.string.bt_no_messages);
-                setProgressBarIndeterminateVisibility(false);
+                mProgressBar.setVisibility(View.INVISIBLE);
 
                 hideInstructionsDialog();
                 sendInitialMessageFromClient();
@@ -236,7 +239,7 @@
 
             case BluetoothChatService.STATE_NONE:
                 setEmptyViewText(R.string.bt_no_messages);
-                setProgressBarIndeterminateVisibility(false);
+                mProgressBar.setVisibility(View.INVISIBLE);
                 break;
         }
     }
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsException.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsException.java
new file mode 100644
index 0000000..d390bb1
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsException.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+/**
+ * All exceptions are converted to ItsExceptions.
+ */
+class ItsException extends Exception {
+    public ItsException(Throwable cause) {
+        super(cause);
+    }
+
+    public ItsException(String message, Throwable cause) {
+        super(message, cause);
+    }
+
+    public ItsException(String message) {
+        super(message);
+    }
+}
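
A typical use of this wrapper, as implied by the class comment (the method below is illustrative, not part of the patch):

    // Checked exceptions from lower layers are rethrown as ItsException,
    // so ITS entry points only need to handle a single exception type.
    private static JSONObject serializeWidth(Size size) throws ItsException {
        try {
            JSONObject obj = new JSONObject();
            obj.put("width", size.getWidth());
            return obj;
        } catch (org.json.JSONException e) {
            throw new ItsException("Failed to serialize size", e);
        }
    }
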
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsSerializer.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsSerializer.java
new file mode 100644
index 0000000..cf8365a
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsSerializer.java
@@ -0,0 +1,714 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+import android.graphics.Point;
+import android.graphics.Rect;
+import android.hardware.camera2.CameraCharacteristics;
+import android.hardware.camera2.CameraDevice;
+import android.hardware.camera2.CameraMetadata;
+import android.hardware.camera2.CaptureResult;
+import android.hardware.camera2.CaptureRequest;
+import android.hardware.camera2.TotalCaptureResult;
+import android.hardware.camera2.params.BlackLevelPattern;
+import android.hardware.camera2.params.ColorSpaceTransform;
+import android.hardware.camera2.params.Face;
+import android.hardware.camera2.params.LensShadingMap;
+import android.hardware.camera2.params.MeteringRectangle;
+import android.hardware.camera2.params.RggbChannelVector;
+import android.hardware.camera2.params.StreamConfigurationMap;
+import android.hardware.camera2.params.TonemapCurve;
+import android.location.Location;
+import android.util.Log;
+import android.util.Pair;
+import android.util.Rational;
+import android.util.Size;
+import android.util.SizeF;
+import android.util.Range;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.lang.reflect.GenericArrayType;
+import java.lang.reflect.Modifier;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+
+/**
+ * Class to deal with serializing and deserializing between JSON and Camera2 objects.
+ */
+public class ItsSerializer {
+    public static final String TAG = ItsSerializer.class.getSimpleName();
+
+    private static class MetadataEntry {
+        public MetadataEntry(String k, Object v) {
+            key = k;
+            value = v;
+        }
+        public String key;
+        public Object value;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeRational(Rational rat) throws org.json.JSONException {
+        JSONObject ratObj = new JSONObject();
+        ratObj.put("numerator", rat.getNumerator());
+        ratObj.put("denominator", rat.getDenominator());
+        return ratObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeSize(Size size) throws org.json.JSONException {
+        JSONObject sizeObj = new JSONObject();
+        sizeObj.put("width", size.getWidth());
+        sizeObj.put("height", size.getHeight());
+        return sizeObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeSizeF(SizeF size) throws org.json.JSONException {
+        JSONObject sizeObj = new JSONObject();
+        sizeObj.put("width", size.getWidth());
+        sizeObj.put("height", size.getHeight());
+        return sizeObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeRect(Rect rect) throws org.json.JSONException {
+        JSONObject rectObj = new JSONObject();
+        rectObj.put("left", rect.left);
+        rectObj.put("right", rect.right);
+        rectObj.put("top", rect.top);
+        rectObj.put("bottom", rect.bottom);
+        return rectObj;
+    }
+
+    private static Object serializePoint(Point point) throws org.json.JSONException {
+        JSONObject pointObj = new JSONObject();
+        pointObj.put("x", point.x);
+        pointObj.put("y", point.y);
+        return pointObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeFace(Face face)
+            throws org.json.JSONException {
+        JSONObject faceObj = new JSONObject();
+        faceObj.put("bounds", serializeRect(face.getBounds()));
+        faceObj.put("score", face.getScore());
+        faceObj.put("id", face.getId());
+        faceObj.put("leftEye", serializePoint(face.getLeftEyePosition()));
+        faceObj.put("rightEye", serializePoint(face.getRightEyePosition()));
+        faceObj.put("mouth", serializePoint(face.getMouthPosition()));
+        return faceObj;
+    }
+
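+    // Serializes the map's output configurations as a JSON array under
+    // "availableStreamConfigurations"; each entry has the shape (illustrative)
+    // {"format":..,"width":..,"height":..,"input":false,"minFrameDuration":..}.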
+    @SuppressWarnings("unchecked")
+    private static Object serializeStreamConfigurationMap(
+            StreamConfigurationMap map)
+            throws org.json.JSONException {
+        // TODO: Serialize the rest of the StreamConfigurationMap fields.
+        JSONObject mapObj = new JSONObject();
+        JSONArray cfgArray = new JSONArray();
+        int fmts[] = map.getOutputFormats();
+        if (fmts != null) {
+            for (int fi = 0; fi < Array.getLength(fmts); fi++) {
+                Size sizes[] = map.getOutputSizes(fmts[fi]);
+                if (sizes != null) {
+                    for (int si = 0; si < Array.getLength(sizes); si++) {
+                        JSONObject obj = new JSONObject();
+                        obj.put("format", fmts[fi]);
+                        obj.put("width", sizes[si].getWidth());
+                        obj.put("height", sizes[si].getHeight());
+                        obj.put("input", false);
+                        obj.put("minFrameDuration",
+                                map.getOutputMinFrameDuration(fmts[fi], sizes[si]));
+                        cfgArray.put(obj);
+                    }
+                }
+            }
+        }
+        mapObj.put("availableStreamConfigurations", cfgArray);
+        return mapObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeMeteringRectangle(MeteringRectangle rect)
+            throws org.json.JSONException {
+        JSONObject rectObj = new JSONObject();
+        rectObj.put("x", rect.getX());
+        rectObj.put("y", rect.getY());
+        rectObj.put("width", rect.getWidth());
+        rectObj.put("height", rect.getHeight());
+        rectObj.put("weight", rect.getMeteringWeight());
+        return rectObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializePair(Pair pair)
+            throws org.json.JSONException {
+        JSONArray pairObj = new JSONArray();
+        pairObj.put(pair.first);
+        pairObj.put(pair.second);
+        return pairObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeRange(Range range)
+            throws org.json.JSONException {
+        JSONArray rangeObj = new JSONArray();
+        rangeObj.put(range.getLower());
+        rangeObj.put(range.getUpper());
+        return rangeObj;
+    }
+
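+    // Flattens the 3x3 transform into a 9-element JSON array in row-major order,
+    // each element a {"numerator":..,"denominator":..} object.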
+    @SuppressWarnings("unchecked")
+    private static Object serializeColorSpaceTransform(ColorSpaceTransform xform)
+            throws org.json.JSONException {
+        JSONArray xformObj = new JSONArray();
+        for (int row = 0; row < 3; row++) {
+            for (int col = 0; col < 3; col++) {
+                xformObj.put(serializeRational(xform.getElement(col,row)));
+            }
+        }
+        return xformObj;
+    }
+
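+    // Serializes the curve as {"red":[...],"green":[...],"blue":[...]}, where each
+    // channel array interleaves the curve's (x, y) control points.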
+    @SuppressWarnings("unchecked")
+    private static Object serializeTonemapCurve(TonemapCurve curve)
+            throws org.json.JSONException {
+        JSONObject curveObj = new JSONObject();
+        String names[] = {"red", "green", "blue"};
+        for (int ch = 0; ch < 3; ch++) {
+            JSONArray curveArr = new JSONArray();
+            int len = curve.getPointCount(ch);
+            for (int i = 0; i < len; i++) {
+                curveArr.put(curve.getPoint(ch,i).x);
+                curveArr.put(curve.getPoint(ch,i).y);
+            }
+            curveObj.put(names[ch], curveArr);
+        }
+        return curveObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeRggbChannelVector(RggbChannelVector vec)
+            throws org.json.JSONException {
+        JSONArray vecObj = new JSONArray();
+        vecObj.put(vec.getRed());
+        vecObj.put(vec.getGreenEven());
+        vecObj.put(vec.getGreenOdd());
+        vecObj.put(vec.getBlue());
+        return vecObj;
+    }
+
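+    // Serializes the 2x2 black level pattern as a 4-element JSON array of offsets,
+    // in the pattern's copyTo order.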
+    @SuppressWarnings("unchecked")
+    private static Object serializeBlackLevelPattern(BlackLevelPattern pat)
+            throws org.json.JSONException {
+        int patVals[] = new int[4];
+        pat.copyTo(patVals, 0);
+        JSONArray patObj = new JSONArray();
+        patObj.put(patVals[0]);
+        patObj.put(patVals[1]);
+        patObj.put(patVals[2]);
+        patObj.put(patVals[3]);
+        return patObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    private static Object serializeLocation(Location loc)
+            throws org.json.JSONException {
+        return loc.toString();
+    }
+
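+    // Flattens the shading map into a single JSON array, iterating rows, then columns,
+    // then the four color channels (in RggbChannelVector channel order).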
+    @SuppressWarnings("unchecked")
+    private static Object serializeLensShadingMap(LensShadingMap map)
+            throws org.json.JSONException {
+        JSONArray mapObj = new JSONArray();
+        for (int row = 0; row < map.getRowCount(); row++) {
+            for (int col = 0; col < map.getColumnCount(); col++) {
+                for (int ch = 0; ch < 4; ch++) {
+                    mapObj.put(map.getGainFactor(ch, col, row));
+                }
+            }
+        }
+        return mapObj;
+    }
+
+    private static String getKeyName(Object keyObj) throws ItsException {
+        if (keyObj.getClass() == CaptureResult.Key.class) {
+            return ((CaptureResult.Key)keyObj).getName();
+        } else if (keyObj.getClass() == CaptureRequest.Key.class) {
+            return ((CaptureRequest.Key)keyObj).getName();
+        } else if (keyObj.getClass() == CameraCharacteristics.Key.class) {
+            return ((CameraCharacteristics.Key)keyObj).getName();
+        }
+        throw new ItsException("Invalid key object");
+    }
+
+    private static Object getKeyValue(CameraMetadata md, Object keyObj) throws ItsException {
+        if (md.getClass() == CaptureResult.class || md.getClass() == TotalCaptureResult.class) {
+            return ((CaptureResult)md).get((CaptureResult.Key)keyObj);
+        } else if (md.getClass() == CaptureRequest.class) {
+            return ((CaptureRequest)md).get((CaptureRequest.Key)keyObj);
+        } else if (md.getClass() == CameraCharacteristics.class) {
+            return ((CameraCharacteristics)md).get((CameraCharacteristics.Key)keyObj);
+        }
+        throw new ItsException("Invalid key object");
+    }
+
+    @SuppressWarnings("unchecked")
+    private static MetadataEntry serializeEntry(Type keyType, Object keyObj, CameraMetadata md)
+            throws ItsException {
+        String keyName = getKeyName(keyObj);
+
+        try {
+            Object keyValue = getKeyValue(md, keyObj);
+            if (keyValue == null) {
+                return new MetadataEntry(keyName, JSONObject.NULL);
+            } else if (keyType == Float.class) {
+                // The JSON serializer doesn't handle floating point NaN or Inf.
+                if (((Float)keyValue).isInfinite() || ((Float)keyValue).isNaN()) {
+                    Logt.w(TAG, "Inf/NaN floating point value serialized: " + keyName);
+                    return null;
+                }
+                return new MetadataEntry(keyName, keyValue);
+            } else if (keyType == Integer.class || keyType == Long.class || keyType == Byte.class ||
+                       keyType == Boolean.class || keyType == String.class) {
+                return new MetadataEntry(keyName, keyValue);
+            } else if (keyType == Rational.class) {
+                return new MetadataEntry(keyName, serializeRational((Rational)keyValue));
+            } else if (keyType == Size.class) {
+                return new MetadataEntry(keyName, serializeSize((Size)keyValue));
+            } else if (keyType == SizeF.class) {
+                return new MetadataEntry(keyName, serializeSizeF((SizeF)keyValue));
+            } else if (keyType == Rect.class) {
+                return new MetadataEntry(keyName, serializeRect((Rect)keyValue));
+            } else if (keyType == Face.class) {
+                return new MetadataEntry(keyName, serializeFace((Face)keyValue));
+            } else if (keyType == StreamConfigurationMap.class) {
+                return new MetadataEntry(keyName,
+                        serializeStreamConfigurationMap((StreamConfigurationMap)keyValue));
+            } else if (keyType instanceof ParameterizedType &&
+                    ((ParameterizedType)keyType).getRawType() == Range.class) {
+                return new MetadataEntry(keyName, serializeRange((Range)keyValue));
+            } else if (keyType == ColorSpaceTransform.class) {
+                return new MetadataEntry(keyName,
+                        serializeColorSpaceTransform((ColorSpaceTransform)keyValue));
+            } else if (keyType == MeteringRectangle.class) {
+                return new MetadataEntry(keyName,
+                        serializeMeteringRectangle((MeteringRectangle)keyValue));
+            } else if (keyType == Location.class) {
+                return new MetadataEntry(keyName,
+                        serializeLocation((Location)keyValue));
+            } else if (keyType == RggbChannelVector.class) {
+                return new MetadataEntry(keyName,
+                        serializeRggbChannelVector((RggbChannelVector)keyValue));
+            } else if (keyType == BlackLevelPattern.class) {
+                return new MetadataEntry(keyName,
+                        serializeBlackLevelPattern((BlackLevelPattern)keyValue));
+            } else if (keyType == TonemapCurve.class) {
+                return new MetadataEntry(keyName,
+                        serializeTonemapCurve((TonemapCurve)keyValue));
+            } else if (keyType == Point.class) {
+                return new MetadataEntry(keyName,
+                        serializePoint((Point)keyValue));
+            } else if (keyType == LensShadingMap.class) {
+                return new MetadataEntry(keyName,
+                        serializeLensShadingMap((LensShadingMap)keyValue));
+            } else {
+                Logt.w(TAG, "Serializing unsupported key type: " + keyType);
+                return null;
+            }
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error for key: " + keyName + ": ", e);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    private static MetadataEntry serializeArrayEntry(Type keyType, Object keyObj, CameraMetadata md)
+            throws ItsException {
+        String keyName = getKeyName(keyObj);
+        try {
+            Object keyValue = getKeyValue(md, keyObj);
+            if (keyValue == null) {
+                return new MetadataEntry(keyName, JSONObject.NULL);
+            }
+            int arrayLen = Array.getLength(keyValue);
+            Type elmtType = ((GenericArrayType)keyType).getGenericComponentType();
+            if (elmtType == int.class  || elmtType == float.class || elmtType == byte.class ||
+                elmtType == long.class || elmtType == double.class || elmtType == boolean.class) {
+                return new MetadataEntry(keyName, new JSONArray(keyValue));
+            } else if (elmtType == Rational.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeRational((Rational)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == Size.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeSize((Size)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == Rect.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeRect((Rect)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == Face.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeFace((Face)Array.get(keyValue, i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == StreamConfigurationMap.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeStreamConfigurationMap(
+                            (StreamConfigurationMap)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType instanceof ParameterizedType &&
+                    ((ParameterizedType)elmtType).getRawType() == Range.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeRange((Range)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType instanceof ParameterizedType &&
+                    ((ParameterizedType)elmtType).getRawType() == Pair.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializePair((Pair)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == MeteringRectangle.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeMeteringRectangle(
+                            (MeteringRectangle)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == Location.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeLocation((Location)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == RggbChannelVector.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeRggbChannelVector(
+                            (RggbChannelVector)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == BlackLevelPattern.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializeBlackLevelPattern(
+                            (BlackLevelPattern)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else if (elmtType == Point.class) {
+                JSONArray jsonArray = new JSONArray();
+                for (int i = 0; i < arrayLen; i++) {
+                    jsonArray.put(serializePoint((Point)Array.get(keyValue,i)));
+                }
+                return new MetadataEntry(keyName, jsonArray);
+            } else {
+                Logt.w(TAG, "Serializing unsupported array type: " + elmtType);
+                return null;
+            }
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error for key: " + keyName + ": ", e);
+        }
+    }
+
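+    // Serializes every public static Key field of the metadata object into one JSON
+    // object, keyed by the full metadata key name (e.g. "android.sensor.sensitivity").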
+    @SuppressWarnings("unchecked")
+    public static JSONObject serialize(CameraMetadata md)
+            throws ItsException {
+        JSONObject jsonObj = new JSONObject();
+        Field[] allFields = md.getClass().getDeclaredFields();
+        if (md.getClass() == TotalCaptureResult.class) {
+            allFields = CaptureResult.class.getDeclaredFields();
+        }
+        for (Field field : allFields) {
+            if (Modifier.isPublic(field.getModifiers()) &&
+                    Modifier.isStatic(field.getModifiers()) &&
+                    (field.getType() == CaptureRequest.Key.class
+                      || field.getType() == CaptureResult.Key.class
+                      || field.getType() == TotalCaptureResult.Key.class
+                      || field.getType() == CameraCharacteristics.Key.class) &&
+                    field.getGenericType() instanceof ParameterizedType) {
+                ParameterizedType paramType = (ParameterizedType)field.getGenericType();
+                Type[] argTypes = paramType.getActualTypeArguments();
+                if (argTypes.length > 0) {
+                    try {
+                        Type keyType = argTypes[0];
+                        Object keyObj = field.get(md);
+                        MetadataEntry entry;
+                        if (keyType instanceof GenericArrayType) {
+                            entry = serializeArrayEntry(keyType, keyObj, md);
+                        } else {
+                            entry = serializeEntry(keyType, keyObj, md);
+                        }
+
+                        // TODO: Figure this weird case out.
+                        // There is a weird case where the entry is non-null but the toString
+                        // of the entry is null, and if this happens, the null-ness spreads like
+                        // a virus and makes the whole JSON object null from the top level down.
+                        // Not sure if it's a bug in the library or I'm just not using it right.
+                        // Workaround by checking for this case explicitly and not adding the
+                        // value to the jsonObj when it is detected.
+                        if (entry != null && entry.key != null && entry.value != null
+                                          && entry.value.toString() == null) {
+                            Logt.w(TAG, "Error encountered serializing value for key: " + entry.key);
+                        } else if (entry != null) {
+                            jsonObj.put(entry.key, entry.value);
+                        } else {
+                            // Ignore.
+                        }
+                    } catch (IllegalAccessException e) {
+                        throw new ItsException(
+                                "Access error for field: " + field + ": ", e);
+                    } catch (org.json.JSONException e) {
+                        throw new ItsException(
+                                "JSON error for field: " + field + ": ", e);
+                    }
+                }
+            }
+        }
+        return jsonObj;
+    }
+
+    @SuppressWarnings("unchecked")
+    public static CaptureRequest.Builder deserialize(CaptureRequest.Builder mdDefault,
+            JSONObject jsonReq) throws ItsException {
+        try {
+            Logt.i(TAG, "Parsing JSON capture request ...");
+
+            // Iterate over the CaptureRequest reflected fields.
+            CaptureRequest.Builder md = mdDefault;
+            Field[] allFields = CaptureRequest.class.getDeclaredFields();
+            for (Field field : allFields) {
+                if (Modifier.isPublic(field.getModifiers()) &&
+                        Modifier.isStatic(field.getModifiers()) &&
+                        field.getType() == CaptureRequest.Key.class &&
+                        field.getGenericType() instanceof ParameterizedType) {
+                    ParameterizedType paramType = (ParameterizedType)field.getGenericType();
+                    Type[] argTypes = paramType.getActualTypeArguments();
+                    if (argTypes.length > 0) {
+                        CaptureRequest.Key key = (CaptureRequest.Key)field.get(md);
+                        String keyName = key.getName();
+                        Type keyType = argTypes[0];
+
+                        // For each reflected CaptureRequest entry, look inside the JSON object
+                        // to see if it is being set. If it is found, remove the key from the
+                        // JSON object. After this process, there should be no keys left in the
+                        // JSON (otherwise an invalid key was specified).
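+                        // For example (illustrative), {"android.sensor.sensitivity": 100}
+                        // sets CaptureRequest.SENSOR_SENSITIVITY to 100.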
+
+                        if (jsonReq.has(keyName) && !jsonReq.isNull(keyName)) {
+                            if (keyType instanceof GenericArrayType) {
+                                Type elmtType =
+                                        ((GenericArrayType)keyType).getGenericComponentType();
+                                JSONArray ja = jsonReq.getJSONArray(keyName);
+                                Object val[] = new Object[ja.length()];
+                                for (int i = 0; i < ja.length(); i++) {
+                                    if (elmtType == int.class) {
+                                        Array.set(val, i, ja.getInt(i));
+                                    } else if (elmtType == byte.class) {
+                                        Array.set(val, i, (byte)ja.getInt(i));
+                                    } else if (elmtType == float.class) {
+                                        Array.set(val, i, (float)ja.getDouble(i));
+                                    } else if (elmtType == long.class) {
+                                        Array.set(val, i, ja.getLong(i));
+                                    } else if (elmtType == double.class) {
+                                        Array.set(val, i, ja.getDouble(i));
+                                    } else if (elmtType == boolean.class) {
+                                        Array.set(val, i, ja.getBoolean(i));
+                                    } else if (elmtType == String.class) {
+                                        Array.set(val, i, ja.getString(i));
+                                    } else if (elmtType == Size.class){
+                                        JSONObject obj = ja.getJSONObject(i);
+                                        Array.set(val, i, new Size(
+                                                obj.getInt("width"), obj.getInt("height")));
+                                    } else if (elmtType == Rect.class) {
+                                        JSONObject obj = ja.getJSONObject(i);
+                                        Array.set(val, i, new Rect(
+                                                obj.getInt("left"), obj.getInt("top"),
+                                                obj.getInt("right"), obj.getInt("bottom")));
+                                    } else if (elmtType == Rational.class) {
+                                        JSONObject obj = ja.getJSONObject(i);
+                                        Array.set(val, i, new Rational(
+                                                obj.getInt("numerator"),
+                                                obj.getInt("denominator")));
+                                    } else if (elmtType == RggbChannelVector.class) {
+                                        JSONArray arr = ja.getJSONArray(i);
+                                        Array.set(val, i, new RggbChannelVector(
+                                                (float)arr.getDouble(0),
+                                                (float)arr.getDouble(1),
+                                                (float)arr.getDouble(2),
+                                                (float)arr.getDouble(3)));
+                                    } else if (elmtType == ColorSpaceTransform.class) {
+                                        JSONArray arr = ja.getJSONArray(i);
+                                        Rational xform[] = new Rational[9];
+                                        for (int j = 0; j < 9; j++) {
+                                            xform[j] = new Rational(
+                                                    arr.getJSONObject(j).getInt("numerator"),
+                                                    arr.getJSONObject(j).getInt("denominator"));
+                                        }
+                                        Array.set(val, i, new ColorSpaceTransform(xform));
+                                    } else if (elmtType == MeteringRectangle.class) {
+                                        JSONObject obj = ja.getJSONObject(i);
+                                        Array.set(val, i, new MeteringRectangle(
+                                                obj.getInt("x"),
+                                                obj.getInt("y"),
+                                                obj.getInt("width"),
+                                                obj.getInt("height"),
+                                                obj.getInt("weight")));
+                                    } else {
+                                        throw new ItsException(
+                                                "Failed to parse key from JSON: " + keyName);
+                                    }
+                                }
+                                if (val != null) {
+                                    Logt.i(TAG, "Set: " + keyName + " -> " + Arrays.toString(val));
+                                    md.set(key, val);
+                                    jsonReq.remove(keyName);
+                                }
+                            } else {
+                                Object val = null;
+                                if (keyType == Integer.class) {
+                                    val = jsonReq.getInt(keyName);
+                                } else if (keyType == Byte.class) {
+                                    val = (byte)jsonReq.getInt(keyName);
+                                } else if (keyType == Double.class) {
+                                    val = jsonReq.getDouble(keyName);
+                                } else if (keyType == Long.class) {
+                                    val = jsonReq.getLong(keyName);
+                                } else if (keyType == Float.class) {
+                                    val = (float)jsonReq.getDouble(keyName);
+                                } else if (keyType == Boolean.class) {
+                                    val = jsonReq.getBoolean(keyName);
+                                } else if (keyType == String.class) {
+                                    val = jsonReq.getString(keyName);
+                                } else if (keyType == Size.class) {
+                                    JSONObject obj = jsonReq.getJSONObject(keyName);
+                                    val = new Size(
+                                            obj.getInt("width"), obj.getInt("height"));
+                                } else if (keyType == Rect.class) {
+                                    JSONObject obj = jsonReq.getJSONObject(keyName);
+                                    val = new Rect(
+                                            obj.getInt("left"), obj.getInt("top"),
+                                            obj.getInt("right"), obj.getInt("bottom"));
+                                } else if (keyType == Rational.class) {
+                                    JSONObject obj = jsonReq.getJSONObject(keyName);
+                                    val = new Rational(obj.getInt("numerator"),
+                                                       obj.getInt("denominator"));
+                                } else if (keyType == RggbChannelVector.class) {
+                                    JSONObject obj = jsonReq.optJSONObject(keyName);
+                                    JSONArray arr = jsonReq.optJSONArray(keyName);
+                                    if (arr != null) {
+                                        val = new RggbChannelVector(
+                                                (float)arr.getDouble(0),
+                                                (float)arr.getDouble(1),
+                                                (float)arr.getDouble(2),
+                                                (float)arr.getDouble(3));
+                                    } else if (obj != null) {
+                                        val = new RggbChannelVector(
+                                                (float)obj.getDouble("red"),
+                                                (float)obj.getDouble("greenEven"),
+                                                (float)obj.getDouble("greenOdd"),
+                                                (float)obj.getDouble("blue"));
+                                    } else {
+                                        throw new ItsException("Invalid RggbChannelVector object");
+                                    }
+                                } else if (keyType == ColorSpaceTransform.class) {
+                                    JSONArray arr = jsonReq.getJSONArray(keyName);
+                                    Rational a[] = new Rational[9];
+                                    for (int i = 0; i < 9; i++) {
+                                        a[i] = new Rational(
+                                                arr.getJSONObject(i).getInt("numerator"),
+                                                arr.getJSONObject(i).getInt("denominator"));
+                                    }
+                                    val = new ColorSpaceTransform(a);
+                                } else if (keyType instanceof ParameterizedType &&
+                                        ((ParameterizedType)keyType).getRawType() == Range.class &&
+                                        ((ParameterizedType)keyType).getActualTypeArguments().length == 1 &&
+                                        ((ParameterizedType)keyType).getActualTypeArguments()[0] == Integer.class) {
+                                    JSONArray arr = jsonReq.getJSONArray(keyName);
+                                    val = new Range<Integer>(arr.getInt(0), arr.getInt(1));
+                                } else {
+                                    throw new ItsException(
+                                            "Failed to parse key from JSON: " +
+                                            keyName + ", " + keyType);
+                                }
+                                if (val != null) {
+                                    Logt.i(TAG, "Set: " + keyName + " -> " + val);
+                                    md.set(key, val);
+                                    jsonReq.remove(keyName);
+                                }
+                            }
+                        }
+                    }
+                }
+            }
+
+            // Ensure that there were no invalid keys in the JSON request object.
+            if (jsonReq.length() != 0) {
+                throw new ItsException("Invalid JSON key(s): " + jsonReq.toString());
+            }
+
+            Logt.i(TAG, "Parsing JSON capture request completed");
+            return md;
+        } catch (java.lang.IllegalAccessException e) {
+            throw new ItsException("Access error: ", e);
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        }
+    }
+
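+    // Expects a top-level JSON object of the form {"captureRequests":[{...}, ...]};
+    // each element is deserialized against a fresh still-capture template request.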
+    @SuppressWarnings("unchecked")
+    public static List<CaptureRequest.Builder> deserializeRequestList(
+            CameraDevice device, JSONObject jsonObjTop)
+            throws ItsException {
+        try {
+            List<CaptureRequest.Builder> requests = null;
+            JSONArray jsonReqs = jsonObjTop.getJSONArray("captureRequests");
+            requests = new LinkedList<CaptureRequest.Builder>();
+            for (int i = 0; i < jsonReqs.length(); i++) {
+                CaptureRequest.Builder templateReq = device.createCaptureRequest(
+                        CameraDevice.TEMPLATE_STILL_CAPTURE);
+                requests.add(
+                    deserialize(templateReq, jsonReqs.getJSONObject(i)));
+            }
+            return requests;
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        } catch (android.hardware.camera2.CameraAccessException e) {
+            throw new ItsException("Access error: ", e);
+        }
+    }
+}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
new file mode 100644
index 0000000..e340c8a
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsService.java
@@ -0,0 +1,1319 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+import android.app.Service;
+import android.content.Context;
+import android.content.Intent;
+import android.graphics.ImageFormat;
+import android.hardware.camera2.CameraCaptureSession;
+import android.hardware.camera2.CameraAccessException;
+import android.hardware.camera2.CameraCharacteristics;
+import android.hardware.camera2.CameraDevice;
+import android.hardware.camera2.CameraManager;
+import android.hardware.camera2.CaptureFailure;
+import android.hardware.camera2.CaptureRequest;
+import android.hardware.camera2.CaptureResult;
+import android.hardware.camera2.DngCreator;
+import android.hardware.camera2.TotalCaptureResult;
+import android.hardware.camera2.params.MeteringRectangle;
+import android.hardware.Sensor;
+import android.hardware.SensorEvent;
+import android.hardware.SensorEventListener;
+import android.hardware.SensorManager;
+import android.media.Image;
+import android.media.ImageReader;
+import android.net.Uri;
+import android.os.ConditionVariable;
+import android.os.Handler;
+import android.os.HandlerThread;
+import android.os.IBinder;
+import android.os.Message;
+import android.os.Vibrator;
+import android.util.Log;
+import android.util.Rational;
+import android.util.Size;
+import android.view.Surface;
+
+import com.android.ex.camera2.blocking.BlockingCameraManager;
+import com.android.ex.camera2.blocking.BlockingCameraManager.BlockingOpenException;
+import com.android.ex.camera2.blocking.BlockingStateCallback;
+import com.android.ex.camera2.blocking.BlockingSessionCallback;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.math.BigInteger;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.security.MessageDigest;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+
+public class ItsService extends Service implements SensorEventListener {
+    public static final String TAG = ItsService.class.getSimpleName();
+
+    // Timeouts, in seconds.
+    public static final int TIMEOUT_CALLBACK = 3;
+    public static final int TIMEOUT_3A = 10;
+
+    // State transition timeouts, in ms.
+    private static final long TIMEOUT_IDLE_MS = 2000;
+    private static final long TIMEOUT_STATE_MS = 500;
+
+    // Timeout to wait for a capture result after the capture buffer has arrived, in ms.
+    private static final long TIMEOUT_CAP_RES = 2000;
+
+    private static final int MAX_CONCURRENT_READER_BUFFERS = 10;
+
+    // Supports at most RAW+YUV+JPEG, one surface each.
+    private static final int MAX_NUM_OUTPUT_SURFACES = 3;
+
+    public static final int SERVERPORT = 6000;
+
+    public static final String REGION_KEY = "regions";
+    public static final String REGION_AE_KEY = "ae";
+    public static final String REGION_AWB_KEY = "awb";
+    public static final String REGION_AF_KEY = "af";
+    public static final String LOCK_AE_KEY = "aeLock";
+    public static final String LOCK_AWB_KEY = "awbLock";
+    public static final String TRIGGER_KEY = "triggers";
+    public static final String TRIGGER_AE_KEY = "ae";
+    public static final String TRIGGER_AF_KEY = "af";
+    public static final String VIB_PATTERN_KEY = "pattern";
+
+    private CameraManager mCameraManager = null;
+    private HandlerThread mCameraThread = null;
+    private Handler mCameraHandler = null;
+    private BlockingCameraManager mBlockingCameraManager = null;
+    private BlockingStateCallback mCameraListener = null;
+    private CameraDevice mCamera = null;
+    private CameraCaptureSession mSession = null;
+    private ImageReader[] mCaptureReaders = null;
+    private CameraCharacteristics mCameraCharacteristics = null;
+
+    private Vibrator mVibrator = null;
+
+    private HandlerThread mSaveThreads[] = new HandlerThread[MAX_NUM_OUTPUT_SURFACES];
+    private Handler mSaveHandlers[] = new Handler[MAX_NUM_OUTPUT_SURFACES];
+    private HandlerThread mResultThread = null;
+    private Handler mResultHandler = null;
+
+    private volatile boolean mThreadExitFlag = false;
+
+    private volatile ServerSocket mSocket = null;
+    private volatile SocketRunnable mSocketRunnableObj = null;
+    private volatile BlockingQueue<ByteBuffer> mSocketWriteQueue =
+            new LinkedBlockingDeque<ByteBuffer>();
+    private final Object mSocketWriteEnqueueLock = new Object();
+    private final Object mSocketWriteDrainLock = new Object();
+
+    private volatile BlockingQueue<Object[]> mSerializerQueue =
+            new LinkedBlockingDeque<Object[]>();
+
+    private AtomicInteger mCountCallbacksRemaining = new AtomicInteger();
+    private AtomicInteger mCountRawOrDng = new AtomicInteger();
+    private AtomicInteger mCountRaw10 = new AtomicInteger();
+    private AtomicInteger mCountJpg = new AtomicInteger();
+    private AtomicInteger mCountYuv = new AtomicInteger();
+    private AtomicInteger mCountCapRes = new AtomicInteger();
+    private boolean mCaptureRawIsDng;
+    private CaptureResult mCaptureResults[] = null;
+
+    private volatile ConditionVariable mInterlock3A = new ConditionVariable(true);
+    private volatile boolean mIssuedRequest3A = false;
+    private volatile boolean mConvergedAE = false;
+    private volatile boolean mConvergedAF = false;
+    private volatile boolean mConvergedAWB = false;
+    private volatile boolean mLockedAE = false;
+    private volatile boolean mLockedAWB = false;
+    private volatile boolean mNeedsLockedAE = false;
+    private volatile boolean mNeedsLockedAWB = false;
+
+    class MySensorEvent {
+        public Sensor sensor;
+        public int accuracy;
+        public long timestamp;
+        public float values[];
+    }
+
+    // For capturing motion sensor traces.
+    private SensorManager mSensorManager = null;
+    private Sensor mAccelSensor = null;
+    private Sensor mMagSensor = null;
+    private Sensor mGyroSensor = null;
+    private volatile LinkedList<MySensorEvent> mEvents = null;
+    private volatile Object mEventLock = new Object();
+    private volatile boolean mEventsEnabled = false;
+
+    public interface CaptureCallback {
+        void onCaptureAvailable(Image capture);
+    }
+
+    public abstract class CaptureResultListener extends CameraCaptureSession.CaptureCallback {}
+
+    @Override
+    public IBinder onBind(Intent intent) {
+        return null;
+    }
+
+    @Override
+    public void onCreate() {
+        try {
+            mThreadExitFlag = false;
+
+            // Get handle to camera manager.
+            mCameraManager = (CameraManager) this.getSystemService(Context.CAMERA_SERVICE);
+            if (mCameraManager == null) {
+                throw new ItsException("Failed to connect to camera manager");
+            }
+            mBlockingCameraManager = new BlockingCameraManager(mCameraManager);
+            mCameraListener = new BlockingStateCallback();
+
+            // Register for motion events.
+            mEvents = new LinkedList<MySensorEvent>();
+            mSensorManager = (SensorManager)getSystemService(Context.SENSOR_SERVICE);
+            mAccelSensor = mSensorManager.getDefaultSensor(Sensor.TYPE_ACCELEROMETER);
+            mMagSensor = mSensorManager.getDefaultSensor(Sensor.TYPE_MAGNETIC_FIELD);
+            mGyroSensor = mSensorManager.getDefaultSensor(Sensor.TYPE_GYROSCOPE);
+            mSensorManager.registerListener(this, mAccelSensor, SensorManager.SENSOR_DELAY_FASTEST);
+            mSensorManager.registerListener(this, mMagSensor, SensorManager.SENSOR_DELAY_FASTEST);
+            mSensorManager.registerListener(this, mGyroSensor, SensorManager.SENSOR_DELAY_FASTEST);
+
+            // Get a handle to the system vibrator.
+            mVibrator = (Vibrator)getSystemService(Context.VIBRATOR_SERVICE);
+
+            // Create threads to receive images and save them.
+            for (int i = 0; i < MAX_NUM_OUTPUT_SURFACES; i++) {
+                mSaveThreads[i] = new HandlerThread("SaveThread" + i);
+                mSaveThreads[i].start();
+                mSaveHandlers[i] = new Handler(mSaveThreads[i].getLooper());
+            }
+
+            // Create a thread to handle object serialization.
+            (new Thread(new SerializerRunnable())).start();
+
+            // Create a thread to receive capture results and process them.
+            mResultThread = new HandlerThread("ResultThread");
+            mResultThread.start();
+            mResultHandler = new Handler(mResultThread.getLooper());
+
+            // Create a thread for the camera device.
+            mCameraThread = new HandlerThread("ItsCameraThread");
+            mCameraThread.start();
+            mCameraHandler = new Handler(mCameraThread.getLooper());
+
+            // Create a thread to process commands, listening on a TCP socket.
+            mSocketRunnableObj = new SocketRunnable();
+            (new Thread(mSocketRunnableObj)).start();
+        } catch (ItsException e) {
+            Logt.e(TAG, "Service failed to start: ", e);
+        }
+    }
+
+    @Override
+    public int onStartCommand(Intent intent, int flags, int startId) {
+        try {
+            // Wait until the socket thread has opened the server socket, then log that the
+            // service is running and able to accept socket connections.
+            while (!mThreadExitFlag && mSocket == null) {
+                Thread.sleep(1);
+            }
+            if (!mThreadExitFlag) {
+                Logt.i(TAG, "ItsService ready");
+            } else {
+                Logt.e(TAG, "Starting ItsService in bad state");
+            }
+        } catch (java.lang.InterruptedException e) {
+            Logt.e(TAG, "Error starting ItsService (interrupted)", e);
+        }
+        return START_STICKY;
+    }
+
+    @Override
+    public void onDestroy() {
+        mThreadExitFlag = true;
+        for (int i = 0; i < MAX_NUM_OUTPUT_SURFACES; i++) {
+            if (mSaveThreads[i] != null) {
+                mSaveThreads[i].quit();
+                mSaveThreads[i] = null;
+            }
+        }
+        if (mResultThread != null) {
+            mResultThread.quitSafely();
+            mResultThread = null;
+        }
+        if (mCameraThread != null) {
+            mCameraThread.quitSafely();
+            mCameraThread = null;
+        }
+    }
+
+    public void openCameraDevice(int cameraId) throws ItsException {
+        Logt.i(TAG, String.format("Opening camera %d", cameraId));
+
+        String[] devices;
+        try {
+            devices = mCameraManager.getCameraIdList();
+            if (devices == null || devices.length == 0) {
+                throw new ItsException("No camera devices");
+            }
+        } catch (CameraAccessException e) {
+            throw new ItsException("Failed to get device ID list", e);
+        }
+
+        try {
+            mCamera = mBlockingCameraManager.openCamera(devices[cameraId],
+                    mCameraListener, mCameraHandler);
+            mCameraCharacteristics = mCameraManager.getCameraCharacteristics(
+                    devices[cameraId]);
+        } catch (CameraAccessException e) {
+            throw new ItsException("Failed to open camera", e);
+        } catch (BlockingOpenException e) {
+            throw new ItsException("Failed to open camera (after blocking)", e);
+        }
+        mSocketRunnableObj.sendResponse("cameraOpened", "");
+    }
+
+    public void closeCameraDevice() throws ItsException {
+        try {
+            if (mCamera != null) {
+                Logt.i(TAG, "Closing camera");
+                mCamera.close();
+                mCamera = null;
+            }
+        } catch (Exception e) {
+            throw new ItsException("Failed to close device");
+        }
+        mSocketRunnableObj.sendResponse("cameraClosed", "");
+    }
+
+    class SerializerRunnable implements Runnable {
+        // Use a separate thread to perform JSON serialization, which can be slow due to
+        // its use of reflection.
+        @Override
+        public void run() {
+            Logt.i(TAG, "Serializer thread starting");
+            while (! mThreadExitFlag) {
+                try {
+                    Object objs[] = mSerializerQueue.take();
+                    JSONObject jsonObj = new JSONObject();
+                    String tag = null;
+                    for (int i = 0; i < objs.length; i++) {
+                        Object obj = objs[i];
+                        if (obj instanceof String) {
+                            if (tag != null) {
+                                throw new ItsException("Multiple tags for socket response");
+                            }
+                            tag = (String)obj;
+                        } else if (obj instanceof CameraCharacteristics) {
+                            jsonObj.put("cameraProperties", ItsSerializer.serialize(
+                                    (CameraCharacteristics)obj));
+                        } else if (obj instanceof CaptureRequest) {
+                            jsonObj.put("captureRequest", ItsSerializer.serialize(
+                                    (CaptureRequest)obj));
+                        } else if (obj instanceof CaptureResult) {
+                            jsonObj.put("captureResult", ItsSerializer.serialize(
+                                    (CaptureResult)obj));
+                        } else if (obj instanceof JSONArray) {
+                            jsonObj.put("outputs", (JSONArray)obj);
+                        } else {
+                            throw new ItsException("Invalid object received for serialization");
+                        }
+                    }
+                    if (tag == null) {
+                        throw new ItsException("No tag provided for socket response");
+                    }
+                    mSocketRunnableObj.sendResponse(tag, null, jsonObj, null);
+                    Logt.i(TAG, String.format("Serialized %s", tag));
+                } catch (org.json.JSONException e) {
+                    Logt.e(TAG, "Error serializing object", e);
+                    break;
+                } catch (ItsException e) {
+                    Logt.e(TAG, "Error serializing object", e);
+                    break;
+                } catch (java.lang.InterruptedException e) {
+                    Logt.e(TAG, "Error serializing object (interrupted)", e);
+                    break;
+                }
+            }
+            Logt.i(TAG, "Serializer thread terminated");
+        }
+    }
+
+    class SocketWriteRunnable implements Runnable {
+
+        // Use a separate thread to service a queue of objects to be written to the socket,
+        // writing each sequentially in order. This is needed since different handler functions
+        // (called on different threads) will need to send data back to the host script.
+
+        public Socket mOpenSocket = null;
+
+        public SocketWriteRunnable(Socket openSocket) {
+            mOpenSocket = openSocket;
+        }
+
+        public void setOpenSocket(Socket openSocket) {
+            mOpenSocket = openSocket;
+        }
+
+        @Override
+        public void run() {
+            Logt.i(TAG, "Socket writer thread starting");
+            while (true) {
+                try {
+                    ByteBuffer b = mSocketWriteQueue.take();
+                    synchronized(mSocketWriteDrainLock) {
+                        if (mOpenSocket == null) {
+                            continue;
+                        }
+                        if (b.hasArray()) {
+                            mOpenSocket.getOutputStream().write(b.array());
+                        } else {
+                            byte[] barray = new byte[b.capacity()];
+                            b.get(barray);
+                            mOpenSocket.getOutputStream().write(barray);
+                        }
+                        mOpenSocket.getOutputStream().flush();
+                        Logt.i(TAG, String.format("Wrote to socket: %d bytes", b.capacity()));
+                    }
+                } catch (IOException e) {
+                    Logt.e(TAG, "Error writing to socket", e);
+                    break;
+                } catch (java.lang.InterruptedException e) {
+                    Logt.e(TAG, "Error writing to socket (interrupted)", e);
+                    break;
+                }
+            }
+            Logt.i(TAG, "Socket writer thread terminated");
+        }
+    }
+
+    class SocketRunnable implements Runnable {
+
+        // Format of sent messages (over the socket):
+        // * Serialized JSON object on a single line (newline-terminated)
+        // * For byte buffers, the binary data then follows
+        //
+        // Format of received messages (from the socket):
+        // * Serialized JSON object on a single line (newline-terminated)
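+        //
+        // For example, a payload-free response is a line like (illustrative):
+        //   {"tag":"cameraOpened","strValue":""}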
+
+        private Socket mOpenSocket = null;
+        private SocketWriteRunnable mSocketWriteRunnable = null;
+
+        @Override
+        public void run() {
+            Logt.i(TAG, "Socket thread starting");
+            try {
+                mSocket = new ServerSocket(SERVERPORT);
+            } catch (IOException e) {
+                Logt.e(TAG, "Failed to create socket", e);
+            }
+
+            // Create a new thread to handle writes to this socket.
+            mSocketWriteRunnable = new SocketWriteRunnable(null);
+            (new Thread(mSocketWriteRunnable)).start();
+
+            while (!mThreadExitFlag) {
+                // Receive the socket-open request from the host.
+                try {
+                    Logt.i(TAG, "Waiting for client to connect to socket");
+                    mOpenSocket = mSocket.accept();
+                    if (mOpenSocket == null) {
+                        Logt.e(TAG, "Socket connection error");
+                        break;
+                    }
+                    mSocketWriteQueue.clear();
+                    mSocketWriteRunnable.setOpenSocket(mOpenSocket);
+                    Logt.i(TAG, "Socket connected");
+                } catch (IOException e) {
+                    Logt.e(TAG, "Socket open error: ", e);
+                    break;
+                }
+
+                // Process commands over the open socket.
+                while (!mThreadExitFlag) {
+                    try {
+                        BufferedReader input = new BufferedReader(
+                                new InputStreamReader(mOpenSocket.getInputStream()));
+                        if (input == null) {
+                            Logt.e(TAG, "Failed to get socket input stream");
+                            break;
+                        }
+                        String line = input.readLine();
+                        if (line == null) {
+                            Logt.i(TAG, "Socket readLine returned null (host disconnected)");
+                            break;
+                        }
+                        processSocketCommand(line);
+                    } catch (IOException e) {
+                        Logt.e(TAG, "Socket read error: ", e);
+                        break;
+                    } catch (ItsException e) {
+                        Logt.e(TAG, "Script error: ", e);
+                        break;
+                    }
+                }
+
+                // Close socket and go back to waiting for a new connection.
+                try {
+                    synchronized(mSocketWriteDrainLock) {
+                        mSocketWriteQueue.clear();
+                        mOpenSocket.close();
+                        mOpenSocket = null;
+                        Logt.i(TAG, "Socket disconnected");
+                    }
+                } catch (java.io.IOException e) {
+                    Logt.e(TAG, "Exception closing socket");
+                }
+            }
+
+            // It's an overall error state if the code gets here; no recovery is possible.
+            // Try to do some cleanup, but the service probably needs to be restarted.
+            Logt.i(TAG, "Socket server loop exited");
+            mThreadExitFlag = true;
+            try {
+                if (mOpenSocket != null) {
+                    mOpenSocket.close();
+                    mOpenSocket = null;
+                }
+            } catch (java.io.IOException e) {
+                Logt.w(TAG, "Exception closing socket");
+            }
+            try {
+                if (mSocket != null) {
+                    mSocket.close();
+                    mSocket = null;
+                }
+            } catch (java.io.IOException e) {
+                Logt.w(TAG, "Exception closing socket");
+            }
+        }
+
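+        // Each received command is one JSON object per line, e.g. (illustrative):
+        //   {"cmdName":"open","cameraId":0}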
+        public void processSocketCommand(String cmd)
+                throws ItsException {
+            // Each command is a serialized JSON object.
+            try {
+                JSONObject cmdObj = new JSONObject(cmd);
+                if ("open".equals(cmdObj.getString("cmdName"))) {
+                    int cameraId = cmdObj.getInt("cameraId");
+                    openCameraDevice(cameraId);
+                } else if ("close".equals(cmdObj.getString("cmdName"))) {
+                    closeCameraDevice();
+                } else if ("getCameraProperties".equals(cmdObj.getString("cmdName"))) {
+                    doGetProps();
+                } else if ("startSensorEvents".equals(cmdObj.getString("cmdName"))) {
+                    doStartSensorEvents();
+                } else if ("getSensorEvents".equals(cmdObj.getString("cmdName"))) {
+                    doGetSensorEvents();
+                } else if ("do3A".equals(cmdObj.getString("cmdName"))) {
+                    do3A(cmdObj);
+                } else if ("doCapture".equals(cmdObj.getString("cmdName"))) {
+                    doCapture(cmdObj);
+                } else if ("doVibrate".equals(cmdObj.getString("cmdName"))) {
+                    doVibrate(cmdObj);
+                } else {
+                    throw new ItsException("Unknown command: " + cmd);
+                }
+            } catch (org.json.JSONException e) {
+                Logt.e(TAG, "Invalid command: ", e);
+            }
+        }
+
+        public void sendResponse(String tag, String str, JSONObject obj, ByteBuffer bbuf)
+                throws ItsException {
+            try {
+                JSONObject jsonObj = new JSONObject();
+                jsonObj.put("tag", tag);
+                if (str != null) {
+                    jsonObj.put("strValue", str);
+                }
+                if (obj != null) {
+                    jsonObj.put("objValue", obj);
+                }
+                if (bbuf != null) {
+                    jsonObj.put("bufValueSize", bbuf.capacity());
+                }
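+                // The newline-terminated JSON header is queued before the raw buffer
+                // bytes; "bufValueSize" presumably lets the receiving side know how
+                // many bytes of binary payload follow the JSON line.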
+                ByteBuffer bstr = ByteBuffer.wrap(
+                        (jsonObj.toString()+"\n").getBytes(Charset.defaultCharset()));
+                synchronized(mSocketWriteEnqueueLock) {
+                    mSocketWriteQueue.put(bstr);
+                    if (bbuf != null) {
+                        mSocketWriteQueue.put(bbuf);
+                    }
+                }
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            } catch (java.lang.InterruptedException e) {
+                throw new ItsException("Socket error: ", e);
+            }
+        }
+
+        public void sendResponse(String tag, String str)
+                throws ItsException {
+            sendResponse(tag, str, null, null);
+        }
+
+        public void sendResponse(String tag, JSONObject obj)
+                throws ItsException {
+            sendResponse(tag, null, obj, null);
+        }
+
+        public void sendResponseCaptureBuffer(String tag, ByteBuffer bbuf)
+                throws ItsException {
+            sendResponse(tag, null, null, bbuf);
+        }
+
+        public void sendResponse(LinkedList<MySensorEvent> events)
+                throws ItsException {
+            try {
+                JSONArray accels = new JSONArray();
+                JSONArray mags = new JSONArray();
+                JSONArray gyros = new JSONArray();
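+                // Each event is serialized as {"time":..., "x":..., "y":..., "z":...}
+                // and grouped under "accel", "mag", or "gyro" in the response object.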
+                for (MySensorEvent event : events) {
+                    JSONObject obj = new JSONObject();
+                    obj.put("time", event.timestamp);
+                    obj.put("x", event.values[0]);
+                    obj.put("y", event.values[1]);
+                    obj.put("z", event.values[2]);
+                    if (event.sensor.getType() == Sensor.TYPE_ACCELEROMETER) {
+                        accels.put(obj);
+                    } else if (event.sensor.getType() == Sensor.TYPE_MAGNETIC_FIELD) {
+                        mags.put(obj);
+                    } else if (event.sensor.getType() == Sensor.TYPE_GYROSCOPE) {
+                        gyros.put(obj);
+                    }
+                }
+                JSONObject obj = new JSONObject();
+                obj.put("accel", accels);
+                obj.put("mag", mags);
+                obj.put("gyro", gyros);
+                sendResponse("sensorEvents", null, obj, null);
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            }
+        }
+
+        public void sendResponse(CameraCharacteristics props)
+                throws ItsException {
+            try {
+                Object[] objs = { "cameraProperties", props };
+                mSerializerQueue.put(objs);
+            } catch (InterruptedException e) {
+                throw new ItsException("Interrupted: ", e);
+            }
+        }
+
+        public void sendResponseCaptureResult(CameraCharacteristics props,
+                                              CaptureRequest request,
+                                              CaptureResult result,
+                                              ImageReader[] readers)
+                throws ItsException {
+            try {
+                JSONArray jsonSurfaces = new JSONArray();
+                for (int i = 0; i < readers.length; i++) {
+                    JSONObject jsonSurface = new JSONObject();
+                    jsonSurface.put("width", readers[i].getWidth());
+                    jsonSurface.put("height", readers[i].getHeight());
+                    int format = readers[i].getImageFormat();
+                    if (format == ImageFormat.RAW_SENSOR) {
+                        jsonSurface.put("format", "raw");
+                    } else if (format == ImageFormat.RAW10) {
+                        jsonSurface.put("format", "raw10");
+                    } else if (format == ImageFormat.JPEG) {
+                        jsonSurface.put("format", "jpeg");
+                    } else if (format == ImageFormat.YUV_420_888) {
+                        jsonSurface.put("format", "yuv");
+                    } else {
+                        throw new ItsException("Invalid format");
+                    }
+                    jsonSurfaces.put(jsonSurface);
+                }
+
+                Object[] objs = { "captureResults", props, request, result, jsonSurfaces };
+                mSerializerQueue.put(objs);
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            } catch (InterruptedException e) {
+                throw new ItsException("Interrupted: ", e);
+            }
+        }
+    }
+
+    public ImageReader.OnImageAvailableListener
+            createAvailableListener(final CaptureCallback listener) {
+        return new ImageReader.OnImageAvailableListener() {
+            @Override
+            public void onImageAvailable(ImageReader reader) {
+                Image i = null;
+                try {
+                    i = reader.acquireNextImage();
+                    listener.onCaptureAvailable(i);
+                } finally {
+                    if (i != null) {
+                        i.close();
+                    }
+                }
+            }
+        };
+    }
+
+    private ImageReader.OnImageAvailableListener
+            createAvailableListenerDropper(final CaptureCallback listener) {
+        return new ImageReader.OnImageAvailableListener() {
+            @Override
+            public void onImageAvailable(ImageReader reader) {
+                Image i = reader.acquireNextImage();
+                i.close();
+            }
+        };
+    }
+
+    private void doStartSensorEvents() throws ItsException {
+        synchronized(mEventLock) {
+            mEventsEnabled = true;
+        }
+        mSocketRunnableObj.sendResponse("sensorEventsStarted", "");
+    }
+
+    private void doGetSensorEvents() throws ItsException {
+        synchronized(mEventLock) {
+            mSocketRunnableObj.sendResponse(mEvents);
+            mEvents.clear();
+            mEventsEnabled = false;
+        }
+    }
+
+    private void doGetProps() throws ItsException {
+        mSocketRunnableObj.sendResponse(mCameraCharacteristics);
+    }
+
+    private void prepareCaptureReader(int[] widths, int[] heights, int[] formats, int numSurfaces) {
+        if (mCaptureReaders != null) {
+            for (int i = 0; i < mCaptureReaders.length; i++) {
+                if (mCaptureReaders[i] != null) {
+                    mCaptureReaders[i].close();
+                }
+            }
+        }
+        mCaptureReaders = new ImageReader[numSurfaces];
+        for (int i = 0; i < numSurfaces; i++) {
+            mCaptureReaders[i] = ImageReader.newInstance(widths[i], heights[i], formats[i],
+                    MAX_CONCURRENT_READER_BUFFERS);
+        }
+    }
+
+    private void do3A(JSONObject params) throws ItsException {
+        try {
+            // Start a 3A action, and wait for it to converge.
+            // Get the converged values for each "A", and package into JSON result for caller.
+
+            // 3A happens on full-res frames.
+            Size sizes[] = ItsUtils.getYuvOutputSizes(mCameraCharacteristics);
+            int widths[] = new int[1];
+            int heights[] = new int[1];
+            int formats[] = new int[1];
+            widths[0] = sizes[0].getWidth();
+            heights[0] = sizes[0].getHeight();
+            formats[0] = ImageFormat.YUV_420_888;
+            int width = widths[0];
+            int height = heights[0];
+
+            prepareCaptureReader(widths, heights, formats, 1);
+            List<Surface> outputSurfaces = new ArrayList<Surface>(1);
+            outputSurfaces.add(mCaptureReaders[0].getSurface());
+            BlockingSessionCallback sessionListener = new BlockingSessionCallback();
+            mCamera.createCaptureSession(outputSurfaces, sessionListener, mCameraHandler);
+            mSession = sessionListener.waitAndGetSession(TIMEOUT_IDLE_MS);
+
+            // Add a listener that just recycles buffers; they aren't saved anywhere.
+            ImageReader.OnImageAvailableListener readerListener =
+                    createAvailableListenerDropper(mCaptureCallback);
+            mCaptureReaders[0].setOnImageAvailableListener(readerListener, mSaveHandlers[0]);
+
+            // Get the user-specified regions for AE, AWB, AF.
+            // Note that the user specifies normalized [x,y,w,h], which is converted below
+            // to an [x0,y0,x1,y1] region in sensor coords. The capture request region
+            // also has a fifth "weight" element: [x0,y0,x1,y1,w].
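+            // For example, the defaults below meter the entire width x height frame
+            // with weight 1; a user-supplied normalized region such as
+            // [0.25, 0.25, 0.5, 0.5] with weight 1 (illustrative values) is scaled
+            // by the frame dimensions in ItsUtils.getJsonWeightedRectsFromArray.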
+            MeteringRectangle[] regionAE = new MeteringRectangle[]{
+                    new MeteringRectangle(0,0,width,height,1)};
+            MeteringRectangle[] regionAF = new MeteringRectangle[]{
+                    new MeteringRectangle(0,0,width,height,1)};
+            MeteringRectangle[] regionAWB = new MeteringRectangle[]{
+                    new MeteringRectangle(0,0,width,height,1)};
+            if (params.has(REGION_KEY)) {
+                JSONObject regions = params.getJSONObject(REGION_KEY);
+                if (regions.has(REGION_AE_KEY)) {
+                    regionAE = ItsUtils.getJsonWeightedRectsFromArray(
+                            regions.getJSONArray(REGION_AE_KEY), true, width, height);
+                }
+                if (regions.has(REGION_AF_KEY)) {
+                    regionAF = ItsUtils.getJsonWeightedRectsFromArray(
+                            regions.getJSONArray(REGION_AF_KEY), true, width, height);
+                }
+                if (regions.has(REGION_AWB_KEY)) {
+                    regionAWB = ItsUtils.getJsonWeightedRectsFromArray(
+                            regions.getJSONArray(REGION_AWB_KEY), true, width, height);
+                }
+            }
+
+            // If AE or AWB lock is specified, then the 3A will converge first and then lock these
+            // values, waiting until the HAL has reported that the lock was successful.
+            mNeedsLockedAE = params.optBoolean(LOCK_AE_KEY, false);
+            mNeedsLockedAWB = params.optBoolean(LOCK_AWB_KEY, false);
+
+            // By default, AE and AF both get triggered, but the user can optionally override this.
+            // Also, AF won't get triggered if the lens is fixed-focus.
+            boolean doAE = true;
+            boolean doAF = true;
+            if (params.has(TRIGGER_KEY)) {
+                JSONObject triggers = params.getJSONObject(TRIGGER_KEY);
+                if (triggers.has(TRIGGER_AE_KEY)) {
+                    doAE = triggers.getBoolean(TRIGGER_AE_KEY);
+                }
+                if (triggers.has(TRIGGER_AF_KEY)) {
+                    doAF = triggers.getBoolean(TRIGGER_AF_KEY);
+                }
+            }
+            if (doAF && mCameraCharacteristics.get(
+                            CameraCharacteristics.LENS_INFO_MINIMUM_FOCUS_DISTANCE) == 0) {
+                // Send a dummy result back for the code that is waiting for this message to see
+                // that AF has converged.
+                Logt.i(TAG, "Ignoring request for AF on fixed-focus camera");
+                mSocketRunnableObj.sendResponse("afResult", "0.0");
+                doAF = false;
+            }
+
+            mInterlock3A.open();
+            mIssuedRequest3A = false;
+            mConvergedAE = false;
+            mConvergedAWB = false;
+            mConvergedAF = false;
+            mLockedAE = false;
+            mLockedAWB = false;
+            long tstart = System.currentTimeMillis();
+            boolean triggeredAE = false;
+            boolean triggeredAF = false;
+
+            Logt.i(TAG, String.format("Initiating 3A: AE:%d, AF:%d, AWB:1, AELOCK:%d, AWBLOCK:%d",
+                    doAE?1:0, doAF?1:0, mNeedsLockedAE?1:0, mNeedsLockedAWB?1:0));
+
+            // Keep issuing capture requests until 3A has converged.
+            while (true) {
+
+                // Block until the next 3A frame can be taken. Only one frame is kept
+                // outstanding at a time, to simplify the logic here.
+                if (!mInterlock3A.block(TIMEOUT_3A * 1000) ||
+                        System.currentTimeMillis() - tstart > TIMEOUT_3A * 1000) {
+                    throw new ItsException("3A failed to converge (timeout)");
+                }
+                mInterlock3A.close();
+
+                // If not converged yet, issue another capture request.
+                if (       (doAE && (!triggeredAE || !mConvergedAE))
+                        || !mConvergedAWB
+                        || (doAF && (!triggeredAF || !mConvergedAF))
+                        || (doAE && mNeedsLockedAE && !mLockedAE)
+                        || (mNeedsLockedAWB && !mLockedAWB)) {
+
+                    // Baseline capture request for 3A.
+                    CaptureRequest.Builder req = mCamera.createCaptureRequest(
+                            CameraDevice.TEMPLATE_PREVIEW);
+                    req.set(CaptureRequest.FLASH_MODE, CaptureRequest.FLASH_MODE_OFF);
+                    req.set(CaptureRequest.CONTROL_MODE, CaptureRequest.CONTROL_MODE_AUTO);
+                    req.set(CaptureRequest.CONTROL_CAPTURE_INTENT,
+                            CaptureRequest.CONTROL_CAPTURE_INTENT_PREVIEW);
+                    req.set(CaptureRequest.CONTROL_AE_MODE,
+                            CaptureRequest.CONTROL_AE_MODE_ON);
+                    req.set(CaptureRequest.CONTROL_AE_EXPOSURE_COMPENSATION, 0);
+                    req.set(CaptureRequest.CONTROL_AE_LOCK, false);
+                    req.set(CaptureRequest.CONTROL_AE_REGIONS, regionAE);
+                    req.set(CaptureRequest.CONTROL_AF_MODE,
+                            CaptureRequest.CONTROL_AF_MODE_AUTO);
+                    req.set(CaptureRequest.CONTROL_AF_REGIONS, regionAF);
+                    req.set(CaptureRequest.CONTROL_AWB_MODE,
+                            CaptureRequest.CONTROL_AWB_MODE_AUTO);
+                    req.set(CaptureRequest.CONTROL_AWB_LOCK, false);
+                    req.set(CaptureRequest.CONTROL_AWB_REGIONS, regionAWB);
+
+                    if (mConvergedAE && mNeedsLockedAE) {
+                        req.set(CaptureRequest.CONTROL_AE_LOCK, true);
+                    }
+                    if (mConvergedAWB && mNeedsLockedAWB) {
+                        req.set(CaptureRequest.CONTROL_AWB_LOCK, true);
+                    }
+
+                    // Trigger AE first.
+                    if (doAE && !triggeredAE) {
+                        Logt.i(TAG, "Triggering AE");
+                        req.set(CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER,
+                                CaptureRequest.CONTROL_AE_PRECAPTURE_TRIGGER_START);
+                        triggeredAE = true;
+                    }
+
+                    // After AE has converged, trigger AF.
+                    if (doAF && !triggeredAF && (!doAE || (triggeredAE && mConvergedAE))) {
+                        Logt.i(TAG, "Triggering AF");
+                        req.set(CaptureRequest.CONTROL_AF_TRIGGER,
+                                CaptureRequest.CONTROL_AF_TRIGGER_START);
+                        triggeredAF = true;
+                    }
+
+                    req.addTarget(mCaptureReaders[0].getSurface());
+
+                    mIssuedRequest3A = true;
+                    mSession.capture(req.build(), mCaptureResultListener, mResultHandler);
+                } else {
+                    mSocketRunnableObj.sendResponse("3aConverged", "");
+                    Logt.i(TAG, "3A converged");
+                    break;
+                }
+            }
+        } catch (android.hardware.camera2.CameraAccessException e) {
+            throw new ItsException("Access error: ", e);
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        } finally {
+            mSocketRunnableObj.sendResponse("3aDone", "");
+        }
+    }
+
+    private void doVibrate(JSONObject params) throws ItsException {
+        try {
+            if (mVibrator == null) {
+                throw new ItsException("Unable to start vibrator");
+            }
+            JSONArray patternArray = params.getJSONArray(VIB_PATTERN_KEY);
+            int len = patternArray.length();
+            long pattern[] = new long[len];
+            for (int i = 0; i < len; i++) {
+                pattern[i] = patternArray.getLong(i);
+            }
+            Logt.i(TAG, String.format("Starting vibrator, pattern length %d",len));
+            mVibrator.vibrate(pattern, -1);
+            mSocketRunnableObj.sendResponse("vibrationStarted", "");
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        }
+    }
+
+    private void doCapture(JSONObject params) throws ItsException {
+        try {
+            // Parse the JSON to get the list of capture requests.
+            List<CaptureRequest.Builder> requests = ItsSerializer.deserializeRequestList(
+                    mCamera, params);
+
+            // Set the output surface(s) and listeners.
+            int widths[] = new int[MAX_NUM_OUTPUT_SURFACES];
+            int heights[] = new int[MAX_NUM_OUTPUT_SURFACES];
+            int formats[] = new int[MAX_NUM_OUTPUT_SURFACES];
+            int numSurfaces = 0;
+            try {
+                mCountRawOrDng.set(0);
+                mCountJpg.set(0);
+                mCountYuv.set(0);
+                mCountRaw10.set(0);
+                mCountCapRes.set(0);
+                mCaptureRawIsDng = false;
+                mCaptureResults = new CaptureResult[requests.size()];
+
+                JSONArray jsonOutputSpecs = ItsUtils.getOutputSpecs(params);
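+                // An output spec is a JSON array such as (illustrative):
+                //   [{"format": "yuv", "width": 640, "height": 480}, {"format": "jpeg"}]
+                // where omitted fields fall back to the defaults chosen below.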
+                if (jsonOutputSpecs != null) {
+                    numSurfaces = jsonOutputSpecs.length();
+                    if (numSurfaces > MAX_NUM_OUTPUT_SURFACES) {
+                        throw new ItsException("Too many output surfaces");
+                    }
+                    for (int i = 0; i < numSurfaces; i++) {
+                        // Get the specified surface.
+                        JSONObject surfaceObj = jsonOutputSpecs.getJSONObject(i);
+                        String sformat = surfaceObj.optString("format");
+                        Size sizes[];
+                        if ("yuv".equals(sformat) || "".equals(sformat)) {
+                            // Default to YUV if no format is specified.
+                            formats[i] = ImageFormat.YUV_420_888;
+                            sizes = ItsUtils.getYuvOutputSizes(mCameraCharacteristics);
+                        } else if ("jpg".equals(sformat) || "jpeg".equals(sformat)) {
+                            formats[i] = ImageFormat.JPEG;
+                            sizes = ItsUtils.getJpegOutputSizes(mCameraCharacteristics);
+                        } else if ("raw".equals(sformat)) {
+                            formats[i] = ImageFormat.RAW_SENSOR;
+                            sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
+                        } else if ("raw10".equals(sformat)) {
+                            formats[i] = ImageFormat.RAW10;
+                            sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
+                        } else if ("dng".equals(sformat)) {
+                            formats[i] = ImageFormat.RAW_SENSOR;
+                            sizes = ItsUtils.getRawOutputSizes(mCameraCharacteristics);
+                            mCaptureRawIsDng = true;
+                        } else {
+                            throw new ItsException("Unsupported format: " + sformat);
+                        }
+                        // If the size is omitted, then default to the largest allowed size for the
+                        // format.
+                        widths[i] = surfaceObj.optInt("width");
+                        heights[i] = surfaceObj.optInt("height");
+                        if (widths[i] <= 0) {
+                            if (sizes == null || sizes.length == 0) {
+                                throw new ItsException(String.format(
+                                        "Zero stream configs available for requested format: %s",
+                                        sformat));
+                            }
+                            widths[i] = sizes[0].getWidth();
+                        }
+                        if (heights[i] <= 0) {
+                            heights[i] = sizes[0].getHeight();
+                        }
+                    }
+                } else {
+                    // No surface(s) specified at all.
+                    // Default: a single output surface which is full-res YUV.
+                    Size sizes[] =
+                            ItsUtils.getYuvOutputSizes(mCameraCharacteristics);
+                    numSurfaces = 1;
+                    widths[0] = sizes[0].getWidth();
+                    heights[0] = sizes[0].getHeight();
+                    formats[0] = ImageFormat.YUV_420_888;
+                }
+
+                prepareCaptureReader(widths, heights, formats, numSurfaces);
+                List<Surface> outputSurfaces = new ArrayList<Surface>(numSurfaces);
+                for (int i = 0; i < numSurfaces; i++) {
+                    outputSurfaces.add(mCaptureReaders[i].getSurface());
+                }
+                BlockingSessionCallback sessionListener = new BlockingSessionCallback();
+                mCamera.createCaptureSession(outputSurfaces, sessionListener, mCameraHandler);
+                mSession = sessionListener.waitAndGetSession(TIMEOUT_IDLE_MS);
+
+                for (int i = 0; i < numSurfaces; i++) {
+                    ImageReader.OnImageAvailableListener readerListener =
+                            createAvailableListener(mCaptureCallback);
+                    mCaptureReaders[i].setOnImageAvailableListener(readerListener, mSaveHandlers[i]);
+                }
+
+                // Plan for how many callbacks need to be received throughout the duration of this
+                // sequence of capture requests. There is one callback per image surface, and one
+                // callback for the CaptureResult, for each capture.
+                int numCaptures = requests.size();
+                mCountCallbacksRemaining.set(numCaptures * (numSurfaces + 1));
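+                // For example (illustrative numbers): 3 captures into 2 output
+                // surfaces require 3 * (2 + 1) = 9 callbacks before the capture
+                // sequence is considered complete.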
+
+            } catch (CameraAccessException e) {
+                throw new ItsException("Error configuring outputs", e);
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error", e);
+            }
+
+            // Initiate the captures.
+            for (int i = 0; i < requests.size(); i++) {
+                // For DNG captures, the lens shading correction (LSC) map needs to
+                // be available.
+                if (mCaptureRawIsDng) {
+                    requests.get(i).set(CaptureRequest.STATISTICS_LENS_SHADING_MAP_MODE, 1);
+                }
+
+                CaptureRequest.Builder req = requests.get(i);
+                for (int j = 0; j < numSurfaces; j++) {
+                    req.addTarget(mCaptureReaders[j].getSurface());
+                }
+                mSession.capture(req.build(), mCaptureResultListener, mResultHandler);
+            }
+
+            // Make sure all callbacks have been hit (wait until captures are done).
+            // If no new callbacks arrive within the timeout period, then fail.
+            int currentCount = mCountCallbacksRemaining.get();
+            while (currentCount > 0) {
+                try {
+                    Thread.sleep(TIMEOUT_CALLBACK*1000);
+                } catch (InterruptedException e) {
+                    throw new ItsException("Timeout failure", e);
+                }
+                int newCount = mCountCallbacksRemaining.get();
+                if (newCount == currentCount) {
+                    throw new ItsException(
+                            "No callback received within timeout");
+                }
+                currentCount = newCount;
+            }
+        } catch (android.hardware.camera2.CameraAccessException e) {
+            throw new ItsException("Access error: ", e);
+        }
+    }
+
+    @Override
+    public final void onSensorChanged(SensorEvent event) {
+        synchronized(mEventLock) {
+            if (mEventsEnabled) {
+                MySensorEvent ev2 = new MySensorEvent();
+                ev2.sensor = event.sensor;
+                ev2.accuracy = event.accuracy;
+                ev2.timestamp = event.timestamp;
+                ev2.values = new float[event.values.length];
+                System.arraycopy(event.values, 0, ev2.values, 0, event.values.length);
+                mEvents.add(ev2);
+            }
+        }
+    }
+
+    @Override
+    public final void onAccuracyChanged(Sensor sensor, int accuracy) {
+    }
+
+    private final CaptureCallback mCaptureCallback = new CaptureCallback() {
+        @Override
+        public void onCaptureAvailable(Image capture) {
+            try {
+                int format = capture.getFormat();
+                if (format == ImageFormat.JPEG) {
+                    Logt.i(TAG, "Received JPEG capture");
+                    byte[] img = ItsUtils.getDataFromImage(capture);
+                    ByteBuffer buf = ByteBuffer.wrap(img);
+                    mCountJpg.getAndIncrement();
+                    mSocketRunnableObj.sendResponseCaptureBuffer("jpegImage", buf);
+                } else if (format == ImageFormat.YUV_420_888) {
+                    Logt.i(TAG, "Received YUV capture");
+                    byte[] img = ItsUtils.getDataFromImage(capture);
+                    ByteBuffer buf = ByteBuffer.wrap(img);
+                    mCountYuv.getAndIncrement();
+                    mSocketRunnableObj.sendResponseCaptureBuffer("yuvImage", buf);
+                } else if (format == ImageFormat.RAW10) {
+                    Logt.i(TAG, "Received RAW10 capture");
+                    byte[] img = ItsUtils.getDataFromImage(capture);
+                    ByteBuffer buf = ByteBuffer.wrap(img);
+                    mCountRaw10.getAndIncrement();
+                    mSocketRunnableObj.sendResponseCaptureBuffer("raw10Image", buf);
+                } else if (format == ImageFormat.RAW_SENSOR) {
+                    Logt.i(TAG, "Received RAW16 capture");
+                    int count = mCountRawOrDng.getAndIncrement();
+                    if (! mCaptureRawIsDng) {
+                        byte[] img = ItsUtils.getDataFromImage(capture);
+                        ByteBuffer buf = ByteBuffer.wrap(img);
+                        mSocketRunnableObj.sendResponseCaptureBuffer("rawImage", buf);
+                    } else {
+                        // Wait until the corresponding capture result is ready, up to a timeout.
+                        long t0 = android.os.SystemClock.elapsedRealtime();
+                        while (! mThreadExitFlag
+                                && android.os.SystemClock.elapsedRealtime()-t0 < TIMEOUT_CAP_RES) {
+                            if (mCaptureResults[count] != null) {
+                                Logt.i(TAG, "Writing capture as DNG");
+                                DngCreator dngCreator = new DngCreator(
+                                        mCameraCharacteristics, mCaptureResults[count]);
+                                ByteArrayOutputStream dngStream = new ByteArrayOutputStream();
+                                dngCreator.writeImage(dngStream, capture);
+                                byte[] dngArray = dngStream.toByteArray();
+                                ByteBuffer dngBuf = ByteBuffer.wrap(dngArray);
+                                mSocketRunnableObj.sendResponseCaptureBuffer("dngImage", dngBuf);
+                                break;
+                            } else {
+                                Thread.sleep(1);
+                            }
+                        }
+                    }
+                } else {
+                    throw new ItsException("Unsupported image format: " + format);
+                }
+                mCountCallbacksRemaining.decrementAndGet();
+            } catch (IOException e) {
+                Logt.e(TAG, "Script error: ", e);
+            } catch (InterruptedException e) {
+                Logt.e(TAG, "Script error: ", e);
+            } catch (ItsException e) {
+                Logt.e(TAG, "Script error: ", e);
+            }
+        }
+    };
+
+    private static float r2f(Rational r) {
+        return (float)r.getNumerator() / (float)r.getDenominator();
+    }
+
+    private final CaptureResultListener mCaptureResultListener = new CaptureResultListener() {
+        @Override
+        public void onCaptureStarted(CameraCaptureSession session, CaptureRequest request,
+                long timestamp, long frameNumber) {
+        }
+
+        @Override
+        public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request,
+                TotalCaptureResult result) {
+            try {
+                // Sanity-check the request/result pair before reading their fields.
+                if (request == null || result == null) {
+                    throw new ItsException("Request/result is invalid");
+                }
+
+                StringBuilder logMsg = new StringBuilder();
+                logMsg.append(String.format(
+                        "Capt result: AE=%d, AF=%d, AWB=%d, sens=%d, exp=%.1fms, dur=%.1fms, ",
+                        result.get(CaptureResult.CONTROL_AE_STATE),
+                        result.get(CaptureResult.CONTROL_AF_STATE),
+                        result.get(CaptureResult.CONTROL_AWB_STATE),
+                        result.get(CaptureResult.SENSOR_SENSITIVITY),
+                        result.get(CaptureResult.SENSOR_EXPOSURE_TIME).intValue() / 1000000.0f,
+                        result.get(CaptureResult.SENSOR_FRAME_DURATION).intValue() / 1000000.0f));
+                if (result.get(CaptureResult.COLOR_CORRECTION_GAINS) != null) {
+                    logMsg.append(String.format(
+                            "gains=[%.1f, %.1f, %.1f, %.1f], ",
+                            result.get(CaptureResult.COLOR_CORRECTION_GAINS).getRed(),
+                            result.get(CaptureResult.COLOR_CORRECTION_GAINS).getGreenEven(),
+                            result.get(CaptureResult.COLOR_CORRECTION_GAINS).getGreenOdd(),
+                            result.get(CaptureResult.COLOR_CORRECTION_GAINS).getBlue()));
+                } else {
+                    logMsg.append("gains=[], ");
+                }
+                if (result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM) != null) {
+                    logMsg.append(String.format(
+                            "xform=[%.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f, %.1f], ",
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,0)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,0)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,0)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,1)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,1)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,1)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,2)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,2)),
+                            r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,2))));
+                } else {
+                    logMsg.append("xform=[], ");
+                }
+                logMsg.append(String.format(
+                        "foc=%.1f",
+                        result.get(CaptureResult.LENS_FOCUS_DISTANCE)));
+                Logt.i(TAG, logMsg.toString());
+
+                if (result.get(CaptureResult.CONTROL_AE_STATE) != null) {
+                    mConvergedAE = result.get(CaptureResult.CONTROL_AE_STATE) ==
+                                              CaptureResult.CONTROL_AE_STATE_CONVERGED ||
+                                   result.get(CaptureResult.CONTROL_AE_STATE) ==
+                                              CaptureResult.CONTROL_AE_STATE_FLASH_REQUIRED ||
+                                   result.get(CaptureResult.CONTROL_AE_STATE) ==
+                                              CaptureResult.CONTROL_AE_STATE_LOCKED;
+                    mLockedAE = result.get(CaptureResult.CONTROL_AE_STATE) ==
+                                           CaptureResult.CONTROL_AE_STATE_LOCKED;
+                }
+                if (result.get(CaptureResult.CONTROL_AF_STATE) != null) {
+                    mConvergedAF = result.get(CaptureResult.CONTROL_AF_STATE) ==
+                                              CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED;
+                }
+                if (result.get(CaptureResult.CONTROL_AWB_STATE) != null) {
+                    mConvergedAWB = result.get(CaptureResult.CONTROL_AWB_STATE) ==
+                                               CaptureResult.CONTROL_AWB_STATE_CONVERGED ||
+                                    result.get(CaptureResult.CONTROL_AWB_STATE) ==
+                                               CaptureResult.CONTROL_AWB_STATE_LOCKED;
+                    mLockedAWB = result.get(CaptureResult.CONTROL_AWB_STATE) ==
+                                            CaptureResult.CONTROL_AWB_STATE_LOCKED;
+                }
+
+                if (mConvergedAE && (!mNeedsLockedAE || mLockedAE)) {
+                    if (result.get(CaptureResult.SENSOR_SENSITIVITY) != null
+                            && result.get(CaptureResult.SENSOR_EXPOSURE_TIME) != null) {
+                        mSocketRunnableObj.sendResponse("aeResult", String.format("%d %d",
+                                result.get(CaptureResult.SENSOR_SENSITIVITY).intValue(),
+                                result.get(CaptureResult.SENSOR_EXPOSURE_TIME).intValue()
+                                ));
+                    } else {
+                        Logt.i(TAG, String.format(
+                                "AE converged but NULL exposure values, sensitivity:%b, expTime:%b",
+                                result.get(CaptureResult.SENSOR_SENSITIVITY) == null,
+                                result.get(CaptureResult.SENSOR_EXPOSURE_TIME) == null));
+                    }
+                }
+
+                if (mConvergedAF) {
+                    if (result.get(CaptureResult.LENS_FOCUS_DISTANCE) != null) {
+                        mSocketRunnableObj.sendResponse("afResult", String.format("%f",
+                                result.get(CaptureResult.LENS_FOCUS_DISTANCE)
+                                ));
+                    } else {
+                        Logt.i(TAG, "AF converged but NULL focus distance values");
+                    }
+                }
+
+                if (mConvergedAWB && (!mNeedsLockedAWB || mLockedAWB)) {
+                    if (result.get(CaptureResult.COLOR_CORRECTION_GAINS) != null
+                            && result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM) != null) {
+                        mSocketRunnableObj.sendResponse("awbResult", String.format(
+                                "%f %f %f %f %f %f %f %f %f %f %f %f %f",
+                                result.get(CaptureResult.COLOR_CORRECTION_GAINS).getRed(),
+                                result.get(CaptureResult.COLOR_CORRECTION_GAINS).getGreenEven(),
+                                result.get(CaptureResult.COLOR_CORRECTION_GAINS).getGreenOdd(),
+                                result.get(CaptureResult.COLOR_CORRECTION_GAINS).getBlue(),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,0)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,0)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,0)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,1)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,1)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,1)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(0,2)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(1,2)),
+                                r2f(result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM).getElement(2,2))
+                                ));
+                    } else {
+                        Logt.i(TAG, String.format(
+                                "AWB converged but NULL color correction values, gains:%b, ccm:%b",
+                                result.get(CaptureResult.COLOR_CORRECTION_GAINS) == null,
+                                result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM) == null));
+                    }
+                }
+
+                if (mIssuedRequest3A) {
+                    mIssuedRequest3A = false;
+                    mInterlock3A.open();
+                } else {
+                    int count = mCountCapRes.getAndIncrement();
+                    mCaptureResults[count] = result;
+                    mSocketRunnableObj.sendResponseCaptureResult(mCameraCharacteristics,
+                            request, result, mCaptureReaders);
+                    mCountCallbacksRemaining.decrementAndGet();
+                }
+            } catch (ItsException e) {
+                Logt.e(TAG, "Script error: ", e);
+            } catch (Exception e) {
+                Logt.e(TAG, "Script error: ", e);
+            }
+        }
+
+        @Override
+        public void onCaptureFailed(CameraCaptureSession session, CaptureRequest request,
+                CaptureFailure failure) {
+            Logt.e(TAG, "Script error: capture failed");
+        }
+    };
+}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsTestActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsTestActivity.java
new file mode 100644
index 0000000..12b9bfc
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsTestActivity.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+import android.content.BroadcastReceiver;
+import android.content.Context;
+import android.content.Intent;
+import android.content.IntentFilter;
+import android.content.res.Configuration;
+import android.hardware.camera2.CameraAccessException;
+import android.hardware.camera2.CameraCharacteristics;
+import android.hardware.camera2.CameraManager;
+import android.os.Bundle;
+import android.util.Log;
+import android.widget.Toast;
+import java.util.HashSet;
+import java.util.Arrays;
+
+import com.android.cts.verifier.PassFailButtons;
+import com.android.cts.verifier.R;
+
+
+/**
+ * Test for Camera features that require that the camera be aimed at a specific test scene.
+ * This test activity requires a USB connection to a computer, and a corresponding host-side run of
+ * the python scripts found in the CameraITS directory.
+ */
+public class ItsTestActivity extends PassFailButtons.Activity {
+    private static final String TAG = "ItsTestActivity";
+    private static final String EXTRA_SUCCESS = "camera.its.extra.SUCCESS";
+    private static final String ACTION_ITS_RESULT =
+            "com.android.cts.verifier.camera.its.ACTION_ITS_RESULT";
+
+    class SuccessReceiver extends BroadcastReceiver {
+        @Override
+        public void onReceive(Context context, Intent intent) {
+            Log.i(TAG, "Received result for Camera ITS tests");
+            if (ACTION_ITS_RESULT.equals(intent.getAction())) {
+                String result = intent.getStringExtra(EXTRA_SUCCESS);
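+                // The result string is expected to be "<cameraId>=True" or
+                // "<cameraId>=False", e.g. "0=True".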
+                String[] parts = result.split("=");
+                if (parts.length != 2) {
+                    Toast.makeText(ItsTestActivity.this,
+                            "Received unknown ITS result string: " + result,
+                            Toast.LENGTH_SHORT).show();
+                    return;
+                }
+                String cameraId = parts[0];
+                boolean pass = parts[1].equals("True");
+                if (pass) {
+                    Log.i(TAG, "Received Camera " + cameraId + " ITS SUCCESS from host.");
+                    mITSPassedCameraIds.add(cameraId);
+                    if (mCameraIds != null &&
+                            mITSPassedCameraIds.containsAll(Arrays.asList(mCameraIds))) {
+                        ItsTestActivity.this.showToast(R.string.its_test_passed);
+                        ItsTestActivity.this.getPassButton().setEnabled(true);
+                    }
+                } else {
+                    Log.i(TAG, "Received Camera " + cameraId + " ITS FAILURE from host.");
+                    ItsTestActivity.this.showToast(R.string.its_test_failed);
+                }
+            }
+        }
+    }
+
+    private final SuccessReceiver mSuccessReceiver = new SuccessReceiver();
+    private final HashSet<String> mITSPassedCameraIds = new HashSet<>();
+    private String[] mCameraIds = null;
+
+    @Override
+    protected void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+        setContentView(R.layout.its_main);
+        setInfoResources(R.string.camera_its_test, R.string.camera_its_test_info, -1);
+        setPassFailButtonClickListeners();
+        getPassButton().setEnabled(false);
+    }
+
+    @Override
+    protected void onResume() {
+        super.onResume();
+        CameraManager manager = (CameraManager) this.getSystemService(Context.CAMERA_SERVICE);
+        if (manager == null) {
+            showToast(R.string.no_camera_manager);
+        } else {
+            try {
+                mCameraIds = manager.getCameraIdList();
+                boolean allCamerasAreLegacy = true;
+                for (String id : mCameraIds) {
+                    CameraCharacteristics characteristics = manager.getCameraCharacteristics(id);
+                    if (characteristics.get(CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL)
+                            != CameraCharacteristics.INFO_SUPPORTED_HARDWARE_LEVEL_LEGACY) {
+                        allCamerasAreLegacy = false;
+                        break;
+                    }
+                }
+                if (allCamerasAreLegacy) {
+                    showToast(R.string.all_legacy_devices);
+                    getPassButton().setEnabled(false);
+                }
+            } catch (CameraAccessException e) {
+                Toast.makeText(ItsTestActivity.this,
+                        "Received error from camera service while checking device capabilities: "
+                                + e, Toast.LENGTH_SHORT).show();
+            }
+            IntentFilter filter = new IntentFilter(ACTION_ITS_RESULT);
+            registerReceiver(mSuccessReceiver, filter);
+        }
+    }
+
+    @Override
+    protected void onPause() {
+        super.onPause();
+        unregisterReceiver(mSuccessReceiver);
+    }
+
+    @Override
+    public void onConfigurationChanged(Configuration newConfig) {
+        super.onConfigurationChanged(newConfig);
+        setContentView(R.layout.its_main);
+        setInfoResources(R.string.camera_its_test, R.string.camera_its_test_info, -1);
+        setPassFailButtonClickListeners();
+        getPassButton().setEnabled(false);
+    }
+
+    private void showToast(int messageId) {
+        Toast.makeText(ItsTestActivity.this, messageId, Toast.LENGTH_SHORT).show();
+    }
+
+}
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
new file mode 100644
index 0000000..2541142
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/ItsUtils.java
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+import android.content.Context;
+import android.graphics.ImageFormat;
+import android.hardware.camera2.CameraDevice;
+import android.hardware.camera2.CameraCharacteristics;
+import android.hardware.camera2.CaptureRequest;
+import android.hardware.camera2.CaptureResult;
+import android.hardware.camera2.params.MeteringRectangle;
+import android.hardware.camera2.params.StreamConfigurationMap;
+import android.media.Image;
+import android.media.Image.Plane;
+import android.net.Uri;
+import android.os.Environment;
+import android.util.Log;
+import android.util.Size;
+
+import org.json.JSONArray;
+import org.json.JSONObject;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ItsUtils {
+    public static final String TAG = ItsUtils.class.getSimpleName();
+
+    public static ByteBuffer jsonToByteBuffer(JSONObject jsonObj) {
+        return ByteBuffer.wrap(jsonObj.toString().getBytes(Charset.defaultCharset()));
+    }
+
+    public static MeteringRectangle[] getJsonWeightedRectsFromArray(
+            JSONArray a, boolean normalized, int width, int height)
+            throws ItsException {
+        try {
+            // Parses [x,y,w,h,wgt,  x,y,w,h,wgt,  ...] tuples, either normalized to
+            // [0,1] or in pixel coordinates, into MeteringRectangles.
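+            // For example, a normalized input [0.0, 0.0, 1.0, 1.0, 1] on a
+            // hypothetical 640x480 frame yields MeteringRectangle(0, 0, 640, 480, 1).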
+            if (a.length() % 5 != 0) {
+                throw new ItsException("Rectangle array length is not a multiple of 5");
+            }
+            MeteringRectangle[] ma = new MeteringRectangle[a.length() / 5];
+            for (int i = 0; i < a.length(); i += 5) {
+                int x,y,w,h;
+                if (normalized) {
+                    x = (int)Math.floor(a.getDouble(i+0) * width + 0.5f);
+                    y = (int)Math.floor(a.getDouble(i+1) * height + 0.5f);
+                    w = (int)Math.floor(a.getDouble(i+2) * width + 0.5f);
+                    h = (int)Math.floor(a.getDouble(i+3) * height + 0.5f);
+                } else {
+                    x = a.getInt(i+0);
+                    y = a.getInt(i+1);
+                    w = a.getInt(i+2);
+                    h = a.getInt(i+3);
+                }
+                x = Math.max(x, 0);
+                y = Math.max(y, 0);
+                w = Math.min(w, width-x);
+                h = Math.min(h, height-y);
+                int wgt = a.getInt(i+4);
+                ma[i/5] = new MeteringRectangle(x,y,w,h,wgt);
+            }
+            return ma;
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        }
+    }
+
+    public static JSONArray getOutputSpecs(JSONObject jsonObjTop)
+            throws ItsException {
+        try {
+            if (jsonObjTop.has("outputSurfaces")) {
+                return jsonObjTop.getJSONArray("outputSurfaces");
+            }
+            return null;
+        } catch (org.json.JSONException e) {
+            throw new ItsException("JSON error: ", e);
+        }
+    }
+
+    public static Size[] getRawOutputSizes(CameraCharacteristics ccs)
+            throws ItsException {
+        return getOutputSizes(ccs, ImageFormat.RAW_SENSOR);
+    }
+
+    public static Size[] getJpegOutputSizes(CameraCharacteristics ccs)
+            throws ItsException {
+        return getOutputSizes(ccs, ImageFormat.JPEG);
+    }
+
+    public static Size[] getYuvOutputSizes(CameraCharacteristics ccs)
+            throws ItsException {
+        return getOutputSizes(ccs, ImageFormat.YUV_420_888);
+    }
+
+    private static Size[] getOutputSizes(CameraCharacteristics ccs, int format)
+            throws ItsException {
+        StreamConfigurationMap configMap = ccs.get(
+                CameraCharacteristics.SCALER_STREAM_CONFIGURATION_MAP);
+        if (configMap == null) {
+            throw new ItsException("Failed to get stream config");
+        }
+        return configMap.getOutputSizes(format);
+    }
+
+    public static byte[] getDataFromImage(Image image)
+            throws ItsException {
+        int format = image.getFormat();
+        int width = image.getWidth();
+        int height = image.getHeight();
+        byte[] data = null;
+
+        // Read image data
+        Plane[] planes = image.getPlanes();
+
+        // Check image validity
+        if (!checkAndroidImageFormat(image)) {
+            throw new ItsException(
+                    "Invalid image format passed to getDataFromImage: " + image.getFormat());
+        }
+
+        if (format == ImageFormat.JPEG) {
+            // JPEG doesn't have a pixel or row stride; treat it as a 1D buffer.
+            ByteBuffer buffer = planes[0].getBuffer();
+            data = new byte[buffer.capacity()];
+            buffer.get(data);
+            return data;
+        } else if (format == ImageFormat.YUV_420_888 || format == ImageFormat.RAW_SENSOR
+                || format == ImageFormat.RAW10) {
+            int offset = 0;
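+            // Allocate the full frame up front; e.g. (illustrative) a 640x480
+            // YUV_420_888 image at 12 bits per pixel needs
+            // 640 * 480 * 12 / 8 = 460800 bytes.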
+            data = new byte[width * height * ImageFormat.getBitsPerPixel(format) / 8];
+            int maxRowSize = planes[0].getRowStride();
+            for (int i = 0; i < planes.length; i++) {
+                if (maxRowSize < planes[i].getRowStride()) {
+                    maxRowSize = planes[i].getRowStride();
+                }
+            }
+            byte[] rowData = new byte[maxRowSize];
+            for (int i = 0; i < planes.length; i++) {
+                ByteBuffer buffer = planes[i].getBuffer();
+                int rowStride = planes[i].getRowStride();
+                int pixelStride = planes[i].getPixelStride();
+                int bytesPerPixel = ImageFormat.getBitsPerPixel(format) / 8;
+                Logt.i(TAG, String.format(
+                        "Reading image: fmt %d, plane %d, w %d, h %d, rowStride %d, pixStride %d",
+                        format, i, width, height, rowStride, pixelStride));
+                // For multi-planar yuv images, assuming yuv420 with 2x2 chroma subsampling.
+                int w = (i == 0) ? width : width / 2;
+                int h = (i == 0) ? height : height / 2;
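+                // e.g. for a hypothetical 640x480 frame, plane 0 is 640x480 luma,
+                // and planes 1 and 2 are each 320x240 chroma.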
+                for (int row = 0; row < h; row++) {
+                    if (pixelStride == bytesPerPixel) {
+                        // Special case: optimized read of the entire row
+                        int length = w * bytesPerPixel;
+                        buffer.get(data, offset, length);
+                        // Advance the buffer past the remainder of the row stride
+                        buffer.position(buffer.position() + rowStride - length);
+                        offset += length;
+                    } else {
+                        // Generic case: works for any pixelStride, but is slower.
+                        // Use an intermediate buffer to avoid reading byte-by-byte from
+                        // the DirectByteBuffer, which is very bad for performance.
+                        // Also avoid out-of-bounds access by only reading the bytes
+                        // actually available in the buffer.
+                        int readSize = rowStride;
+                        if (buffer.remaining() < readSize) {
+                            readSize = buffer.remaining();
+                        }
+                        buffer.get(rowData, 0, readSize);
+                        if (pixelStride >= 1) {
+                            for (int col = 0; col < w; col++) {
+                                data[offset++] = rowData[col * pixelStride];
+                            }
+                        } else {
+                            // A pixelStride of 0 can mean a pixel isn't a multiple of 8
+                            // bits, for example with RAW10. Just copy the buffer, dropping
+                            // any padding at the end of the row.
+                            int length = (w * ImageFormat.getBitsPerPixel(format)) / 8;
+                            System.arraycopy(rowData, 0, data, offset, length);
+                            offset += length;
+                        }
+                    }
+                }
+            }
+            Logt.i(TAG, String.format("Done reading image, format %d", format));
+            return data;
+        } else {
+            throw new ItsException("Unsupported image format: " + format);
+        }
+    }
+
+    private static boolean checkAndroidImageFormat(Image image) {
+        int format = image.getFormat();
+        Plane[] planes = image.getPlanes();
+        switch (format) {
+            case ImageFormat.YUV_420_888:
+            case ImageFormat.NV21:
+            case ImageFormat.YV12:
+                return 3 == planes.length;
+            case ImageFormat.RAW_SENSOR:
+            case ImageFormat.RAW10:
+            case ImageFormat.JPEG:
+                return 1 == planes.length;
+            default:
+                return false;
+        }
+    }
+}
+
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/Logt.java b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/Logt.java
new file mode 100644
index 0000000..852a1ce
--- /dev/null
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/camera/its/Logt.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.cts.verifier.camera.its;
+
+import android.util.Log;
+
+public class Logt {
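+    // Thin wrappers around android.util.Log that prefix each message with the
+    // elapsedRealtime timestamp in ms, e.g. "[1234567] Socket disconnected",
+    // presumably so host-side logs can be lined up with device event times.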
+    public static void i(String tag, String msg) {
+        long t = android.os.SystemClock.elapsedRealtime();
+        Log.i(tag, String.format("[%d] %s", t, msg));
+    }
+    public static void e(String tag, String msg) {
+        long t = android.os.SystemClock.elapsedRealtime();
+        Log.e(tag, String.format("[%d] %s", t, msg));
+    }
+    public static void w(String tag, String msg) {
+        long t = android.os.SystemClock.elapsedRealtime();
+        Log.w(tag, String.format("[%d] %s", t, msg));
+    }
+    public static void e(String tag, String msg, Throwable tr) {
+        long t = android.os.SystemClock.elapsedRealtime();
+        Log.e(tag, String.format("[%d] %s", t, msg), tr);
+    }
+}
+
diff --git a/apps/CtsVerifier/src/com/android/cts/verifier/managedprovisioning/ByodFlowTestActivity.java b/apps/CtsVerifier/src/com/android/cts/verifier/managedprovisioning/ByodFlowTestActivity.java
index da823e8..6a9de44 100644
--- a/apps/CtsVerifier/src/com/android/cts/verifier/managedprovisioning/ByodFlowTestActivity.java
+++ b/apps/CtsVerifier/src/com/android/cts/verifier/managedprovisioning/ByodFlowTestActivity.java
@@ -69,7 +69,6 @@
     protected DevicePolicyManager mDevicePolicyManager;
 
     private TestItem mProfileOwnerInstalled;
-    private TestItem mDiskEncryptionTest;
     private TestItem mProfileVisibleTest;
     private TestItem mDeviceAdminVisibleTest;
     private TestItem mWorkAppVisibleTest;
@@ -155,13 +154,6 @@
             }
         };
 
-        mDiskEncryptionTest = new TestItem(this, R.string.provisioning_byod_diskencryption) {
-            @Override
-            public TestResult getPassFailState() {
-                return isDeviceEncrypted() ? TestResult.Passed : TestResult.Failed;
-            }
-        };
-
         mProfileVisibleTest = new TestItem(this, R.string.provisioning_byod_profile_visible,
                 R.string.provisioning_byod_profile_visible_instruction,
                 new Intent(Settings.ACTION_SETTINGS));
@@ -181,7 +173,6 @@
                 R.string.provisioning_byod_cross_profile_instruction,
                 chooser);
 
-        mTests.add(mDiskEncryptionTest);
         mTests.add(mProfileOwnerInstalled);
         mTests.add(mProfileVisibleTest);
         mTests.add(mDeviceAdminVisibleTest);
@@ -284,11 +275,6 @@
                 PackageManager.DONT_KILL_APP);
     }
 
-    private boolean isDeviceEncrypted() {
-        return mDevicePolicyManager.getStorageEncryptionStatus()
-                == DevicePolicyManager.ENCRYPTION_STATUS_ACTIVE;
-    }
-
     private void showToast(int messageId) {
         String message = getString(messageId);
         Toast.makeText(this, message, Toast.LENGTH_SHORT).show();
diff --git a/tests/tests/app/src/android/app/cts/DialogTest.java b/tests/tests/app/src/android/app/cts/DialogTest.java
index 56e731b..6df2eee 100644
--- a/tests/tests/app/src/android/app/cts/DialogTest.java
+++ b/tests/tests/app/src/android/app/cts/DialogTest.java
@@ -393,25 +393,28 @@
         d.isOnTouchEventCalled = false;
         assertTrue(d.isShowing());
 
-        // Send a touch event outside the activity.  This time the dialog will be dismissed
-        // because closeOnTouchOutside is true.
-        d.setCanceledOnTouchOutside(true);
+        // Watch activities cover the entire screen, so there is no way to touch outside.
+        if (!mContext.getPackageManager().hasSystemFeature(PackageManager.FEATURE_WATCH)) {
+            // Send a touch event outside the activity.  This time the dialog will be dismissed
+            // because closeOnTouchOutside is true.
+            d.setCanceledOnTouchOutside(true);
 
-        touchMotionEvent = MotionEvent.obtain(now, now + 1, MotionEvent.ACTION_DOWN,
-                1, 100, 0);
-        mInstrumentation.sendPointerSync(touchMotionEvent);
+            touchMotionEvent = MotionEvent.obtain(now, now + 1, MotionEvent.ACTION_DOWN,
+                    1, 100, 0);
+            mInstrumentation.sendPointerSync(touchMotionEvent);
 
-        new PollingCheck(TEST_TIMEOUT) {
-            protected boolean check() {
-                return d.dispatchTouchEventResult;
-            }
-        }.run();
+            new PollingCheck(TEST_TIMEOUT) {
+                protected boolean check() {
+                    return d.dispatchTouchEventResult;
+                }
+            }.run();
 
-        assertMotionEventEquals(touchMotionEvent, d.touchEvent);
+            assertMotionEventEquals(touchMotionEvent, d.touchEvent);
 
-        assertTrue(d.isOnTouchEventCalled);
-        assertMotionEventEquals(touchMotionEvent, d.onTouchEvent);
-        assertFalse(d.isShowing());
+            assertTrue(d.isOnTouchEventCalled);
+            assertMotionEventEquals(touchMotionEvent, d.onTouchEvent);
+            assertFalse(d.isShowing());
+        }
     }
 
     public void testTrackballEvent() {
diff --git a/tests/tests/display/src/android/display/cts/VirtualDisplayTest.java b/tests/tests/display/src/android/display/cts/VirtualDisplayTest.java
index f2f859a..872de91 100644
--- a/tests/tests/display/src/android/display/cts/VirtualDisplayTest.java
+++ b/tests/tests/display/src/android/display/cts/VirtualDisplayTest.java
@@ -57,7 +57,7 @@
     private static final int WIDTH = 720;
     private static final int HEIGHT = 480;
     private static final int DENSITY = DisplayMetrics.DENSITY_MEDIUM;
-    private static final int TIMEOUT = 10000;
+    private static final int TIMEOUT = 40000;
 
     // Colors that we use as a signal to determine whether some desired content was
 drawn.  The colors themselves don't matter but we choose them to have distinct
diff --git a/tests/tests/hardware/AndroidManifest.xml b/tests/tests/hardware/AndroidManifest.xml
index ca148f9..1a02d0a 100644
--- a/tests/tests/hardware/AndroidManifest.xml
+++ b/tests/tests/hardware/AndroidManifest.xml
@@ -23,6 +23,7 @@
     <uses-permission android:name="android.permission.RECORD_AUDIO" />
     <uses-permission android:name="android.permission.WAKE_LOCK" />
     <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
+    <uses-permission android:name="android.permission.BODY_SENSORS" />
 
     <application>
         <uses-library android:name="android.test.runner" />
diff --git a/tests/tests/hardware/src/android/hardware/camera2/cts/RecordingTest.java b/tests/tests/hardware/src/android/hardware/camera2/cts/RecordingTest.java
index 90cb18a..669de2d 100644
--- a/tests/tests/hardware/src/android/hardware/camera2/cts/RecordingTest.java
+++ b/tests/tests/hardware/src/android/hardware/camera2/cts/RecordingTest.java
@@ -78,6 +78,7 @@
     private static final int MAX_VIDEO_SNAPSHOT_IMAGES = 5;
     private static final int BURST_VIDEO_SNAPSHOT_NUM = 3;
     private static final int SLOWMO_SLOW_FACTOR = 4;
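+    // Upper bound, in expected frame durations, on the frame gap around a video snapshot.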
+    private static final int MAX_NUM_FRAME_DROP_ALLOWED = 4;
     private List<Size> mSupportedVideoSizes;
     private Surface mRecordingSurface;
     private MediaRecorder mMediaRecorder;
@@ -909,6 +910,15 @@
                 // Snapshots in legacy mode pause the preview briefly.  Skip the duration
                 // requirements for legacy mode until this is fixed.
                 if (!mStaticInfo.isHardwareLevelLegacy()) {
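+                    // Fail if the frame gap before the snapshot exceeds the allowed bound.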
+                    mCollector.expectTrue(
+                            String.format(
+                                    "Video %dx%d Frame drop detected before video snapshot: " +
+                                            "duration %dms (expected %dms)",
+                                    mVideoSize.getWidth(), mVideoSize.getHeight(),
+                                    durationMs, expectedDurationMs
+                            ),
+                            durationMs <= (expectedDurationMs * MAX_NUM_FRAME_DROP_ALLOWED)
+                    );
                     // Log a warning if there is any frame drop detected.
                     if (durationMs >= expectedDurationMs * 2) {
                         Log.w(TAG, String.format(
@@ -920,6 +930,15 @@
                     }
 
                     durationMs = (int) (nextTS - currentTS) / 1000000;
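+                    // Apply the same bound to the frame gap right after the snapshot.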
+                    mCollector.expectTrue(
+                            String.format(
+                                    "Video %dx%d Frame drop detected after video snapshot: " +
+                                            "duration %dms (expected %dms)",
+                                    mVideoSize.getWidth(), mVideoSize.getHeight(),
+                                    durationMs, expectedDurationMs
+                            ),
+                            durationMs <= (expectedDurationMs * MAX_NUM_FRAME_DROP_ALLOWED)
+                    );
                     // Log a warning if there is any frame drop detected.
                     if (durationMs >= expectedDurationMs * 2) {
                         Log.w(TAG, String.format(
diff --git a/tests/tests/media/src/android/media/cts/AdaptivePlaybackTest.java b/tests/tests/media/src/android/media/cts/AdaptivePlaybackTest.java
index 5c9f1b1..dbe2c92 100644
--- a/tests/tests/media/src/android/media/cts/AdaptivePlaybackTest.java
+++ b/tests/tests/media/src/android/media/cts/AdaptivePlaybackTest.java
@@ -19,6 +19,7 @@
 import com.android.cts.media.R;
 
 import android.content.Context;
+import android.content.pm.PackageManager;
 import android.content.res.AssetFileDescriptor;
 import android.media.MediaCodec;
 import android.media.MediaCodecInfo;
@@ -285,6 +286,11 @@
     }
 
     private void ex(Iterable<Codec> codecList, Test[] testList) {
+        if (codecList == null) {
+            Log.i(TAG, "CodecList was empty. Skipping test.");
+            return;
+        }
+
         TestList tests = new TestList();
         for (Codec c : codecList) {
             for (Test test : testList) {
@@ -1342,8 +1348,21 @@
 }
 
 class CodecFactory {
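+    // Checks whether a codec with exactly this name exists on the device.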
+    protected boolean hasCodec(String codecName) {
+        MediaCodecList list = new MediaCodecList(MediaCodecList.ALL_CODECS);
+        for (MediaCodecInfo info : list.getCodecInfos()) {
+            if (codecName.equals(info.getName())) {
+                return true;
+            }
+        }
+        return false;
+    }
+
     public CodecList createCodecList(
             Context context, String mime, String googleCodecName, int ...resources) {
+        if (!hasCodec(googleCodecName)) {
+            return null;
+        }
         return new CodecFamily(context, mime, googleCodecName, resources);
     }
 }
@@ -1351,6 +1370,9 @@
 class SWCodecFactory extends CodecFactory {
     public CodecList createCodecList(
             Context context, String mime, String googleCodecName, int ...resources) {
+        if (!hasCodec(googleCodecName)) {
+            return null;
+        }
         return new CodecByName(context, mime, googleCodecName, resources);
     }
 }
@@ -1358,6 +1380,9 @@
 class HWCodecFactory extends CodecFactory {
     public CodecList createCodecList(
             Context context, String mime, String googleCodecName, int ...resources) {
+        if (!hasCodec(googleCodecName)) {
+            return null;
+        }
         return new CodecFamilyExcept(context, mime, googleCodecName, resources);
     }
 }
diff --git a/tests/tests/media/src/android/media/cts/ClearKeySystemTest.java b/tests/tests/media/src/android/media/cts/ClearKeySystemTest.java
index c05a605..673c1d7 100644
--- a/tests/tests/media/src/android/media/cts/ClearKeySystemTest.java
+++ b/tests/tests/media/src/android/media/cts/ClearKeySystemTest.java
@@ -16,6 +16,7 @@
 package android.media.cts;
 
 import android.content.Context;
+import android.content.pm.PackageManager;
 import android.media.MediaCodec;
 import android.media.MediaCodecInfo;
 import android.media.MediaCodecInfo.CodecCapabilities;
@@ -402,10 +403,19 @@
         }
     }
 
+    private boolean hasAudioOutput() {
+        return getInstrumentation().getTargetContext().getPackageManager()
+            .hasSystemFeature(PackageManager.FEATURE_AUDIO_OUTPUT);
+    }
+
     /**
      * Tests clear key system playback.
      */
     public void testClearKeyPlayback() throws Exception {
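+        // ClearKey playback needs an audio sink; skip on devices without audio output.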
+        if (!hasAudioOutput()) {
+            return;
+        }
+
         MediaDrm drm = startDrm();
         if (null == drm) {
             throw new Error("Failed to create drm.");
diff --git a/tests/tests/security/jni/android_security_cts_NetlinkSocket.cpp b/tests/tests/security/jni/android_security_cts_NetlinkSocket.cpp
index 2411f74..de315ea 100644
--- a/tests/tests/security/jni/android_security_cts_NetlinkSocket.cpp
+++ b/tests/tests/security/jni/android_security_cts_NetlinkSocket.cpp
@@ -32,7 +32,7 @@
     int sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
     if (sock == -1) {
         ALOGE("Can't create socket %s", strerror(errno));
-        jclass SocketException = env->FindClass("java/security/SocketException");
+        jclass SocketException = env->FindClass("java/net/SocketException");
         env->ThrowNew(SocketException, "Can't create socket");
         return;
     }
diff --git a/tests/tests/security/src/android/security/cts/NetlinkSocket.java b/tests/tests/security/src/android/security/cts/NetlinkSocket.java
index 1ea6d26..5ea80ca 100644
--- a/tests/tests/security/src/android/security/cts/NetlinkSocket.java
+++ b/tests/tests/security/src/android/security/cts/NetlinkSocket.java
@@ -18,6 +18,7 @@
 
 import java.io.FileDescriptor;
 import java.io.IOException;
+import java.net.SocketException;
 
 public class NetlinkSocket {
 
@@ -25,7 +26,7 @@
         System.loadLibrary("ctssecurity_jni");
     }
 
-    private static native void create_native(FileDescriptor fd);
+    private static native void create_native(FileDescriptor fd) throws SocketException;
     private static native int sendmsg(FileDescriptor fd, int pid, byte[] bytes);
 
     private FileDescriptor fd = new FileDescriptor();
@@ -33,7 +34,7 @@
     /** no public constructors */
     private NetlinkSocket() { }
 
-    public static NetlinkSocket create() {
+    public static NetlinkSocket create() throws SocketException {
         NetlinkSocket retval = new NetlinkSocket();
         create_native(retval.fd);
         return retval;
diff --git a/tests/tests/security/src/android/security/cts/VoldExploitTest.java b/tests/tests/security/src/android/security/cts/VoldExploitTest.java
index edaf82a..103158f 100644
--- a/tests/tests/security/src/android/security/cts/VoldExploitTest.java
+++ b/tests/tests/security/src/android/security/cts/VoldExploitTest.java
@@ -26,6 +26,7 @@
 import java.io.FileReader;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
+import java.net.SocketException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashSet;
@@ -103,7 +104,13 @@
           return;
         }
 
-        NetlinkSocket ns = NetlinkSocket.create();
+        NetlinkSocket ns;
+        try {
+            ns = NetlinkSocket.create();
+        } catch (SocketException e) {
+            // Can't create netlink socket. Not vulnerable.
+            return;
+        }
         for (int i : pids) {
             for (String j : devices) {
                 doAttack(ns, i, j);
diff --git a/tests/tests/uirendering/res/layout/simple_rect_layout.xml b/tests/tests/uirendering/res/layout/simple_rect_layout.xml
index 24c9b6b..e64c4e9 100644
--- a/tests/tests/uirendering/res/layout/simple_rect_layout.xml
+++ b/tests/tests/uirendering/res/layout/simple_rect_layout.xml
@@ -17,11 +17,10 @@
 <LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
     android:orientation="vertical"
     android:layout_width="match_parent"
-    android:layout_height="match_parent"
-    android:background="#f00">
+    android:layout_height="match_parent">
 
-    <View android:layout_width="180px"
-        android:layout_height="120px"
-        android:background="#0f0" />
+    <View android:layout_width="100px"
+        android:layout_height="100px"
+        android:background="#00f" />
 
 </LinearLayout>
diff --git a/tests/tests/uirendering/src/android/uirendering/cts/testclasses/ExactCanvasTests.java b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/ExactCanvasTests.java
index 3088142..afbad65 100644
--- a/tests/tests/uirendering/src/android/uirendering/cts/testclasses/ExactCanvasTests.java
+++ b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/ExactCanvasTests.java
@@ -16,8 +16,6 @@
 
 package android.uirendering.cts.testclasses;
 
-import com.android.cts.uirendering.R;
-
 import android.graphics.Canvas;
 import android.graphics.Color;
 import android.graphics.Paint;
@@ -31,8 +29,7 @@
 import android.uirendering.cts.bitmapverifiers.RectVerifier;
 import android.uirendering.cts.testinfrastructure.ActivityTestBase;
 import android.uirendering.cts.testinfrastructure.CanvasClient;
-import android.uirendering.cts.testinfrastructure.ViewInitializer;
-import android.view.View;
+import com.android.cts.uirendering.R;
 
 public class ExactCanvasTests extends ActivityTestBase {
     private final BitmapComparer mExactComparer = new ExactComparer();
@@ -212,14 +209,4 @@
                 .addLayout(R.layout.blue_padded_square, null)
                 .runWithVerifier(verifier);
     }
-
-    @SmallTest
-    public void testClipping() {
-        createTest().addLayout(R.layout.simple_red_layout, new ViewInitializer() {
-            @Override
-            public void intializeView(View view) {
-                view.setClipBounds(new Rect(0, 0, 50, 50));
-            }
-        }).runWithComparer(mExactComparer);
-    }
 }
diff --git a/tests/tests/uirendering/src/android/uirendering/cts/testclasses/PathClippingTests.java b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/PathClippingTests.java
new file mode 100644
index 0000000..8df8057
--- /dev/null
+++ b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/PathClippingTests.java
@@ -0,0 +1,146 @@
+package android.uirendering.cts.testclasses;
+
+import android.graphics.Canvas;
+import android.graphics.Color;
+import android.graphics.Paint;
+import android.graphics.Path;
+import android.graphics.Point;
+import android.graphics.Typeface;
+import android.test.suitebuilder.annotation.SmallTest;
+import android.uirendering.cts.bitmapcomparers.MSSIMComparer;
+import android.uirendering.cts.bitmapverifiers.SamplePointVerifier;
+import android.uirendering.cts.testinfrastructure.ActivityTestBase;
+import android.uirendering.cts.testinfrastructure.CanvasClient;
+import android.uirendering.cts.testinfrastructure.ViewInitializer;
+import android.view.View;
+import android.view.ViewGroup;
+import com.android.cts.uirendering.R;
+
+public class PathClippingTests extends ActivityTestBase {
+    // draw a circle with a hole in it, using a stroked circle
+    static final CanvasClient sCircleDrawCanvasClient = new CanvasClient() {
+        @Override
+        public String getDebugString() {
+            return "StrokedCircleDraw";
+        }
+
+        @Override
+        public void draw(Canvas canvas, int width, int height) {
+            Paint paint = new Paint();
+            paint.setAntiAlias(false);
+            paint.setColor(Color.BLUE);
+            paint.setStyle(Paint.Style.STROKE);
+            paint.setStrokeWidth(20);
+            canvas.drawCircle(50, 50, 40, paint);
+        }
+    };
+
+    // draw a circle with a hole in it, via path operations + path clipping
+    static final CanvasClient sCircleClipCanvasClient = new CanvasClient() {
+        @Override
+        public String getDebugString() {
+            return "CircleClipDraw";
+        }
+
+        @Override
+        public void draw(Canvas canvas, int width, int height) {
+            canvas.save();
+
+            Path path = new Path();
+            path.addCircle(50, 50, 50, Path.Direction.CW);
+            path.addCircle(50, 50, 30, Path.Direction.CCW);
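+            // Under the default WINDING fill rule, the counter-clockwise inner
+            // circle cuts a hole out of the clockwise outer circle.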
+
+            canvas.clipPath(path);
+            canvas.drawColor(Color.BLUE);
+
+            canvas.restore();
+        }
+    };
+
+    @SmallTest
+    public void testCircleWithCircle() {
+        createTest()
+                .addCanvasClient(sCircleDrawCanvasClient, false)
+                .addCanvasClient(sCircleClipCanvasClient)
+                .runWithComparer(new MSSIMComparer(0.90));
+    }
+
+    @SmallTest
+    public void testCircleWithPoints() {
+        createTest()
+                .addCanvasClient(sCircleClipCanvasClient)
+                .runWithVerifier(new SamplePointVerifier(
+                        new Point[] {
+                                // inside of circle
+                                new Point(50, 50),
+                                // on circle
+                                new Point(50 + 32, 50 + 32),
+                                // outside of circle
+                                new Point(50 + 38, 50 + 38),
+                                new Point(100, 100)
+                        },
+                        new int[] {
+                                Color.WHITE,
+                                Color.BLUE,
+                                Color.WHITE,
+                                Color.WHITE,
+                        }));
+    }
+
+    @SmallTest
+    public void testViewRotate() {
+        createTest()
+                .addLayout(R.layout.blue_padded_layout, new ViewInitializer() {
+                    @Override
+                    public void intializeView(View view) {
+                        ViewGroup rootView = (ViewGroup) view;
+                        rootView.setClipChildren(true);
+                        View childView = rootView.getChildAt(0);
+                        childView.setPivotX(50);
+                        childView.setPivotY(50);
+                        childView.setRotation(45f);
+
+                    }
+                })
+                .runWithVerifier(new SamplePointVerifier(
+                        new Point[] {
+                                // inside of rotated rect
+                                new Point(50, 50),
+                                new Point(50 + 32, 50 + 32),
+                                // outside of rotated rect
+                                new Point(50 + 38, 50 + 38),
+                                new Point(100, 100)
+                        },
+                        new int[] {
+                                Color.BLUE,
+                                Color.BLUE,
+                                Color.WHITE,
+                                Color.WHITE,
+                        }));
+    }
+
+    @SmallTest
+    public void testTextClip() {
+        createTest()
+                .addCanvasClient(new CanvasClient() {
+                    @Override
+                    public void draw(Canvas canvas, int width, int height) {
+                        canvas.save();
+
+                        Path path = new Path();
+                        path.addCircle(0, 50, 50, Path.Direction.CW);
+                        path.addCircle(100, 50, 50, Path.Direction.CW);
+                        canvas.clipPath(path);
+
+                        Paint paint = new Paint();
+                        paint.setAntiAlias(true);
+                        paint.setTextSize(100);
+                        paint.setTypeface(Typeface.defaultFromStyle(Typeface.BOLD));
+                        canvas.drawText("STRING", 0, 100, paint);
+
+                        canvas.restore();
+                    }
+                })
+                .runWithComparer(new MSSIMComparer(0.90));
+    }
+}
diff --git a/tests/tests/uirendering/src/android/uirendering/cts/testclasses/view/UnclippedBlueView.java b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/view/UnclippedBlueView.java
index e2037f7..7a16e3c 100644
--- a/tests/tests/uirendering/src/android/uirendering/cts/testclasses/view/UnclippedBlueView.java
+++ b/tests/tests/uirendering/src/android/uirendering/cts/testclasses/view/UnclippedBlueView.java
@@ -18,12 +18,12 @@
 
     public UnclippedBlueView(Context context, AttributeSet attrs, int defStyleAttr) {
         this(context, attrs, defStyleAttr, 0);
-        setWillNotDraw(false);
     }
 
     public UnclippedBlueView(Context context, AttributeSet attrs,
             int defStyleAttr, int defStyleRes) {
         super(context, attrs, defStyleAttr, defStyleRes);
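+        // The shorter constructors chain to this one, so drawing stays enabled on every path.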
+        setWillNotDraw(false);
     }
 
     @Override
diff --git a/tests/tests/view/src/android/view/cts/WindowTest.java b/tests/tests/view/src/android/view/cts/WindowTest.java
index ead4d5b..3c5386d 100644
--- a/tests/tests/view/src/android/view/cts/WindowTest.java
+++ b/tests/tests/view/src/android/view/cts/WindowTest.java
@@ -370,7 +370,9 @@
     public void testSetBackgroundDrawable() throws Throwable {
         // DecorView holds the background
         View decor = mWindow.getDecorView();
-        assertEquals(PixelFormat.OPAQUE, decor.getBackground().getOpacity());
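+        // Swipe-to-dismiss windows may not use an opaque decor background, so
+        // only assert opacity when that feature is absent.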
+        if (!mWindow.hasFeature(Window.FEATURE_SWIPE_TO_DISMISS)) {
+            assertEquals(PixelFormat.OPAQUE, decor.getBackground().getOpacity());
+        }
         runTestOnUiThread(new Runnable() {
             public void run() {
                 // setBackgroundDrawableResource(int resId) has the same
diff --git a/tests/tests/webkit/src/android/webkit/cts/WebViewSslTest.java b/tests/tests/webkit/src/android/webkit/cts/WebViewSslTest.java
index 378bf6e..dcdeead 100644
--- a/tests/tests/webkit/src/android/webkit/cts/WebViewSslTest.java
+++ b/tests/tests/webkit/src/android/webkit/cts/WebViewSslTest.java
@@ -703,6 +703,9 @@
     }
 
     public void testSecureServerRequestingClientCertDoesNotCancelRequest() throws Throwable {
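+        // Devices that ship without a WebView implementation pass these tests vacuously.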
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.WANTS_CLIENT_AUTH);
         final String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final SslErrorWebViewClient webViewClient = new SslErrorWebViewClient(mOnUiThread);
@@ -716,6 +719,9 @@
     }
 
     public void testSecureServerRequiringClientCertDoesCancelRequest() throws Throwable {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.NEEDS_CLIENT_AUTH);
         final String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final SslErrorWebViewClient webViewClient = new SslErrorWebViewClient(mOnUiThread);
@@ -732,6 +738,9 @@
     }
 
     public void testProceedClientCertRequest() throws Throwable {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.NEEDS_CLIENT_AUTH);
         String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final ClientCertWebViewClient webViewClient = new ClientCertWebViewClient(mOnUiThread);
@@ -756,6 +765,9 @@
     }
 
     public void testIgnoreClientCertRequest() throws Throwable {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.NEEDS_CLIENT_AUTH);
         String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final ClientCertWebViewClient webViewClient = new ClientCertWebViewClient(mOnUiThread);
@@ -784,6 +796,9 @@
     }
 
     public void testCancelClientCertRequest() throws Throwable {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.NEEDS_CLIENT_AUTH);
         final String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final ClientCertWebViewClient webViewClient = new ClientCertWebViewClient(mOnUiThread);
@@ -830,6 +845,9 @@
     }
 
     public void testClientCertIssuersReceivedCorrectly() throws Throwable {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         mWebServer = new CtsTestServer(getActivity(), CtsTestServer.SslMode.NEEDS_CLIENT_AUTH,
                 new TrustManager());
         final String url = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
diff --git a/tests/tests/webkit/src/android/webkit/cts/WebViewTest.java b/tests/tests/webkit/src/android/webkit/cts/WebViewTest.java
index ef64f4d..1e22acc 100755
--- a/tests/tests/webkit/src/android/webkit/cts/WebViewTest.java
+++ b/tests/tests/webkit/src/android/webkit/cts/WebViewTest.java
@@ -474,6 +474,9 @@
 
     @UiThreadTest
     public void testPostUrlWithNonNetworkUrl() throws Exception {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         final String nonNetworkUrl = "file:///android_asset/" + TestHtmlConstants.HELLO_WORLD_URL;
 
         mOnUiThread.postUrlAndWaitForCompletion(nonNetworkUrl, new byte[1]);
@@ -484,6 +487,9 @@
 
     @UiThreadTest
     public void testPostUrlWithNetworkUrl() throws Exception {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         startWebServer(false);
         final String networkUrl = mWebServer.getAssetUrl(TestHtmlConstants.HELLO_WORLD_URL);
         final String postDataString = "username=my_username&password=my_password";
@@ -565,6 +571,10 @@
     }
 
     public void testCanInjectHeaders() throws Exception {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
+
         final String X_FOO = "X-foo";
         final String X_FOO_VALUE = "test";
 
@@ -899,6 +909,9 @@
     }
 
     public void testAddJavascriptInterfaceExceptions() throws Exception {
+        if (!NullWebViewUtils.isWebViewAvailable()) {
+            return;
+        }
         WebSettings settings = mOnUiThread.getSettings();
         settings.setJavaScriptEnabled(true);
         settings.setJavaScriptCanOpenWindowsAutomatically(true);