CameraITS: Use TCP as transport layer.

Change-Id: Ia51864f9a918857d4e5c17fc1f0070c1329aed20
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index e89a529..4a1efd3 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -18,10 +18,12 @@
 import sys
 import re
 import json
-import tempfile
 import time
 import unittest
+import socket
 import subprocess
+import hashlib
+import numpy
 
 class ItsSession(object):
     """Controls a device over adb to run ITS scripts.
@@ -30,16 +32,13 @@
     objects encoding CaptureRequests, specifying sets of parameters to use
     when capturing an image using the Camera2 APIs. This class encapsualtes
     sending the requests to the device, monitoring the device's progress, and
-    copying the resultant captures back to the host machine when done.
+    copying the resultant captures back to the host machine when done. TCP
+    forwarded over adb is the transport mechanism used.
 
     The device must have ItsService.apk installed.
 
-    The "adb logcat" command is used to receive messages from the service
-    running on the device.
-
     Attributes:
-        proc: The handle to the process in which "adb logcat" is invoked.
-        logcat: The stdout stream from the logcat process.
+        sock: The open socket.
     """
 
     # TODO: Handle multiple connected devices.
@@ -48,36 +47,32 @@
     # to adb, which causes it to fail if there is more than one device.
     ADB = "adb -d"
 
-    # Set to True to take a pre-shot before capture and throw it away (for
-    # debug purposes).
-    CAPTURE_THROWAWAY_SHOTS = False
+    # Open a connection to localhost:6000, forwarded to port 6000 on the device.
+    # TODO: Support multiple devices running over different TCP ports.
+    IPADDR = '127.0.0.1'
+    PORT = 6000
+    BUFFER_SIZE = 4096
 
-    DEVICE_FOLDER_ROOT = '/sdcard/its'
-    DEVICE_FOLDER_CAPTURE = 'captures'
-    INTENT_CAPTURE = 'com.android.camera2.its.CAPTURE'
-    INTENT_3A = 'com.android.camera2.its.3A'
-    INTENT_GETPROPS = 'com.android.camera2.its.GETPROPS'
-    TAG = 'CAMERA-ITS-PY'
+    # Seconds timeout on each socket operation.
+    SOCK_TIMEOUT = 10.0
 
-    MSG_RECV = "RECV"
-    MSG_SIZE = "SIZE"
-    MSG_FILE = "FILE"
-    MSG_CAPT = "CAPT"
-    MSG_DONE = "DONE"
-    MSG_FAIL = "FAIL"
-    MSG_AF   = "3A-F"
-    MSG_AE   = "3A-E"
-    MSG_AWB  = "3A-W"
-    MSGS = [MSG_RECV, MSG_SIZE, MSG_FILE, MSG_CAPT, MSG_DONE,
-            MSG_FAIL, MSG_AE,   MSG_AF,   MSG_AWB]
+    PACKAGE = 'com.android.camera2.its'
+    INTENT_START = 'com.android.camera2.its.START'
 
     def __init__(self):
-        self.proc = None
         reboot_device_on_argv()
-        self.__open_logcat()
+        # TODO: Figure out why "--user 0" is needed, and fix the problem
+        _run('%s shell am force-stop --user 0 %s' % (self.ADB, self.PACKAGE))
+        _run(('%s shell am startservice --user 0 -t text/plain '
+              '-a %s') % (self.ADB, self.INTENT_START))
+        _run('%s forward tcp:%d tcp:%d' % (self.ADB,self.PORT,self.PORT))
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((self.IPADDR, self.PORT))
+        self.sock.settimeout(self.SOCK_TIMEOUT)
 
     def __del__(self):
-        self.__kill_logcat()
+        if self.sock:
+            self.sock.close()
 
     def __enter__(self):
         return self
@@ -85,260 +80,25 @@
     def __exit__(self, type, value, traceback):
         return False
 
-    def __open_logcat(self):
-        """Opens the "adb logcat" stream.
-
-        Internal function, called by this class's constructor.
-
-        Gets the adb logcat stream that is intended for parsing by this python
-        script. Flushes it first to clear out existing messages.
-
-        Populates the proc and logcat members of this class.
-        """
-        _run('%s logcat -c' % (self.ADB))
-        self.proc = subprocess.Popen(
-                self.ADB.split() + ["logcat", "-s", "'%s:v'" % (self.TAG)],
-                stdout=subprocess.PIPE)
-        self.logcat = self.proc.stdout
-
-    def __get_next_msg(self):
-        """Gets the next message from the logcat stream.
-
-        Reads from the logcat stdout stream. Blocks until a new line is ready,
-        but exits in the event of a keyboard interrupt (to allow the script to
-        be Ctrl-C killed).
-
-        If the special message "FAIL" is received, kills the script; the test
-        shouldn't continue running if something went wrong. The user can then
-        manually inspect the device to see what the problem is, for example by
-        looking at logcat themself.
-
-        Returns:
-            The next string from the logcat stdout stream.
-        """
-        while True:
-            # Get the next logcat line.
-            line = self.logcat.readline().strip()
-            # Get the message, which is the string following the "###" code.
-            idx = line.find('### ')
-            if idx >= 0:
-                msg = line[idx+4:]
-                if self.__unpack_msg(msg)[0] == self.MSG_FAIL:
-                    raise its.error.Error('FAIL device msg received')
-                return msg
-
-    def __kill_logcat(self):
-        """Kill the logcat process.
-
-        Internal function called by this class's destructor.
-        """
-        if self.proc:
-            self.proc.kill()
-
-    def __send_intent(self, intent_string, intent_params=None):
-        """Send an intent to the device.
-
-        Takes a Python object object specifying the operation to be performed
-        on the device, converts it to JSON, sends it to the device over adb,
-        then sends an intent to ItsService.apk running on the device with
-        the path to that JSON file (including starting the service).
-
-        Args:
-            intent_string: The string corresponding to the intent to send (3A
-                or capture).
-            intent_params: A Python dictionary object containing the operations
-                to perform; for a capture intent, the dict. contains either
-                captureRequest or captureRequestList key, and for a 3A intent,
-                the dictionary contains a 3A params key.
-        """
-        _run('%s shell mkdir -p "%s"' % (
-             self.ADB, self.DEVICE_FOLDER_ROOT))
-        intent_args = ""
-        if intent_params:
-            with tempfile.NamedTemporaryFile(
-                    mode="w", suffix=".json", delete=False) as f:
-                tmpfname = f.name
-                f.write(json.dumps(intent_params))
-            _run('%s push %s %s' % (
-                 self.ADB, tmpfname, self.DEVICE_FOLDER_ROOT))
-            os.remove(tmpfname)
-            intent_args = ' -d "file://%s/%s"' % (
-                      self.DEVICE_FOLDER_ROOT, os.path.basename(tmpfname))
-        # TODO: Figure out why "--user 0" is needed, and fix the problem
-        _run(('%s shell am startservice --user 0 -t text/plain '
-              '-a %s%s') % (self.ADB, intent_string, intent_args))
-
-    def __start_capture(self, request):
-        self.__send_intent(self.INTENT_CAPTURE, request)
-
-    def __start_3a(self, params):
-        self.__send_intent(self.INTENT_3A, params)
-
-    def __start_getprops(self):
-        self.__send_intent(self.INTENT_GETPROPS)
-
-    def __unpack_msg(self, msg):
-        """Process a string containing a coded message from the device.
-
-        The logcat messages intended to be parsed by this script are of the
-        following form:
-            RECV                    - Indicates capture command was received
-            SIZE <WIDTH> <HEIGHT>   - The width,height of the captured image
-            FILE <PATH>             - The path on the device of the captured image
-            CAPT <I> of <N>         - Indicates capt cmd #I out of #N was issued
-            DONE                    - Indicates the capture sequence completed
-            FAIL                    - Indicates an error occurred
-
-        Args:
-            msg: The string message from the device.
-
-        Returns:
-            Tuple containing the message type (a string) and the message
-            payload (a list).
-        """
-        a = msg.split()
-        if a[0] not in self.MSGS:
-            raise its.error.Error('Invalid device message: %s' % (msg))
-        return a[0], a[1:]
-
-    def __wait_for_camera_properties(self):
-        """Block until the requested camera properties object is available.
-
-        Monitors messages from the service on the device (via logcat), looking
-        for special coded messages that indicate the status of the request.
-
-        Returns:
-            The remote path (on the device) where the camera properties JSON
-            file is stored.
-        """
-        fname = None
-        msg = self.__get_next_msg()
-        if self.__unpack_msg(msg)[0] != self.MSG_RECV:
-            raise its.error.Error('Device msg not RECV: %s' % (msg))
-        while True:
-            msg = self.__get_next_msg()
-            msgtype, msgparams = self.__unpack_msg(msg)
-            if msgtype == self.MSG_FILE:
-                fname = msgparams[0]
-            elif msgtype == self.MSG_DONE:
-                return fname
-
-    def __wait_for_capture_done_single(self):
-        """Block until a single capture is done.
-
-        Monitors messages from the service on the device (via logcat), looking
-        for special coded messages that indicate the status of the captures.
-
-        Returns:
-            The remote path (on the device) where the image file was stored,
-            along with the image's width and height.
-        """
-        fname = None
-        w = None
-        h = None
-        msg = self.__get_next_msg()
-        if self.__unpack_msg(msg)[0] != self.MSG_RECV:
-            raise its.error.Error('Device msg not RECV: %s' % (msg))
-        while True:
-            msg = self.__get_next_msg()
-            msgtype, msgparams = self.__unpack_msg(msg)
-            if msgtype == self.MSG_SIZE:
-                w = int(msgparams[0])
-                h = int(msgparams[1])
-            elif msgtype == self.MSG_FILE:
-                fname = msgparams[0]
-            elif msgtype == self.MSG_DONE:
-                return fname, w, h
-
-    def __wait_for_capture_done_burst(self, num_req):
-        """Block until a burst of captures is done.
-
-        Monitors messages from the service on the device (via logcat), looking
-        for special coded messages that indicate the status of the captures.
-
-        Args:
-            num_req: The number of captures to wait for.
-
-        Returns:
-            The remote paths (on the device) where the image files were stored,
-            along with their width and height.
-        """
-        fnames = []
-        w = None
-        h = None
-        msg = self.__get_next_msg()
-        if self.__unpack_msg(msg)[0] != self.MSG_RECV:
-            raise its.error.Error('Device msg not RECV: %s' % (msg))
-        while True:
-            msg = self.__get_next_msg()
-            msgtype, msgparams = self.__unpack_msg(msg)
-            if msgtype == self.MSG_SIZE:
-                w = int(msgparams[0])
-                h = int(msgparams[1])
-            elif msgtype == self.MSG_FILE:
-                fnames.append(msgparams[0])
-            elif msgtype == self.MSG_DONE:
-                if len(fnames) != num_req or not w or not h:
-                    raise its.error.Error('Missing FILE or SIZE device msg')
-                return fnames, w, h
-
-    def __get_json_path(self, image_fname):
-        """Get the path of the JSON metadata file associated with an image.
-
-        Args:
-            image_fname: Path of the image file (local or remote).
-
-        Returns:
-            The path of the associated JSON metadata file, which has the same
-            basename but different extension.
-        """
-        base, ext = os.path.splitext(image_fname)
-        return base + ".json"
-
-    def __copy_captured_files(self, remote_fnames):
-        """Copy captured data from device back to host machine over adb.
-
-        Copy captured images and associated metadata from the device to the
-        host machine. The image and metadata files have the same basename, but
-        different file extensions; the captured image is .yuv/.jpg/.raw, and
-        the captured metadata is .json.
-
-        File names are unique, as each has the timestamp of the capture in it.
-
-        Deletes the files from the device after they have been transferred off.
-
-        Args:
-            remote_fnames: List of paths of the captured image files on the
-                remote device.
-
-        Returns:
-            List of paths of captured image files on the local host machine
-            (which is just in the current directory).
-        """
-        local_fnames = []
-        for fname in remote_fnames:
-            _run('%s pull %s .' % (self.ADB, fname))
-            _run('%s pull %s .' % (
-                       self.ADB, self.__get_json_path(fname)))
-            local_fnames.append(os.path.basename(fname))
-        _run('%s shell rm -rf %s/*' % (self.ADB, self.DEVICE_FOLDER_ROOT))
-        return local_fnames
-
-    def __parse_captured_json(self, local_fnames):
-        """Parse the JSON objects that are returned alongside captured images.
-
-        Args:
-            local_fnames: List of paths of captured image on the local machine.
-
-        Returns:
-            List of Python objects obtained from loading the argument files
-            and converting from the JSON object form to native Python.
-        """
-        a = []
-        for fname in local_fnames:
-            with open(self.__get_json_path(fname), "r") as f:
-                a.append(json.load(f))
-        return a
+    def __read_response_from_socket(self):
+        # Read a line (newline-terminated) string serialization of JSON object.
+        chars = []
+        while len(chars) == 0 or chars[-1] != '\n':
+            chars.append(self.sock.recv(1))
+        line = ''.join(chars)
+        jobj = json.loads(line)
+        # Optionally read a binary buffer of a fixed size.
+        buf = None
+        if jobj.has_key("bufValueSize"):
+            n = jobj["bufValueSize"]
+            buf = bytearray(n)
+            view = memoryview(buf)
+            while n > 0:
+                nbytes = self.sock.recv_into(view, n)
+                view = view[nbytes:]
+                n -= nbytes
+            buf = numpy.frombuffer(buf, dtype=numpy.uint8)
+        return jobj, buf
 
     def get_camera_properties(self):
         """Get the camera properties object for the device.
@@ -346,11 +106,13 @@
         Returns:
             The Python dictionary object for the CameraProperties object.
         """
-        self.__start_getprops()
-        remote_fname = self.__wait_for_camera_properties()
-        _run('%s pull %s .' % (self.ADB, remote_fname))
-        local_fname = os.path.basename(remote_fname)
-        return self.__parse_captured_json([local_fname])[0]['cameraProperties']
+        cmd = {}
+        cmd["cmdName"] = "getCameraProperties"
+        self.sock.send(json.dumps(cmd) + "\n")
+        data,_ = self.__read_response_from_socket()
+        if data['tag'] != 'cameraProperties':
+            raise its.error.Error('Invalid command response')
+        return data['objValue']['cameraProperties']
 
     def do_3a(self, region_ae, region_awb, region_af,
               do_ae=True, do_awb=True, do_af=True):
@@ -374,36 +136,39 @@
             * AWB transform (list); None if do_awb is false
             * AF focus position; None if do_af is false
         """
-        params = {"regions" : {"ae": region_ae,
-                               "awb": region_awb,
-                               "af": region_af },
-                  "triggers": {"ae": do_ae,
-                               "af": do_af } }
         print "Running vendor 3A on device"
-        self.__start_3a(params)
+        cmd = {}
+        cmd["cmdName"] = "do3A"
+        cmd["regions"] = {"ae": region_ae, "awb": region_awb, "af": region_af}
+        cmd["triggers"] = {"ae": do_ae, "af": do_af}
+        self.sock.send(json.dumps(cmd) + "\n")
+
+        # Wait for each specified 3A to converge.
         ae_sens = None
         ae_exp = None
         awb_gains = None
         awb_transform = None
         af_dist = None
         while True:
-            msg = self.__get_next_msg()
-            msgtype, msgparams = self.__unpack_msg(msg)
-            if msgtype == self.MSG_AE:
-                ae_sens = int(msgparams[0])
-                ae_exp = int(msgparams[1])
-            elif msgtype == self.MSG_AWB:
-                awb_gains = [float(x) for x in msgparams[:4]]
-                awb_transform = [float(x) for x in msgparams[4:]]
-            elif msgtype == self.MSG_AF:
-                af_dist = float(msgparams[0]) if msgparams[0] != "null" else 0
-            elif msgtype == self.MSG_DONE:
-                if (do_ae and ae_sens == None or do_awb and awb_gains == None
-                                              or do_af and af_dist == None):
-                    raise its.error.Error('3A failed to converge')
-                return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
+            data,_ = self.__read_response_from_socket()
+            vals = data['strValue'].split()
+            if data['tag'] == 'aeResult':
+                ae_sens, ae_exp = [int(i) for i in vals]
+            elif data['tag'] == 'afResult':
+                af_dist = float(vals[0])
+            elif data['tag'] == 'awbResult':
+                awb_gains = [float(f) for f in vals[:4]]
+                awb_transform = [float(f) for f in vals[4:]]
+            elif data['tag'] == '3aDone':
+                break
+            else:
+                raise its.error.Error('Invalid command response')
+        if (do_ae and ae_sens == None or do_awb and awb_gains == None
+                                      or do_af and af_dist == None):
+            raise its.error.Error('3A failed to converge')
+        return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
 
-    def do_capture(self, cap_request, out_surface=None, out_fname_prefix=None):
+    def do_capture(self, cap_request, out_surface=None):
         """Issue capture request(s), and read back the image(s) and metadata.
 
         The main top-level function for capturing one or more images using the
@@ -445,63 +210,57 @@
         Args:
             cap_request: The Python dict/list specifying the capture(s), which
                 will be converted to JSON and sent to the device.
-            out_fname_prefix: (Optionally) the file name prefix to use for the
-                captured files. If this arg is present, then the captured files
-                will be renamed appropriately.
+            out_surface: (Optional) the width,height,format to use for all
+                captured images.
 
         Returns:
-            Four values:
-            * The path or list of paths of the captured images (depending on
-              whether the request was for a single or burst capture). The paths
-              are on the host machine. The captured metadata file(s) have the
-              same file names as their corresponding images, with a ".json"
-              extension.
-            * The width and height of the captured image(s). For a burst, all
-              are the same size.
-            * The Python dictionary or list of dictionaries (in the case of a
-              burst capture) containing the returned capture result objects.
+            An object or list of objects (depending on whether the request was
+            for a single or burst capture), where each object contains the
+            following fields:
+            * data: the image data as a numpy array of bytes.
+            * width: the width of the captured image.
+            * height: the height of the captured image.
+            * format: the format of the image, in ["yuv", "jpeg"].
+            * metadata: the capture result object (Python dictionary).
         """
+        cmd = {}
+        cmd["cmdName"] = "doCapture"
         if not isinstance(cap_request, list):
-            request = {"captureRequest" : cap_request}
-            if out_surface is not None:
-                request["outputSurface"] = out_surface
-            if self.CAPTURE_THROWAWAY_SHOTS:
-                print "Capturing throw-away image"
-                self.__start_capture(request)
-                self.__wait_for_capture_done_single()
-            print "Capturing image"
-            self.__start_capture(request)
-            remote_fname, w, h = self.__wait_for_capture_done_single()
-            local_fname = self.__copy_captured_files([remote_fname])[0]
-            out_metadata_obj = self.__parse_captured_json([local_fname])[0]
-            if out_fname_prefix:
-                _, image_ext = os.path.splitext(local_fname)
-                os.rename(local_fname, out_fname_prefix + image_ext)
-                os.rename(self.__get_json_path(local_fname),
-                          out_fname_prefix + ".json")
-                local_fname = out_fname_prefix + image_ext
-            return local_fname, w, h, out_metadata_obj["captureResult"]
+            cmd["captureRequests"] = [cap_request]
         else:
-            request = {"captureRequestList" : cap_request}
-            if out_surface is not None:
-                request["outputSurface"] = out_surface
-            n = len(request['captureRequestList'])
-            print "Capture burst of %d images" % (n)
-            self.__start_capture(request)
-            remote_fnames, w, h = self.__wait_for_capture_done_burst(n)
-            local_fnames = self.__copy_captured_files(remote_fnames)
-            out_metadata_objs = self.__parse_captured_json(local_fnames)
-            for i in range(len(out_metadata_objs)):
-                out_metadata_objs[i] = out_metadata_objs[i]["captureResult"]
-            if out_fname_prefix is not None:
-                for i in range(len(local_fnames)):
-                    _, image_ext = os.path.splitext(local_fnames[i])
-                    os.rename(local_fnames[i],
-                              "%s-%04d%s" % (out_fname_prefix, i, image_ext))
-                    os.rename(self.__get_json_path(local_fnames[i]),
-                              "%s-%04d.json" % (out_fname_prefix, i))
-                    local_fnames[i] = out_fname_prefix + image_ext
-            return local_fnames, w, h, out_metadata_objs
+            cmd["captureRequests"] = cap_request
+        if out_surface is not None:
+            cmd["outputSurface"] = out_surface
+        n = len(cmd["captureRequests"])
+        print "Capturing %d image%s" % (n, "s" if n>1 else "")
+        self.sock.send(json.dumps(cmd) + "\n")
+
+        # Wait for n images and n metadata responses from the device.
+        bufs = []
+        mds = []
+        fmts = []
+        width = None
+        height = None
+        while len(bufs) < n or len(mds) < n:
+            jsonObj,buf = self.__read_response_from_socket()
+            if jsonObj['tag'] in ['jpegImage','yuvImage'] and buf is not None:
+                bufs.append(buf)
+                fmts.append(jsonObj['tag'][:-5])
+            elif jsonObj['tag'] == 'captureResults':
+                mds.append(jsonObj['objValue']['captureResult'])
+                width = jsonObj['objValue']['width']
+                height = jsonObj['objValue']['height']
+
+        objs = []
+        for i in range(n):
+            obj = {}
+            obj["data"] = bufs[i]
+            obj["width"] = width
+            obj["height"] = height
+            obj["format"] = fmts[i]
+            obj["metadata"] = mds[i]
+            objs.append(obj)
+        return objs if n>1 else objs[0]
 
 def _run(cmd):
     """Replacement for os.system, with hiding of stdout+stderr messages.
diff --git a/apps/CameraITS/pymodules/its/image.py b/apps/CameraITS/pymodules/its/image.py
index 667eecc..10ddf7b 100644
--- a/apps/CameraITS/pymodules/its/image.py
+++ b/apps/CameraITS/pymodules/its/image.py
@@ -22,6 +22,7 @@
 import numpy
 import math
 import unittest
+import cStringIO
 
 DEFAULT_YUV_TO_RGB_CCM = numpy.matrix([
                                 [1.000,  1.402,  0.000],
@@ -38,6 +39,88 @@
 
 MAX_LUT_SIZE = 65536
 
+def convert_capture_to_rgb_image(cap,
+                                 ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
+                                 yuv_off=DEFAULT_YUV_OFFSETS):
+    """Convert a captured image object to a RGB image.
+
+    Args:
+        cap: A capture object as returned by its.device.do_capture.
+        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
+        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    w = cap["width"]
+    h = cap["height"]
+    if cap["format"] == "yuv":
+        y = cap["data"][0:w*h]
+        u = cap["data"][w*h:w*h*5/4]
+        v = cap["data"][w*h*5/4:w*h*6/4]
+        return convert_yuv420_to_rgb_image(y, u, v, w, h)
+    elif cap["format"] == "jpeg":
+        # TODO: Convert JPEG to RGB.
+        raise its.error.Error('Invalid format %s' % (cap["format"]))
+    else:
+        raise its.error.Error('Invalid format %s' % (cap["format"]))
+
+def convert_capture_to_yuv_planes(cap):
+    """Convert a captured image object to separate Y,U,V image planes.
+
+    The only input format that is supported is planar YUV420, and the planes
+    that are returned are such that the U,V planes are 1/2 x 1/2 of the Y
+    plane size.
+
+    Args:
+        cap: A capture object as returned by its.device.do_capture.
+
+    Returns:
+        Three float arrays, for the Y,U,V planes, with pixel values in [0,1].
+    """
+    w = cap["width"]
+    h = cap["height"]
+    if cap["format"] == "yuv":
+        y = cap["data"][0:w*h]
+        u = cap["data"][w*h:w*h*5/4]
+        v = cap["data"][w*h*5/4:w*h*6/4]
+        return ((y.astype(numpy.float32) / 255.0).reshape(h, w, 1),
+                (u.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1),
+                (v.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1))
+    else:
+        raise its.error.Error('Invalid format %s' % (cap["format"]))
+
+def convert_yuv420_to_rgb_image(y_plane, u_plane, v_plane,
+                                w, h,
+                                ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
+                                yuv_off=DEFAULT_YUV_OFFSETS):
+    """Convert a YUV420 8-bit planar image to an RGB image.
+
+    Args:
+        y_plane: The packed 8-bit Y plane.
+        u_plane: The packed 8-bit U plane.
+        v_plane: The packed 8-bit V plane.
+        w: The width of the image.
+        h: The height of the image.
+        ccm_yuv_to_rgb: (Optional) the 3x3 CCM to convert from YUV to RGB.
+        yuv_off: (Optional) offsets to subtract from each of Y,U,V values.
+
+    Returns:
+        RGB float-3 image array, with pixel values in [0.0, 1.0].
+    """
+    y = numpy.subtract(y_plane, yuv_off[0])
+    u = numpy.subtract(u_plane, yuv_off[1]).view(numpy.int8)
+    v = numpy.subtract(v_plane, yuv_off[2]).view(numpy.int8)
+    u = u.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
+    v = v.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
+    yuv = numpy.dstack([y, u.reshape(w*h), v.reshape(w*h)])
+    flt = numpy.empty([h, w, 3], dtype=numpy.float32)
+    flt.reshape(w*h*3)[:] = yuv.reshape(h*w*3)[:]
+    flt = numpy.dot(flt.reshape(w*h,3), ccm_yuv_to_rgb.T).clip(0, 255)
+    rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
+    rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
+    return rgb.astype(numpy.float32) / 255.0
+
 def load_yuv420_to_rgb_image(yuv_fname,
                              w, h,
                              ccm_yuv_to_rgb=DEFAULT_YUV_TO_RGB_CCM,
@@ -58,18 +141,7 @@
         y = numpy.fromfile(f, numpy.uint8, w*h, "")
         v = numpy.fromfile(f, numpy.uint8, w*h/4, "")
         u = numpy.fromfile(f, numpy.uint8, w*h/4, "")
-        y = numpy.subtract(y, yuv_off[0])
-        u = numpy.subtract(u, yuv_off[1]).view(numpy.int8)
-        v = numpy.subtract(v, yuv_off[2]).view(numpy.int8)
-        u = u.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
-        v = v.reshape(h/2, w/2).repeat(2, axis=1).repeat(2, axis=0)
-        yuv = numpy.dstack([y, u.reshape(w*h), v.reshape(w*h)])
-        flt = numpy.empty([h, w, 3], dtype=numpy.float32)
-        flt.reshape(w*h*3)[:] = yuv.reshape(h*w*3)[:]
-        flt = numpy.dot(flt.reshape(w*h,3), ccm_yuv_to_rgb.T).clip(0, 255)
-        rgb = numpy.empty([h, w, 3], dtype=numpy.uint8)
-        rgb.reshape(w*h*3)[:] = flt.reshape(w*h*3)[:]
-        return rgb.astype(numpy.float32) / 255.0
+        return convert_yuv420_to_rgb_image(y,u,v,w,h,ccm_yuv_to_rgb,yuv_off)
 
 def load_yuv420_to_yuv_planes(yuv_fname, w, h):
     """Load a YUV420 image file, and return separate Y, U, and V plane images.
@@ -93,6 +165,20 @@
                 (u.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1),
                 (v.astype(numpy.float32) / 255.0).reshape(h/2, w/2, 1))
 
+def decompress_jpeg_to_rgb_image(jpeg_buffer):
+    """Decompress a JPEG-compressed image, returning as an RGB image.
+
+    Args:
+        jpeg_buffer: The JPEG stream.
+
+    Returns:
+        A numpy array for the RGB image, with pixels in [0,1].
+    """
+    img = Image.open(cStringIO.StringIO(jpeg_buffer))
+    w = img.size[0]
+    h = img.size[1]
+    return numpy.array(img).reshape(h,w,3) / 255.0
+
 def apply_lut_to_image(img, lut):
     """Applies a LUT to every pixel in a float image array.
 
diff --git a/apps/CameraITS/pymodules/its/target.py b/apps/CameraITS/pymodules/its/target.py
index 50ba98e..1761ec5 100644
--- a/apps/CameraITS/pymodules/its/target.py
+++ b/apps/CameraITS/pymodules/its/target.py
@@ -73,10 +73,10 @@
     req["android.tonemap.curveBlue"] = tmap
     req["android.colorCorrection.transform"] = xform_rat
     req["android.colorCorrection.gains"] = gains
-    fname, w, h, cap_res = its_session.do_capture(req)
+    cap = its_session.do_capture(req)
 
     # Compute the mean luma of a center patch.
-    yimg,uimg,vimg = its.image.load_yuv420_to_yuv_planes(fname,w,h)
+    yimg,uimg,vimg = its.image.convert_capture_to_yuv_planes(cap)
     tile = its.image.get_image_patch(yimg, 0.45, 0.45, 0.1, 0.1)
     luma_mean = its.image.compute_image_means(tile)
 
diff --git a/apps/CameraITS/service/AndroidManifest.xml b/apps/CameraITS/service/AndroidManifest.xml
index 62ca022..acd8a98 100644
--- a/apps/CameraITS/service/AndroidManifest.xml
+++ b/apps/CameraITS/service/AndroidManifest.xml
@@ -27,6 +27,8 @@
   <uses-permission android:name="android.permission.CAMERA" />
   <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
   <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
+  <uses-permission android:name="android.permission.INTERNET" />
+  <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
   <application
       android:label="@string/app_name"
       android:theme="@android:style/Theme.Holo"
@@ -36,17 +38,7 @@
         android:label="@string/app_name"
         >
       <intent-filter>
-        <action android:name="com.android.camera2.its.CAPTURE"/>
-        <category android:name="android.intent.category.DEFAULT" />
-        <data android:mimeType="text/plain" />
-      </intent-filter>
-      <intent-filter>
-        <action android:name="com.android.camera2.its.GETPROPS"/>
-        <category android:name="android.intent.category.DEFAULT" />
-        <data android:mimeType="text/plain" />
-      </intent-filter>
-      <intent-filter>
-        <action android:name="com.android.camera2.its.3A"/>
+        <action android:name="com.android.camera2.its.START"/>
         <category android:name="android.intent.category.DEFAULT" />
         <data android:mimeType="text/plain" />
       </intent-filter>
diff --git a/apps/CameraITS/service/src/com/android/camera2/its/ItsSerializer.java b/apps/CameraITS/service/src/com/android/camera2/its/ItsSerializer.java
index e00eea9..a2a6b15 100644
--- a/apps/CameraITS/service/src/com/android/camera2/its/ItsSerializer.java
+++ b/apps/CameraITS/service/src/com/android/camera2/its/ItsSerializer.java
@@ -342,21 +342,13 @@
             throws ItsException {
         try {
             List<CaptureRequest.Builder> requests = null;
-            if (jsonObjTop.has("captureRequest")) {
-                JSONObject jsonReq = jsonObjTop.getJSONObject("captureRequest");
+            JSONArray jsonReqs = jsonObjTop.getJSONArray("captureRequests");
+            requests = new LinkedList<CaptureRequest.Builder>();
+            for (int i = 0; i < jsonReqs.length(); i++) {
                 CaptureRequest.Builder templateReq = device.createCaptureRequest(
                         CameraDevice.TEMPLATE_STILL_CAPTURE);
-                requests = new LinkedList<CaptureRequest.Builder>();
-                requests.add(deserialize(templateReq, jsonReq));
-            } else if (jsonObjTop.has("captureRequestList")) {
-                JSONArray jsonReqs = jsonObjTop.getJSONArray("captureRequestList");
-                requests = new LinkedList<CaptureRequest.Builder>();
-                for (int i = 0; i < jsonReqs.length(); i++) {
-                    CaptureRequest.Builder templateReq = device.createCaptureRequest(
-                            CameraDevice.TEMPLATE_STILL_CAPTURE);
-                    requests.add(
-                        deserialize(templateReq, jsonReqs.getJSONObject(i)));
-                }
+                requests.add(
+                    deserialize(templateReq, jsonReqs.getJSONObject(i)));
             }
             return requests;
         } catch (org.json.JSONException e) {
diff --git a/apps/CameraITS/service/src/com/android/camera2/its/ItsService.java b/apps/CameraITS/service/src/com/android/camera2/its/ItsService.java
index f3a7ac5..b80529d 100644
--- a/apps/CameraITS/service/src/com/android/camera2/its/ItsService.java
+++ b/apps/CameraITS/service/src/com/android/camera2/its/ItsService.java
@@ -45,25 +45,29 @@
 
 import org.json.JSONObject;
 
-import java.io.File;
+import java.io.BufferedReader;
+import java.io.BufferedWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
+import java.io.PrintWriter;
+import java.math.BigInteger;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
 import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.LinkedBlockingDeque;
 import java.util.concurrent.TimeUnit;
 
 public class ItsService extends Service {
     public static final String TAG = ItsService.class.getSimpleName();
-    public static final String PYTAG = "CAMERA-ITS-PY";
-
-    // Supported intents
-    public static final String ACTION_CAPTURE = "com.android.camera2.its.CAPTURE";
-    public static final String ACTION_3A = "com.android.camera2.its.3A";
-    public static final String ACTION_GETPROPS = "com.android.camera2.its.GETPROPS";
-    private static final int MESSAGE_CAPTURE = 1;
-    private static final int MESSAGE_3A = 2;
-    private static final int MESSAGE_GETPROPS = 3;
 
     // Timeouts, in seconds.
     public static final int TIMEOUT_CAPTURE = 10;
@@ -75,6 +79,8 @@
 
     private static final int MAX_CONCURRENT_READER_BUFFERS = 8;
 
+    public static final int SERVERPORT = 6000;
+
     public static final String REGION_KEY = "regions";
     public static final String REGION_AE_KEY = "ae";
     public static final String REGION_AWB_KEY = "awb";
@@ -91,14 +97,20 @@
     private ImageReader mCaptureReader = null;
     private CameraCharacteristics mCameraCharacteristics = null;
 
-    private HandlerThread mCommandThread;
-    private Handler mCommandHandler;
     private HandlerThread mSaveThread;
     private Handler mSaveHandler;
     private HandlerThread mResultThread;
     private Handler mResultHandler;
 
-    private ConditionVariable mInterlock3A = new ConditionVariable(true);
+    private volatile ServerSocket mSocket = null;
+    private volatile SocketRunnable mSocketRunnableObj = null;
+    private volatile Thread mSocketThread = null;
+    private volatile Thread mSocketWriteRunnable = null;
+    private volatile boolean mSocketThreadExitFlag = false;
+    private volatile BlockingQueue<ByteBuffer> mSocketWriteQueue = new LinkedBlockingDeque<ByteBuffer>();
+    private final Object mSocketWriteLock = new Object();
+
+    private volatile ConditionVariable mInterlock3A = new ConditionVariable(true);
     private volatile boolean mIssuedRequest3A = false;
     private volatile boolean mConvergedAE = false;
     private volatile boolean mConvergedAF = false;
@@ -119,7 +131,6 @@
 
     @Override
     public void onCreate() {
-
         try {
             // Get handle to camera manager.
             mCameraManager = (CameraManager) this.getSystemService(Context.CAMERA_SERVICE);
@@ -165,49 +176,19 @@
             mResultThread.start();
             mResultHandler = new Handler(mResultThread.getLooper());
 
-            // Create a thread to process commands.
-            mCommandThread = new HandlerThread("CaptureThread");
-            mCommandThread.start();
-            mCommandHandler = new Handler(mCommandThread.getLooper(), new Handler.Callback() {
-                @Override
-                public boolean handleMessage(Message msg) {
-                    try {
-                        switch (msg.what) {
-                            case MESSAGE_CAPTURE:
-                                doCapture((Uri) msg.obj);
-                                break;
-                            case MESSAGE_3A:
-                                do3A((Uri) msg.obj);
-                                break;
-                            case MESSAGE_GETPROPS:
-                                doGetProps();
-                                break;
-                            default:
-                                throw new ItsException("Unknown message type");
-                        }
-                        Log.i(PYTAG, "### DONE");
-                        return true;
-                    }
-                    catch (ItsException e) {
-                        Log.e(TAG, "Script failed: ", e);
-                        Log.e(PYTAG, "### FAIL");
-                        return true;
-                    }
-                }
-            });
+            // Create a thread to process commands, listening on a TCP socket.
+            mSocketRunnableObj = new SocketRunnable();
+            mSocketThread = new Thread(mSocketRunnableObj);
+            mSocketThread.start();
         } catch (ItsException e) {
-            Log.e(TAG, "Script failed: ", e);
-            Log.e(PYTAG, "### FAIL");
+            Log.e(TAG, "Service failed to start: ", e);
         }
     }
 
     @Override
     public void onDestroy() {
         try {
-            if (mCommandThread != null) {
-                mCommandThread.quit();
-                mCommandThread = null;
-            }
+            mSocketThreadExitFlag = true;
             if (mSaveThread != null) {
                 mSaveThread.quit();
                 mSaveThread = null;
@@ -223,38 +204,225 @@
             }
         } catch (ItsException e) {
             Log.e(TAG, "Script failed: ", e);
-            Log.e(PYTAG, "### FAIL");
         }
     }
 
     @Override
     public int onStartCommand(Intent intent, int flags, int startId) {
-        try {
-            Log.i(PYTAG, "### RECV");
-            String action = intent.getAction();
-            if (ACTION_CAPTURE.equals(action)) {
-                Uri uri = intent.getData();
-                Message m = mCommandHandler.obtainMessage(MESSAGE_CAPTURE, uri);
-                mCommandHandler.sendMessage(m);
-            } else if (ACTION_3A.equals(action)) {
-                Uri uri = intent.getData();
-                Message m = mCommandHandler.obtainMessage(MESSAGE_3A, uri);
-                mCommandHandler.sendMessage(m);
-            } else if (ACTION_GETPROPS.equals(action)) {
-                Uri uri = intent.getData();
-                Message m = mCommandHandler.obtainMessage(MESSAGE_GETPROPS, uri);
-                mCommandHandler.sendMessage(m);
-            } else {
-                throw new ItsException("Unhandled intent: " + intent.toString());
-            }
-        } catch (ItsException e) {
-            Log.e(TAG, "Script failed: ", e);
-            Log.e(PYTAG, "### FAIL");
-        }
         return START_STICKY;
     }
 
-    private ImageReader.OnImageAvailableListener
+    class SocketWriteRunnable implements Runnable {
+
+        // Use a separate thread to service a queue of objects to be written to the socket,
+        // writing each sequentially in order. This is needed since different handler functions
+        // (called on different threads) will need to send data back to the host script.
+
+        public Socket mOpenSocket = null;
+
+        public SocketWriteRunnable(Socket openSocket) {
+            mOpenSocket = openSocket;
+        }
+
+        public void run() {
+            Log.i(TAG, "Socket writer thread starting");
+            while (true) {
+                try {
+                    ByteBuffer b = mSocketWriteQueue.take();
+                    //Log.i(TAG, String.format("Writing to socket: %d bytes", b.capacity()));
+                    if (b.hasArray()) {
+                        mOpenSocket.getOutputStream().write(b.array());
+                    } else {
+                        byte[] barray = new byte[b.capacity()];
+                        b.get(barray);
+                        mOpenSocket.getOutputStream().write(barray);
+                    }
+                    mOpenSocket.getOutputStream().flush();
+                } catch (IOException e) {
+                    Log.e(TAG, "Error writing to socket");
+                    break;
+                } catch (java.lang.InterruptedException e) {
+                    Log.e(TAG, "Error writing to socket (interrupted)");
+                    break;
+                }
+            }
+            Log.i(TAG, "Socket writer thread terminated");
+        }
+    }
+
+    class SocketRunnable implements Runnable {
+
+        // Format of sent messages (over the socket):
+        // * Serialized JSON object on a single line (newline-terminated)
+        // * For byte buffers, the binary data then follows
+        //
+        // Format of received messages (from the socket):
+        // * Serialized JSON object on a single line (newline-terminated)
+
+        private Socket mOpenSocket = null;
+        private SocketWriteRunnable mSocketWriteRunnable = null;
+
+        public void run() {
+            Log.i(TAG, "Socket thread starting");
+            try {
+                mSocket = new ServerSocket(SERVERPORT);
+            } catch (IOException e) {
+                Log.e(TAG, "Failed to create socket: " + e); return;
+            }
+            try {
+                Log.i(TAG, "Waiting for client to connect to socket");
+                mOpenSocket = mSocket.accept();
+                if (mOpenSocket == null) {
+                    Log.e(TAG, "Socket connection error");
+                    return;
+                }
+                Log.i(TAG, "Socket connected");
+            } catch (IOException e) {
+                Log.e(TAG, "Socket open error: " + e);
+                return;
+            }
+            Thread writerThread = new Thread(new SocketWriteRunnable(mOpenSocket));
+            writerThread.start();
+            while (!mSocketThreadExitFlag) {
+                try {
+                    BufferedReader input = new BufferedReader(
+                            new InputStreamReader(mOpenSocket.getInputStream()));
+                    if (input == null) {
+                        Log.e(TAG, "Failed to get socket input stream");
+                        break;
+                    }
+                    String line = input.readLine();
+                    if (line == null) {
+                        Log.e(TAG, "Failed to read socket line");
+                        break;
+                    }
+                    processSocketCommand(line);
+                } catch (IOException e) {
+                    Log.e(TAG, "Socket read error: " + e);
+                    break;
+                } catch (ItsException e) {
+                    Log.e(TAG, "Script error: " + e);
+                    break;
+                }
+            }
+            Log.i(TAG, "Socket server loop exited");
+            try {
+                if (mOpenSocket != null) {
+                    mOpenSocket.close();
+                    mOpenSocket = null;
+                }
+            } catch (java.io.IOException e) {
+                Log.w(TAG, "Exception closing socket");
+            }
+            try {
+                if (mSocket != null) {
+                    mSocket.close();
+                    mSocket = null;
+                }
+            } catch (java.io.IOException e) {
+                Log.w(TAG, "Exception closing socket");
+            }
+            Log.i(TAG, "Socket server thread exited");
+        }
+
+        public void processSocketCommand(String cmd)
+                throws ItsException {
+            // Each command is a serialized JSON object.
+            try {
+                JSONObject cmdObj = new JSONObject(cmd);
+                if ("getCameraProperties".equals(cmdObj.getString("cmdName"))) {
+                    doGetProps();
+                }
+                else if ("do3A".equals(cmdObj.getString("cmdName"))) {
+                    do3A(cmdObj);
+                }
+                else if ("doCapture".equals(cmdObj.getString("cmdName"))) {
+                    doCapture(cmdObj);
+                }
+                else {
+                    throw new ItsException("Unknown command: " + cmd);
+                }
+            } catch (org.json.JSONException e) {
+                Log.e(TAG, "Invalid command: ", e);
+            }
+        }
+
+        public void sendResponse(String tag, String str, JSONObject obj, ByteBuffer bbuf)
+                throws ItsException {
+            try {
+                JSONObject jsonObj = new JSONObject();
+                jsonObj.put("tag", tag);
+                if (str != null) {
+                    jsonObj.put("strValue", str);
+                }
+                if (obj != null) {
+                    jsonObj.put("objValue", obj);
+                }
+                if (bbuf != null) {
+                    jsonObj.put("bufValueSize", bbuf.capacity());
+                }
+                ByteBuffer bstr = ByteBuffer.wrap(
+                        (jsonObj.toString()+"\n").getBytes(Charset.defaultCharset()));
+                synchronized(mSocketWriteLock) {
+                    if (bstr != null) {
+                        mSocketWriteQueue.put(bstr);
+                    }
+                    if (bbuf != null) {
+                        mSocketWriteQueue.put(bbuf);
+                    }
+                }
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            } catch (java.lang.InterruptedException e) {
+                throw new ItsException("Socket error: ", e);
+            }
+        }
+
+        public void sendResponse(String tag, String str)
+                throws ItsException {
+            sendResponse(tag, str, null, null);
+        }
+
+        public void sendResponse(String tag, JSONObject obj)
+                throws ItsException {
+            sendResponse(tag, null, obj, null);
+        }
+
+        public void sendResponse(String tag, ByteBuffer bbuf)
+                throws ItsException {
+            sendResponse(tag, null, null, bbuf);
+        }
+
+        public void sendResponse(CameraCharacteristics props)
+                throws ItsException {
+            try {
+                JSONObject jsonObj = new JSONObject();
+                jsonObj.put("cameraProperties", ItsSerializer.serialize(props));
+                sendResponse("cameraProperties", null, jsonObj, null);
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            }
+        }
+
+        public void sendResponse(CameraCharacteristics props,
+                                 CaptureRequest request,
+                                 CaptureResult result)
+                throws ItsException {
+            try {
+                JSONObject jsonObj = new JSONObject();
+                jsonObj.put("cameraProperties", ItsSerializer.serialize(props));
+                jsonObj.put("captureRequest", ItsSerializer.serialize(request));
+                jsonObj.put("captureResult", ItsSerializer.serialize(result));
+                jsonObj.put("width", mCaptureReader.getWidth());
+                jsonObj.put("height", mCaptureReader.getHeight());
+                sendResponse("captureResults", null, jsonObj, null);
+            } catch (org.json.JSONException e) {
+                throw new ItsException("JSON error: ", e);
+            }
+        }
+    }
+
+    public ImageReader.OnImageAvailableListener
             createAvailableListener(final CaptureListener listener) {
         return new ImageReader.OnImageAvailableListener() {
             @Override
@@ -284,12 +452,7 @@
     }
 
     private void doGetProps() throws ItsException {
-        String fileName = ItsUtils.getMetadataFileName(0);
-        File mdFile = ItsUtils.getOutputFile(ItsService.this, fileName);
-        ItsUtils.storeCameraCharacteristics(mCameraCharacteristics, mdFile);
-        Log.i(PYTAG,
-              String.format("### FILE %s",
-                            ItsUtils.getExternallyVisiblePath(ItsService.this, mdFile.toString())));
+        mSocketRunnableObj.sendResponse(mCameraCharacteristics);
     }
 
     private void prepareCaptureReader(int width, int height, int format) {
@@ -305,12 +468,8 @@
         }
     }
 
-    private void do3A(Uri uri) throws ItsException {
+    private void do3A(JSONObject params) throws ItsException {
         try {
-            if (uri == null || !uri.toString().endsWith(".json")) {
-                throw new ItsException("Invalid URI: " + uri);
-            }
-
             // Start a 3A action, and wait for it to converge.
             // Get the converged values for each "A", and package into JSON result for caller.
 
@@ -342,7 +501,6 @@
             int[] regionAE = new int[]{0,0,width-1,height-1,1};
             int[] regionAF = new int[]{0,0,width-1,height-1,1};
             int[] regionAWB = new int[]{0,0,width-1,height-1,1};
-            JSONObject params = ItsUtils.loadJsonFile(uri);
             if (params.has(REGION_KEY)) {
                 JSONObject regions = params.getJSONObject(REGION_KEY);
                 if (regions.has(REGION_AE_KEY)) {
@@ -388,7 +546,6 @@
             boolean triggeredAF = false;
 
             // Keep issuing capture requests until 3A has converged.
-            // First do AE, then do AF and AWB together.
             while (true) {
 
                 // Block until can take the next 3A frame. Only want one outstanding frame
@@ -451,17 +608,16 @@
             throw new ItsException("Access error: ", e);
         } catch (org.json.JSONException e) {
             throw new ItsException("JSON error: ", e);
+        } finally {
+            mSocketRunnableObj.sendResponse("3aDone", "");
         }
     }
 
-    private void doCapture(Uri uri) throws ItsException {
+    private void doCapture(JSONObject params) throws ItsException {
         try {
-            if (uri == null || !uri.toString().endsWith(".json")) {
-                throw new ItsException("Invalid URI: " + uri);
-            }
-
             // Parse the JSON to get the list of capture requests.
-            List<CaptureRequest.Builder> requests = ItsUtils.loadRequestList(mCamera, uri);
+            List<CaptureRequest.Builder> requests = ItsSerializer.deserializeRequestList(
+                    mCamera, params);
 
             // Set the output surface and listeners.
             try {
@@ -475,7 +631,7 @@
                 int height = sizes[0].getHeight();
                 int format = ImageFormat.YUV_420_888;
 
-                JSONObject jsonOutputSpecs = ItsUtils.getOutputSpecs(uri);
+                JSONObject jsonOutputSpecs = ItsUtils.getOutputSpecs(params);
                 if (jsonOutputSpecs != null) {
                     // Use the user's JSON capture spec.
                     int width2 = jsonOutputSpecs.optInt("width");
@@ -498,8 +654,6 @@
                     }
                 }
 
-                Log.i(PYTAG, String.format("### SIZE %d %d", width, height));
-
                 prepareCaptureReader(width, height, format);
                 List<Surface> outputSurfaces = new ArrayList<Surface>(1);
                 outputSurfaces.add(mCaptureReader.getSurface());
@@ -526,7 +680,6 @@
             // Initiate the captures.
             for (int i = 0; i < requests.size(); i++) {
                 CaptureRequest.Builder req = requests.get(i);
-                Log.i(PYTAG, String.format("### CAPT %d of %d", i+1, requests.size()));
                 req.addTarget(mCaptureReader.getSurface());
                 mCamera.capture(req.build(), mCaptureResultListener, mResultHandler);
             }
@@ -553,22 +706,21 @@
                 int format = capture.getFormat();
                 String extFileName = null;
                 if (format == ImageFormat.JPEG) {
-                    String fileName = ItsUtils.getJpegFileName(capture.getTimestamp());
                     ByteBuffer buf = capture.getPlanes()[0].getBuffer();
-                    extFileName = ItsUtils.writeImageToFile(ItsService.this, buf, fileName);
+                    Log.i(TAG, "Received JPEG capture");
+                    mSocketRunnableObj.sendResponse("jpegImage", buf);
                 } else if (format == ImageFormat.YUV_420_888) {
-                    String fileName = ItsUtils.getYuvFileName(capture.getTimestamp());
                     byte[] img = ItsUtils.getDataFromImage(capture);
                     ByteBuffer buf = ByteBuffer.wrap(img);
-                    extFileName = ItsUtils.writeImageToFile(ItsService.this, buf, fileName);
+                    Log.i(TAG, "Received YUV capture");
+                    mSocketRunnableObj.sendResponse("yuvImage", buf);
                 } else {
                     throw new ItsException("Unsupported image format: " + format);
                 }
-                Log.i(PYTAG, String.format("### FILE %s", extFileName));
                 mCaptureCallbackLatch.countDown();
             } catch (ItsException e) {
                 Log.e(TAG, "Script error: " + e);
-                Log.e(PYTAG, "### FAIL");
+                mSocketThreadExitFlag = true;
             }
         }
     };
@@ -630,32 +782,36 @@
                         result.get(CaptureResult.LENS_FOCUS_DISTANCE)));
                 Log.i(TAG, logMsg.toString());
 
-                mConvergedAE = result.get(CaptureResult.CONTROL_AE_STATE) ==
-                                          CaptureResult.CONTROL_AE_STATE_CONVERGED;
-                mConvergedAF = result.get(CaptureResult.CONTROL_AF_STATE) ==
-                                          CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED;
-                mConvergedAWB = result.get(CaptureResult.CONTROL_AWB_STATE) ==
-                                           CaptureResult.CONTROL_AWB_STATE_CONVERGED;
+                if (result.get(CaptureResult.CONTROL_AE_STATE) != null) {
+                    mConvergedAE = result.get(CaptureResult.CONTROL_AE_STATE) ==
+                                              CaptureResult.CONTROL_AE_STATE_CONVERGED;
+                }
+                if (result.get(CaptureResult.CONTROL_AF_STATE) != null) {
+                    mConvergedAF = result.get(CaptureResult.CONTROL_AF_STATE) ==
+                                              CaptureResult.CONTROL_AF_STATE_FOCUSED_LOCKED;
+                }
+                if (result.get(CaptureResult.CONTROL_AWB_STATE) != null) {
+                    mConvergedAWB = result.get(CaptureResult.CONTROL_AWB_STATE) ==
+                                               CaptureResult.CONTROL_AWB_STATE_CONVERGED;
+                }
 
                 if (mConvergedAE) {
-                    Log.i(PYTAG, String.format(
-                            "### 3A-E %d %d",
+                    mSocketRunnableObj.sendResponse("aeResult", String.format("%d %d",
                             result.get(CaptureResult.SENSOR_SENSITIVITY).intValue(),
                             result.get(CaptureResult.SENSOR_EXPOSURE_TIME).intValue()
                             ));
                 }
 
                 if (mConvergedAF) {
-                    Log.i(PYTAG, String.format(
-                            "### 3A-F %f",
+                    mSocketRunnableObj.sendResponse("afResult", String.format("%f",
                             result.get(CaptureResult.LENS_FOCUS_DISTANCE)
                             ));
                 }
 
                 if (mConvergedAWB && result.get(CaptureResult.COLOR_CORRECTION_GAINS) != null
                         && result.get(CaptureResult.COLOR_CORRECTION_TRANSFORM) != null) {
-                    Log.i(PYTAG, String.format(
-                            "### 3A-W %f %f %f %f %f %f %f %f %f %f %f %f %f",
+                    mSocketRunnableObj.sendResponse("awbResult", String.format(
+                            "%f %f %f %f %f %f %f %f %f %f %f %f %f",
                             result.get(CaptureResult.COLOR_CORRECTION_GAINS)[0],
                             result.get(CaptureResult.COLOR_CORRECTION_GAINS)[1],
                             result.get(CaptureResult.COLOR_CORRECTION_GAINS)[2],
@@ -676,18 +832,15 @@
                     mIssuedRequest3A = false;
                     mInterlock3A.open();
                 } else {
-                    String fileName = ItsUtils.getMetadataFileName(
-                            result.get(CaptureResult.SENSOR_TIMESTAMP));
-                    File mdFile = ItsUtils.getOutputFile(ItsService.this, fileName);
-                    ItsUtils.storeResults(mCameraCharacteristics, request, result, mdFile);
+                    mSocketRunnableObj.sendResponse(mCameraCharacteristics, request, result);
                     mCaptureCallbackLatch.countDown();
                 }
             } catch (ItsException e) {
                 Log.e(TAG, "Script error: " + e);
-                Log.e(PYTAG, "### FAIL");
+                mSocketThreadExitFlag = true;
             } catch (Exception e) {
                 Log.e(TAG, "Script error: " + e);
-                Log.e(PYTAG, "### FAIL");
+                mSocketThreadExitFlag = true;
             }
         }
 
@@ -696,8 +849,6 @@
                 CaptureFailure failure) {
             mCaptureCallbackLatch.countDown();
             Log.e(TAG, "Script error: capture failed");
-            Log.e(PYTAG, "### FAIL");
         }
     };
-
 }
diff --git a/apps/CameraITS/service/src/com/android/camera2/its/ItsUtils.java b/apps/CameraITS/service/src/com/android/camera2/its/ItsUtils.java
index 861f4ae..5824b36 100644
--- a/apps/CameraITS/service/src/com/android/camera2/its/ItsUtils.java
+++ b/apps/CameraITS/service/src/com/android/camera2/its/ItsUtils.java
@@ -31,106 +31,15 @@
 import org.json.JSONArray;
 import org.json.JSONObject;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
 import java.nio.charset.Charset;
 import java.util.List;
 
 public class ItsUtils {
     public static final String TAG = ItsUtils.class.getSimpleName();
 
-    // The externally visible (over adb) base path for the files that are saved by this app
-    // to the external media. Currently hardcoded to "/sdcard", which can work on any device
-    // by creating a symlink to the actual mount location.
-    // TODO: Fix this, by querying mount/vold to get the actual externally visible path.
-    public static final String EXT_VISIBLE_BASE_PATH = "/sdcard";
-
-    // State related to output files created by the script.
-    public static final String DEFAULT_CAPTURE_DIR = "its";
-    public static final String DEFAULT_IMAGE_DIR = "captures";
-    public static final String FILE_PREFIX = "IMG_";
-    public static final String JPEG_SUFFIX = ".jpg";
-    public static final String YUV_SUFFIX = ".yuv";
-    public static final String METADATA_SUFFIX = ".json";
-
-    // The indent amount to use when printing the JSON objects out as strings.
-    private static final int PPRINT_JSON_INDENT = 2;
-
-    public static void storeCameraCharacteristics(CameraCharacteristics props,
-                                               File file)
-            throws ItsException {
-        try {
-            JSONObject jsonObj = new JSONObject();
-            jsonObj.put("cameraProperties", ItsSerializer.serialize(props));
-            storeJsonObject(jsonObj, file);
-        } catch (org.json.JSONException e) {
-            throw new ItsException("JSON error: ", e);
-        }
-    }
-
-    public static void storeResults(CameraCharacteristics props,
-                                      CaptureRequest request,
-                                      CaptureResult result,
-                                      File file)
-            throws ItsException {
-        try {
-            JSONObject jsonObj = new JSONObject();
-            jsonObj.put("cameraProperties", ItsSerializer.serialize(props));
-            jsonObj.put("captureRequest", ItsSerializer.serialize(request));
-            jsonObj.put("captureResult", ItsSerializer.serialize(result));
-            storeJsonObject(jsonObj, file);
-        } catch (org.json.JSONException e) {
-            throw new ItsException("JSON error: ", e);
-        }
-    }
-
-    public static void storeJsonObject(JSONObject jsonObj, File file)
-            throws ItsException {
-        ByteBuffer buf = null;
-        try {
-            buf = ByteBuffer.wrap(jsonObj.toString(PPRINT_JSON_INDENT).
-                                  getBytes(Charset.defaultCharset()));
-        } catch (org.json.JSONException e) {
-            throw new ItsException("JSON error: ", e);
-        }
-        FileChannel channel = null;
-        try {
-            channel = new FileOutputStream(file, false).getChannel();
-            channel.write(buf);
-            channel.close();
-        } catch (FileNotFoundException e) {
-            throw new ItsException("Failed to write file: " + file.toString() + ": ", e);
-        } catch (IOException e) {
-            throw new ItsException("Failed to write file: " + file.toString() + ": ", e);
-        }
-    }
-
-    public static List<CaptureRequest.Builder> loadRequestList(CameraDevice device, Uri uri)
-            throws ItsException {
-        return ItsSerializer.deserializeRequestList(device, loadJsonFile(uri));
-    }
-
-    public static JSONObject loadJsonFile(Uri uri) throws ItsException {
-        FileInputStream input = null;
-        try {
-            input = new FileInputStream(uri.getPath());
-            byte[] fileData = new byte[input.available()];
-            input.read(fileData);
-            input.close();
-            String text = new String(fileData, Charset.defaultCharset());
-            return new JSONObject(text);
-        } catch (FileNotFoundException e) {
-            throw new ItsException("Failed to read file: " + uri.toString() + ": ", e);
-        } catch (org.json.JSONException e) {
-            throw new ItsException("JSON error: ", e);
-        } catch (IOException e) {
-            throw new ItsException("Failed to read file: " + uri.toString() + ": ", e);
-        }
+    public static ByteBuffer jsonToByteBuffer(JSONObject jsonObj) {
+        return ByteBuffer.wrap(jsonObj.toString().getBytes(Charset.defaultCharset()));
     }
 
     public static int[] getJsonRectFromArray(
@@ -174,79 +83,18 @@
         return n;
     }
 
-    public static JSONObject getOutputSpecs(Uri uri)
+    public static JSONObject getOutputSpecs(JSONObject jsonObjTop)
             throws ItsException {
-        FileInputStream input = null;
         try {
-            input = new FileInputStream(uri.getPath());
-            byte[] fileData = new byte[input.available()];
-            input.read(fileData);
-            input.close();
-            String text = new String(fileData, Charset.defaultCharset());
-            JSONObject jsonObjTop = new JSONObject(text);
             if (jsonObjTop.has("outputSurface")) {
                 return jsonObjTop.getJSONObject("outputSurface");
             }
             return null;
-        } catch (FileNotFoundException e) {
-            throw new ItsException("Failed to read file: " + uri.toString() + ": ", e);
-        } catch (IOException e) {
-            throw new ItsException("Failed to read file: " + uri.toString() + ": ", e);
         } catch (org.json.JSONException e) {
             throw new ItsException("JSON error: ", e);
         }
     }
 
-    public static boolean isExternalStorageWritable() {
-        String state = Environment.getExternalStorageState();
-        if (Environment.MEDIA_MOUNTED.equals(state)) {
-            return true;
-        }
-        return false;
-    }
-
-    public static File getStorageDirectory(Context context, String dirName)
-            throws ItsException {
-        if (!isExternalStorageWritable()) {
-            throw new ItsException(
-                    "External storage is not writable, cannot save capture image");
-        }
-        File file = Environment.getExternalStorageDirectory();
-        if (file == null) {
-            throw new ItsException("No external storage available");
-        }
-        File newDir = new File(file, dirName);
-        newDir.mkdirs();
-        if (!newDir.isDirectory()) {
-            throw new ItsException("Could not create directory: " + dirName);
-        }
-        return newDir;
-    }
-
-    public static String getExternallyVisiblePath(Context context, String path)
-            throws ItsException {
-        File file = Environment.getExternalStorageDirectory();
-        if (file == null) {
-            throw new ItsException("No external storage available");
-        }
-        String base = file.toString();
-        String newPath = path.replaceFirst(base, EXT_VISIBLE_BASE_PATH);
-        if (newPath == null) {
-            throw new ItsException("Error getting external path: " + path);
-        }
-        return newPath;
-    }
-
-    public static String getJpegFileName(long fileNumber) {
-        return String.format("%s%016x%s", FILE_PREFIX, fileNumber, JPEG_SUFFIX);
-    }
-    public static String getYuvFileName(long fileNumber) {
-        return String.format("%s%016x%s", FILE_PREFIX, fileNumber, YUV_SUFFIX);
-    }
-    public static String getMetadataFileName(long fileNumber) {
-        return String.format("%s%016x%s", FILE_PREFIX, fileNumber, METADATA_SUFFIX);
-    }
-
     public static byte[] getDataFromImage(Image image)
             throws ItsException {
         int format = image.getFormat();
@@ -327,32 +175,5 @@
                 return false;
         }
     }
-
-    public static File getOutputFile(Context context, String name)
-        throws ItsException {
-        File dir = getStorageDirectory(context, DEFAULT_CAPTURE_DIR + '/' + DEFAULT_IMAGE_DIR);
-        if (dir == null) {
-            throw new ItsException("Could not output file");
-        }
-        return new File(dir, name);
-    }
-
-    public static String writeImageToFile(Context context, ByteBuffer buf, String name)
-        throws ItsException {
-        File imgFile = getOutputFile(context, name);
-        if (imgFile == null) {
-            throw new ItsException("Failed to get path: " + name);
-        }
-        FileChannel channel = null;
-        try {
-            channel = new FileOutputStream(imgFile, false).getChannel();
-            channel.write(buf);
-            channel.close();
-        } catch (FileNotFoundException e) {
-            throw new ItsException("Failed to write file: " + imgFile.toString(), e);
-        } catch (IOException e) {
-            throw new ItsException("Failed to write file: " + imgFile.toString(), e);
-        }
-        return getExternallyVisiblePath(context, imgFile.toString());
-    }
 }
+
diff --git a/apps/CameraITS/tests/test_3a_remote.py b/apps/CameraITS/tests/test_3a_remote.py
index 0400ed3..a5b0e60 100644
--- a/apps/CameraITS/tests/test_3a_remote.py
+++ b/apps/CameraITS/tests/test_3a_remote.py
@@ -45,24 +45,24 @@
             req['android.control.afTrigger'] = (0 if triggered else 1)
             triggered = True
 
-            fname, w, h, cap_res = cam.do_capture(req)
+            cap = cam.do_capture(req)
 
-            ae_state = cap_res["android.control.aeState"]
-            awb_state = cap_res["android.control.awbState"]
-            af_state = cap_res["android.control.afState"]
-            gains = cap_res["android.colorCorrection.gains"]
-            transform = cap_res["android.colorCorrection.transform"]
-            exp_time = cap_res['android.sensor.exposureTime']
-            lsc_map = cap_res["android.statistics.lensShadingMap"]
-            foc_dist = cap_res['android.lens.focusDistance']
-            foc_range = cap_res['android.lens.focusRange']
+            ae_state = cap["metadata"]["android.control.aeState"]
+            awb_state = cap["metadata"]["android.control.awbState"]
+            af_state = cap["metadata"]["android.control.afState"]
+            gains = cap["metadata"]["android.colorCorrection.gains"]
+            transform = cap["metadata"]["android.colorCorrection.transform"]
+            exp_time = cap["metadata"]['android.sensor.exposureTime']
+            lsc_map = cap["metadata"]["android.statistics.lensShadingMap"]
+            foc_dist = cap["metadata"]['android.lens.focusDistance']
+            foc_range = cap["metadata"]['android.lens.focusRange']
 
             print "States (AE,AWB,AF):", ae_state, awb_state, af_state
             print "Gains:", gains
             print "Transform:", [r2f(t) for t in transform]
-            print "AE region:", cap_res['android.control.aeRegions']
-            print "AF region:", cap_res['android.control.afRegions']
-            print "AWB region:", cap_res['android.control.awbRegions']
+            print "AE region:", cap["metadata"]['android.control.aeRegions']
+            print "AF region:", cap["metadata"]['android.control.afRegions']
+            print "AWB region:", cap["metadata"]['android.control.awbRegions']
             print "LSC map:", w_map, h_map, lsc_map[:8]
             print "Focus (dist,range):", foc_dist, foc_range
             print ""
diff --git a/apps/CameraITS/tests/test_black_level.py b/apps/CameraITS/tests/test_black_level.py
index 8518469..1405746 100644
--- a/apps/CameraITS/tests/test_black_level.py
+++ b/apps/CameraITS/tests/test_black_level.py
@@ -58,8 +58,10 @@
                 req = its.objects.manual_capture_request(100, 1)
                 req["android.blackLevel.lock"] = True
                 req["android.sensor.sensitivity"] = s
-                fname, w, h, cap_md = cam.do_capture(req)
-                yimg,uimg,vimg = its.image.load_yuv420_to_yuv_planes(fname,w,h)
+                cap = cam.do_capture(req)
+                yimg,uimg,vimg = its.image.convert_capture_to_yuv_planes(cap)
+                w = cap["width"]
+                h = cap["height"]
 
                 # Magnify the noise in saved images to help visualize.
                 its.image.write_image(yimg * 2,
diff --git a/apps/CameraITS/tests/test_black_white.py b/apps/CameraITS/tests/test_black_white.py
index 09c2c3d..ac9bf2e 100644
--- a/apps/CameraITS/tests/test_black_white.py
+++ b/apps/CameraITS/tests/test_black_white.py
@@ -41,9 +41,10 @@
 
         # Take a shot with very low ISO and exposure time. Expect it to
         # be black.
-        req = its.objects.manual_capture_request(sens_range[0], expt_range[0]/1000000.0)
-        fname, w, h, cap_md = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+        req = its.objects.manual_capture_request(sens_range[0],
+                                                 expt_range[0]/1000000.0)
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_black.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         black_means = its.image.compute_image_means(tile)
@@ -53,10 +54,11 @@
         print "Dark pixel means:", black_means
 
         # Take a shot with very high ISO and exposure time. Expect it to
-        # be black.
-        req = its.objects.manual_capture_request(sens_range[1], expt_range[1]/1000000.0)
-        fname, w, h, cap_md = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+        # be white.
+        req = its.objects.manual_capture_request(sens_range[1],
+                                                 expt_range[1]/1000000.0)
+        cap = cam.do_capture(req)
+        img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_white.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         white_means = its.image.compute_image_means(tile)
diff --git a/apps/CameraITS/tests/test_blc_lsc.py b/apps/CameraITS/tests/test_blc_lsc.py
index 10eb4e3..8324ec5 100644
--- a/apps/CameraITS/tests/test_blc_lsc.py
+++ b/apps/CameraITS/tests/test_blc_lsc.py
@@ -72,9 +72,9 @@
             req["android.colorCorrection.gains"] = awb_gains
             reqs.append(req)
 
-        fnames, w, h, cap_mds = cam.do_capture(reqs)
-        for i,fname in enumerate(fnames):
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
 
             tile_center = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile_center)
diff --git a/apps/CameraITS/tests/test_capture_result.py b/apps/CameraITS/tests/test_capture_result.py
index 0dc2ac9..3638101 100644
--- a/apps/CameraITS/tests/test_capture_result.py
+++ b/apps/CameraITS/tests/test_capture_result.py
@@ -89,7 +89,9 @@
         rect = [0,0,1,1]
         cam.do_3a(rect, rect, rect, True, True, False)
 
-        fname, w, h, cap_res = cam.do_capture(auto_req)
+        cap = cam.do_capture(auto_req)
+        cap_res = cap["metadata"]
+
         gains = cap_res["android.colorCorrection.gains"]
         transform = cap_res["android.colorCorrection.transform"]
         exp_time = cap_res['android.sensor.exposureTime']
@@ -135,7 +137,9 @@
         return lsc_map
 
     def test_manual(cam, w_map, h_map, lsc_map_auto):
-        fname, w, h, cap_res = cam.do_capture(manual_req)
+            cap = cam.do_capture(manual_req)
+        cap_res = cap["metadata"]
+
         gains = cap_res["android.colorCorrection.gains"]
         transform = cap_res["android.colorCorrection.transform"]
         curves = [cap_res["android.tonemap.curveRed"],
@@ -143,15 +147,11 @@
                   cap_res["android.tonemap.curveBlue"]]
         exp_time = cap_res['android.sensor.exposureTime']
         lsc_map = cap_res["android.statistics.lensShadingMap"]
-        pred_gains = cap_res["android.statistics.predictedColorGains"]
-        pred_transform = cap_res["android.statistics.predictedColorTransform"]
         ctrl_mode = cap_res["android.control.mode"]
 
         print "Control mode:", ctrl_mode
         print "Gains:", gains
         print "Transform:", [r2f(t) for t in transform]
-        print "Predicted gains:", pred_gains
-        print "Predicted transform:", [r2f(t) for t in pred_transform]
         print "Tonemap:", curves[0][1::16]
         print "AE region:", cap_res['android.control.aeRegions']
         print "AF region:", cap_res['android.control.afRegions']
@@ -170,10 +170,6 @@
         assert(all([is_close_rational(transform[i], manual_transform[i])
                     for i in xrange(9)]))
 
-        # The predicted gains and transform must also be valid.
-        assert(len(pred_gains) == 4)
-        assert(len(pred_transform) == 9)
-
         # Tonemap must be valid.
         # The returned tonemap must be linear.
         for c in curves:
diff --git a/apps/CameraITS/tests/test_exposure.py b/apps/CameraITS/tests/test_exposure.py
index 57d1ad2..35f607f 100644
--- a/apps/CameraITS/tests/test_exposure.py
+++ b/apps/CameraITS/tests/test_exposure.py
@@ -51,8 +51,8 @@
         while s*m < sens_range[1] and e/m > expt_range[0]:
             mults.append(m)
             req = its.objects.manual_capture_request(s*m, e/1000000.0/m)
-            fname, w, h, md_obj = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_mult=%02d.jpg" % (NAME, m))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
diff --git a/apps/CameraITS/tests/test_formats.py b/apps/CameraITS/tests/test_formats.py
index cbc76df..ef7ab50 100644
--- a/apps/CameraITS/tests/test_formats.py
+++ b/apps/CameraITS/tests/test_formats.py
@@ -30,22 +30,24 @@
             req = its.objects.manual_capture_request(100,10)
             out_surface = copy.deepcopy(size)
             out_surface["format"] = "yuv"
-            fname, w, h, cap_md = cam.do_capture(req, out_surface)
-            assert(os.path.splitext(fname)[1] == ".yuv")
-            assert(w == size["width"] and h == size["height"])
-            assert(os.path.getsize(fname) == w*h*3/2)
-            print "Successfully captured YUV %dx%d" % (w, h)
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "yuv")
+            assert(cap["width"] == size["width"])
+            assert(cap["height"] == size["height"])
+            print "Captured YUV %dx%d" % (cap["width"], cap["height"])
         for size in props['android.scaler.availableJpegSizes']:
             req = its.objects.manual_capture_request(100,10)
             out_surface = copy.deepcopy(size)
             out_surface["format"] = "jpg"
-            fname, w, h, cap_md = cam.do_capture(req, out_surface)
-            assert(os.path.splitext(fname)[1] == ".jpg")
-            assert(w == size["width"] and h == size["height"])
-            img = Image.open(fname)
-            assert(img.size[0] == size["width"])
-            assert(img.size[1] == size["height"])
-            print "Successfully captured JPEG %dx%d" % (w, h)
+            cap = cam.do_capture(req, out_surface)
+            assert(cap["format"] == "jpeg")
+            assert(cap["width"] == size["width"])
+            assert(cap["height"] == size["height"])
+            img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+            assert(img.shape[0] == size["height"])
+            assert(img.shape[1] == size["width"])
+            assert(img.shape[2] == 3)
+            print "Captured JPEG %dx%d" % (cap["width"], cap["height"])
 
 if __name__ == '__main__':
     main()
diff --git a/apps/CameraITS/tests/test_jpeg.py b/apps/CameraITS/tests/test_jpeg.py
index 5104112..8fdcbdf 100644
--- a/apps/CameraITS/tests/test_jpeg.py
+++ b/apps/CameraITS/tests/test_jpeg.py
@@ -37,8 +37,8 @@
         size = props['android.scaler.availableProcessedSizes'][0]
         out_surface = copy.deepcopy(size)
         out_surface["format"] = "yuv"
-        fname, w, h, cap_md = cam.do_capture(req, out_surface)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_fmt=yuv.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         rgb0 = its.image.compute_image_means(tile)
@@ -48,11 +48,11 @@
         size = props['android.scaler.availableJpegSizes'][0]
         out_surface = copy.deepcopy(size)
         out_surface["format"] = "jpg"
-        fname, w, h, cap_md = cam.do_capture(req, out_surface)
-        img = numpy.array(Image.open(fname)).reshape(w,h,3) / 255.0
+        cap = cam.do_capture(req, out_surface)
+        img = its.image.decompress_jpeg_to_rgb_image(cap["data"])
+        its.image.write_image(img, "%s_fmt=jpg.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
         rgb1 = its.image.compute_image_means(tile)
-        shutil.copy2(fname, "%s_fmt=jpg.jpg" % (NAME))
 
         rms_diff = math.sqrt(
                 sum([pow(rgb0[i] - rgb1[i], 2.0) for i in range(3)]) / 3.0)
diff --git a/apps/CameraITS/tests/test_latching.py b/apps/CameraITS/tests/test_latching.py
index 720149f..5e3a9b7 100644
--- a/apps/CameraITS/tests/test_latching.py
+++ b/apps/CameraITS/tests/test_latching.py
@@ -54,9 +54,9 @@
             its.objects.manual_capture_request(s,  e/1000000.0  ),
             ]
 
-        fnames, w, h, cap_mds = cam.do_capture(reqs)
-        for i, fname in enumerate(fnames):
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
diff --git a/apps/CameraITS/tests/test_linearity.py b/apps/CameraITS/tests/test_linearity.py
index 8e6c8e5..656ebc1 100644
--- a/apps/CameraITS/tests/test_linearity.py
+++ b/apps/CameraITS/tests/test_linearity.py
@@ -68,8 +68,8 @@
 
         for sens in sensitivities:
             req["android.sensor.sensitivity"] = sens
-            fname, w, h, cap_md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_sens=%04d.jpg" % (NAME, sens))
             img = its.image.apply_lut_to_image(img, inv_gamma_lut[1::2] * LM1)
diff --git a/apps/CameraITS/tests/test_param_black_level_lock.py b/apps/CameraITS/tests/test_param_black_level_lock.py
index af9e2ba..1f65221 100644
--- a/apps/CameraITS/tests/test_param_black_level_lock.py
+++ b/apps/CameraITS/tests/test_param_black_level_lock.py
@@ -53,8 +53,8 @@
                               int((sens_range[1] - sens_range[0]) / NUM_STEPS))
         for si, s in enumerate(sensitivities):
             req["android.sensor.sensitivity"] = s
-            fname, w, h, cap_md = cam.do_capture(req)
-            yimg,_,_ = its.image.load_yuv420_to_yuv_planes(fname, w, h)
+            cap = cam.do_capture(req)
+            yimg,_,_ = its.image.convert_capture_to_yuv_planes(cap)
             hist,_ = numpy.histogram(yimg*255, 256, (0,256))
             modes.append(numpy.argmax(hist))
 
diff --git a/apps/CameraITS/tests/test_param_color_correction.py b/apps/CameraITS/tests/test_param_color_correction.py
index a10ae1a..7764026 100644
--- a/apps/CameraITS/tests/test_param_color_correction.py
+++ b/apps/CameraITS/tests/test_param_color_correction.py
@@ -78,8 +78,8 @@
         for i in range(len(transforms)):
             req["android.colorCorrection.transform"] = transforms[i]
             req["android.colorCorrection.gains"] = gains[i]
-            fname, w, h, md_obj = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
             rgb_means = its.image.compute_image_means(tile)
diff --git a/apps/CameraITS/tests/test_param_edge_mode.py b/apps/CameraITS/tests/test_param_edge_mode.py
index d4f8eee..bf4deca 100644
--- a/apps/CameraITS/tests/test_param_edge_mode.py
+++ b/apps/CameraITS/tests/test_param_edge_mode.py
@@ -40,8 +40,8 @@
         sens, exp, gains, xform, focus = cam.do_3a(rect, rect, rect)
         for e in [0,1,2]:
             req["android.edge.mode"] = e
-            fname, w, h, cap_md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, e))
 
 if __name__ == '__main__':
diff --git a/apps/CameraITS/tests/test_param_exposure_time.py b/apps/CameraITS/tests/test_param_exposure_time.py
index bc91464..8e0bd8a 100644
--- a/apps/CameraITS/tests/test_param_exposure_time.py
+++ b/apps/CameraITS/tests/test_param_exposure_time.py
@@ -45,8 +45,8 @@
 
         for e in exp_times:
             req = its.objects.manual_capture_request(sens, e/1000000.0)
-            fname, w, h, cap_md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_time=%03dms.jpg" % (NAME, e))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/test_param_exposure_time_burst.py b/apps/CameraITS/tests/test_param_exposure_time_burst.py
index d640302..f576af3 100644
--- a/apps/CameraITS/tests/test_param_exposure_time_burst.py
+++ b/apps/CameraITS/tests/test_param_exposure_time_burst.py
@@ -30,10 +30,10 @@
     reqs = [its.objects.manual_capture_request(100,e) for e in exp_times]
 
     with its.device.ItsSession() as cam:
-        fnames, w, h, cap_mds = cam.do_capture(reqs)
-        for i,md in enumerate(cap_mds):
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
             e_req = exp_times[i]*1000*1000
-            e_res = md["android.sensor.exposureTime"]
+            e_res = cap["metadata"]["android.sensor.exposureTime"]
             print e_req, e_res
 
 if __name__ == '__main__':
diff --git a/apps/CameraITS/tests/test_param_flash_mode.py b/apps/CameraITS/tests/test_param_flash_mode.py
index fca7bdb..55834ab 100644
--- a/apps/CameraITS/tests/test_param_flash_mode.py
+++ b/apps/CameraITS/tests/test_param_flash_mode.py
@@ -33,10 +33,10 @@
     with its.device.ItsSession() as cam:
         for f in [0,1,2]:
             req["android.flash.mode"] = f
-            fname, w, h, cap_md = cam.do_capture(req)
-            flash_modes_reported.append(cap_md["android.flash.mode"])
-            flash_states_reported.append(cap_md["android.flash.state"])
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            flash_modes_reported.append(cap["metadata"]["android.flash.mode"])
+            flash_states_reported.append(cap["metadata"]["android.flash.state"])
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, f))
 
     assert(flash_modes_reported == [0,1,2])
diff --git a/apps/CameraITS/tests/test_param_noise_reduction.py b/apps/CameraITS/tests/test_param_noise_reduction.py
index a4512d5..1d82875 100644
--- a/apps/CameraITS/tests/test_param_noise_reduction.py
+++ b/apps/CameraITS/tests/test_param_noise_reduction.py
@@ -55,11 +55,11 @@
         req["android.noiseReduction.mode"] = 0
         req["android.sensor.sensitivity"] = 100
         req["android.sensor.exposureTime"] = 20*1000*1000
-        fname, w, h, md_obj = cam.do_capture(req)
+        cap = cam.do_capture(req)
         its.image.write_image(
-                its.image.load_yuv420_to_rgb_image(fname, w, h),
+                its.image.convert_capture_to_rgb_image(cap),
                 "%s_low_gain.jpg" % (NAME))
-        planes = its.image.load_yuv420_to_yuv_planes(fname, w, h)
+        planes = its.image.convert_capture_to_yuv_planes(cap)
         for j in range(3):
             img = planes[j]
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -70,12 +70,13 @@
             req["android.noiseReduction.mode"] = i
             req["android.sensor.sensitivity"] = 100*16
             req["android.sensor.exposureTime"] = (20*1000*1000/16)
-            fname, w, h, md_obj = cam.do_capture(req)
-            nr_modes_reported.append(md_obj["android.noiseReduction.mode"])
+            cap = cam.do_capture(req)
+            nr_modes_reported.append(
+                    cap["metadata"]["android.noiseReduction.mode"])
             its.image.write_image(
-                    its.image.load_yuv420_to_rgb_image(fname, w, h),
+                    its.image.convert_capture_to_rgb_image(cap),
                     "%s_high_gain_nr=%d.jpg" % (NAME, i))
-            planes = its.image.load_yuv420_to_yuv_planes(fname, w, h)
+            planes = its.image.convert_capture_to_yuv_planes(cap)
             for j in range(3):
                 img = planes[j]
                 tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/test_param_sensitivity.py b/apps/CameraITS/tests/test_param_sensitivity.py
index e682864..76e4647 100644
--- a/apps/CameraITS/tests/test_param_sensitivity.py
+++ b/apps/CameraITS/tests/test_param_sensitivity.py
@@ -45,8 +45,8 @@
 
         for s in sensitivities:
             req = its.objects.manual_capture_request(s, expt/1000000.0)
-            fname, w, h, cap_md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_iso=%04d.jpg" % (NAME, s))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/test_param_sensitivity_burst.py b/apps/CameraITS/tests/test_param_sensitivity_burst.py
index b907f6d..f9143d7 100644
--- a/apps/CameraITS/tests/test_param_sensitivity_burst.py
+++ b/apps/CameraITS/tests/test_param_sensitivity_burst.py
@@ -30,10 +30,10 @@
     reqs = [its.objects.manual_capture_request(s,10) for s in sensitivities]
 
     with its.device.ItsSession() as cam:
-        fnames, w, h, cap_mds = cam.do_capture(reqs)
-        for i,md in enumerate(cap_mds):
+        caps = cam.do_capture(reqs)
+        for i,cap in enumerate(caps):
             s_req = sensitivities[i]
-            s_res = md["android.sensor.sensitivity"]
+            s_res = cap["metadata"]["android.sensor.sensitivity"]
             print s_req, s_res
 
 if __name__ == '__main__':
diff --git a/apps/CameraITS/tests/test_param_tonemap_mode.py b/apps/CameraITS/tests/test_param_tonemap_mode.py
index aae4065..40f1582 100644
--- a/apps/CameraITS/tests/test_param_tonemap_mode.py
+++ b/apps/CameraITS/tests/test_param_tonemap_mode.py
@@ -58,8 +58,8 @@
                     sum([[i/LM1, (1+1.0*n)*i/LM1] for i in range(L)], []))
             req["android.tonemap.curveBlue"] = (
                     sum([[i/LM1, (1+1.5*n)*i/LM1] for i in range(L)], []))
-            fname, w, h, md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_n=%d.jpg" %(NAME, n))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -83,8 +83,8 @@
             req["android.tonemap.curveRed"] = curve
             req["android.tonemap.curveGreen"] = curve
             req["android.tonemap.curveBlue"] = curve
-            fname, w, h, md = cam.do_capture(req)
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
+            cap = cam.do_capture(req)
+            img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_size=%02d.jpg" %(NAME, size))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/test_predicted_burst.py b/apps/CameraITS/tests/test_predicted_burst.py
deleted file mode 100644
index 4a8731b..0000000
--- a/apps/CameraITS/tests/test_predicted_burst.py
+++ /dev/null
@@ -1,57 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import its.image
-import its.device
-import its.objects
-import pylab
-import os.path
-import matplotlib
-import matplotlib.pyplot
-import copy
-
-def main():
-    """Test that predicted AWB values come out on the right frames.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    def r2f(r):
-        return float(r["numerator"]) / float(r["denominator"])
-
-    reqs = \
-        [its.objects.manual_capture_request(100,0.1)]*7 + \
-        [its.objects.manual_capture_request(300,33)]*7 + \
-        [its.objects.manual_capture_request(100,0.1)]*7
-
-    curve = sum([[i/63.0, pow(i/63.0, 1/2.2)] for i in range(64)], [])
-    for r in reqs:
-        r["android.tonemap.mode"] = 0
-        r["android.tonemap.curveRed"] = curve
-        r["android.tonemap.curveGreen"] = curve
-        r["android.tonemap.curveBlue"] = curve
-
-    with its.device.ItsSession() as cam:
-        fnames, w, h, mds = cam.do_capture(reqs)
-        for i, fname in enumerate(fnames):
-            img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-            its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
-            md = mds[i]
-            print "Predicted:", \
-                  md["android.statistics.predictedColorGains"], \
-                  [r2f(t)
-                   for t in md["android.statistics.predictedColorTransform"]]
-
-if __name__ == '__main__':
-    main()
-
diff --git a/apps/CameraITS/tests/test_predicted_wb.py b/apps/CameraITS/tests/test_predicted_wb.py
deleted file mode 100644
index 09df6db..0000000
--- a/apps/CameraITS/tests/test_predicted_wb.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 2013 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import its.image
-import its.device
-import its.objects
-import os.path
-import pprint
-import math
-import numpy
-import matplotlib.pyplot
-import mpl_toolkits.mplot3d
-
-def main():
-    """Test that valid data comes back in CaptureResult objects.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    def r2f(r):
-        return float(r["numerator"]) / float(r["denominator"])
-
-    if not its.device.reboot_device_on_argv():
-        its.device.reboot_device()
-
-    # Run a first pass, which starts with a 3A convergence step.
-    with its.device.ItsSession() as cam:
-        # Get 3A lock first, so the auto values in the capture result are
-        # populated properly.
-        r = [0,0,1,1]
-        sens,exp,awb_gains,awb_transform,_ = cam.do_3a(r,r,r,True,True,False)
-
-        # Capture an auto shot using the converged 3A.
-        req = its.objects.auto_capture_request()
-        fname, w, h, cap_res = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-        its.image.write_image(img, "%s_n=1_pass=1_auto.jpg" % (NAME))
-        auto_gains = cap_res["android.colorCorrection.gains"]
-        auto_transform = cap_res["android.colorCorrection.transform"]
-
-        # Capture a request using default (unit/identify) gains, and get the
-        # predicted gains and transform.
-        req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
-        fname, w, h, cap_res = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-        its.image.write_image(img, "%s_n=2_pass=1_identity.jpg" % (NAME))
-        pred_gains_1 = cap_res["android.statistics.predictedColorGains"]
-        pred_transform_1 = cap_res["android.statistics.predictedColorTransform"]
-
-        # Capture a request using the predicted gains/transform.
-        req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
-        req["android.colorCorrection.transform"] = pred_transform_1
-        req["android.colorCorrection.gains"] = pred_gains_1
-        fname, w, h, md_obj = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-        its.image.write_image(img, "%s_n=3_pass=1_predicted.jpg" % (NAME))
-
-        print "Pass 1 metering gains:", awb_gains
-        print "Pass 1 metering transform:", awb_transform
-        print "Pass 1 auto shot gains:", auto_gains
-        print "Pass 1 auto shot transform:", [r2f(t) for t in auto_transform]
-        print "Pass 1 predicted gains:", pred_gains_1
-        print "Pass 1 predicted transform:", [r2f(t) for t in pred_transform_1]
-
-    if not its.device.reboot_device_on_argv():
-        its.device.reboot_device()
-
-    # Run a second pass after rebooting that doesn't start with 3A convergence.
-    with its.device.ItsSession() as cam:
-        # Capture a request using default (unit/identify) gains, and get the
-        # predicted gains and transform.
-        req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
-        fname, w, h, cap_res = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-        its.image.write_image(img, "%s_n=4_pass=2_identity.jpg" % (NAME))
-        pred_gains_2 = cap_res["android.statistics.predictedColorGains"]
-        pred_transform_2 = cap_res["android.statistics.predictedColorTransform"]
-
-        # Capture a request using the predicted gains/transform.
-        req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
-        req["android.colorCorrection.transform"] = pred_transform_2
-        req["android.colorCorrection.gains"] = pred_gains_2
-        fname, w, h, md_obj = cam.do_capture(req)
-        img = its.image.load_yuv420_to_rgb_image(fname, w, h)
-        its.image.write_image(img, "%s_n=5_pass=2_predicted.jpg" % (NAME))
-
-        print "Pass 2 predicted gains:", pred_gains_2
-        print "Pass 2 predicted transform:", [r2f(t) for t in pred_transform_2]
-
-if __name__ == '__main__':
-    main()
-