CameraITS: Cleaned up object structure.
Previously, the Python objects holding capture requests and capture
results had to be nested inside another object, mapped to by keys
such as "captureRequest" and "captureResult".
This change removes this nesting, making it much more convenient
to manipulate request and result objects.
Change-Id: I0f40a55001d26cb42a70cd7c89b2608c37325b46
diff --git a/apps/CameraITS/README b/apps/CameraITS/README
index 6faf44d..a4d1c21 100644
--- a/apps/CameraITS/README
+++ b/apps/CameraITS/README
@@ -129,26 +129,12 @@
to the host machine (using adb pull), along with JSON representations of the
CaptureResult and other objects that describe the shot that was actually taken.
-The Python dictionary object that is used to specify what shots to capture and
-that is passed as an argument to the do_capture() function can have the
-following top-level keys:
+The Python capture request object(s) can contain key/value entries corresponding
+to any of the Java CaptureRequest object fields.
- captureRequest
- captureRequestList
- outputSurface
-
-Exactly one of captureRequest and captureRequestList must be present, and
-outputSurface is optional. Look at the tests directory for examples of
-specifying these objects.
-
-The captureRequest object can contain key/value entries corresponding to any
-of the Java CaptureRequest object fields. The captureRequestList object is
-a list of such objecs, corresponding to a burst capture specification.
-
-The outputSurface object is used to specify the width, height, and format of
-the output images that are captured. Currently supported formats are "jpg" and
-"yuv", where "yuv" is YUV420 fully planar. The default output surface is a
-full sensor YUV420 frame.
+The output surface's width, height, and format can also be specified. Currently
+supported formats are "jpg" and "yuv", where "yuv" is YUV420 fully planar. The
+default output surface is a full sensor YUV420 frame.
The metadata that is returned along with the captured images is also in JSON
format, serialized from the CaptureRequest and CaptureResult objects that were
diff --git a/apps/CameraITS/pymodules/its/device.py b/apps/CameraITS/pymodules/its/device.py
index f773451..a11ece3 100644
--- a/apps/CameraITS/pymodules/its/device.py
+++ b/apps/CameraITS/pymodules/its/device.py
@@ -399,49 +399,47 @@
raise its.error.Error('3A failed to converge')
return ae_sens, ae_exp, awb_gains, awb_transform, af_dist
- def do_capture(self, request, out_fname_prefix=None):
+ def do_capture(self, cap_request, out_surface=None, out_fname_prefix=None):
"""Issue capture request(s), and read back the image(s) and metadata.
The main top-level function for capturing one or more images using the
- device. Captures a single image if the request has the "captureRequest"
- key, and captures a burst if it has "captureRequestList".
+ device. Captures a single image if cap_request is a single object, and
+ captures a burst if it is a list of objects.
- The request object may also contain an "outputSurface" field to specify
- the width, height, and format of the captured image. Supported formats
- are "yuv" and "jpeg". If no outputSurface field was passed inside the
- request object, then the default is used, which is "yuv" (a YUV420
- fully planar image) corresponding to a full sensor frame.
+ The out_surface field can specify the width, height, and format of
+        the captured image. The format may be "yuv" or "jpg". The default is
+        a YUV420 image ("yuv") corresponding to a full sensor frame.
- Example request 1:
+ Example of a single capture request:
{
- "captureRequest": {
- "android.sensor.exposureTime": 100*1000*1000,
- "android.sensor.sensitivity": 100
- }
+ "android.sensor.exposureTime": 100*1000*1000,
+ "android.sensor.sensitivity": 100
}
- Example request 2:
+ Example of a list of capture requests:
+
+ [
+ {
+ "android.sensor.exposureTime": 100*1000*1000,
+ "android.sensor.sensitivity": 100
+ },
+ {
+ "android.sensor.exposureTime": 100*1000*1000,
+ "android.sensor.sensitivity": 200
+ }
+ ]
+
+ Example of an output surface specification:
{
- "captureRequestList": [
- {
- "android.sensor.exposureTime": 100*1000*1000,
- "android.sensor.sensitivity": 100
- },
- {
- "android.sensor.exposureTime": 100*1000*1000,
- "android.sensor.sensitivity": 200
- }],
- "outputSurface": {
- "width": 640,
- "height": 480,
- "format": "yuv"
- }
+ "width": 640,
+ "height": 480,
+ "format": "yuv"
}
Args:
- request: The Python dictionary specifying the capture(s), which
+ cap_request: The Python dict/list specifying the capture(s), which
will be converted to JSON and sent to the device.
out_fname_prefix: (Optionally) the file name prefix to use for the
captured files. If this arg is present, then the captured files
@@ -457,9 +455,12 @@
* The width and height of the captured image(s). For a burst, all
are the same size.
* The Python dictionary or list of dictionaries (in the case of a
- burst capture) containing the metadata of the captured image(s).
+ burst capture) containing the returned capture result objects.
"""
- if request.has_key("captureRequest"):
+ if not isinstance(cap_request, list):
+ request = {"captureRequest" : cap_request}
+ if out_surface is not None:
+ request["outputSurface"] = out_surface
if self.CAPTURE_THROWAWAY_SHOTS:
print "Capturing throw-away image"
self.__start_capture(request)
@@ -475,17 +476,19 @@
os.rename(self.__get_json_path(local_fname),
out_fname_prefix + ".json")
local_fname = out_fname_prefix + image_ext
- return local_fname, w, h, out_metadata_obj
+ return local_fname, w, h, out_metadata_obj["captureResult"]
else:
- if not request.has_key("captureRequestList"):
- raise its.error.Error(
- 'Missing captureRequest or captureRequestList arg key')
+ request = {"captureRequestList" : cap_request}
+ if out_surface is not None:
+ request["outputSurface"] = out_surface
n = len(request['captureRequestList'])
print "Capture burst of %d images" % (n)
self.__start_capture(request)
remote_fnames, w, h = self.__wait_for_capture_done_burst(n)
local_fnames = self.__copy_captured_files(remote_fnames)
out_metadata_objs = self.__parse_captured_json(local_fnames)
+ for i in range(len(out_metadata_objs)):
+ out_metadata_objs[i] = out_metadata_objs[i]["captureResult"]
if out_fname_prefix is not None:
for i in range(len(local_fnames)):
_, image_ext = os.path.splitext(local_fnames[i])
diff --git a/apps/CameraITS/pymodules/its/objects.py b/apps/CameraITS/pymodules/its/objects.py
index 9183bc8..cdcb6e7 100644
--- a/apps/CameraITS/pymodules/its/objects.py
+++ b/apps/CameraITS/pymodules/its/objects.py
@@ -37,28 +37,6 @@
else:
return {"numerator":i, "denominator":1}
-def capture_request(obj):
- """Function to wrap an object inside a captureRequest object.
-
- Args:
- obj: The Python dictionary object to wrap.
-
- Returns:
- The dictionary: {"captureRequest": obj}
- """
- return {"captureRequest": obj}
-
-def capture_request_list(obj_list):
- """Function to wrap an object list inside a captureRequestList object.
-
- Args:
- obj_list: The list of Python dictionary objects to wrap.
-
- Returns:
- The dictionary: {"captureRequestList": obj_list}
- """
- return {"captureRequestList": obj_list}
-
def manual_capture_request(sensitivity, exp_time_ms):
"""Return a capture request with everything set to manual.
@@ -73,7 +51,7 @@
The default manual capture request, ready to be passed to the
its.device.do_capture function.
"""
- return capture_request( {
+ return {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
@@ -87,19 +65,19 @@
int_to_rational([1,0,0, 0,1,0, 0,0,1]),
"android.colorCorrection.gains": [1,1,1,1],
"android.tonemap.mode": 1,
- })
+ }
def auto_capture_request():
"""Return a capture request with everything set to auto.
"""
- return capture_request( {
+ return {
"android.control.mode": 1,
"android.control.aeMode": 1,
"android.control.awbMode": 1,
"android.control.afMode": 1,
"android.colorCorrection.mode": 1,
"android.tonemap.mode": 1,
- })
+ }
class __UnitTest(unittest.TestCase):
"""Run a suite of unit tests on this module.
diff --git a/apps/CameraITS/tests/test_3a_remote.py b/apps/CameraITS/tests/test_3a_remote.py
index 6d424aa..0400ed3 100644
--- a/apps/CameraITS/tests/test_3a_remote.py
+++ b/apps/CameraITS/tests/test_3a_remote.py
@@ -40,15 +40,12 @@
triggered = False
while True:
req = its.objects.auto_capture_request()
- req["captureRequest"]["android.statistics.lensShadingMapMode"] = 1
- req['captureRequest']['android.control.aePrecaptureTrigger'] = (
- 0 if triggered else 1)
- req['captureRequest']['android.control.afTrigger'] = (
- 0 if triggered else 1)
+ req["android.statistics.lensShadingMapMode"] = 1
+ req['android.control.aePrecaptureTrigger'] = (0 if triggered else 1)
+ req['android.control.afTrigger'] = (0 if triggered else 1)
triggered = True
- fname, w, h, md_obj = cam.do_capture(req)
- cap_res = md_obj["captureResult"]
+ fname, w, h, cap_res = cam.do_capture(req)
ae_state = cap_res["android.control.aeState"]
awb_state = cap_res["android.control.awbState"]
diff --git a/apps/CameraITS/tests/test_black_level.py b/apps/CameraITS/tests/test_black_level.py
index cb208d7..f75eb40 100644
--- a/apps/CameraITS/tests/test_black_level.py
+++ b/apps/CameraITS/tests/test_black_level.py
@@ -57,8 +57,8 @@
for si, s in enumerate(sensitivities):
for rep in xrange(NUM_REPEAT):
req = its.objects.manual_capture_request(100, 1)
- req["captureRequest"]["android.blackLevel.lock"] = True
- req["captureRequest"]["android.sensor.sensitivity"] = s
+ req["android.blackLevel.lock"] = True
+ req["android.sensor.sensitivity"] = s
fname, w, h, cap_md = cam.do_capture(req)
yimg,uimg,vimg = its.image.load_yuv420_to_yuv_planes(fname,w,h)
diff --git a/apps/CameraITS/tests/test_blc_lsc.py b/apps/CameraITS/tests/test_blc_lsc.py
index 62caf9e..0546abe 100644
--- a/apps/CameraITS/tests/test_blc_lsc.py
+++ b/apps/CameraITS/tests/test_blc_lsc.py
@@ -57,16 +57,16 @@
# Linear tonemap
tmap = sum([[i/63.0,i/63.0] for i in range(64)], [])
- reqs = its.objects.capture_request_list([])
+ reqs = []
for e in exposures:
- req = its.objects.manual_capture_request(ae_sen,e)["captureRequest"]
+ req = its.objects.manual_capture_request(ae_sen,e)
req["android.tonemap.mode"] = 0
req["android.tonemap.curveRed"] = tmap
req["android.tonemap.curveGreen"] = tmap
req["android.tonemap.curveBlue"] = tmap
req["android.colorCorrection.transform"] = awb_transform_rat
req["android.colorCorrection.gains"] = awb_gains
- reqs["captureRequestList"].append(req)
+ reqs.append(req)
fnames, w, h, cap_mds = cam.do_capture(reqs)
for i,fname in enumerate(fnames):
diff --git a/apps/CameraITS/tests/test_capture_result.py b/apps/CameraITS/tests/test_capture_result.py
index d0ff74e..0dc2ac9 100644
--- a/apps/CameraITS/tests/test_capture_result.py
+++ b/apps/CameraITS/tests/test_capture_result.py
@@ -35,9 +35,9 @@
manual_sensitivity = 100
auto_req = its.objects.auto_capture_request()
- auto_req["captureRequest"]["android.statistics.lensShadingMapMode"] = 1
+ auto_req["android.statistics.lensShadingMapMode"] = 1
- manual_req = its.objects.capture_request( {
+ manual_req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
@@ -56,7 +56,7 @@
"android.control.afRegions": manual_region,
"android.control.awbRegions": manual_region,
"android.statistics.lensShadingMapMode":1
- })
+ }
def r2f(r):
return float(r["numerator"]) / float(r["denominator"])
@@ -89,8 +89,7 @@
rect = [0,0,1,1]
cam.do_3a(rect, rect, rect, True, True, False)
- fname, w, h, md_obj = cam.do_capture(auto_req)
- cap_res = md_obj["captureResult"]
+ fname, w, h, cap_res = cam.do_capture(auto_req)
gains = cap_res["android.colorCorrection.gains"]
transform = cap_res["android.colorCorrection.transform"]
exp_time = cap_res['android.sensor.exposureTime']
@@ -136,8 +135,7 @@
return lsc_map
def test_manual(cam, w_map, h_map, lsc_map_auto):
- fname, w, h, md_obj = cam.do_capture(manual_req)
- cap_res = md_obj["captureResult"]
+ fname, w, h, cap_res = cam.do_capture(manual_req)
gains = cap_res["android.colorCorrection.gains"]
transform = cap_res["android.colorCorrection.transform"]
curves = [cap_res["android.tonemap.curveRed"],
diff --git a/apps/CameraITS/tests/test_formats.py b/apps/CameraITS/tests/test_formats.py
index b9b8ae1..cbc76df 100644
--- a/apps/CameraITS/tests/test_formats.py
+++ b/apps/CameraITS/tests/test_formats.py
@@ -17,6 +17,7 @@
import its.objects
import os.path
import Image
+import copy
def main():
"""Test that the reported sizes and formats for image capture work.
@@ -27,18 +28,18 @@
props = cam.get_camera_properties()
for size in props['android.scaler.availableProcessedSizes']:
req = its.objects.manual_capture_request(100,10)
- req["outputSurface"] = size
- req["outputSurface"]["format"] = "yuv"
- fname, w, h, cap_md = cam.do_capture(req)
+ out_surface = copy.deepcopy(size)
+ out_surface["format"] = "yuv"
+ fname, w, h, cap_md = cam.do_capture(req, out_surface)
assert(os.path.splitext(fname)[1] == ".yuv")
assert(w == size["width"] and h == size["height"])
assert(os.path.getsize(fname) == w*h*3/2)
print "Successfully captured YUV %dx%d" % (w, h)
for size in props['android.scaler.availableJpegSizes']:
req = its.objects.manual_capture_request(100,10)
- req["outputSurface"] = size
- req["outputSurface"]["format"] = "jpg"
- fname, w, h, cap_md = cam.do_capture(req)
+ out_surface = copy.deepcopy(size)
+ out_surface["format"] = "jpg"
+ fname, w, h, cap_md = cam.do_capture(req, out_surface)
assert(os.path.splitext(fname)[1] == ".jpg")
assert(w == size["width"] and h == size["height"])
img = Image.open(fname)
diff --git a/apps/CameraITS/tests/test_jpeg.py b/apps/CameraITS/tests/test_jpeg.py
index 870fda4..5104112 100644
--- a/apps/CameraITS/tests/test_jpeg.py
+++ b/apps/CameraITS/tests/test_jpeg.py
@@ -20,6 +20,7 @@
import shutil
import numpy
import math
+import copy
def main():
"""Test that converted YUV images and device JPEG images look the same.
@@ -34,9 +35,9 @@
# YUV
req = its.objects.manual_capture_request(100,100)
size = props['android.scaler.availableProcessedSizes'][0]
- req["outputSurface"] = size
- req["outputSurface"]["format"] = "yuv"
- fname, w, h, cap_md = cam.do_capture(req)
+ out_surface = copy.deepcopy(size)
+ out_surface["format"] = "yuv"
+ fname, w, h, cap_md = cam.do_capture(req, out_surface)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_fmt=yuv.jpg" % (NAME))
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -45,9 +46,9 @@
# JPEG
req = its.objects.manual_capture_request(100,100)
size = props['android.scaler.availableJpegSizes'][0]
- req["outputSurface"] = size
- req["outputSurface"]["format"] = "jpg"
- fname, w, h, cap_md = cam.do_capture(req)
+ out_surface = copy.deepcopy(size)
+ out_surface["format"] = "jpg"
+ fname, w, h, cap_md = cam.do_capture(req, out_surface)
img = numpy.array(Image.open(fname)).reshape(w,h,3) / 255.0
tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
rgb1 = its.image.compute_image_means(tile)
diff --git a/apps/CameraITS/tests/test_latching.py b/apps/CameraITS/tests/test_latching.py
index 14dbed8..aa89f90 100644
--- a/apps/CameraITS/tests/test_latching.py
+++ b/apps/CameraITS/tests/test_latching.py
@@ -33,20 +33,20 @@
S = 150 # Sensitivity
E = 10 # Exposure time, ms
- reqs = its.objects.capture_request_list([
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S*8,E )["captureRequest"],
- its.objects.manual_capture_request(S*8,E )["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S, E*8)["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S*8,E )["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- its.objects.manual_capture_request(S, E*8)["captureRequest"],
- its.objects.manual_capture_request(S, E )["captureRequest"],
- ])
+ reqs = [
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S*8,E ),
+ its.objects.manual_capture_request(S*8,E ),
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S, E*8),
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S*8,E ),
+ its.objects.manual_capture_request(S, E ),
+ its.objects.manual_capture_request(S, E*8),
+ its.objects.manual_capture_request(S, E ),
+ ]
r_means = []
g_means = []
diff --git a/apps/CameraITS/tests/test_linearity.py b/apps/CameraITS/tests/test_linearity.py
index 23b8f6a..95cc5dd 100644
--- a/apps/CameraITS/tests/test_linearity.py
+++ b/apps/CameraITS/tests/test_linearity.py
@@ -45,7 +45,7 @@
inv_gamma_lut = numpy.array(
sum([[i/LM1, math.pow(i/LM1, 2.2)] for i in xrange(L)], []))
- req = its.objects.capture_request( {
+ req = {
"android.sensor.exposureTime": 10*1000*1000,
"android.sensor.frameDuration": 0,
"android.control.mode": 0,
@@ -59,7 +59,7 @@
"android.tonemap.curveRed": gamma_lut.tolist(),
"android.tonemap.curveGreen": gamma_lut.tolist(),
"android.tonemap.curveBlue": gamma_lut.tolist(),
- })
+ }
sensitivities = range(100,500,50)+range(500,1000,100)+range(1000,3000,300)
@@ -78,14 +78,13 @@
b_means = []
if i == 1:
- req["captureRequest"]["android.colorCorrection.mode"] = 0
- req["captureRequest"]["android.colorCorrection.transform"] = (
+ req["android.colorCorrection.mode"] = 0
+ req["android.colorCorrection.transform"] = (
its.objects.int_to_rational([1,0,0, 0,1,0, 0,0,1]))
- req["captureRequest"]["android.colorCorrection.gains"] = (
- [1,1,1,1])
+ req["android.colorCorrection.gains"] = [1,1,1,1]
for sens in sensitivities:
- req["captureRequest"]["android.sensor.sensitivity"] = sens
+ req["android.sensor.sensitivity"] = sens
fname, w, h, cap_md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(
diff --git a/apps/CameraITS/tests/test_param_black_level_lock.py b/apps/CameraITS/tests/test_param_black_level_lock.py
index 723989a..af9e2ba 100644
--- a/apps/CameraITS/tests/test_param_black_level_lock.py
+++ b/apps/CameraITS/tests/test_param_black_level_lock.py
@@ -31,7 +31,7 @@
NUM_STEPS = 5
- req = its.objects.capture_request( {
+ req = {
"android.blackLevel.lock": True,
"android.control.mode": 0,
"android.control.aeMode": 0,
@@ -39,7 +39,7 @@
"android.control.afMode": 0,
"android.sensor.frameDuration": 0,
"android.sensor.exposureTime": 10*1000*1000
- })
+ }
# The most frequent pixel value in each image; assume this is the black
# level, since the images are all dark (shot with the lens covered).
@@ -52,7 +52,7 @@
sens_range[1]+1,
int((sens_range[1] - sens_range[0]) / NUM_STEPS))
for si, s in enumerate(sensitivities):
- req["captureRequest"]["android.sensor.sensitivity"] = s
+ req["android.sensor.sensitivity"] = s
fname, w, h, cap_md = cam.do_capture(req)
yimg,_,_ = its.image.load_yuv420_to_yuv_planes(fname, w, h)
hist,_ = numpy.histogram(yimg*255, 256, (0,256))
diff --git a/apps/CameraITS/tests/test_param_color_correction.py b/apps/CameraITS/tests/test_param_color_correction.py
index 45c7fc5..a10ae1a 100644
--- a/apps/CameraITS/tests/test_param_color_correction.py
+++ b/apps/CameraITS/tests/test_param_color_correction.py
@@ -41,7 +41,7 @@
linear_tonemap = sum([[i/31.0,i/31.0] for i in range(32)], [])
# Baseline request
- req = its.objects.capture_request( {
+ req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
@@ -54,7 +54,7 @@
"android.tonemap.curveRed": linear_tonemap,
"android.tonemap.curveGreen": linear_tonemap,
"android.tonemap.curveBlue": linear_tonemap
- })
+ }
# Transforms:
# 1. Identity
@@ -76,9 +76,8 @@
with its.device.ItsSession() as cam:
for i in range(len(transforms)):
- req['captureRequest']["android.colorCorrection.transform"] = (
- transforms[i])
- req['captureRequest']["android.colorCorrection.gains"] = gains[i]
+ req["android.colorCorrection.transform"] = transforms[i]
+ req["android.colorCorrection.gains"] = gains[i]
fname, w, h, md_obj = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
diff --git a/apps/CameraITS/tests/test_param_edge_mode.py b/apps/CameraITS/tests/test_param_edge_mode.py
index 353975c..d4f8eee 100644
--- a/apps/CameraITS/tests/test_param_edge_mode.py
+++ b/apps/CameraITS/tests/test_param_edge_mode.py
@@ -25,7 +25,7 @@
"""
NAME = os.path.basename(__file__).split(".")[0]
- req = its.objects.capture_request( {
+ req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
@@ -33,13 +33,13 @@
"android.sensor.frameDuration": 0,
"android.sensor.exposureTime": 30*1000*1000,
"android.sensor.sensitivity": 100
- })
+ }
with its.device.ItsSession() as cam:
rect = [0,0,1,1]
sens, exp, gains, xform, focus = cam.do_3a(rect, rect, rect)
for e in [0,1,2]:
- req["captureRequest"]["android.edge.mode"] = e
+ req["android.edge.mode"] = e
fname, w, h, cap_md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, e))
diff --git a/apps/CameraITS/tests/test_param_exposure_time.py b/apps/CameraITS/tests/test_param_exposure_time.py
index 6b91206..38ab5bb 100644
--- a/apps/CameraITS/tests/test_param_exposure_time.py
+++ b/apps/CameraITS/tests/test_param_exposure_time.py
@@ -29,14 +29,14 @@
THRESHOLD_MAX_MIN_DIFF = 0.3
THRESHOLD_MAX_MIN_RATIO = 2.0
- req = its.objects.capture_request( {
+ req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
"android.control.afMode": 0,
"android.sensor.frameDuration": 0,
"android.sensor.sensitivity": 200
- })
+ }
exposures = range(1,101,20) # ms
r_means = []
@@ -45,7 +45,7 @@
with its.device.ItsSession() as cam:
for e in exposures:
- req["captureRequest"]["android.sensor.exposureTime"] = e*1000*1000
+ req["android.sensor.exposureTime"] = e*1000*1000
fname, w, h, cap_md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(
diff --git a/apps/CameraITS/tests/test_param_exposure_time_burst.py b/apps/CameraITS/tests/test_param_exposure_time_burst.py
index fda31bf..d640302 100644
--- a/apps/CameraITS/tests/test_param_exposure_time_burst.py
+++ b/apps/CameraITS/tests/test_param_exposure_time_burst.py
@@ -27,15 +27,13 @@
NAME = os.path.basename(__file__).split(".")[0]
exp_times = range(1, 100, 9)
- reqs = its.objects.capture_request_list([
- its.objects.manual_capture_request(100,e)["captureRequest"]
- for e in exp_times])
+ reqs = [its.objects.manual_capture_request(100,e) for e in exp_times]
with its.device.ItsSession() as cam:
fnames, w, h, cap_mds = cam.do_capture(reqs)
for i,md in enumerate(cap_mds):
e_req = exp_times[i]*1000*1000
- e_res = md["captureResult"]["android.sensor.exposureTime"]
+ e_res = md["android.sensor.exposureTime"]
print e_req, e_res
if __name__ == '__main__':
diff --git a/apps/CameraITS/tests/test_param_flash_mode.py b/apps/CameraITS/tests/test_param_flash_mode.py
index 091a896..fca7bdb 100644
--- a/apps/CameraITS/tests/test_param_flash_mode.py
+++ b/apps/CameraITS/tests/test_param_flash_mode.py
@@ -32,12 +32,10 @@
with its.device.ItsSession() as cam:
for f in [0,1,2]:
- req["captureRequest"]["android.flash.mode"] = f
+ req["android.flash.mode"] = f
fname, w, h, cap_md = cam.do_capture(req)
- flash_modes_reported.append(
- cap_md["captureResult"]["android.flash.mode"])
- flash_states_reported.append(
- cap_md["captureResult"]["android.flash.state"])
+ flash_modes_reported.append(cap_md["android.flash.mode"])
+ flash_states_reported.append(cap_md["android.flash.state"])
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_mode=%d.jpg" % (NAME, f))
diff --git a/apps/CameraITS/tests/test_param_noise_reduction.py b/apps/CameraITS/tests/test_param_noise_reduction.py
index 63d663a..a4512d5 100644
--- a/apps/CameraITS/tests/test_param_noise_reduction.py
+++ b/apps/CameraITS/tests/test_param_noise_reduction.py
@@ -34,13 +34,13 @@
THRESHOLD_MIN_VARIANCE_RATIO = 0.7
- req = its.objects.capture_request( {
+ req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
"android.control.afMode": 0,
"android.sensor.frameDuration": 0
- })
+ }
# List of variances for Y,U,V.
variances = [[],[],[]]
@@ -52,9 +52,9 @@
with its.device.ItsSession() as cam:
# NR mode 0 with low gain
- req["captureRequest"]["android.noiseReduction.mode"] = 0
- req["captureRequest"]["android.sensor.sensitivity"] = 100
- req["captureRequest"]["android.sensor.exposureTime"] = 20*1000*1000
+ req["android.noiseReduction.mode"] = 0
+ req["android.sensor.sensitivity"] = 100
+ req["android.sensor.exposureTime"] = 20*1000*1000
fname, w, h, md_obj = cam.do_capture(req)
its.image.write_image(
its.image.load_yuv420_to_rgb_image(fname, w, h),
@@ -67,13 +67,11 @@
for i in range(3):
# NR modes 0, 1, 2 with high gain
- req["captureRequest"]["android.noiseReduction.mode"] = i
- req["captureRequest"]["android.sensor.sensitivity"] = 100*16
- req["captureRequest"]["android.sensor.exposureTime"] = (
- 20*1000*1000/16)
+ req["android.noiseReduction.mode"] = i
+ req["android.sensor.sensitivity"] = 100*16
+ req["android.sensor.exposureTime"] = (20*1000*1000/16)
fname, w, h, md_obj = cam.do_capture(req)
- nr_modes_reported.append(
- md_obj["captureResult"]["android.noiseReduction.mode"])
+ nr_modes_reported.append(md_obj["android.noiseReduction.mode"])
its.image.write_image(
its.image.load_yuv420_to_rgb_image(fname, w, h),
"%s_high_gain_nr=%d.jpg" % (NAME, i))
diff --git a/apps/CameraITS/tests/test_param_sensitivity.py b/apps/CameraITS/tests/test_param_sensitivity.py
index bff0c88..6812c9c 100644
--- a/apps/CameraITS/tests/test_param_sensitivity.py
+++ b/apps/CameraITS/tests/test_param_sensitivity.py
@@ -31,14 +31,14 @@
NUM_STEPS = 5
- req = its.objects.capture_request( {
+ req = {
"android.control.mode": 0,
"android.control.aeMode": 0,
"android.control.awbMode": 0,
"android.control.afMode": 0,
"android.sensor.frameDuration": 0,
"android.sensor.exposureTime": 2*1000*1000
- })
+ }
sensitivities = None
r_means = []
@@ -52,7 +52,7 @@
sens_range[1]+1,
int((sens_range[1] - sens_range[0]) / NUM_STEPS))
for s in sensitivities:
- req["captureRequest"]["android.sensor.sensitivity"] = s
+ req["android.sensor.sensitivity"] = s
fname, w, h, cap_md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(
diff --git a/apps/CameraITS/tests/test_param_sensitivity_burst.py b/apps/CameraITS/tests/test_param_sensitivity_burst.py
index de1119e..b907f6d 100644
--- a/apps/CameraITS/tests/test_param_sensitivity_burst.py
+++ b/apps/CameraITS/tests/test_param_sensitivity_burst.py
@@ -27,15 +27,13 @@
NAME = os.path.basename(__file__).split(".")[0]
sensitivities = range(350, 400, 7)
- reqs = its.objects.capture_request_list([
- its.objects.manual_capture_request(s,10)["captureRequest"]
- for s in sensitivities])
+ reqs = [its.objects.manual_capture_request(s,10) for s in sensitivities]
with its.device.ItsSession() as cam:
fnames, w, h, cap_mds = cam.do_capture(reqs)
for i,md in enumerate(cap_mds):
s_req = sensitivities[i]
- s_res = md["captureResult"]["android.sensor.sensitivity"]
+ s_res = md["android.sensor.sensitivity"]
print s_req, s_res
if __name__ == '__main__':
diff --git a/apps/CameraITS/tests/test_param_tonemap_mode.py b/apps/CameraITS/tests/test_param_tonemap_mode.py
index 4000292..aae4065 100644
--- a/apps/CameraITS/tests/test_param_tonemap_mode.py
+++ b/apps/CameraITS/tests/test_param_tonemap_mode.py
@@ -51,12 +51,12 @@
for n in [0,1]:
req = its.objects.manual_capture_request(100,50)
- req["captureRequest"]["android.tonemap.mode"] = 0
- req["captureRequest"]["android.tonemap.curveRed"] = (
+ req["android.tonemap.mode"] = 0
+ req["android.tonemap.curveRed"] = (
sum([[i/LM1, (1+0.5*n)*i/LM1] for i in range(L)], []))
- req["captureRequest"]["android.tonemap.curveGreen"] = (
+ req["android.tonemap.curveGreen"] = (
sum([[i/LM1, (1+1.0*n)*i/LM1] for i in range(L)], []))
- req["captureRequest"]["android.tonemap.curveBlue"] = (
+ req["android.tonemap.curveBlue"] = (
sum([[i/LM1, (1+1.5*n)*i/LM1] for i in range(L)], []))
fname, w, h, md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
@@ -79,10 +79,10 @@
m = float(size-1)
curve = sum([[i/m, i/m] for i in range(size)], [])
req = its.objects.manual_capture_request(100,50)
- req["captureRequest"]["android.tonemap.mode"] = 0
- req["captureRequest"]["android.tonemap.curveRed"] = curve
- req["captureRequest"]["android.tonemap.curveGreen"] = curve
- req["captureRequest"]["android.tonemap.curveBlue"] = curve
+ req["android.tonemap.mode"] = 0
+ req["android.tonemap.curveRed"] = curve
+ req["android.tonemap.curveGreen"] = curve
+ req["android.tonemap.curveBlue"] = curve
fname, w, h, md = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(
diff --git a/apps/CameraITS/tests/test_predicted_burst.py b/apps/CameraITS/tests/test_predicted_burst.py
index 1e76afb..4a8731b 100644
--- a/apps/CameraITS/tests/test_predicted_burst.py
+++ b/apps/CameraITS/tests/test_predicted_burst.py
@@ -29,28 +29,28 @@
def r2f(r):
return float(r["numerator"]) / float(r["denominator"])
- reqs = its.objects.capture_request_list(
- [its.objects.manual_capture_request(100,0.1)["captureRequest"]]*7 + \
- [its.objects.manual_capture_request(300,33)["captureRequest"]]*7 + \
- [its.objects.manual_capture_request(100,0.1)["captureRequest"]]*7
- )
+ reqs = \
+ [its.objects.manual_capture_request(100,0.1)]*7 + \
+ [its.objects.manual_capture_request(300,33)]*7 + \
+ [its.objects.manual_capture_request(100,0.1)]*7
curve = sum([[i/63.0, pow(i/63.0, 1/2.2)] for i in range(64)], [])
- for r in reqs["captureRequestList"]:
+ for r in reqs:
r["android.tonemap.mode"] = 0
r["android.tonemap.curveRed"] = curve
r["android.tonemap.curveGreen"] = curve
r["android.tonemap.curveBlue"] = curve
with its.device.ItsSession() as cam:
- fnames, w, h, cap_mds = cam.do_capture(reqs)
+ fnames, w, h, mds = cam.do_capture(reqs)
for i, fname in enumerate(fnames):
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_i=%02d.jpg" % (NAME, i))
- cap_res = cap_mds[i]["captureResult"]
+ md = mds[i]
print "Predicted:", \
- cap_res["android.statistics.predictedColorGains"], \
- [r2f(t) for t in cap_res["android.statistics.predictedColorTransform"]]
+ md["android.statistics.predictedColorGains"], \
+ [r2f(t)
+ for t in md["android.statistics.predictedColorTransform"]]
if __name__ == '__main__':
main()
diff --git a/apps/CameraITS/tests/test_predicted_wb.py b/apps/CameraITS/tests/test_predicted_wb.py
index 3aade2b..09df6db 100644
--- a/apps/CameraITS/tests/test_predicted_wb.py
+++ b/apps/CameraITS/tests/test_predicted_wb.py
@@ -42,28 +42,25 @@
# Capture an auto shot using the converged 3A.
req = its.objects.auto_capture_request()
- fname, w, h, md_obj = cam.do_capture(req)
+ fname, w, h, cap_res = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_n=1_pass=1_auto.jpg" % (NAME))
- cap_res = md_obj["captureResult"]
auto_gains = cap_res["android.colorCorrection.gains"]
auto_transform = cap_res["android.colorCorrection.transform"]
# Capture a request using default (unit/identify) gains, and get the
# predicted gains and transform.
req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
- fname, w, h, md_obj = cam.do_capture(req)
+ fname, w, h, cap_res = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_n=2_pass=1_identity.jpg" % (NAME))
- cap_res = md_obj["captureResult"]
pred_gains_1 = cap_res["android.statistics.predictedColorGains"]
pred_transform_1 = cap_res["android.statistics.predictedColorTransform"]
# Capture a request using the predicted gains/transform.
req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
- req["captureRequest"]["android.colorCorrection.transform"] = \
- pred_transform_1
- req["captureRequest"]["android.colorCorrection.gains"] = pred_gains_1
+ req["android.colorCorrection.transform"] = pred_transform_1
+ req["android.colorCorrection.gains"] = pred_gains_1
fname, w, h, md_obj = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_n=3_pass=1_predicted.jpg" % (NAME))
@@ -83,18 +80,16 @@
# Capture a request using default (unit/identify) gains, and get the
# predicted gains and transform.
req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
- fname, w, h, md_obj = cam.do_capture(req)
+ fname, w, h, cap_res = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_n=4_pass=2_identity.jpg" % (NAME))
- cap_res = md_obj["captureResult"]
pred_gains_2 = cap_res["android.statistics.predictedColorGains"]
pred_transform_2 = cap_res["android.statistics.predictedColorTransform"]
# Capture a request using the predicted gains/transform.
req = its.objects.manual_capture_request(sens, exp/(1000.0*1000.0))
- req["captureRequest"]["android.colorCorrection.transform"] = \
- pred_transform_2
- req["captureRequest"]["android.colorCorrection.gains"] = pred_gains_2
+ req["android.colorCorrection.transform"] = pred_transform_2
+ req["android.colorCorrection.gains"] = pred_gains_2
fname, w, h, md_obj = cam.do_capture(req)
img = its.image.load_yuv420_to_rgb_image(fname, w, h)
its.image.write_image(img, "%s_n=5_pass=2_predicted.jpg" % (NAME))