[ITS] Enable non-debug mode to speed up tests.

Bug: 34775295

Use lower-resolution images for tests that do not need full resolution.
Don't save output images for tests that iterate through all formats.
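
Tests that switch capture resolution all follow the same pattern, shown
below as a minimal sketch (cam, req and props come from each test's
existing setup; debug mode is selected by passing debug=True on the
test's command line):

    debug = its.caps.debug_mode()
    if debug:
        # Debug run: keep the original full-size YUV captures.
        fmt = its.objects.get_largest_yuv_format(props)
    else:
        # Normal run: smallest YUV size, faster to capture and process.
        fmt = its.objects.get_smallest_yuv_format(props)
    cap = cam.do_capture(req, fmt)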

Change-Id: I078e66fc690de0e9de8ee5131d18892bce843bf0
diff --git a/apps/CameraITS/pymodules/its/caps.py b/apps/CameraITS/pymodules/its/caps.py
index d9270f7..e6f096f 100644
--- a/apps/CameraITS/pymodules/its/caps.py
+++ b/apps/CameraITS/pymodules/its/caps.py
@@ -447,6 +447,18 @@
         props["android.lens.info.minimumFocusDistance"] == 0
 
 
+def debug_mode():
+    """Returns True/False for whether test is run in debug mode.
+
+    Returns:
+        Boolean.
+    """
+    for s in sys.argv[1:]:
+        if s[:6] == "debug=" and s[6:] == "True":
+            return True
+    return False
+
+
 class __UnitTest(unittest.TestCase):
     """Run a suite of unit tests on this module.
     """
diff --git a/apps/CameraITS/pymodules/its/objects.py b/apps/CameraITS/pymodules/its/objects.py
index 9766ab9..a8b1535 100644
--- a/apps/CameraITS/pymodules/its/objects.py
+++ b/apps/CameraITS/pymodules/its/objects.py
@@ -264,6 +264,39 @@
 
     return req, out_spec
 
+
+def get_smallest_yuv_format(props):
+    """Return a format spec for the smallest supported yuv size.
+
+    Args:
+        props: the object returned from its.device.get_camera_properties().
+
+    Returns:
+        fmt: an output format specification for the smallest yuv size
+             available on this device.
+    """
+    size = get_available_output_sizes("yuv", props)[-1]  # sizes sorted largest-first
+    fmt = {"format":"yuv", "width":size[0], "height":size[1]}
+
+    return fmt
+
+
+def get_largest_yuv_format(props):
+    """Return a format spec for the largest supported yuv size.
+
+    Args:
+        props: the object returned from its.device.get_camera_properties().
+
+    Returns:
+        fmt: an output format specification for the largest yuv size
+             available on this device.
+    """
+    size = get_available_output_sizes("yuv", props)[0]  # sizes sorted largest-first
+    fmt = {"format":"yuv", "width":size[0], "height":size[1]}
+
+    return fmt
+
+
 def get_max_digital_zoom(props):
     """Returns the maximum amount of zooming possible by the camera device.
 
diff --git a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
index c5f5e29..89fb04f 100644
--- a/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
+++ b/apps/CameraITS/tests/scene1/test_auto_vs_manual.py
@@ -37,6 +37,11 @@
                              its.caps.per_frame_control(props))
 
         # Converge 3A and get the estimates.
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
         sens, exp, gains, xform, focus = cam.do_3a(get_results=True)
         xform_rat = its.objects.float_to_rational(xform)
         print "AE sensitivity %d, exposure %dms" % (sens, exp/1000000.0)
@@ -46,7 +51,7 @@
 
         # Auto capture.
         req = its.objects.auto_capture_request()
-        cap_auto = cam.do_capture(req)
+        cap_auto = cam.do_capture(req, fmt)
         img_auto = its.image.convert_capture_to_rgb_image(cap_auto)
         its.image.write_image(img_auto, "%s_auto.jpg" % (NAME))
         xform_a = its.objects.rational_to_float(
@@ -59,7 +64,7 @@
         req = its.objects.manual_capture_request(sens, exp, focus)
         req["android.colorCorrection.transform"] = xform_rat
         req["android.colorCorrection.gains"] = gains
-        cap_man1 = cam.do_capture(req)
+        cap_man1 = cam.do_capture(req, fmt)
         img_man1 = its.image.convert_capture_to_rgb_image(cap_man1)
         its.image.write_image(img_man1, "%s_manual_wb.jpg" % (NAME))
         xform_m1 = its.objects.rational_to_float(
@@ -74,7 +79,7 @@
         req["android.tonemap.curveRed"] = gamma
         req["android.tonemap.curveGreen"] = gamma
         req["android.tonemap.curveBlue"] = gamma
-        cap_man2 = cam.do_capture(req)
+        cap_man2 = cam.do_capture(req, fmt)
         img_man2 = its.image.convert_capture_to_rgb_image(cap_man2)
         its.image.write_image(img_man2, "%s_manual_wb_tm.jpg" % (NAME))
         xform_m2 = its.objects.rational_to_float(
diff --git a/apps/CameraITS/tests/scene1/test_black_white.py b/apps/CameraITS/tests/scene1/test_black_white.py
index 68d7de6..0cdf54c 100644
--- a/apps/CameraITS/tests/scene1/test_black_white.py
+++ b/apps/CameraITS/tests/scene1/test_black_white.py
@@ -35,6 +35,11 @@
         its.caps.skip_unless(its.caps.manual_sensor(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
         expt_range = props['android.sensor.info.exposureTimeRange']
         sens_range = props['android.sensor.info.sensitivityRange']
 
@@ -43,7 +48,7 @@
         print "Black shot: sens = %d, exp time = %.4fms" % (
                 sens_range[0], expt_range[0]/1000000.0)
         req = its.objects.manual_capture_request(sens_range[0], expt_range[0])
-        cap = cam.do_capture(req)
+        cap = cam.do_capture(req, fmt)
         img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_black.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -58,7 +63,7 @@
         print "White shot: sens = %d, exp time = %.2fms" % (
                 sens_range[1], expt_range[1]/1000000.0)
         req = its.objects.manual_capture_request(sens_range[1], expt_range[1])
-        cap = cam.do_capture(req)
+        cap = cam.do_capture(req, fmt)
         img = its.image.convert_capture_to_rgb_image(cap)
         its.image.write_image(img, "%s_white.jpg" % (NAME))
         tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
index c14f5a9..0537b25 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_advanced.py
@@ -41,6 +41,12 @@
                              its.caps.per_frame_control(props) and
                              its.caps.ev_compensation(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         ev_compensation_range = props['android.control.aeCompensationRange']
         range_min = ev_compensation_range[0]
         range_max = ev_compensation_range[1]
@@ -69,7 +75,7 @@
             req["android.tonemap.curveRed"] = [0.0,0.0, 1.0,1.0]
             req["android.tonemap.curveGreen"] = [0.0,0.0, 1.0,1.0]
             req["android.tonemap.curveBlue"] = [0.0,0.0, 1.0,1.0]
-            caps = cam.do_capture([req]*THREASH_CONVERGE_FOR_EV)
+            caps = cam.do_capture([req]*THREASH_CONVERGE_FOR_EV, fmt)
 
             for cap in caps:
                 if (cap['metadata']['android.control.aeState'] == LOCKED):
diff --git a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
index cb69607..a67c342 100644
--- a/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
+++ b/apps/CameraITS/tests/scene1/test_ev_compensation_basic.py
@@ -41,6 +41,12 @@
         its.caps.skip_unless(its.caps.ev_compensation(props) and
                              its.caps.ae_lock(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         ev_per_step = its.objects.rational_to_float(
                 props['android.control.aeCompensationStep'])
         steps_per_ev = int(1.0 / ev_per_step)
@@ -61,7 +67,7 @@
             req = its.objects.auto_capture_request()
             req['android.control.aeExposureCompensation'] = ev
             req["android.control.aeLock"] = True
-            caps = cam.do_capture([req]*THRESH_CONVERGE_FOR_EV)
+            caps = cam.do_capture([req]*THRESH_CONVERGE_FOR_EV, fmt)
             for cap in caps:
                 if (cap['metadata']['android.control.aeState'] == LOCKED):
                     y = its.image.convert_capture_to_planes(cap)[0]
diff --git a/apps/CameraITS/tests/scene1/test_exposure.py b/apps/CameraITS/tests/scene1/test_exposure.py
index e53af21..3cdfaf7 100644
--- a/apps/CameraITS/tests/scene1/test_exposure.py
+++ b/apps/CameraITS/tests/scene1/test_exposure.py
@@ -51,6 +51,12 @@
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e,s = its.target.get_target_exposure_combos(cam)["minSensitivity"]
         s_e_product = s*e
         expt_range = props['android.sensor.info.exposureTimeRange']
@@ -64,7 +70,7 @@
             print "Testing s:", s_test, "e:", e_test
             req = its.objects.manual_capture_request(
                     s_test, e_test, 0.0, True, props)
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             s_res = cap["metadata"]["android.sensor.sensitivity"]
             e_res = cap["metadata"]["android.sensor.exposureTime"]
             assert(0 <= s_test - s_res < s_test * THRESHOLD_ROUND_DOWN_GAIN)
diff --git a/apps/CameraITS/tests/scene1/test_format_combos.py b/apps/CameraITS/tests/scene1/test_format_combos.py
index 1b40826..1519237 100644
--- a/apps/CameraITS/tests/scene1/test_format_combos.py
+++ b/apps/CameraITS/tests/scene1/test_format_combos.py
@@ -38,6 +38,7 @@
 
         successes = []
         failures = []
+        debug = its.caps.debug_mode()
 
         # Two different requests: auto, and manual.
         e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
@@ -89,7 +90,8 @@
                         for c,cap in enumerate(caps):
                             img = its.image.convert_capture_to_rgb_image(cap,
                                     props=props)
-                            its.image.write_image(img,
+                            if debug:
+                                its.image.write_image(img,
                                     "%s_n%02d_r%d_f%d_b%d_c%d.jpg"%(NAME,n,r,f,b,c))
 
                     except Exception as e:
diff --git a/apps/CameraITS/tests/scene1/test_linearity.py b/apps/CameraITS/tests/scene1/test_linearity.py
index 2176f5e..9e9f1cc 100644
--- a/apps/CameraITS/tests/scene1/test_linearity.py
+++ b/apps/CameraITS/tests/scene1/test_linearity.py
@@ -50,6 +50,12 @@
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e,s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
         s /= 2
         sens_range = props['android.sensor.info.sensitivityRange']
@@ -70,7 +76,7 @@
 
         for sens in sensitivities:
             req["android.sensor.sensitivity"] = sens
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_sens=%04d.jpg" % (NAME, sens))
diff --git a/apps/CameraITS/tests/scene1/test_locked_burst.py b/apps/CameraITS/tests/scene1/test_locked_burst.py
index daefb6b..35b7006 100644
--- a/apps/CameraITS/tests/scene1/test_locked_burst.py
+++ b/apps/CameraITS/tests/scene1/test_locked_burst.py
@@ -44,6 +44,13 @@
         # Converge 3A prior to capture.
         cam.do_3a(do_af=True, lock_ae=True, lock_awb=True)
 
+        # Use the smallest (fastest) YUV format unless debug mode is enabled.
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         # After 3A has converged, lock AE+AWB for the duration of the test.
         req = its.objects.fastest_auto_capture_request(props)
         req["android.control.awbLock"] = True
@@ -54,7 +61,7 @@
         r_means = []
         g_means = []
         b_means = []
-        caps = cam.do_capture([req]*BURST_LEN)
+        caps = cam.do_capture([req]*BURST_LEN, fmt)
         for i,cap in enumerate(caps):
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_frame%d.jpg"%(NAME,i))
diff --git a/apps/CameraITS/tests/scene1/test_param_color_correction.py b/apps/CameraITS/tests/scene1/test_param_color_correction.py
index 8623426..ba00ec0 100644
--- a/apps/CameraITS/tests/scene1/test_param_color_correction.py
+++ b/apps/CameraITS/tests/scene1/test_param_color_correction.py
@@ -41,6 +41,12 @@
                              its.caps.per_frame_control(props))
 
         # Baseline request
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e, s = its.target.get_target_exposure_combos(cam)["midSensitivity"]
         req = its.objects.manual_capture_request(s, e, 0.0, True, props)
         req["android.colorCorrection.mode"] = 0
@@ -70,7 +76,7 @@
         for i in range(len(transforms)):
             req["android.colorCorrection.transform"] = transforms[i]
             req["android.colorCorrection.gains"] = gains[i]
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_req=%d.jpg" % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/scene1/test_param_exposure_time.py b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
index 576516c..527349c 100644
--- a/apps/CameraITS/tests/scene1/test_param_exposure_time.py
+++ b/apps/CameraITS/tests/scene1/test_param_exposure_time.py
@@ -37,10 +37,16 @@
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e,s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
         for i,e_mult in enumerate([0.8, 0.9, 1.0, 1.1, 1.2]):
             req = its.objects.manual_capture_request(s, e * e_mult, 0.0, True, props)
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_frame%d.jpg" % (NAME, i))
diff --git a/apps/CameraITS/tests/scene1/test_param_flash_mode.py b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
index 5ef6fd6..1339b0c 100644
--- a/apps/CameraITS/tests/scene1/test_param_flash_mode.py
+++ b/apps/CameraITS/tests/scene1/test_param_flash_mode.py
@@ -37,13 +37,19 @@
         # Manually set the exposure to be a little on the dark side, so that
         # it should be obvious whether the flash fired or not, and use a
         # linear tonemap.
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
         e /= 4
         req = its.objects.manual_capture_request(s, e, 0.0, True, props)
 
         for f in [0,1,2]:
             req["android.flash.mode"] = f
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             flash_modes_reported.append(cap["metadata"]["android.flash.mode"])
             flash_states_reported.append(cap["metadata"]["android.flash.state"])
             img = its.image.convert_capture_to_rgb_image(cap)
diff --git a/apps/CameraITS/tests/scene1/test_param_sensitivity.py b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
index d6b44a2..4f9d880 100644
--- a/apps/CameraITS/tests/scene1/test_param_sensitivity.py
+++ b/apps/CameraITS/tests/scene1/test_param_sensitivity.py
@@ -39,6 +39,12 @@
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         expt,_ = its.target.get_target_exposure_combos(cam)["midSensitivity"]
         sens_range = props['android.sensor.info.sensitivityRange']
         sens_step = (sens_range[1] - sens_range[0]) / float(NUM_STEPS-1)
@@ -46,7 +52,7 @@
 
         for s in sensitivities:
             req = its.objects.manual_capture_request(s, expt)
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_iso=%04d.jpg" % (NAME, s))
diff --git a/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
index 8c8e626..b392346 100644
--- a/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
+++ b/apps/CameraITS/tests/scene1/test_param_tonemap_mode.py
@@ -41,6 +41,12 @@
         its.caps.skip_unless(its.caps.compute_target_exposure(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         e, s = its.target.get_target_exposure_combos(cam)["midExposureTime"]
         e /= 2
 
@@ -60,7 +66,7 @@
                     sum([[i/LM1, min(1.0,(1+1.0*n)*i/LM1)] for i in range(L)], []))
             req["android.tonemap.curveBlue"] = (
                     sum([[i/LM1, min(1.0,(1+1.5*n)*i/LM1)] for i in range(L)], []))
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(
                     img, "%s_n=%d.jpg" %(NAME, n))
diff --git a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
index 0db70b8..f2cce57 100644
--- a/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
+++ b/apps/CameraITS/tests/scene1/test_tonemap_sequence.py
@@ -26,7 +26,7 @@
 
     # There should be 3 identical frames followed by a different set of
     # 3 identical frames.
-    MAX_SAME_DELTA = 0.015
+    MAX_SAME_DELTA = 0.03  # match the value used in test_burst_sameness_manual
     MIN_DIFF_DELTA = 0.10
 
     with its.device.ItsSession() as cam:
@@ -35,6 +35,12 @@
                              its.caps.manual_post_proc(props) and
                              its.caps.per_frame_control(props))
 
+        debug = its.caps.debug_mode()
+        if debug:
+            fmt = its.objects.get_largest_yuv_format(props)
+        else:
+            fmt = its.objects.get_smallest_yuv_format(props)
+
         sens, exp_time, _,_,f_dist = cam.do_3a(do_af=True,get_results=True)
 
         means = []
@@ -42,7 +48,7 @@
         # Capture 3 manual shots with a linear tonemap.
         req = its.objects.manual_capture_request(sens, exp_time, f_dist, True, props)
         for i in [0,1,2]:
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
@@ -51,7 +57,7 @@
         # Capture 3 manual shots with the default tonemap.
         req = its.objects.manual_capture_request(sens, exp_time, f_dist, False)
         for i in [3,4,5]:
-            cap = cam.do_capture(req)
+            cap = cam.do_capture(req, fmt)
             img = its.image.convert_capture_to_rgb_image(cap)
             its.image.write_image(img, "%s_i=%d.jpg" % (NAME, i))
             tile = its.image.get_image_patch(img, 0.45, 0.45, 0.1, 0.1)
diff --git a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
index 9d212cc..13dbe84 100644
--- a/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
+++ b/apps/CameraITS/tests/scene4/test_aspect_ratio_and_crop.py
@@ -79,6 +79,7 @@
         run_crop_test = (level3_device or full_device) and raw_avlb
         if not run_crop_test:
             print "Crop test skipped"
+        debug = its.caps.debug_mode()
         # Converge 3A and get the estimates.
         sens, exp, gains, xform, focus = cam.do_3a(get_results=True,
                                                    lock_ae=True, lock_awb=True)
@@ -106,7 +107,8 @@
             img_name = "%s_%s_w%d_h%d.png" \
                        % (NAME, "raw", size_raw[1], size_raw[0])
             aspect_ratio_gt, cc_ct_gt, circle_size_raw = measure_aspect_ratio(
-                                                         img_raw, 1, img_name)
+                                                         img_raw, 1, img_name,
+                                                         debug)
             # Normalize the circle size to 1/4 of the image size, so that
             # circle size won"t affect the crop test result
             factor_cp_thres = (min(size_raw[0:1])/4.0) / max(circle_size_raw)
@@ -157,7 +159,8 @@
                 img_name = "%s_%s_with_%s_w%d_h%d.png" \
                            % (NAME, fmt_iter, fmt_cmpr, w_iter, h_iter)
                 aspect_ratio, cc_ct, (cc_w, cc_h) = \
-                        measure_aspect_ratio(img, raw_avlb, img_name)
+                        measure_aspect_ratio(img, raw_avlb, img_name,
+                                             debug)
                 # check pass/fail for aspect ratio
                 # image size >= LARGE_SIZE: use THRES_L_AR_TEST
                 # image size == 0 (extreme case): THRES_XS_AR_TEST
@@ -245,7 +248,7 @@
             assert (failed_image_number_for_crop_test == 0)
 
 
-def measure_aspect_ratio(img, raw_avlb, img_name):
+def measure_aspect_ratio(img, raw_avlb, img_name, debug):
     """ Measure the aspect ratio of the black circle in the test image.
 
     Args:
@@ -253,6 +256,7 @@
         raw_avlb: True: raw capture is available; False: raw capture is not
              available.
         img_name: string with image info of format and size.
+        debug: boolean for whether in debug mode.
     Returns:
         aspect_ratio: aspect ratio number in float.
         cc_ct: circle center position relative to the center of image.
@@ -382,7 +386,8 @@
     cv2.putText(img, "image center", (text_imgct_x, text_imgct_y),
                 cv2.FONT_HERSHEY_SIMPLEX, line_width/2.0, (255, 0, 0),
                 line_width)
-    its.image.write_image(img/255, img_name, True)
+    if debug:
+        its.image.write_image(img/255, img_name, True)
 
     print "Aspect ratio: %.3f" % aspect_ratio
     print "Circle center position regarding to image center: %.3fx%.3f" % \