CameraITS: rework scene1/test_dng_noise_model to use rawStats
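
Use the rawStats capture format on the normal path: the camera reports
per-grid-cell mean and variance images, and the center cell of a 49x49
stats grid replaces the host-side center-patch variance computed from a
full RAW frame. The full RAW capture path is kept behind debug mode.

A minimal sketch of the per-cell check performed for each channel and
sensitivity; the helper name and standalone form are illustrative, not
part of the test itself:

    def check_cell(mean_raw, var_raw, s, o, black_level, white_level):
        # mean_raw, var_raw: rawStats values for one grid cell (raw pixel units)
        # s, o: android.sensor.noiseProfile gain/offset for this channel
        level_range = float(white_level) - black_level
        mean_norm = (mean_raw - black_level) / level_range  # normalize to [0, 1]
        var_norm = var_raw / level_range ** 2  # variance scales quadratically
        var_model = s * mean_norm + o  # DNG noise model prediction
        # pass if within 0.0005 absolute or 20% of the model, whichever is larger
        thresh = max(0.0005, 0.2 * var_model)
        return var_norm - var_model <= thresh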

Bug: 62627645
Change-Id: I46e29ac1967edfe807e6923cd889bd11ad38231f
diff --git a/apps/CameraITS/tests/scene1/test_dng_noise_model.py b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
index 02bce79..d1c7fbe 100644
--- a/apps/CameraITS/tests/scene1/test_dng_noise_model.py
+++ b/apps/CameraITS/tests/scene1/test_dng_noise_model.py
@@ -12,30 +12,31 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import its.device
-import its.caps
-import its.objects
-import its.image
 import os.path
-from matplotlib import pylab
+import its.caps
+import its.device
+import its.image
+import its.objects
 import matplotlib
-import matplotlib.pyplot
+from matplotlib import pylab
+
+NAME = os.path.basename(__file__).split('.')[0]
+BAYER_LIST = ['R', 'GR', 'GB', 'B']
+DIFF_THRESH = 0.0005  # absolute variance delta threshold
+FRAC_THRESH = 0.2  # relative variance delta threshold
+NUM_STEPS = 4
+STATS_GRID = 49  # center 2.04% of image for calculations
+
 
 def main():
-    """Verify that the DNG raw model parameters are correct.
-    """
-    NAME = os.path.basename(__file__).split(".")[0]
-
-    NUM_STEPS = 4
+    """Verify that the DNG raw model parameters are correct."""
 
     # Pass if the difference between expected and computed variances is small,
-    # defined as being within an absolute variance delta of 0.0005, or within
-    # 20% of the expected variance, whichever is larger; this is to allow the
+    # defined as being within an absolute variance delta or relative variance
+    # delta of the expected variance, whichever is larger. This is to allow the
     # test to pass in the presence of some randomness (since this test is
     # measuring noise of a small patch) and some imperfect scene conditions
     # (since ITS doesn't require a perfectly uniformly lit scene).
-    DIFF_THRESH = 0.0005
-    FRAC_THRESH = 0.2
 
     with its.device.ItsSession() as cam:
 
@@ -45,69 +46,85 @@
                              its.caps.manual_sensor(props) and
                              its.caps.read_3a(props) and
                              its.caps.per_frame_control(props))
+        debug = its.caps.debug_mode()
 
         white_level = float(props['android.sensor.info.whiteLevel'])
         cfa_idxs = its.image.get_canonical_cfa_order(props)
+        aax = props['android.sensor.info.activeArraySize']['left']
+        aay = props['android.sensor.info.activeArraySize']['top']
+        aaw = props['android.sensor.info.activeArraySize']['right']-aax
+        aah = props['android.sensor.info.activeArraySize']['bottom']-aay
 
         # Expose for the scene with min sensitivity
         sens_min, sens_max = props['android.sensor.info.sensitivityRange']
         sens_step = (sens_max - sens_min) / NUM_STEPS
-        s_ae,e_ae,_,_,f_dist  = cam.do_3a(get_results=True)
+        s_ae, e_ae, _, _, f_dist = cam.do_3a(get_results=True)
         s_e_prod = s_ae * e_ae
         sensitivities = range(sens_min, sens_max, sens_step)
 
-        var_expected = [[],[],[],[]]
-        var_measured = [[],[],[],[]]
+        var_expected = [[], [], [], []]
+        var_measured = [[], [], [], []]
+        x = STATS_GRID/2  # center cell row in stats grid (height)
+        y = STATS_GRID/2  # center cell col in stats grid (width)
         for sens in sensitivities:
 
-            # Capture a raw frame with the desired sensitivity.
+            # Capture a raw frame with the desired sensitivity
             exp = int(s_e_prod / float(sens))
             req = its.objects.manual_capture_request(sens, exp, f_dist)
-            cap = cam.do_capture(req, cam.CAP_RAW)
+            if debug:
+                cap = cam.do_capture(req, cam.CAP_RAW)
+                planes = its.image.convert_capture_to_planes(cap, props)
+            else:
+                cap = cam.do_capture(req, {'format': 'rawStats',
+                                           'gridWidth': aaw/STATS_GRID,
+                                           'gridHeight': aah/STATS_GRID})
+                mean_img, var_img = its.image.unpack_rawstats_capture(cap)
 
-            # Test each raw color channel (R, GR, GB, B):
-            noise_profile = cap["metadata"]["android.sensor.noiseProfile"]
-            assert((len(noise_profile)) == 4)
-            for ch in range(4):
+            # Test each raw color channel (R, GR, GB, B)
+            noise_profile = cap['metadata']['android.sensor.noiseProfile']
+            assert len(noise_profile) == len(BAYER_LIST)
+            for i in range(len(BAYER_LIST)):
                 # Get the noise model parameters for this channel of this shot.
-                s,o = noise_profile[cfa_idxs[ch]]
+                ch = cfa_idxs[i]
+                s, o = noise_profile[ch]
 
-                # Get a center tile of the raw channel, and compute the mean.
                 # Use a very small patch to ensure gross uniformity (i.e. so
                 # non-uniform lighting or vignetting doesn't affect the variance
-                # calculation).
-                plane = its.image.convert_capture_to_planes(cap, props)[ch]
-                black_level = its.image.get_black_level(
-                    ch, props, cap["metadata"])
-                plane = (plane * white_level - black_level) / (
-                    white_level - black_level)
-                tile = its.image.get_image_patch(plane, 0.49,0.49,0.02,0.02)
-                mean = tile.mean()
+                # calculation)
+                black_level = its.image.get_black_level(i, props,
+                                                        cap['metadata'])
+                level_range = white_level - black_level
+                if debug:
+                    plane = ((planes[i] * white_level - black_level) /
+                             level_range)
+                    tile = its.image.get_image_patch(plane, 0.49, 0.49,
+                                                     0.02, 0.02)
+                    mean_img_ch = tile.mean()
+                    var_measured[i].append(
+                            its.image.compute_image_variances(tile)[0])
+                else:
+                    mean_img_ch = (mean_img[x, y, ch]-black_level)/level_range
+                    var_measured[i].append(var_img[x, y, ch]/level_range**2)
+                var_expected[i].append(s * mean_img_ch + o)
 
-                # Calculate the expected variance based on the model, and the
-                # measured variance from the tile.
-                var_measured[ch].append(
-                        its.image.compute_image_variances(tile)[0])
-                var_expected[ch].append(s * mean + o)
-
-    for ch in range(4):
-        pylab.plot(sensitivities, var_expected[ch], "rgkb"[ch],
-                label=["R","GR","GB","B"][ch]+" expected")
-        pylab.plot(sensitivities, var_measured[ch], "rgkb"[ch]+"--",
-                label=["R", "GR", "GB", "B"][ch]+" measured")
-    pylab.xlabel("Sensitivity")
-    pylab.ylabel("Center patch variance")
+    for i, ch in enumerate(BAYER_LIST):
+        pylab.plot(sensitivities, var_expected[i], 'rgkb'[i],
+                   label=ch+' expected')
+        pylab.plot(sensitivities, var_measured[i], 'rgkb'[i]+'--',
+                   label=ch+' measured')
+    pylab.xlabel('Sensitivity')
+    pylab.ylabel('Center patch variance')
     pylab.legend(loc=2)
-    matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
+    matplotlib.pyplot.savefig('%s_plot.png' % NAME)
 
-    # Pass/fail check.
-    for ch in range(4):
-        diffs = [var_measured[ch][i] - var_expected[ch][i]
-                 for i in range(NUM_STEPS)]
-        print "Diffs (%s):"%(["R","GR","GB","B"][ch]), diffs
-        for i,diff in enumerate(diffs):
-            thresh = max(DIFF_THRESH, FRAC_THRESH * var_expected[ch][i])
-            assert(diff <= thresh)
+    # PASS/FAIL check
+    for i, ch in enumerate(BAYER_LIST):
+        diffs = [var_measured[i][j] - var_expected[i][j]
+                 for j in range(NUM_STEPS)]
+        print 'Diffs (%s):' % ch, diffs
+        for j, diff in enumerate(diffs):
+            thresh = max(DIFF_THRESH, FRAC_THRESH*var_expected[i][j])
+            assert diff <= thresh, 'diff %.4f, thresh %.4f' % (diff, thresh)
 
 if __name__ == '__main__':
     main()