Camera HAL3: support multiple output resolutions

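The fake sensor previously decimated its full-resolution image by an
integer factor derived from the buffer stride, so only output sizes
that evenly divide the sensor resolution rendered correctly. The
RGBA/RGB/NV21/depth capture paths now take the output width and height
directly and map each output pixel back to a sensor pixel with 10-bit
fixed-point ratios (nearest neighbor). Illustrative helper, with names
simplified from the patch:

    // sensor column to sample for a given output column
    static inline unsigned sensorCol(unsigned outX, unsigned sensorW,
                                     unsigned outW) {
        unsigned divW = (float)sensorW / outW * (0x1 << 10);
        return outX * divW >> 10;
    }

With that in place the HAL can advertise 1280x720 (plus a 320x180 JPEG
thumbnail) alongside the existing sizes, and the camcorder profiles in
media_profiles.xml are relabeled so each quality matches its actual
video size: qvga for the 320x240 profile, 720p for the 1280x720 one.
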
Bug: 70507102, 70507373
Test: run cts -m CtsCameraTestCases
Test: run cts -m CtsMediaStressTestCases -t android.mediastress.cts.MediaRecorderStressTest#testStressRecordVideoAndPlayback
Test: run cts -m CtsNNAPITestCases -t GeneratedTests#mobilenet
Test: run cts -m CtsMediaTestCases -t android.media.cts.CamcorderProfileTest#testGetWithId
Test: run cts -m CtsMediaTestCases -t android.media.cts.CamcorderProfileTest#testGet

Change-Id: I969701f8afddcf6cfbc1138bc6574daf0d429b91
diff --git a/camera/EmulatedFakeCamera3.cpp b/camera/EmulatedFakeCamera3.cpp
index 8f7285f..798d0ea 100644
--- a/camera/EmulatedFakeCamera3.cpp
+++ b/camera/EmulatedFakeCamera3.cpp
@@ -67,8 +67,9 @@
         HAL_PIXEL_FORMAT_Y16
 };
 
-const uint32_t EmulatedFakeCamera3::kAvailableRawSizes[2] = {
-    640, 480
+const uint32_t EmulatedFakeCamera3::kAvailableRawSizes[4] = {
+    640, 480,
+    1280, 720
     //    mSensorWidth, mSensorHeight
 };
 
@@ -264,10 +265,13 @@
             return BAD_VALUE;
         }
 
-        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
+        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x "
+              "width %d, height %d",
                 __FUNCTION__, newStream, i, newStream->stream_type,
                 newStream->usage,
-                newStream->format);
+                newStream->format,
+                newStream->width,
+                newStream->height);
 
         if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
             newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
@@ -971,6 +975,10 @@
             if (res != OK) {
                 ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
                         __FUNCTION__, frameNumber, i);
+            } else {
+                ALOGV("%s, stream format 0x%x width %d height %d buffer 0x%p img 0x%p",
+                  __FUNCTION__, destBuf.format, destBuf.width, destBuf.height,
+                  destBuf.buffer, destBuf.img);
             }
         }
 
@@ -1349,10 +1357,12 @@
         HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
         HAL_PIXEL_FORMAT_YCbCr_420_888, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
         HAL_PIXEL_FORMAT_BLOB, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_BLOB, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
         HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 176, 144, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
         HAL_PIXEL_FORMAT_YCbCr_420_888, 176, 144, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
         HAL_PIXEL_FORMAT_BLOB, 176, 144, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 1280, 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+        HAL_PIXEL_FORMAT_YCbCr_420_888, 1280, 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+        HAL_PIXEL_FORMAT_BLOB, 1280, 720, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
     };
 
     // Always need to include 640x480 in basic formats
@@ -1409,6 +1419,9 @@
         HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 176, 144, Sensor::kFrameDurationRange[0],
         HAL_PIXEL_FORMAT_YCbCr_420_888, 176, 144, Sensor::kFrameDurationRange[0],
         HAL_PIXEL_FORMAT_BLOB, 176, 144, Sensor::kFrameDurationRange[0],
+        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 1280, 720, Sensor::kFrameDurationRange[0],
+        HAL_PIXEL_FORMAT_YCbCr_420_888, 1280, 720, Sensor::kFrameDurationRange[0],
+        HAL_PIXEL_FORMAT_BLOB, 1280, 720, Sensor::kFrameDurationRange[0],
     };
 
     // Always need to include 640x480 in basic formats
@@ -1465,6 +1478,9 @@
         HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 176, 144, 0,
         HAL_PIXEL_FORMAT_YCbCr_420_888, 176, 144, 0,
         HAL_PIXEL_FORMAT_RGBA_8888, 176, 144, 0,
+        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 1280, 720, 0,
+        HAL_PIXEL_FORMAT_YCbCr_420_888, 1280, 720, 0,
+        HAL_PIXEL_FORMAT_RGBA_8888, 1280, 720, 0,
     };
 
     // Always need to include 640x480 in basic formats
@@ -1528,6 +1544,7 @@
         static const int32_t jpegThumbnailSizes[] = {
             0, 0,
             160, 120,
+            320, 180,
             320, 240
         };
         ADD_STATIC_ENTRY(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
diff --git a/camera/fake-pipeline2/Sensor.cpp b/camera/fake-pipeline2/Sensor.cpp
index e45e92d..d84cada 100644
--- a/camera/fake-pipeline2/Sensor.cpp
+++ b/camera/fake-pipeline2/Sensor.cpp
@@ -325,10 +325,10 @@
                     captureRaw(b.img, gain, b.stride);
                     break;
                 case HAL_PIXEL_FORMAT_RGB_888:
-                    captureRGB(b.img, gain, b.stride);
+                    captureRGB(b.img, gain, b.width, b.height);
                     break;
                 case HAL_PIXEL_FORMAT_RGBA_8888:
-                    captureRGBA(b.img, gain, b.stride);
+                    captureRGBA(b.img, gain, b.width, b.height);
                     break;
                 case HAL_PIXEL_FORMAT_BLOB:
                     if (b.dataSpace != HAL_DATASPACE_DEPTH) {
@@ -350,14 +350,14 @@
                     }
                     break;
                 case HAL_PIXEL_FORMAT_YCbCr_420_888:
-                    captureNV21(b.img, gain, b.stride);
+                    captureNV21(b.img, gain, b.width, b.height);
                     break;
                 case HAL_PIXEL_FORMAT_YV12:
                     // TODO:
                     ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
                     break;
                 case HAL_PIXEL_FORMAT_Y16:
-                    captureDepth(b.img, gain, b.stride);
+                    captureDepth(b.img, gain, b.width, b.height);
                     break;
                 default:
                     ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
@@ -428,19 +428,33 @@
     ALOGVV("Raw sensor image captured");
 }
 
-void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
+void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
     float totalGain = gain/100.0 * kBaseGainFactor;
     // In fixed-point math, calculate total scaling from electrons to 8bpp
     int scale64x = 64 * totalGain * 255 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
+    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
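+    // 10-bit fixed-point ratios mapping each output coordinate back to
+    // the nearest sensor coordinate (scale mResolution to width x height).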
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y+=inc, outY++ ) {
-        uint8_t *px = img + outY * stride * 4;
+    for (unsigned int outY = 0; outY < height; outY++) {
+        unsigned int y = outY * DivH >> 10;
+        uint8_t *px = img + outY * width * 4;
         mScene.setReadoutPixel(0, y);
-        for (unsigned int x = 0; x < mResolution[0]; x+=inc) {
+        unsigned int lastX = 0;
+        const uint32_t *pixel = mScene.getPixelElectrons();
+        for (unsigned int outX = 0; outX < width; outX++) {
             uint32_t rCount, gCount, bCount;
+            unsigned int x = outX * DivW >> 10;
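+            // Advance the scene readout to sensor column x; each
+            // getPixelElectrons() call steps one pixel forward.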
+            if (x - lastX > 0) {
+                for (unsigned int k = 0; k < (x-lastX); k++) {
+                    pixel = mScene.getPixelElectrons();
+                }
+            }
+            lastX = x;
             // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
             rCount = pixel[Scene::R]  * scale64x;
             gCount = pixel[Scene::Gr] * scale64x;
             bCount = pixel[Scene::B]  * scale64x;
@@ -449,28 +459,36 @@
             *px++ = gCount < 255*64 ? gCount / 64 : 255;
             *px++ = bCount < 255*64 ? bCount / 64 : 255;
             *px++ = 255;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
         }
         // TODO: Handle this better
         //simulatedTime += mRowReadoutTime;
     }
     ALOGVV("RGBA sensor image captured");
 }
 
-void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
+void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
     float totalGain = gain/100.0 * kBaseGainFactor;
     // In fixed-point math, calculate total scaling from electrons to 8bpp
     int scale64x = 64 * totalGain * 255 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
+    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++ ) {
+    for (unsigned int outY = 0; outY < height; outY++) {
+        unsigned int y = outY * DivH >> 10;
+        uint8_t *px = img + outY * width * 3;
         mScene.setReadoutPixel(0, y);
-        uint8_t *px = img + outY * stride * 3;
-        for (unsigned int x = 0; x < mResolution[0]; x += inc) {
+        unsigned int lastX = 0;
+        const uint32_t *pixel = mScene.getPixelElectrons();
+        for (unsigned int outX = 0; outX < width; outX++) {
             uint32_t rCount, gCount, bCount;
-            // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
+            unsigned int x = outX * DivW >> 10;
+            if (x - lastX > 0) {
+                for (unsigned int k = 0; k < (x-lastX); k++) {
+                    pixel = mScene.getPixelElectrons();
+                }
+            }
+            lastX = x;
+            // TODO: Perfect demosaicing is a cheat
             rCount = pixel[Scene::R]  * scale64x;
             gCount = pixel[Scene::Gr] * scale64x;
             bCount = pixel[Scene::B]  * scale64x;
@@ -478,16 +496,12 @@
             *px++ = rCount < 255*64 ? rCount / 64 : 255;
             *px++ = gCount < 255*64 ? gCount / 64 : 255;
             *px++ = bCount < 255*64 ? bCount / 64 : 255;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
         }
-        // TODO: Handle this better
-        //simulatedTime += mRowReadoutTime;
     }
     ALOGVV("RGB sensor image captured");
 }
 
-void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
+void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
     float totalGain = gain/100.0 * kBaseGainFactor;
     // Using fixed-point math with 6 bits of fractional precision.
     // In fixed-point math, calculate total scaling from electrons to 8bpp
@@ -504,27 +518,32 @@
     const int scaleOut = 64;
     const int scaleOutSq = scaleOut * scaleOut; // after multiplies
 
-    // inc = how many pixels to skip while reading every next pixel
-    // horizontally.
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
-    // outH = projected vertical resolution based on stride.
-    uint32_t outH = mResolution[1] / inc;
-    for (unsigned int y = 0, outY = 0;
-         y < mResolution[1]; y+=inc, outY++) {
-        uint8_t *pxY = img + outY * stride;
-        uint8_t *pxVU = img + (outH + outY / 2) * stride;
-        mScene.setReadoutPixel(0,y);
-        for (unsigned int outX = 0; outX < stride; outX++) {
+    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
+    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
+    for (unsigned int outY = 0; outY < height; outY++) {
+        unsigned int y = outY * DivH >> 10;
+        uint8_t *pxY = img + outY * width;
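+        // NV21 layout: the interleaved VU plane starts at height * width,
+        // and each VU row covers two Y rows.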
+        uint8_t *pxVU = img + (height + outY / 2) * width;
+        mScene.setReadoutPixel(0, y);
+        unsigned int lastX = 0;
+        const uint32_t *pixel = mScene.getPixelElectrons();
+        for (unsigned int outX = 0; outX < width; outX++) {
             int32_t rCount, gCount, bCount;
-            // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
+            unsigned int x = outX * DivW >> 10;
+            if (x - lastX > 0) {
+                for (unsigned int k = 0; k < (x-lastX); k++) {
+                    pixel = mScene.getPixelElectrons();
+                }
+            }
+            lastX = x;
             rCount = pixel[Scene::R]  * scale64x;
             rCount = rCount < saturationPoint ? rCount : saturationPoint;
             gCount = pixel[Scene::Gr] * scale64x;
             gCount = gCount < saturationPoint ? gCount : saturationPoint;
             bCount = pixel[Scene::B]  * scale64x;
             bCount = bCount < saturationPoint ? bCount : saturationPoint;
-
             *pxY++ = (rgbToY[0] * rCount +
                     rgbToY[1] * gCount +
                     rgbToY[2] * bCount) / scaleOutSq;
@@ -538,32 +555,35 @@
                         rgbToCb[2] * bCount +
                         rgbToCb[3]) / scaleOutSq;
             }
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
         }
     }
     ALOGVV("NV21 sensor image captured");
 }
 
-void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t stride) {
+void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height) {
     float totalGain = gain/100.0 * kBaseGainFactor;
     // In fixed-point math, calculate scaling factor to 13bpp millimeters
     int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+    unsigned int DivH = (float)mResolution[1]/height * (0x1 << 10);
+    unsigned int DivW = (float)mResolution[0]/width * (0x1 << 10);
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++ ) {
+    for (unsigned int outY = 0; outY < height; outY++) {
+        unsigned int y = outY * DivH >> 10;
+        uint16_t *px = ((uint16_t*)img) + outY * width;
         mScene.setReadoutPixel(0, y);
-        uint16_t *px = ((uint16_t*)img) + outY * stride;
-        for (unsigned int x = 0; x < mResolution[0]; x += inc) {
+        unsigned int lastX = 0;
+        const uint32_t *pixel = mScene.getPixelElectrons();
+        for (unsigned int outX = 0; outX < width; outX++) {
             uint32_t depthCount;
-            // TODO: Make up real depth scene instead of using green channel
-            // as depth
-            const uint32_t *pixel = mScene.getPixelElectrons();
+            unsigned int x = outX * DivW >> 10;
+            if (x - lastX > 0) {
+                for (unsigned int k = 0; k < (x-lastX); k++) {
+                    pixel = mScene.getPixelElectrons();
+                }
+            }
+            lastX = x;
             depthCount = pixel[Scene::Gr] * scale64x;
-
             *px++ = depthCount < 8191*64 ? depthCount / 64 : 0;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
         }
         // TODO: Handle this better
         //simulatedTime += mRowReadoutTime;
diff --git a/camera/fake-pipeline2/Sensor.h b/camera/fake-pipeline2/Sensor.h
index cdf1e97..1942861 100644
--- a/camera/fake-pipeline2/Sensor.h
+++ b/camera/fake-pipeline2/Sensor.h
@@ -234,10 +234,10 @@
     Scene mScene;
 
     void captureRaw(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureRGB(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureNV21(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureDepth(uint8_t *img, uint32_t gain, uint32_t stride);
+    void captureRGBA(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height);
+    void captureRGB(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height);
+    void captureNV21(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height);
+    void captureDepth(uint8_t *img, uint32_t gain, uint32_t width, uint32_t height);
     void captureDepthCloud(uint8_t *img);
 
 };
diff --git a/camera/media_profiles.xml b/camera/media_profiles.xml
index 1a44ff2..733b2e1 100644
--- a/camera/media_profiles.xml
+++ b/camera/media_profiles.xml
@@ -78,8 +78,7 @@
 <MediaSettings>
     <!-- Each camcorder profile defines a set of predefined configuration parameters -->
     <CamcorderProfiles cameraId="0">
-
-        <EncoderProfile quality="cif" fileFormat="mp4" duration="60">
+        <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
             <Video codec="h264"
                    bitRate="128000"
                    width="320"
@@ -91,7 +90,7 @@
                    channels="1" />
         </EncoderProfile>
 
-        <EncoderProfile quality="qvga" fileFormat="mp4" duration="60">
+        <EncoderProfile quality="720p " fileFormat="mp4" duration="60">
             <Video codec="h264"
                    bitRate="12000000"
                    width="1280"