Use YU12 instead of YV12 for camera video frames

The camera parameters API only supports formats where Y, U and V
components are in the YVU order, such as YV12 or NV21. Unfortunately the
software encoders that the emulator images use to encode video only
support YUV order. This presents a configuration problem as there is no
way to straight up match these. The CameraSource class in stagefright
that configures the encoder to match the camera settings actually
converts the YV12 setting to YU12 which leads to U and V components
being swapped. This caused problems as soon as we fixed the issue where
we were actually treating YV12 as YU12.

Fortunately it turns out that the data provided in the timestamped data
callback can be pretty much whatever format the camera and the
encoder can agree upon. The settings and frames are only available
internally so it's mostly an implementation detail. This allows us to
generate YV12 frames and then convert them to YU12 before passing them
to the timestamped data callback. The regular data callback will still
receive YV12 frames, just like the caller asked for, but the encoder
will receive frames in a YU12 format that it will handle properly.

YU12 is the format of choice for video recording because the software
encoders do not seem to require extra conversion steps for this. As a
bonus the conversion from YV12 to YU12 is very cheap in this case. We
always had to copy memory; the only difference now is that we copy each
Y, U and V plane separately and just switch the order of the U and V
planes.

In more modern versions of the camera HAL the camera does not provide raw
byte buffers like this but instead it feeds frames into a gralloc buffer
queue. This way the camera has a lot more flexibility in what formats it
can use and the encoders can also support YVU formats. So when we switch
to a new HAL version we shouldn't have to deal with this anymore.

BUG: 32771006
Test: ran camera CTS tests
Change-Id: Ic05c1f816dbb09d027f08dea59095a935716309c
(cherry picked from commit d7506ee34e9ccda8a8c7866706b8a2b897e842d7)
diff --git a/camera/CallbackNotifier.cpp b/camera/CallbackNotifier.cpp
index 8d49187..4af1945 100755
--- a/camera/CallbackNotifier.cpp
+++ b/camera/CallbackNotifier.cpp
@@ -225,7 +225,20 @@
         camera_memory_t* cam_buff =
             mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
         if (NULL != cam_buff && NULL != cam_buff->data) {
-            camera_dev->getCurrentFrame(cam_buff->data);
+            // This is the path for video frames, the format used here is not
+            // exposed to external users so it can be whatever the camera and
+            // the encoder can agree upon. The emulator system images use
+            // software encoders that expect a YUV420 format but the camera
+            // parameter constants cannot represent this. The closest we have
+            // is YV12 which is YVU420. So we produce YV12 frames so that we
+            // can serve those through the preview callback below and then we
+            // convert from YV12 to YU12 here. This is a pretty cheap conversion
+            // since we have to copy the frame here anyway. The conversion is
+            // just copying the U and V parts of the frame in different order.
+            // This way the encoder gets the format it expects and the preview
+            // callback (or data callback) below gets the format that is
+            // configured in camera parameters.
+            camera_dev->getCurrentFrame(cam_buff->data, V4L2_PIX_FMT_YUV420);
             mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME,
                                cam_buff, 0, mCBOpaque);
             mCameraMemoryTs.push_back( cam_buff );
@@ -238,7 +251,8 @@
         camera_memory_t* cam_buff =
             mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, NULL);
         if (NULL != cam_buff && NULL != cam_buff->data) {
-            camera_dev->getCurrentFrame(cam_buff->data);
+            camera_dev->getCurrentFrame(cam_buff->data,
+                                        camera_dev->getOriginalPixelFormat());
             mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
             cam_buff->release(cam_buff);
         } else {
diff --git a/camera/EmulatedCameraDevice.cpp b/camera/EmulatedCameraDevice.cpp
index bda3223..7cdd86b 100755
--- a/camera/EmulatedCameraDevice.cpp
+++ b/camera/EmulatedCameraDevice.cpp
@@ -170,7 +170,36 @@
     }
 }
 
-status_t EmulatedCameraDevice::getCurrentFrame(void* buffer)
+status_t EmulatedCameraDevice::getCurrentFrameImpl(const void* source,
+                                                   void* dest,
+                                                   uint32_t pixelFormat) const {
+    if (pixelFormat == mPixelFormat) {
+        memcpy(dest, source, mFrameBufferSize);
+        return NO_ERROR;
+    } else if (pixelFormat == V4L2_PIX_FMT_YUV420 &&
+               mPixelFormat == V4L2_PIX_FMT_YVU420) {
+        // Convert from YV12 to YU12
+        const int ySize = mYStride * mFrameHeight;
+        const int uvSize = mUVStride * (mFrameHeight / 2);
+        // Copy Y straight up
+        memcpy(dest, source, ySize);
+        // Swap U and V
+        memcpy(reinterpret_cast<uint8_t*>(dest) + ySize,
+               reinterpret_cast<const uint8_t*>(source) + ySize + uvSize,
+               uvSize);
+        memcpy(reinterpret_cast<uint8_t*>(dest) + ySize + uvSize,
+               reinterpret_cast<const uint8_t*>(source) + ySize,
+               uvSize);
+        return NO_ERROR;
+    }
+    ALOGE("%s: Invalid pixel format conversion [%.4s to %.4s] requested",
+          __FUNCTION__, reinterpret_cast<const char*>(&mPixelFormat),
+          reinterpret_cast<const char*>(&pixelFormat));
+    return EINVAL;
+}
+
+status_t EmulatedCameraDevice::getCurrentFrame(void* buffer,
+                                               uint32_t pixelFormat)
 {
     if (!isStarted()) {
         ALOGE("%s: Device is not started", __FUNCTION__);
@@ -187,7 +216,7 @@
         ALOGE("%s: No framebuffer", __FUNCTION__);
         return EINVAL;
     }
-    memcpy(buffer, source, mFrameBufferSize);
+    return getCurrentFrameImpl(source, buffer, pixelFormat);
     return NO_ERROR;
 }
 
diff --git a/camera/EmulatedCameraDevice.h b/camera/EmulatedCameraDevice.h
index ec2bdb3..9a00aca 100755
--- a/camera/EmulatedCameraDevice.h
+++ b/camera/EmulatedCameraDevice.h
@@ -186,13 +186,19 @@
      * Note that this method should be called only after at least one frame has
      * been captured and delivered. Otherwise it will return garbage in the
      * preview frame buffer. Typically, this method should be called from
-     * onNextFrameAvailable callback.
+     * onNextFrameAvailable callback. The method can perform some basic pixel
+     * format conversion for the most efficient conversions. If a conversion
+     * is not supported the method will fail.
+     *
      * Param:
      *  buffer - Buffer, large enough to contain the entire frame.
+     *  pixelFormat - The pixel format to convert to, use
+     *                getOriginalPixelFormat() to get the configured pixel
+     *                format (if using this no conversion will be needed)
      * Return:
      *  NO_ERROR on success, or an appropriate error status.
      */
-    virtual status_t getCurrentFrame(void* buffer);
+    virtual status_t getCurrentFrame(void* buffer, uint32_t pixelFormat);
 
     /* Gets current framebuffer, converted into preview frame format.
      * This method must be called on a connected instance of this class with a
@@ -381,6 +387,13 @@
      * implement its own auto-focus behavior. */
     void checkAutoFocusTrigger();
 
+    /* Implementation for getCurrentFrame that includes pixel format conversion
+     * if needed. This allows subclasses to easily use this method instead of
+     * having to reimplement the conversion all over.
+     */
+    status_t getCurrentFrameImpl(const void* source, void* dest,
+                                 uint32_t pixelFormat) const;
+
     /****************************************************************************
      * Worker thread management.
      * Typicaly when emulated camera device starts capturing frames from the
diff --git a/camera/EmulatedQemuCameraDevice.cpp b/camera/EmulatedQemuCameraDevice.cpp
index 20d9424..227f010 100755
--- a/camera/EmulatedQemuCameraDevice.cpp
+++ b/camera/EmulatedQemuCameraDevice.cpp
@@ -216,7 +216,8 @@
  * EmulatedCameraDevice virtual overrides
  ***************************************************************************/
 
-status_t EmulatedQemuCameraDevice::getCurrentFrame(void* buffer) {
+status_t EmulatedQemuCameraDevice::getCurrentFrame(void* buffer,
+                                                   uint32_t pixelFormat) {
     if (!isStarted()) {
         ALOGE("%s: Device is not started", __FUNCTION__);
         return EINVAL;
@@ -235,8 +236,7 @@
         ALOGE("%s: No frame", __FUNCTION__);
         return EINVAL;
     }
-    memcpy(buffer, frame, mFrameBufferSize);
-    return NO_ERROR;
+    return getCurrentFrameImpl(frame, buffer, pixelFormat);
 }
 
 status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer) {
diff --git a/camera/EmulatedQemuCameraDevice.h b/camera/EmulatedQemuCameraDevice.h
index 78c6f60..ed19f6c 100755
--- a/camera/EmulatedQemuCameraDevice.h
+++ b/camera/EmulatedQemuCameraDevice.h
@@ -82,7 +82,7 @@
 public:
 
     /* Copy the current frame to |buffer| */
-    status_t getCurrentFrame(void* buffer) override;
+    status_t getCurrentFrame(void* buffer, uint32_t pixelFormat) override;
 
     /* Copy the current preview frame to |buffer| */
     status_t getCurrentPreviewFrame(void* buffer) override;