Camera3Device: Clean up next request batch variable

Make the Vector of next requests a RequestThread member variable
to avoid allocating a new vector on every threadLoop iteration.

Bug: 23360060
Change-Id: I4f33e5c49f0f4deb1f9f45bada0909da748849e4
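
The idea behind the change: instead of building a fresh Vector<NextRequest> on the stack in
every pass of threadLoop(), the batch lives in a member that is refilled and cleared between
passes. A minimal sketch of that reuse pattern follows (plain std::vector and hypothetical
names, not android::Vector or the real Camera3Device types; capacity-retention behavior may
differ between the two containers):

    #include <vector>

    struct NextRequest {           // hypothetical stand-in for the real struct
        int frameNumber = -1;
        bool submitted = false;
    };

    class RequestThread {
      public:
        // Before: a local vector was constructed (and its storage allocated)
        // on every call. After: the member is refilled and then cleared instead.
        void loopIteration(int frames) {
            for (int i = 0; i < frames; ++i) {
                mNextRequests.push_back({mNextFrame++, false});
            }
            // ... prepare and submit the batch ...
            mNextRequests.clear();  // size drops to 0; std::vector keeps its capacity
        }

      private:
        std::vector<NextRequest> mNextRequests;  // batch reused across iterations
        int mNextFrame = 0;
    };

    int main() {
        RequestThread t;
        t.loopIteration(2);  // first pass allocates storage
        t.loopIteration(2);  // later passes can reuse the existing capacity
    }
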
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 98d0a41..433a745 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -2888,17 +2888,15 @@
         return true;
     }
 
-    // Get next batch of requests.
-    Vector<NextRequest> nextRequests;
-    waitForNextRequestBatch(&nextRequests);
-    const size_t numRequests = nextRequests.size();
-    if (numRequests == 0) {
+    // Wait for the next batch of requests.
+    waitForNextRequestBatch();
+    if (mNextRequests.size() == 0) {
         return true;
     }
 
     // Get the latest request ID, if any
     int latestRequestId;
-    camera_metadata_entry_t requestIdEntry = nextRequests[nextRequests.size() - 1].
+    camera_metadata_entry_t requestIdEntry = mNextRequests[mNextRequests.size() - 1].
             captureRequest->mSettings.find(ANDROID_REQUEST_ID);
     if (requestIdEntry.count > 0) {
         latestRequestId = requestIdEntry.data.i32[0];
@@ -2908,13 +2906,13 @@
     }
 
     // Prepare a batch of HAL requests and output buffers.
-    res = prepareHalRequests(&nextRequests);
+    res = prepareHalRequests();
     if (res == TIMED_OUT) {
         // Not a fatal error if getting output buffers time out.
-        cleanUpFailedRequests(&nextRequests, /*sendRequestError*/ true);
+        cleanUpFailedRequests(/*sendRequestError*/ true);
         return true;
     } else if (res != OK) {
-        cleanUpFailedRequests(&nextRequests, /*sendRequestError*/ false);
+        cleanUpFailedRequests(/*sendRequestError*/ false);
         return false;
     }
 
@@ -2933,15 +2931,15 @@
     // process_capture_request() defeats the purpose of cancelling requests ASAP with flush().
     // For now, only synchronize for high speed recording and we should figure something out for
     // removing the synchronization.
-    bool useFlushLock = nextRequests.size() > 1;
+    bool useFlushLock = mNextRequests.size() > 1;
 
     if (useFlushLock) {
         mFlushLock.lock();
     }
 
     ALOGVV("%s: %d: submitting %d requests in a batch.", __FUNCTION__, __LINE__,
-            nextRequests.size());
-    for (auto& nextRequest : nextRequests) {
+            mNextRequests.size());
+    for (auto& nextRequest : mNextRequests) {
         // Submit request and block until ready for next one
         ATRACE_ASYNC_BEGIN("frame capture", nextRequest.halRequest.frame_number);
         ATRACE_BEGIN("camera3->process_capture_request");
@@ -2955,7 +2953,7 @@
             SET_ERR("RequestThread: Unable to submit capture request %d to HAL"
                     " device: %s (%d)", nextRequest.halRequest.frame_number, strerror(-res),
                     res);
-            cleanUpFailedRequests(&nextRequests, /*sendRequestError*/ false);
+            cleanUpFailedRequests(/*sendRequestError*/ false);
             if (useFlushLock) {
                 mFlushLock.unlock();
             }
@@ -2983,7 +2981,7 @@
             SET_ERR("RequestThread: Unable to remove triggers "
                   "(capture request %d, HAL device: %s (%d)",
                   nextRequest.halRequest.frame_number, strerror(-res), res);
-            cleanUpFailedRequests(&nextRequests, /*sendRequestError*/ false);
+            cleanUpFailedRequests(/*sendRequestError*/ false);
             if (useFlushLock) {
                 mFlushLock.unlock();
             }
@@ -3004,14 +3002,10 @@
     return true;
 }
 
-status_t Camera3Device::RequestThread::prepareHalRequests(Vector<NextRequest> *nextRequests) {
+status_t Camera3Device::RequestThread::prepareHalRequests() {
     ATRACE_CALL();
 
-    if (nextRequests == nullptr) {
-        return BAD_VALUE;
-    }
-
-    for (auto& nextRequest : *nextRequests) {
+    for (auto& nextRequest : mNextRequests) {
         sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
         camera3_capture_request_t* halRequest = &nextRequest.halRequest;
         Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
@@ -3143,10 +3137,12 @@
     Mutex::Autolock l(mRequestLock);
 
     for (const auto& nextRequest : mNextRequests) {
-        for (const auto& s : nextRequest->mOutputStreams) {
-            if (stream == s) return true;
+        if (!nextRequest.submitted) {
+            for (const auto& s : nextRequest.captureRequest->mOutputStreams) {
+                if (stream == s) return true;
+            }
+            if (stream == nextRequest.captureRequest->mInputStream) return true;
         }
-        if (stream == nextRequest->mInputStream) return true;
     }
 
     for (const auto& request : mRequestQueue) {
@@ -3166,13 +3162,12 @@
     return false;
 }
 
-void Camera3Device::RequestThread::cleanUpFailedRequests(Vector<NextRequest> *nextRequests,
-        bool sendRequestError) {
-    if (nextRequests == nullptr) {
+void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
+    if (mNextRequests.empty()) {
         return;
     }
 
-    for (auto& nextRequest : *nextRequests) {
+    for (auto& nextRequest : mNextRequests) {
         // Skip the ones that have been submitted successfully.
         if (nextRequest.submitted) {
             continue;
@@ -3208,19 +3203,13 @@
 
     Mutex::Autolock l(mRequestLock);
     mNextRequests.clear();
-    nextRequests->clear();
 }
 
-void Camera3Device::RequestThread::waitForNextRequestBatch(Vector<NextRequest> *nextRequests) {
-    if (nextRequests == nullptr) {
-        return;
-    }
-
+void Camera3Device::RequestThread::waitForNextRequestBatch() {
     // Optimized a bit for the simple steady-state case (single repeating
     // request), to avoid putting that request in the queue temporarily.
     Mutex::Autolock l(mRequestLock);
 
-    nextRequests->clear();
     assert(mNextRequests.empty());
 
     NextRequest nextRequest;
@@ -3231,8 +3220,7 @@
 
     nextRequest.halRequest = camera3_capture_request_t();
     nextRequest.submitted = false;
-    nextRequests->add(nextRequest);
-    mNextRequests.push_back(nextRequest.captureRequest);
+    mNextRequests.add(nextRequest);
 
     // Wait for additional requests
     const size_t batchSize = nextRequest.captureRequest->mBatchSize;
@@ -3246,14 +3234,13 @@
 
         additionalRequest.halRequest = camera3_capture_request_t();
         additionalRequest.submitted = false;
-        nextRequests->add(additionalRequest);
-        mNextRequests.push_back(additionalRequest.captureRequest);
+        mNextRequests.add(additionalRequest);
     }
 
-    if (nextRequests->size() < batchSize) {
+    if (mNextRequests.size() < batchSize) {
         ALOGE("RequestThread: only get %d out of %d requests. Skipping requests.",
-                nextRequests->size(), batchSize);
-        cleanUpFailedRequests(nextRequests, /*sendRequestError*/true);
+                mNextRequests.size(), batchSize);
+        cleanUpFailedRequests(/*sendRequestError*/true);
     }
 
     return;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index a3abbaf..9d3c533 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -519,20 +519,22 @@
             bool                            submitted;
         };
 
-        // Wait for the next batch of requests.
-        void waitForNextRequestBatch(Vector<NextRequest> *nextRequests);
+        // Wait for the next batch of requests and put them in mNextRequests. mNextRequests will
+        // be empty if it times out.
+        void waitForNextRequestBatch();
 
         // Waits for a request, or returns NULL if times out. Must be called with mRequestLock hold.
         sp<CaptureRequest> waitForNextRequestLocked();
 
-        // Prepare a HAL request and output buffers. Return TIMED_OUT if getting any output buffer
-        // timed out. If an error is returned, the caller should clean up the pending request batch.
-        status_t prepareHalRequests(Vector<NextRequest> *nextRequests);
+        // Prepare HAL requests and output buffers in mNextRequests. Return TIMED_OUT if getting any
+        // output buffer timed out. If an error is returned, the caller should clean up the pending
+        // request batch.
+        status_t prepareHalRequests();
 
-        // Return buffers, etc, for a request that couldn't be fully constructed and send request
-        // errors if sendRequestError is true. The buffers will be returned in the ERROR state
-        // to mark them as not having valid data. nextRequests will be modified.
-        void cleanUpFailedRequests(Vector<NextRequest> *nextRequests, bool sendRequestError);
+        // Return buffers, etc, for requests in mNextRequests that couldn't be fully constructed and
+        // send request errors if sendRequestError is true. The buffers will be returned in the
+        // ERROR state to mark them as not having valid data. mNextRequests will be cleared.
+        void cleanUpFailedRequests(bool sendRequestError);
 
         // Pause handling
         bool               waitIfPaused();
@@ -561,10 +563,10 @@
         Condition          mRequestSignal;
         RequestList        mRequestQueue;
         RequestList        mRepeatingRequests;
-        // The next requests being prepped for submission to the HAL, no longer
+        // The next batch of requests being prepped for submission to the HAL, no longer
         // on the request queue. Read-only even with mRequestLock held, outside
         // of threadLoop
-        RequestList        mNextRequests;
+        Vector<NextRequest> mNextRequests;
 
         // To protect flush() and sending a request batch to HAL.
         Mutex              mFlushLock;
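
The new header comments above define a small contract between the member-based helpers:
waitForNextRequestBatch() leaves mNextRequests empty on timeout, prepareHalRequests() reports
TIMED_OUT or a fatal error, and cleanUpFailedRequests() returns buffers for the unsubmitted
entries and clears mNextRequests. A hedged sketch of a caller honoring that contract
(hypothetical names and simplified status codes, not the real camera HAL types):

    #include <vector>

    enum Status { OK, TIMED_OUT, FAILED };  // stand-ins for Android's status_t values

    struct NextRequest { bool submitted = false; };

    class RequestThread {
      public:
        // One simplified pass over the batch, mirroring the documented contract.
        bool processBatchOnce() {
            waitForNextRequestBatch();               // mNextRequests empty on timeout
            if (mNextRequests.empty()) return true;  // nothing to do this pass

            Status res = prepareHalRequests();
            if (res == TIMED_OUT) {
                cleanUpFailedRequests(/*sendRequestError=*/true);   // non-fatal: keep looping
                return true;
            } else if (res != OK) {
                cleanUpFailedRequests(/*sendRequestError=*/false);  // fatal: stop the loop
                return false;
            }

            for (auto& next : mNextRequests) next.submitted = true; // submit to HAL here
            mNextRequests.clear();
            return true;
        }

      private:
        void waitForNextRequestBatch() { mNextRequests.push_back({}); }
        Status prepareHalRequests()    { return OK; }
        void cleanUpFailedRequests(bool /*sendRequestError*/) { mNextRequests.clear(); }

        std::vector<NextRequest> mNextRequests;  // the shared batch, as in the patch
    };

    int main() {
        RequestThread thread;
        thread.processBatchOnce();
    }
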