Fugu Audio HAL: add pause, resume, flush, and fix position

Reset the frame position used for timestamps when the stream is
stopped (standby()) and on flush().
Merge the two redundant frame-tracking variables, mFramesPresented and
mFramesRendered, into a single counter.
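
A rough caller-side sketch (hypothetical code, not part of this patch)
of how the new entry points are expected to be driven through the
audio_stream_out function table that audio_hal_thunks.cpp fills in:

    #include <stdint.h>
    #include <time.h>
    #include <hardware/audio.h>

    /* Hypothetical helper: pause and flush an output stream, then check
     * that the reported position restarts from the new stream start. */
    static void pause_and_flush(struct audio_stream_out *out)
    {
        if (out->pause)
            out->pause(out);   /* AudioStreamOut::pause() drops the hardware
                                  outputs via standbyHardware() */
        if (out->flush)
            out->flush(out);   /* resets mFramesRendered, so timestamps and
                                  get_render_position() start over at zero */
        if (out->resume)
            out->resume(out);  /* no-op today; outputs are reacquired on the
                                  next write() */

        uint64_t frames = 0;
        struct timespec ts;
        if (out->get_presentation_position &&
            out->get_presentation_position(out, &frames, &ts) == 0) {
            /* frames now counts up from the flushed position */
        }
    }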

Bug: 20921987
Bug: 21145353
Change-Id: I4d11bdbf0a78a894682d1bfa7b9a9212e199813d
Signed-off-by: Phil Burk <philburk@google.com>
diff --git a/libaudio/AudioStreamOut.cpp b/libaudio/AudioStreamOut.cpp
index 203c710..92678af 100644
--- a/libaudio/AudioStreamOut.cpp
+++ b/libaudio/AudioStreamOut.cpp
@@ -15,7 +15,7 @@
 ** limitations under the License.
 */
 
-#define LOG_TAG "AudioHAL:AudioStreamOut"
+#define LOG_TAG "AudioHAL_AudioStreamOut"
 
 #include <utils/Log.h>
 
@@ -37,8 +37,7 @@
 namespace android {
 
 AudioStreamOut::AudioStreamOut(AudioHardwareOutput& owner, bool mcOut)
-    : mFramesPresented(0)
-    , mFramesRendered(0)
+    : mFramesRendered(0)
     , mOwnerHAL(owner)
     , mFramesWritten(0)
     , mTgtDevices(0)
@@ -129,16 +128,20 @@
     }
 }
 
-status_t AudioStreamOut::standby()
+status_t AudioStreamOut::standbyHardware()
 {
-    mFramesRendered = 0;
     releaseAllOutputs();
     mOwnerHAL.standbyStatusUpdate(true, mIsMCOutput);
     mInStandby = true;
-
     return NO_ERROR;
 }
 
+status_t AudioStreamOut::standby()
+{
+    mFramesRendered = 0;
+    return standbyHardware();
+}
+
 void AudioStreamOut::releaseAllOutputs() {
     Mutex::Autolock _l(mRoutingLock);
 
@@ -150,6 +153,22 @@
     mPhysOutputs.clear();
 }
 
+status_t AudioStreamOut::pause()
+{
+    return standbyHardware();
+}
+
+status_t AudioStreamOut::resume()
+{
+    return NO_ERROR;
+}
+
+status_t AudioStreamOut::flush()
+{
+    mFramesRendered = 0;
+    return NO_ERROR;
+}
+
 void AudioStreamOut::updateInputNums()
 {
     assert(mLocalClock.initCheck());
@@ -242,7 +261,6 @@
     }
 
     mFramesWritten += framesWritten;
-    mFramesPresented += framesWritten;
     mFramesRendered += framesWritten;
 
     if (needThrottle) {
@@ -386,21 +404,21 @@
                     (int64_t)audioOutput->getKernelBufferSize() - (int64_t)avail;
 
                 int64_t pendingFrames = framesInDriverBuffer + fudgeFrames;
-                int64_t signedFrames = mFramesPresented - pendingFrames;
+                int64_t signedFrames = mFramesRendered - pendingFrames;
                 if (pendingFrames < 0) {
                     ALOGE("getPresentationPosition: negative pendingFrames = %lld",
                         pendingFrames);
                 } else if (signedFrames < 0) {
                     ALOGI("getPresentationPosition: playing silent preroll"
-                        ", mFramesPresented = %llu, pendingFrames = %lld",
-                        mFramesPresented, pendingFrames);
+                        ", mFramesRendered = %llu, pendingFrames = %lld",
+                        mFramesRendered, pendingFrames);
                 } else {
 #if HAL_PRINT_TIMESTAMP_CSV
                     // Print comma separated values for spreadsheet analysis.
                     uint64_t nanos = (((uint64_t)timestamp->tv_sec) * 1000000000L)
                             + timestamp->tv_nsec;
                     ALOGI("getPresentationPosition, %lld, %4u, %lld, %llu",
-                            mFramesPresented, avail, signedFrames, nanos);
+                            mFramesRendered, avail, signedFrames, nanos);
 #endif
                     *frames = (uint64_t) signedFrames;
                     result = NO_ERROR;
@@ -425,10 +443,6 @@
     if (dspFrames == NULL) {
         return -EINVAL;
     }
-    if (mPhysOutputs.isEmpty()) {
-        *dspFrames = 0;
-        return -ENODEV;
-    }
     *dspFrames = (uint32_t) mFramesRendered;
     return NO_ERROR;
 }
@@ -581,7 +595,7 @@
     // has not started yet.  This is odd, but certainly not impossible.  The
     // other possibility is that AudioFlinger is in its silence-pushing mode and
     // is not calling getNextWriteTimestamp.  After an output is primed, its in
-    // GNWTS where the amt of padding to compensate for different DMA start
+    // GNWTS where the amount of padding to compensate for different DMA start
     // times is taken into account.  Go ahead and force a call to GNWTS, just to
     // be certain that we have checked recently and are not stuck in silence
     // fill mode.  Failure to do this will cause the AudioOutput state machine
diff --git a/libaudio/AudioStreamOut.h b/libaudio/AudioStreamOut.h
index 78bc910..a6db8d2 100644
--- a/libaudio/AudioStreamOut.h
+++ b/libaudio/AudioStreamOut.h
@@ -41,6 +41,9 @@
     status_t            getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
     status_t            getNextWriteTimestamp(int64_t *timestamp);
     status_t            standby();
+    status_t            pause();
+    status_t            resume();
+    status_t            flush();
     status_t            dump(int fd);
 
     uint32_t            sampleRate()        const { return mInputSampleRate; }
@@ -69,9 +72,7 @@
     Mutex           mLock;
     Mutex           mRoutingLock;
 
-    // Used to implment get_presentation_position()
-    int64_t         mFramesPresented;
-    // Used to implement get_render_position()
+    // Track frame position for timestamps, etc.
     int64_t         mFramesRendered;
 
     // Our HAL, used as the middle-man to collect and trade AudioOutputs.
@@ -114,6 +115,7 @@
     // reduce log spew
     bool            mReportedAvailFail;
 
+    status_t        standbyHardware();
     void            releaseAllOutputs();
     void            updateTargetOutputs();
     void            updateInputNums();
diff --git a/libaudio/audio_hal_thunks.cpp b/libaudio/audio_hal_thunks.cpp
index 7896457..1550d04 100644
--- a/libaudio/audio_hal_thunks.cpp
+++ b/libaudio/audio_hal_thunks.cpp
@@ -216,6 +216,30 @@
     return tstream->impl->getNextWriteTimestamp(timestamp);
 }
 
+static int out_pause(struct audio_stream_out* stream)
+{
+    const struct atv_stream_out* tstream =
+        reinterpret_cast<const struct atv_stream_out*>(stream);
+
+    return tstream->impl->pause();
+}
+
+static int out_resume(struct audio_stream_out* stream)
+{
+    const struct atv_stream_out* tstream =
+        reinterpret_cast<const struct atv_stream_out*>(stream);
+
+    return tstream->impl->resume();
+}
+
+static int out_flush(struct audio_stream_out* stream)
+{
+    const struct atv_stream_out* tstream =
+        reinterpret_cast<const struct atv_stream_out*>(stream);
+
+    return tstream->impl->flush();
+}
+
 /*******************************************************************************
  *
  * Audio input stream implementation
@@ -525,6 +549,9 @@
     out->stream.get_render_position = out_get_render_position;
     out->stream.get_next_write_timestamp = out_get_next_write_timestamp;
     out->stream.get_presentation_position = out_get_presentation_position;
+    out->stream.pause = out_pause;
+    out->stream.resume = out_resume;
+    out->stream.flush = out_flush;
 
     out->impl = adev->output->openOutputStream(
             devices,