| /* |
| ** |
| ** Copyright 2012, The Android Open Source Project |
| ** |
| ** Licensed under the Apache License, Version 2.0 (the "License"); |
| ** you may not use this file except in compliance with the License. |
| ** You may obtain a copy of the License at |
| ** |
| ** http://www.apache.org/licenses/LICENSE-2.0 |
| ** |
| ** Unless required by applicable law or agreed to in writing, software |
| ** distributed under the License is distributed on an "AS IS" BASIS, |
| ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| ** See the License for the specific language governing permissions and |
| ** limitations under the License. |
| */ |
| |
| |
| #define LOG_TAG "AudioFlinger" |
| //#define LOG_NDEBUG 0 |
| #define ATRACE_TAG ATRACE_TAG_AUDIO |
| |
| #include "Configuration.h" |
| #include <math.h> |
| #include <fcntl.h> |
| #include <memory> |
| #include <sstream> |
| #include <string> |
| #include <linux/futex.h> |
| #include <sys/stat.h> |
| #include <sys/syscall.h> |
| #include <cutils/bitops.h> |
| #include <cutils/properties.h> |
| #include <media/AudioContainers.h> |
| #include <media/AudioDeviceTypeAddr.h> |
| #include <media/AudioParameter.h> |
| #include <media/AudioResamplerPublic.h> |
| #include <media/RecordBufferConverter.h> |
| #include <media/TypeConverter.h> |
| #include <utils/Log.h> |
| #include <utils/Trace.h> |
| |
| #include <private/media/AudioTrackShared.h> |
| #include <private/android_filesystem_config.h> |
| #include <audio_utils/Balance.h> |
| #include <audio_utils/Metadata.h> |
| #include <audio_utils/channels.h> |
| #include <audio_utils/mono_blend.h> |
| #include <audio_utils/primitives.h> |
| #include <audio_utils/format.h> |
| #include <audio_utils/minifloat.h> |
| #include <audio_utils/safe_math.h> |
| #include <system/audio_effects/effect_ns.h> |
| #include <system/audio_effects/effect_aec.h> |
| #include <system/audio.h> |
| |
| // NBAIO implementations |
| #include <media/nbaio/AudioStreamInSource.h> |
| #include <media/nbaio/AudioStreamOutSink.h> |
| #include <media/nbaio/MonoPipe.h> |
| #include <media/nbaio/MonoPipeReader.h> |
| #include <media/nbaio/Pipe.h> |
| #include <media/nbaio/PipeReader.h> |
| #include <media/nbaio/SourceAudioBufferProvider.h> |
| #include <mediautils/BatteryNotifier.h> |
| |
| #include <audiomanager/AudioManager.h> |
| #include <powermanager/PowerManager.h> |
| |
| #include <media/audiohal/EffectsFactoryHalInterface.h> |
| #include <media/audiohal/StreamHalInterface.h> |
| |
| #include "AudioFlinger.h" |
| #include "FastMixer.h" |
| #include "FastCapture.h" |
| #include <mediautils/SchedulingPolicyService.h> |
| #include <mediautils/ServiceUtilities.h> |
| |
| #ifdef ADD_BATTERY_DATA |
| #include <media/IMediaPlayerService.h> |
| #include <media/IMediaDeathNotifier.h> |
| #endif |
| |
| #ifdef DEBUG_CPU_USAGE |
| #include <audio_utils/Statistics.h> |
| #include <cpustats/ThreadCpuUsage.h> |
| #endif |
| |
| #include "AutoPark.h" |
| |
| #include <pthread.h> |
| #include "TypedLogger.h" |
| |
| // ---------------------------------------------------------------------------- |
| |
// Note: the following macro is used for extremely verbose logging message.  In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn all LOGV's as well.  Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on.  Do not uncomment the #def below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

// TODO: Move these macro/inlines to a header file.
// NOTE: function-like macro — each argument is evaluated twice, so do not
// pass expressions with side effects (e.g. max(i++, j)).
#define max(a, b) ((a) > (b) ? (a) : (b))
| |
// Generic minimum of two values. Counterpart of the max() macro above, but
// written as a template so each argument is evaluated exactly once.
template <typename T>
static inline T min(const T& a, const T& b)
{
    return b < a ? b : a;
}
| |
| namespace android { |
| |
| using media::IEffectClient; |
| using content::AttributionSourceState; |
| |
// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
static const int8_t kMaxTrackRetries = 50;
static const int8_t kMaxTrackStartupRetries = 50;

// allow less retry attempts on direct output thread.
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
// Notes:
// 1) The retry duration kMaxTrackRetriesDirectMs may be increased
//    in case the data write is bursty for the AudioTrack.  The application
//    should endeavor to write at least once every kMaxTrackRetriesDirectMs
//    to prevent an underrun situation.  If the data is bursty, then
//    the application can also throttle the data sent to be even.
// 2) For compressed audio data, any data present in the AudioTrack buffer
//    will be sent and reset the retry count.  This delivers data as
//    it arrives, with approximately kDirectMinSleepTimeUs = 10ms checking interval.
// 3) For linear PCM or proportional PCM, we wait one period for a period's worth
//    of data to be available, then any remaining data is delivered.
//    This is required to ensure the last bit of data is delivered before underrun.
//
// Sleep time per cycle is kDirectMinSleepTimeUs for compressed tracks
// or the size of the HAL period for proportional / linear PCM tracks.
static const int32_t kMaxTrackRetriesDirectMs = 200;

// don't warn about blocked writes or record buffer overflows more often than this
static const nsecs_t kWarningThrottleNs = seconds(5);

// RecordThread loop sleep time upon application overrun or audio HAL read error
static const int kRecordThreadSleepUs = 5000;

// maximum time to wait in sendConfigEvent_l() for a status to be received
static const nsecs_t kConfigEventTimeoutNs = seconds(2);

// minimum sleep time for the mixer thread loop when tracks are active but in underrun
static const uint32_t kMinThreadSleepTimeUs = 5000;
// maximum divider applied to the active sleep time in the mixer thread loop
static const uint32_t kMaxThreadSleepTimeShift = 2;

// minimum normal sink buffer size, expressed in milliseconds rather than frames
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalSinkBufferSizeMs = 20;
// maximum normal sink buffer size
static const uint32_t kMaxNormalSinkBufferSizeMs = 24;

// minimum capture buffer size in milliseconds to _not_ need a fast capture thread
// FIXME This should be based on experimentally observed scheduling jitter
static const uint32_t kMinNormalCaptureBufferSizeMs = 12;

// Offloaded output thread standby delay: allows track transition without going to standby
static const nsecs_t kOffloadStandbyDelayNs = seconds(1);

// Direct output thread minimum sleep time in idle or active(underrun) state
// NOTE(review): the value is microseconds (10 ms) matching the "Us" suffix,
// although the declared type is nsecs_t — confirm the type is intentional.
static const nsecs_t kDirectMinSleepTimeUs = 10000;

// The universal constant for ubiquitous 20ms value. The value of 20ms seems to provide a good
// balance between power consumption and latency, and allows threads to be scheduled reliably
// by the CFS scheduler.
// FIXME Express other hardcoded references to 20ms with references to this constant and move
// it appropriately.
#define FMS_20 20

// Whether to use fast mixer
static const enum {
    FastMixer_Never,    // never initialize or use: for debugging only
    FastMixer_Always,   // always initialize and use, even if not needed: for debugging only
                        // normal mixer multiplier is 1
    FastMixer_Static,   // initialize if needed, then use all the time if initialized,
                        // multiplier is calculated based on min & max normal mixer buffer size
    FastMixer_Dynamic,  // initialize if needed, then use dynamically depending on track load,
                        // multiplier is calculated based on min & max normal mixer buffer size
    // FIXME for FastMixer_Dynamic:
    //  Supporting this option will require fixing HALs that can't handle large writes.
    //  For example, one HAL implementation returns an error from a large write,
    //  and another HAL implementation corrupts memory, possibly in the sample rate converter.
    //  We could either fix the HAL implementations, or provide a wrapper that breaks
    //  up large writes into smaller ones, and the wrapper would need to deal with scheduler.
} kUseFastMixer = FastMixer_Static;

// Whether to use fast capture
static const enum {
    FastCapture_Never,  // never initialize or use: for debugging only
    FastCapture_Always, // always initialize and use, even if not needed: for debugging only
    FastCapture_Static, // initialize if needed, then use all the time if initialized
} kUseFastCapture = FastCapture_Static;

// Priorities for requestPriority
static const int kPriorityAudioApp = 2;
static const int kPriorityFastMixer = 3;
static const int kPriorityFastCapture = 3;

// IAudioFlinger::createTrack() has an in/out parameter 'pFrameCount' for the total size of the
// track buffer in shared memory.  Zero on input means to use a default value.  For fast tracks,
// AudioFlinger derives the default from HAL buffer size and 'fast track multiplier'.

// This is the default value, if not specified by property.
static const int kFastTrackMultiplier = 2;

// The minimum and maximum allowed values
static const int kFastTrackMultiplierMin = 1;
static const int kFastTrackMultiplierMax = 2;

// The actual value to use, which can be specified per-device via property af.fast_track_multiplier.
static int sFastTrackMultiplier = kFastTrackMultiplier;

// See Thread::readOnlyHeap().
// Initially this heap is used to allocate client buffers for "fast" AudioRecord.
// Eventually it will be the single buffer that FastCapture writes into via HAL read(),
// and that all "fast" AudioRecord clients read from.  In either case, the size can be small.
static const size_t kRecordThreadReadOnlyHeapSize = 0xD000;
| |
| // ---------------------------------------------------------------------------- |
| |
| // TODO: move all toString helpers to audio.h |
| // under #ifdef __cplusplus #endif |
| static std::string patchSinksToString(const struct audio_patch *patch) |
| { |
| std::stringstream ss; |
| for (size_t i = 0; i < patch->num_sinks; ++i) { |
| if (i > 0) { |
| ss << "|"; |
| } |
| ss << "(" << toString(patch->sinks[i].ext.device.type) |
| << ", " << patch->sinks[i].ext.device.address << ")"; |
| } |
| return ss.str(); |
| } |
| |
| static std::string patchSourcesToString(const struct audio_patch *patch) |
| { |
| std::stringstream ss; |
| for (size_t i = 0; i < patch->num_sources; ++i) { |
| if (i > 0) { |
| ss << "|"; |
| } |
| ss << "(" << toString(patch->sources[i].ext.device.type) |
| << ", " << patch->sources[i].ext.device.address << ")"; |
| } |
| return ss.str(); |
| } |
| |
| static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT; |
| |
| static void sFastTrackMultiplierInit() |
| { |
| char value[PROPERTY_VALUE_MAX]; |
| if (property_get("af.fast_track_multiplier", value, NULL) > 0) { |
| char *endptr; |
| unsigned long ul = strtoul(value, &endptr, 0); |
| if (*endptr == '\0' && kFastTrackMultiplierMin <= ul && ul <= kFastTrackMultiplierMax) { |
| sFastTrackMultiplier = (int) ul; |
| } |
| } |
| } |
| |
| // ---------------------------------------------------------------------------- |
| |
#ifdef ADD_BATTERY_DATA
// To collect the amplifier usage
// Forwards battery usage parameters to the media player service; a null
// service is silently skipped (the lookup itself already logged the failure).
static void addBatteryData(uint32_t params) {
    sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
    if (service == NULL) {
        // it already logged
        return;
    }

    service->addBatteryData(params);
}
#endif
| |
// Track the CLOCK_BOOTTIME versus CLOCK_MONOTONIC timebase offset.
// A single process-wide instance (gBoottime, below) is shared by all threads;
// all mutable state is guarded by the POD pthread mutex mLock.
struct {
    // call when you acquire a partial wakelock
    void acquire(const sp<IBinder> &wakeLockToken) {
        pthread_mutex_lock(&mLock);
        if (wakeLockToken.get() == nullptr) {
            // null token: no count is kept, so the offset is re-measured on every call
            adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
        } else {
            // only the first of the overlapping wakelocks triggers a re-measurement
            if (mCount == 0) {
                adjustTimebaseOffset(&mBoottimeOffset, ExtendedTimestamp::TIMEBASE_BOOTTIME);
            }
            ++mCount;
        }
        pthread_mutex_unlock(&mLock);
    }

    // call when you release a partial wakelock.
    void release(const sp<IBinder> &wakeLockToken) {
        if (wakeLockToken.get() == nullptr) {
            // null tokens were not counted by acquire(), so ignore them here too
            return;
        }
        pthread_mutex_lock(&mLock);
        if (--mCount < 0) {
            ALOGE("negative wakelock count");
            mCount = 0; // clamp so a stray release cannot corrupt future pairing
        }
        pthread_mutex_unlock(&mLock);
    }

    // retrieves the boottime timebase offset from monotonic.
    int64_t getBoottimeOffset() {
        pthread_mutex_lock(&mLock);
        int64_t boottimeOffset = mBoottimeOffset;
        pthread_mutex_unlock(&mLock);
        return boottimeOffset;
    }

    // Adjusts the timebase offset between TIMEBASE_MONOTONIC
    // and the selected timebase.
    // Currently only TIMEBASE_BOOTTIME is allowed.
    //
    // This only needs to be called upon acquiring the first partial wakelock
    // after all other partial wakelocks are released.
    //
    // We do an empirical measurement of the offset rather than parsing
    // /proc/timer_list since the latter is not a formal kernel ABI.
    static void adjustTimebaseOffset(int64_t *offset, ExtendedTimestamp::Timebase timebase) {
        int clockbase;
        switch (timebase) {
        case ExtendedTimestamp::TIMEBASE_BOOTTIME:
            clockbase = SYSTEM_TIME_BOOTTIME;
            break;
        default:
            LOG_ALWAYS_FATAL("invalid timebase %d", timebase);
            break;
        }
        // try three times to get the clock offset, choose the one
        // with the minimum gap in measurements.
        const int tries = 3;
        nsecs_t bestGap, measured;
        for (int i = 0; i < tries; ++i) {
            const nsecs_t tmono = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t tbase = systemTime(clockbase);
            const nsecs_t tmono2 = systemTime(SYSTEM_TIME_MONOTONIC);
            const nsecs_t gap = tmono2 - tmono;
            if (i == 0 || gap < bestGap) {
                bestGap = gap;
                // midpoint of the two monotonic reads approximates when tbase was read
                measured = tbase - ((tmono + tmono2) >> 1);
            }
        }

        // to avoid micro-adjusting, we don't change the timebase
        // unless it is significantly different.
        //
        // Assumption: It probably takes more than toleranceNs to
        // suspend and resume the device.
        static int64_t toleranceNs = 10000; // 10 us
        if (llabs(*offset - measured) > toleranceNs) {
            ALOGV("Adjusting timebase offset old: %lld new: %lld",
                    (long long)*offset, (long long)measured);
            *offset = measured;
        }
    }

    pthread_mutex_t mLock;   // guards mCount and mBoottimeOffset
    int32_t mCount;          // number of outstanding (non-null-token) partial wakelocks
    int64_t mBoottimeOffset; // last measured CLOCK_BOOTTIME minus CLOCK_MONOTONIC, in ns
} gBoottime = { PTHREAD_MUTEX_INITIALIZER, 0, 0 }; // static, so use POD initialization
| |
| // ---------------------------------------------------------------------------- |
| // CPU Stats |
| // ---------------------------------------------------------------------------- |
| |
// Gathers and periodically logs per-thread CPU usage statistics.
// The class is a no-op shell unless DEBUG_CPU_USAGE is defined.
class CpuStats {
public:
    CpuStats();
    // Take one usage sample; 'title' labels the periodic log output.
    void sample(const String8 &title);
#ifdef DEBUG_CPU_USAGE
private:
    ThreadCpuUsage mCpuUsage;           // instantaneous thread CPU usage in wall clock ns
    audio_utils::Statistics<double> mWcStats; // statistics on thread CPU usage in wall clock ns

    audio_utils::Statistics<double> mHzStats; // statistics on thread CPU usage in cycles

    int mCpuNum;                        // thread's current CPU number
    int mCpukHz;                        // frequency of thread's current CPU in kHz
#endif
};
| |
// CPU number and frequency start at -1 ("unknown") so the first sample()
// observes a change and skips that sample for cycle statistics.
CpuStats::CpuStats()
#ifdef DEBUG_CPU_USAGE
    : mCpuNum(-1), mCpukHz(-1)
#endif
{
}
| |
| void CpuStats::sample(const String8 &title |
| #ifndef DEBUG_CPU_USAGE |
| __unused |
| #endif |
| ) { |
| #ifdef DEBUG_CPU_USAGE |
| // get current thread's delta CPU time in wall clock ns |
| double wcNs; |
| bool valid = mCpuUsage.sampleAndEnable(wcNs); |
| |
| // record sample for wall clock statistics |
| if (valid) { |
| mWcStats.add(wcNs); |
| } |
| |
| // get the current CPU number |
| int cpuNum = sched_getcpu(); |
| |
| // get the current CPU frequency in kHz |
| int cpukHz = mCpuUsage.getCpukHz(cpuNum); |
| |
| // check if either CPU number or frequency changed |
| if (cpuNum != mCpuNum || cpukHz != mCpukHz) { |
| mCpuNum = cpuNum; |
| mCpukHz = cpukHz; |
| // ignore sample for purposes of cycles |
| valid = false; |
| } |
| |
| // if no change in CPU number or frequency, then record sample for cycle statistics |
| if (valid && mCpukHz > 0) { |
| const double cycles = wcNs * cpukHz * 0.000001; |
| mHzStats.add(cycles); |
| } |
| |
| const unsigned n = mWcStats.getN(); |
| // mCpuUsage.elapsed() is expensive, so don't call it every loop |
| if ((n & 127) == 1) { |
| const long long elapsed = mCpuUsage.elapsed(); |
| if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) { |
| const double perLoop = elapsed / (double) n; |
| const double perLoop100 = perLoop * 0.01; |
| const double perLoop1k = perLoop * 0.001; |
| const double mean = mWcStats.getMean(); |
| const double stddev = mWcStats.getStdDev(); |
| const double minimum = mWcStats.getMin(); |
| const double maximum = mWcStats.getMax(); |
| const double meanCycles = mHzStats.getMean(); |
| const double stddevCycles = mHzStats.getStdDev(); |
| const double minCycles = mHzStats.getMin(); |
| const double maxCycles = mHzStats.getMax(); |
| mCpuUsage.resetElapsed(); |
| mWcStats.reset(); |
| mHzStats.reset(); |
| ALOGD("CPU usage for %s over past %.1f secs\n" |
| " (%u mixer loops at %.1f mean ms per loop):\n" |
| " us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n" |
| " %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n" |
| " MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f", |
| title.string(), |
| elapsed * .000000001, n, perLoop * .000001, |
| mean * .001, |
| stddev * .001, |
| minimum * .001, |
| maximum * .001, |
| mean / perLoop100, |
| stddev / perLoop100, |
| minimum / perLoop100, |
| maximum / perLoop100, |
| meanCycles / perLoop1k, |
| stddevCycles / perLoop1k, |
| minCycles / perLoop1k, |
| maxCycles / perLoop1k); |
| |
| } |
| } |
| #endif |
| }; |
| |
| // ---------------------------------------------------------------------------- |
| // ThreadBase |
| // ---------------------------------------------------------------------------- |
| |
// static
// Maps a ThreadBase::type_t enumerator to a human-readable name for logs
// and metrics; unrecognized values yield "unknown".
const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
{
    switch (type) {
    case MIXER:
        return "MIXER";
    case DIRECT:
        return "DIRECT";
    case DUPLICATING:
        return "DUPLICATING";
    case RECORD:
        return "RECORD";
    case OFFLOAD:
        return "OFFLOAD";
    case MMAP_PLAYBACK:
        return "MMAP_PLAYBACK";
    case MMAP_CAPTURE:
        return "MMAP_CAPTURE";
    default:
        return "unknown";
    }
}
| |
// Base constructor shared by all AudioFlinger thread types.  'id' is the
// I/O handle, 'isOut' distinguishes playback from capture, and
// 'systemReady' gates events queued before boot completes (see
// sendConfigEvent_l()).
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
        type_t type, bool systemReady, bool isOut)
    :   Thread(false /*canCallJava*/),
        mType(type),
        mAudioFlinger(audioFlinger),
        // metrics key combines the audio-thread prefix with the I/O handle id
        mThreadMetrics(std::string(AMEDIAMETRICS_KEY_PREFIX_AUDIO_THREAD) + std::to_string(id),
                isOut),
        mIsOut(isOut),
        // mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, mFormat, mBufferSize
        // are set by PlaybackThread::readOutputParameters_l() or
        // RecordThread::readInputParameters_l()
        //FIXME: mStandby should be true here. Is this some kind of hack?
        mStandby(false),
        mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
        // mName will be set by concrete (non-virtual) subclass
        mDeathRecipient(new PMDeathRecipient(this)),
        mSystemReady(systemReady),
        mSignalPending(false)
{
    mThreadMetrics.logConstructor(getpid(), threadTypeToString(type), id);
    // zero-initialize the POD patch structure
    memset(&mPatch, 0, sizeof(struct audio_patch));
}
| |
// Releases the wake lock, detaches the power-manager death recipient, and
// emits a final forced statistics log.
AudioFlinger::ThreadBase::~ThreadBase()
{
    // mConfigEvents should be empty, but just in case it isn't, free the memory it owns
    mConfigEvents.clear();

    // do not lock the mutex in destructor
    releaseWakeLock_l();
    if (mPowerManager != 0) {
        sp<IBinder> binder = IInterface::asBinder(mPowerManager);
        binder->unlinkToDeath(mDeathRecipient);
    }

    sendStatistics(true /* force */);
}
| |
| status_t AudioFlinger::ThreadBase::readyToRun() |
| { |
| status_t status = initCheck(); |
| if (status == NO_ERROR) { |
| ALOGI("AudioFlinger's thread %p tid=%d ready to run", this, getTid()); |
| } else { |
| ALOGE("No working audio driver found."); |
| } |
| return status; |
| } |
| |
// Synchronously terminates the thread: runs subclass pre-exit cleanup,
// requests exit, wakes the thread loop, and joins it.
void AudioFlinger::ThreadBase::exit()
{
    ALOGV("ThreadBase::exit");
    // do any cleanup required for exit to succeed
    preExit();
    {
        // This lock prevents the following race in thread (uniprocessor for illustration):
        //  if (!exitPending()) {
        //      // context switch from here to exit()
        //      // exit() calls requestExit(), what exitPending() observes
        //      // exit() calls signal(), which is dropped since no waiters
        //      // context switch back from exit() to here
        //      mWaitWorkCV.wait(...);
        //      // now thread is hung
        //  }
        AutoMutex lock(mLock);
        requestExit();
        mWaitWorkCV.broadcast();
    }
    // When Thread::requestExitAndWait is made virtual and this method is renamed to
    //     "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
    requestExitAndWait();
}
| |
| status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs) |
| { |
| ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string()); |
| Mutex::Autolock _l(mLock); |
| |
| return sendSetParameterConfigEvent_l(keyValuePairs); |
| } |
| |
// sendConfigEvent_l() must be called with ThreadBase::mLock held
// Can temporarily release the lock if waiting for a reply from processConfigEvents_l().
status_t AudioFlinger::ThreadBase::sendConfigEvent_l(sp<ConfigEvent>& event)
{
    status_t status = NO_ERROR;

    if (event->mRequiresSystemReady && !mSystemReady) {
        // defer until the system is ready; the sender does not wait in this case
        event->mWaitStatus = false;
        mPendingConfigEvents.add(event);
        return status;
    }
    mConfigEvents.add(event);
    ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
    mWaitWorkCV.signal();
    // release mLock so the thread loop can run processConfigEvents_l()
    // and handle the event just queued
    mLock.unlock();
    {
        Mutex::Autolock _l(event->mLock);
        // wait (bounded by kConfigEventTimeoutNs per wakeup) until the
        // processing side clears mWaitStatus and publishes event->mStatus
        while (event->mWaitStatus) {
            if (event->mCond.waitRelative(event->mLock, kConfigEventTimeoutNs) != NO_ERROR) {
                event->mStatus = TIMED_OUT;
                event->mWaitStatus = false;
            }
        }
        status = event->mStatus;
    }
    // re-acquire mLock to restore the caller's locking expectations
    mLock.lock();
    return status;
}
| |
| void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid, |
| audio_port_handle_t portId) |
| { |
| Mutex::Autolock _l(mLock); |
| sendIoConfigEvent_l(event, pid, portId); |
| } |
| |
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid,
                                                   audio_port_handle_t portId)
{
    // The audio statistics history is exponentially weighted to forget events
    // about five or more seconds in the past.  In order to have
    // crisper statistics for mediametrics, we reset the statistics on
    // an IoConfigEvent, to reflect different properties for a new device.
    mIoJitterMs.reset();
    mLatencyMs.reset();
    mProcessTimeMs.reset();
    mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_CONTINUOUS);

    // upcast to the base so the sp<> matches sendConfigEvent_l()'s parameter type
    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid, portId);
    sendConfigEvent_l(configEvent);
}
| |
| void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio, bool forApp) |
| { |
| Mutex::Autolock _l(mLock); |
| sendPrioConfigEvent_l(pid, tid, prio, forApp); |
| } |
| |
// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
// Queues a SCHED_FIFO priority-change request for the thread loop to apply
// (see the CFG_EVENT_PRIO case in processConfigEvents_l()).
void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(
        pid_t pid, pid_t tid, int32_t prio, bool forApp)
{
    sp<ConfigEvent> configEvent = (ConfigEvent *)new PrioConfigEvent(pid, tid, prio, forApp);
    sendConfigEvent_l(configEvent);
}
| |
// sendSetParameterConfigEvent_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::ThreadBase::sendSetParameterConfigEvent_l(const String8& keyValuePair)
{
    sp<ConfigEvent> configEvent;
    AudioParameter param(keyValuePair);
    int value;
    // The mono-output key is consumed here rather than forwarded downstream.
    if (param.getInt(String8(AudioParameter::keyMonoOutput), value) == NO_ERROR) {
        setMasterMono_l(value != 0);
        if (param.size() == 1) {
            return NO_ERROR; // should be a solo parameter - we don't pass down
        }
        // strip the handled key and forward only the remaining pairs
        param.remove(String8(AudioParameter::keyMonoOutput));
        configEvent = new SetParameterConfigEvent(param.toString());
    } else {
        configEvent = new SetParameterConfigEvent(keyValuePair);
    }
    return sendConfigEvent_l(configEvent);
}
| |
// Queues a create-audio-patch request and waits for the thread to process it.
// 'handle' is in/out: its input value seeds the event, and on success it is
// overwritten with the handle produced by createAudioPatch_l().
status_t AudioFlinger::ThreadBase::sendCreateAudioPatchConfigEvent(
        const struct audio_patch *patch,
        audio_patch_handle_t *handle)
{
    Mutex::Autolock _l(mLock);
    sp<ConfigEvent> configEvent = (ConfigEvent *)new CreateAudioPatchConfigEvent(*patch, *handle);
    status_t status = sendConfigEvent_l(configEvent);
    if (status == NO_ERROR) {
        CreateAudioPatchConfigEventData *data =
                (CreateAudioPatchConfigEventData *)configEvent->mData.get();
        *handle = data->mHandle;
    }
    return status;
}
| |
| status_t AudioFlinger::ThreadBase::sendReleaseAudioPatchConfigEvent( |
| const audio_patch_handle_t handle) |
| { |
| Mutex::Autolock _l(mLock); |
| sp<ConfigEvent> configEvent = (ConfigEvent *)new ReleaseAudioPatchConfigEvent(handle); |
| return sendConfigEvent_l(configEvent); |
| } |
| |
| status_t AudioFlinger::ThreadBase::sendUpdateOutDeviceConfigEvent( |
| const DeviceDescriptorBaseVector& outDevices) |
| { |
| if (type() != RECORD) { |
| // The update out device operation is only for record thread. |
| return INVALID_OPERATION; |
| } |
| Mutex::Autolock _l(mLock); |
| sp<ConfigEvent> configEvent = (ConfigEvent *)new UpdateOutDevicesConfigEvent(outDevices); |
| return sendConfigEvent_l(configEvent); |
| } |
| |
| void AudioFlinger::ThreadBase::sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs) |
| { |
| ALOG_ASSERT(type() == RECORD, "sendResizeBufferConfigEvent_l() called on non record thread"); |
| sp<ConfigEvent> configEvent = |
| (ConfigEvent *)new ResizeBufferConfigEvent(maxSharedAudioHistoryMs); |
| sendConfigEvent_l(configEvent); |
| } |
| |
// post condition: mConfigEvents.isEmpty()
// Drains the queued config events (called with mLock held), dispatching
// each to its handler, then wakes any sender blocked in sendConfigEvent_l().
// If any SET_PARAMETER event changed the configuration, cached parameters
// are refreshed once at the end.
void AudioFlinger::ThreadBase::processConfigEvents_l()
{
    bool configChanged = false;

    while (!mConfigEvents.isEmpty()) {
        ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
        // pop from the head to preserve FIFO ordering of events
        sp<ConfigEvent> event = mConfigEvents[0];
        mConfigEvents.removeAt(0);
        switch (event->mType) {
        case CFG_EVENT_PRIO: {
            PrioConfigEventData *data = (PrioConfigEventData *)event->mData.get();
            // FIXME Need to understand why this has to be done asynchronously
            int err = requestPriority(data->mPid, data->mTid, data->mPrio, data->mForApp,
                    true /*asynchronous*/);
            if (err != 0) {
                ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
                        data->mPrio, data->mPid, data->mTid, err);
            }
        } break;
        case CFG_EVENT_IO: {
            IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
            ioConfigChanged(data->mEvent, data->mPid, data->mPortId);
        } break;
        case CFG_EVENT_SET_PARAMETER: {
            SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
            if (checkForNewParameter_l(data->mKeyValuePairs, event->mStatus)) {
                configChanged = true;
                mLocalLog.log("CFG_EVENT_SET_PARAMETER: (%s) configuration changed",
                        data->mKeyValuePairs.string());
            }
        } break;
        case CFG_EVENT_CREATE_AUDIO_PATCH: {
            // snapshot devices before/after so the local log shows the routing change
            const DeviceTypeSet oldDevices = getDeviceTypes();
            CreateAudioPatchConfigEventData *data =
                    (CreateAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes();
            mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_RELEASE_AUDIO_PATCH: {
            const DeviceTypeSet oldDevices = getDeviceTypes();
            ReleaseAudioPatchConfigEventData *data =
                    (ReleaseAudioPatchConfigEventData *)event->mData.get();
            event->mStatus = releaseAudioPatch_l(data->mHandle);
            const DeviceTypeSet newDevices = getDeviceTypes();
            mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %s (%s) new device %s (%s)",
                    dumpDeviceTypes(oldDevices).c_str(), toString(oldDevices).c_str(),
                    dumpDeviceTypes(newDevices).c_str(), toString(newDevices).c_str());
        } break;
        case CFG_EVENT_UPDATE_OUT_DEVICE: {
            UpdateOutDevicesConfigEventData *data =
                    (UpdateOutDevicesConfigEventData *)event->mData.get();
            updateOutDevices(data->mOutDevices);
        } break;
        case CFG_EVENT_RESIZE_BUFFER: {
            ResizeBufferConfigEventData *data =
                    (ResizeBufferConfigEventData *)event->mData.get();
            resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
        } break;
        default:
            ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
            break;
        }
        {
            Mutex::Autolock _l(event->mLock);
            // wake the sender (if it is still waiting) now that event->mStatus is final
            if (event->mWaitStatus) {
                event->mWaitStatus = false;
                event->mCond.signal();
            }
        }
        ALOGV_IF(mConfigEvents.isEmpty(), "processConfigEvents_l() DONE thread %p", this);
    }

    if (configChanged) {
        cacheParameters_l();
    }
}
| |
| String8 channelMaskToString(audio_channel_mask_t mask, bool output) { |
| String8 s; |
| const audio_channel_representation_t representation = |
| audio_channel_mask_get_representation(mask); |
| |
| switch (representation) { |
| // Travel all single bit channel mask to convert channel mask to string. |
| case AUDIO_CHANNEL_REPRESENTATION_POSITION: { |
| if (output) { |
| if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low-frequency, "); |
| if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,"); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT) s.append("top-side-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT) s.append("top-side-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT) s.append("bottom-front-left, "); |
| if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER) s.append("bottom-front-center, "); |
| if (mask & AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT) s.append("bottom-front-right, "); |
| if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) s.append("low-frequency-2, "); |
| if (mask & AUDIO_CHANNEL_OUT_HAPTIC_B) s.append("haptic-B, "); |
| if (mask & AUDIO_CHANNEL_OUT_HAPTIC_A) s.append("haptic-A, "); |
| if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown, "); |
| } else { |
| if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, "); |
| if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, "); |
| if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, "); |
| if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, "); |
| if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, "); |
| if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, "); |
| if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, "); |
| if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, "); |
| if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, "); |
| if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, "); |
| if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, "); |
| if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, "); |
| if (mask & AUDIO_CHANNEL_IN_BACK_LEFT) s.append("back-left, "); |
| if (mask & AUDIO_CHANNEL_IN_BACK_RIGHT) s.append("back-right, "); |
| if (mask & AUDIO_CHANNEL_IN_CENTER) s.append("center, "); |
| if (mask & AUDIO_CHANNEL_IN_LOW_FREQUENCY) s.append("low-frequency, "); |
| if (mask & AUDIO_CHANNEL_IN_TOP_LEFT) s.append("top-left, "); |
| if (mask & AUDIO_CHANNEL_IN_TOP_RIGHT) s.append("top-right, "); |
| if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, "); |
| if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, "); |
| if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown, "); |
| } |
| const int len = s.length(); |
| if (len > 2) { |
| (void) s.lockBuffer(len); // needed? |
| s.unlockBuffer(len - 2); // remove trailing ", " |
| } |
| return s; |
| } |
| case AUDIO_CHANNEL_REPRESENTATION_INDEX: |
| s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask)); |
| return s; |
| default: |
| s.appendFormat("unknown mask, representation:%d bits:%#x", |
| representation, audio_channel_mask_get_bits(mask)); |
| return s; |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::dump(int fd, const Vector<String16>& args) |
| { |
| dprintf(fd, "\n%s thread %p, name %s, tid %d, type %d (%s):\n", isOutput() ? "Output" : "Input", |
| this, mThreadName, getTid(), type(), threadTypeToString(type())); |
| |
| bool locked = AudioFlinger::dumpTryLock(mLock); |
| if (!locked) { |
| dprintf(fd, " Thread may be deadlocked\n"); |
| } |
| |
| dumpBase_l(fd, args); |
| dumpInternals_l(fd, args); |
| dumpTracks_l(fd, args); |
| dumpEffectChains_l(fd, args); |
| |
| if (locked) { |
| mLock.unlock(); |
| } |
| |
| dprintf(fd, " Local log:\n"); |
| mLocalLog.dump(fd, " " /* prefix */, 40 /* lines */); |
| } |
| |
// Dumps the state common to all ThreadBase subclasses: stream parameters,
// pending config events, routed devices, and timing statistics.
// Must be called with mLock held (or after dumpTryLock best-effort in dump()).
void AudioFlinger::ThreadBase::dumpBase_l(int fd, const Vector<String16>& args __unused)
{
    dprintf(fd, "  I/O handle: %d\n", mId);
    dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
    dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
    dprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
    dprintf(fd, "  HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat).c_str());
    dprintf(fd, "  HAL buffer size: %zu bytes\n", mBufferSize);
    dprintf(fd, "  Channel count: %u\n", mChannelCount);
    // Output naming is used for all non-RECORD thread types (MMAP capture included);
    // see channelMaskToString().
    dprintf(fd, "  Channel mask: 0x%08x (%s)\n", mChannelMask,
            channelMaskToString(mChannelMask, mType != RECORD).string());
    dprintf(fd, "  Processing format: 0x%x (%s)\n", mFormat, formatToString(mFormat).c_str());
    dprintf(fd, "  Processing frame size: %zu bytes\n", mFrameSize);
    dprintf(fd, "  Pending config events:");
    size_t numConfig = mConfigEvents.size();
    if (numConfig) {
        const size_t SIZE = 256;
        char buffer[SIZE];
        // Each event formats itself into the scratch buffer; one event per line.
        for (size_t i = 0; i < numConfig; i++) {
            mConfigEvents[i]->dump(buffer, SIZE);
            dprintf(fd, "\n    %s", buffer);
        }
        dprintf(fd, "\n");
    } else {
        dprintf(fd, " none\n");
    }
    // Note: output device may be used by capture threads for effects such as AEC.
    dprintf(fd, "  Output devices: %s (%s)\n",
            dumpDeviceTypes(outDeviceTypes()).c_str(), toString(outDeviceTypes()).c_str());
    dprintf(fd, "  Input device: %#x (%s)\n",
            inDeviceType(), toString(inDeviceType()).c_str());
    dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());

    // Dump timestamp statistics for the Thread types that support it.
    if (mType == RECORD
            || mType == MIXER
            || mType == DUPLICATING
            || mType == DIRECT
            || mType == OFFLOAD) {
        dprintf(fd, "  Timestamp stats: %s\n", mTimestampVerifier.toString().c_str());
        dprintf(fd, "  Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
    }

    if (mLastIoBeginNs > 0) { // MMAP may not set this
        dprintf(fd, "  Last %s occurred (msecs): %lld\n",
                isOutput() ? "write" : "read",
                (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
    }

    // The statistics below are only printed once at least one sample exists.
    if (mProcessTimeMs.getN() > 0) {
        dprintf(fd, "  Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
    }

    if (mIoJitterMs.getN() > 0) {
        dprintf(fd, "  Hal %s jitter ms stats: %s\n",
                isOutput() ? "write" : "read",
                mIoJitterMs.toString().c_str());
    }

    if (mLatencyMs.getN() > 0) {
        dprintf(fd, "  Threadloop %s latency stats: %s\n",
                isOutput() ? "write" : "read",
                mLatencyMs.toString().c_str());
    }
}
| |
| void AudioFlinger::ThreadBase::dumpEffectChains_l(int fd, const Vector<String16>& args) |
| { |
| const size_t SIZE = 256; |
| char buffer[SIZE]; |
| |
| size_t numEffectChains = mEffectChains.size(); |
| snprintf(buffer, SIZE, " %zu Effect Chains\n", numEffectChains); |
| write(fd, buffer, strlen(buffer)); |
| |
| for (size_t i = 0; i < numEffectChains; ++i) { |
| sp<EffectChain> chain = mEffectChains[i]; |
| if (chain != 0) { |
| chain->dump(fd, args); |
| } |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::acquireWakeLock() |
| { |
| Mutex::Autolock _l(mLock); |
| acquireWakeLock_l(); |
| } |
| |
| String16 AudioFlinger::ThreadBase::getWakeLockTag() |
| { |
| switch (mType) { |
| case MIXER: |
| return String16("AudioMix"); |
| case DIRECT: |
| return String16("AudioDirectOut"); |
| case DUPLICATING: |
| return String16("AudioDup"); |
| case RECORD: |
| return String16("AudioIn"); |
| case OFFLOAD: |
| return String16("AudioOffload"); |
| case MMAP_PLAYBACK: |
| return String16("MmapPlayback"); |
| case MMAP_CAPTURE: |
| return String16("MmapCapture"); |
| default: |
| ALOG_ASSERT(false); |
| return String16("AudioUnknown"); |
| } |
| } |
| |
// Acquires a partial wakelock on behalf of this thread via the power manager,
// and records the boottime offset used for BOOTTIME timestamps.
// Must be called with mLock held.
void AudioFlinger::ThreadBase::acquireWakeLock_l()
{
    // Lazily (re)connects to the power manager service if needed.
    getPowerManager_l();
    if (mPowerManager != 0) {
        // The fresh BBinder serves as the wakelock token identifying this acquisition.
        sp<IBinder> binder = new BBinder();
        // Uses AID_AUDIOSERVER for wakelock. updateWakeLockUids_l() updates with client uids.
        binder::Status status = mPowerManager->acquireWakeLockAsync(binder,
                    POWERMANAGER_PARTIAL_WAKE_LOCK,
                    getWakeLockTag(),
                    String16("audioserver"),
                    {} /* workSource */,
                    {} /* historyTag */);
        // Only keep the token on success; on failure mWakeLockToken stays unset.
        if (status.isOk()) {
            mWakeLockToken = binder;
        }
        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status.exceptionCode());
    }

    // Runs even when no wakelock was obtained (token may be null); gBoottime
    // tracks boottime-vs-monotonic drift while any wakelock is held.
    gBoottime.acquire(mWakeLockToken);
    mTimestamp.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_BOOTTIME] =
            gBoottime.getBoottimeOffset();
}
| |
| void AudioFlinger::ThreadBase::releaseWakeLock() |
| { |
| Mutex::Autolock _l(mLock); |
| releaseWakeLock_l(); |
| } |
| |
| void AudioFlinger::ThreadBase::releaseWakeLock_l() |
| { |
| gBoottime.release(mWakeLockToken); |
| if (mWakeLockToken != 0) { |
| ALOGV("releaseWakeLock_l() %s", mThreadName); |
| if (mPowerManager != 0) { |
| mPowerManager->releaseWakeLockAsync(mWakeLockToken, 0); |
| } |
| mWakeLockToken.clear(); |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::getPowerManager_l() { |
| if (mSystemReady && mPowerManager == 0) { |
| // use checkService() to avoid blocking if power service is not up yet |
| sp<IBinder> binder = |
| defaultServiceManager()->checkService(String16("power")); |
| if (binder == 0) { |
| ALOGW("Thread %s cannot connect to the power manager service", mThreadName); |
| } else { |
| mPowerManager = interface_cast<os::IPowerManager>(binder); |
| binder->linkToDeath(mDeathRecipient); |
| } |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<uid_t> &uids) { |
| getPowerManager_l(); |
| |
| #if !LOG_NDEBUG |
| std::stringstream s; |
| for (uid_t uid : uids) { |
| s << uid << " "; |
| } |
| ALOGD("updateWakeLockUids_l %s uids:%s", mThreadName, s.str().c_str()); |
| #endif |
| |
| if (mWakeLockToken == NULL) { // token may be NULL if AudioFlinger::systemReady() not called. |
| if (mSystemReady) { |
| ALOGE("no wake lock to update, but system ready!"); |
| } else { |
| ALOGW("no wake lock to update, system not ready yet"); |
| } |
| return; |
| } |
| if (mPowerManager != 0) { |
| std::vector<int> uidsAsInt(uids.begin(), uids.end()); // powermanager expects uids as ints |
| binder::Status status = mPowerManager->updateWakeLockUidsAsync( |
| mWakeLockToken, uidsAsInt); |
| ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status.exceptionCode()); |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::clearPowerManager() |
| { |
| Mutex::Autolock _l(mLock); |
| releaseWakeLock_l(); |
| mPowerManager.clear(); |
| } |
| |
// Base-class stub: only RecordThread overrides this (it needs the output
// devices for effects such as AEC). Reaching this implementation is an error.
void AudioFlinger::ThreadBase::updateOutDevices(
        const DeviceDescriptorBaseVector& outDevices __unused)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}
| |
// Base-class stub: only RecordThread overrides this (shared audio history is a
// capture-side concept). Reaching this implementation is an error.
void AudioFlinger::ThreadBase::resizeInputBuffer_l(int32_t maxSharedAudioHistoryMs __unused)
{
    ALOGE("%s should only be called in RecordThread", __func__);
}
| |
| void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who __unused) |
| { |
| sp<ThreadBase> thread = mThread.promote(); |
| if (thread != 0) { |
| thread->clearPowerManager(); |
| } |
| ALOGW("power manager service died !!!"); |
| } |
| |
| void AudioFlinger::ThreadBase::setEffectSuspended_l( |
| const effect_uuid_t *type, bool suspend, audio_session_t sessionId) |
| { |
| sp<EffectChain> chain = getEffectChain_l(sessionId); |
| if (chain != 0) { |
| if (type != NULL) { |
| chain->setEffectSuspended_l(type, suspend); |
| } else { |
| chain->setEffectSuspendedAll_l(suspend); |
| } |
| } |
| |
| updateSuspendedSessions_l(type, suspend, sessionId); |
| } |
| |
| void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain) |
| { |
| ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId()); |
| if (index < 0) { |
| return; |
| } |
| |
| const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects = |
| mSuspendedSessions.valueAt(index); |
| |
| for (size_t i = 0; i < sessionEffects.size(); i++) { |
| const sp<SuspendedSessionDesc>& desc = sessionEffects.valueAt(i); |
| for (int j = 0; j < desc->mRefCount; j++) { |
| if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) { |
| chain->setEffectSuspendedAll_l(true); |
| } else { |
| ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x", |
| desc->mType.timeLow); |
| chain->setEffectSuspended_l(&desc->mType, true); |
| } |
| } |
| } |
| } |
| |
// Maintains the per-session suspend refcount bookkeeping in mSuspendedSessions.
// type == NULL means "all effects" (stored under kKeyForSuspendAll); otherwise
// the effect type's timeLow is used as the key. suspend == true increments the
// refcount (creating entries as needed); false decrements and removes empty
// entries. Must be called with mLock held.
void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
                                                         bool suspend,
                                                         audio_session_t sessionId)
{
    ssize_t index = mSuspendedSessions.indexOfKey(sessionId);

    // NOTE: sessionEffects is a local COPY of the stored KeyedVector; all
    // mutations below are written back via replaceValueFor() at the end.
    KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;

    if (suspend) {
        if (index >= 0) {
            sessionEffects = mSuspendedSessions.valueAt(index);
        } else {
            // First suspend for this session: create an (empty) entry now.
            mSuspendedSessions.add(sessionId, sessionEffects);
        }
    } else {
        // Restoring a session we never suspended is a no-op.
        if (index < 0) {
            return;
        }
        sessionEffects = mSuspendedSessions.valueAt(index);
    }


    int key = EffectChain::kKeyForSuspendAll;
    if (type != NULL) {
        key = type->timeLow;
    }
    index = sessionEffects.indexOfKey(key);

    sp<SuspendedSessionDesc> desc;
    if (suspend) {
        if (index >= 0) {
            desc = sessionEffects.valueAt(index);
        } else {
            // First suspend of this effect type for the session.
            desc = new SuspendedSessionDesc();
            if (type != NULL) {
                desc->mType = *type;
            }
            sessionEffects.add(key, desc);
            ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
        }
        desc->mRefCount++;
    } else {
        // Restoring an effect type we never suspended is a no-op.
        if (index < 0) {
            return;
        }
        desc = sessionEffects.valueAt(index);
        if (--desc->mRefCount == 0) {
            ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
            sessionEffects.removeItemsAt(index);
            if (sessionEffects.isEmpty()) {
                ALOGV("updateSuspendedSessions_l() restore removing session %d",
                        sessionId);
                mSuspendedSessions.removeItem(sessionId);
            }
        }
    }
    // Write the modified copy back (the session entry was removed above when
    // sessionEffects became empty).
    if (!sessionEffects.isEmpty()) {
        mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
    }
}
| |
| void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(bool enabled, |
| audio_session_t sessionId, |
| bool threadLocked) { |
| if (!threadLocked) { |
| mLock.lock(); |
| } |
| |
| if (mType != RECORD) { |
| // suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on |
| // another session. This gives the priority to well behaved effect control panels |
| // and applications not using global effects. |
| // Enabling post processing in AUDIO_SESSION_OUTPUT_STAGE session does not affect |
| // global effects |
| if (!audio_is_global_session(sessionId)) { |
| setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX); |
| } |
| } |
| |
| if (!threadLocked) { |
| mLock.unlock(); |
| } |
| } |
| |
| // checkEffectCompatibility_l() must be called with ThreadBase::mLock held |
| status_t AudioFlinger::RecordThread::checkEffectCompatibility_l( |
| const effect_descriptor_t *desc, audio_session_t sessionId) |
| { |
| // No global output effect sessions on record threads |
| if (sessionId == AUDIO_SESSION_OUTPUT_MIX |
| || sessionId == AUDIO_SESSION_OUTPUT_STAGE) { |
| ALOGW("checkEffectCompatibility_l(): global effect %s on record thread %s", |
| desc->name, mThreadName); |
| return BAD_VALUE; |
| } |
| // only pre processing effects on record thread |
| if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC) { |
| ALOGW("checkEffectCompatibility_l(): non pre processing effect %s on record thread %s", |
| desc->name, mThreadName); |
| return BAD_VALUE; |
| } |
| |
| // always allow effects without processing load or latency |
| if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) { |
| return NO_ERROR; |
| } |
| |
| audio_input_flags_t flags = mInput->flags; |
| if (hasFastCapture() || (flags & AUDIO_INPUT_FLAG_FAST)) { |
| if (flags & AUDIO_INPUT_FLAG_RAW) { |
| ALOGW("checkEffectCompatibility_l(): effect %s on record thread %s in raw mode", |
| desc->name, mThreadName); |
| return BAD_VALUE; |
| } |
| if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) { |
| ALOGW("checkEffectCompatibility_l(): non HW effect %s on record thread %s in fast mode", |
| desc->name, mThreadName); |
| return BAD_VALUE; |
| } |
| } |
| |
| if (EffectModule::isHapticGenerator(&desc->type)) { |
| ALOGE("%s(): HapticGenerator is not supported in RecordThread", __func__); |
| return BAD_VALUE; |
| } |
| return NO_ERROR; |
| } |
| |
// checkEffectCompatibility_l() must be called with ThreadBase::mLock held
// Validates that an effect described by |desc| may be created on this playback
// thread for |sessionId|. Returns NO_ERROR when acceptable, BAD_VALUE when the
// combination of thread type, output flags and effect flags is incompatible.
status_t AudioFlinger::PlaybackThread::checkEffectCompatibility_l(
        const effect_descriptor_t *desc, audio_session_t sessionId)
{
    // no preprocessing on playback threads
    if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
        ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
                " thread %s", desc->name, mThreadName);
        return BAD_VALUE;
    }

    // always allow effects without processing load or latency
    if ((desc->flags & EFFECT_FLAG_NO_PROCESS_MASK) == EFFECT_FLAG_NO_PROCESS) {
        return NO_ERROR;
    }

    // HapticGenerator requires the thread to actually have haptic channels.
    if (EffectModule::isHapticGenerator(&desc->type) && mHapticChannelCount == 0) {
        ALOGW("%s: thread doesn't support haptic playback while the effect is HapticGenerator",
                __func__);
        return BAD_VALUE;
    }

    // Remaining rules depend on the concrete playback thread type.
    switch (mType) {
    case MIXER: {
#ifndef MULTICHANNEL_EFFECT_CHAIN
        // Reject any effect on mixer multichannel sinks.
        // TODO: fix both format and multichannel issues with effects.
        if (mChannelCount != FCC_2) {
            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
                    " thread %s", desc->name, mChannelCount, mThreadName);
            return BAD_VALUE;
        }
#endif
        audio_output_flags_t flags = mOutput->flags;
        if (hasFastMixer() || (flags & AUDIO_OUTPUT_FLAG_FAST)) {
            if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
                // global effects are applied only to non fast tracks if they are SW
                if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
                    break;
                }
            } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
                // only post processing on output stage session
                if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
                    ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
                            " on output stage session", desc->name);
                    return BAD_VALUE;
                }
            } else if (sessionId == AUDIO_SESSION_DEVICE) {
                // only post processing on output stage session
                if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
                    ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
                            " on device session", desc->name);
                    return BAD_VALUE;
                }
            } else {
                // no restriction on effects applied on non fast tracks
                if ((hasAudioSession_l(sessionId) & ThreadBase::FAST_SESSION) == 0) {
                    break;
                }
            }

            // Falls through to here only when the effect would sit on the fast path.
            if (flags & AUDIO_OUTPUT_FLAG_RAW) {
                ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
                        desc->name);
                return BAD_VALUE;
            }
            if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
                ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
                        " in fast mode", desc->name);
                return BAD_VALUE;
            }
        }
    } break;
    case OFFLOAD:
        // nothing actionable on offload threads, if the effect:
        //   - is offloadable: the effect can be created
        //   - is NOT offloadable: the effect should still be created, but EffectHandle::enable()
        //     will take care of invalidating the tracks of the thread
        break;
    case DIRECT:
        // Reject any effect on Direct output threads for now, since the format of
        // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
        ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
                desc->name, mThreadName);
        return BAD_VALUE;
    case DUPLICATING:
#ifndef MULTICHANNEL_EFFECT_CHAIN
        // Reject any effect on mixer multichannel sinks.
        // TODO: fix both format and multichannel issues with effects.
        if (mChannelCount != FCC_2) {
            ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
                    " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
            return BAD_VALUE;
        }
#endif
        if (audio_is_global_session(sessionId)) {
            ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
                    " thread %s", desc->name, mThreadName);
            return BAD_VALUE;
        }
        if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
            ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
                    " DUPLICATING thread %s", desc->name, mThreadName);
            return BAD_VALUE;
        }
        if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
            ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
                    " DUPLICATING thread %s", desc->name, mThreadName);
            return BAD_VALUE;
        }
        break;
    default:
        LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
    }

    return NO_ERROR;
}
| |
// ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
// Creates (or reuses) an effect module for |desc| on |sessionId| and returns a
// new EffectHandle connected to it. On failure, any chain/effect created here
// is rolled back under the Exit label; *status receives the result. When
// |probe| is true only the compatibility check is performed.
// The caller must clear the returned handle on error (see note at Exit).
sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
        const sp<AudioFlinger::Client>& client,
        const sp<IEffectClient>& effectClient,
        int32_t priority,
        audio_session_t sessionId,
        effect_descriptor_t *desc,
        int *enabled,
        status_t *status,
        bool pinned,
        bool probe)
{
    sp<EffectModule> effect;
    sp<EffectHandle> handle;
    status_t lStatus;
    sp<EffectChain> chain;
    // Track what we created so the Exit path can undo exactly that on failure.
    bool chainCreated = false;
    bool effectCreated = false;
    audio_unique_id_t effectId = AUDIO_UNIQUE_ID_USE_UNSPECIFIED;

    lStatus = initCheck();
    if (lStatus != NO_ERROR) {
        ALOGW("createEffect_l() Audio driver not initialized.");
        goto Exit;
    }

    ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);

    { // scope for mLock
        Mutex::Autolock _l(mLock);

        lStatus = checkEffectCompatibility_l(desc, sessionId);
        // In probe mode we stop here: the answer is the compatibility status.
        if (probe || lStatus != NO_ERROR) {
            goto Exit;
        }

        // check for existing effect chain with the requested audio session
        chain = getEffectChain_l(sessionId);
        if (chain == 0) {
            // create a new chain for this session
            ALOGV("createEffect_l() new effect chain for session %d", sessionId);
            chain = new EffectChain(this, sessionId);
            addEffectChain_l(chain);
            chain->setStrategy(getStrategyForSession_l(sessionId));
            chainCreated = true;
        } else {
            // Reuse an existing module with a matching descriptor, if any.
            effect = chain->getEffectFromDesc_l(desc);
        }

        ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());

        if (effect == 0) {
            effectId = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
            // create a new effect module if none present in the chain
            lStatus = chain->createEffect_l(effect, desc, effectId, sessionId, pinned);
            if (lStatus != NO_ERROR) {
                goto Exit;
            }
            effectCreated = true;

            // FIXME: use vector of device and address when effect interface is ready.
            effect->setDevices(outDeviceTypeAddrs());
            effect->setInputDevice(inDeviceTypeAddr());
            effect->setMode(mAudioFlinger->getMode());
            effect->setAudioSource(mAudioSource);
        }
        if (effect->isHapticGenerator()) {
            // TODO(b/184194057): Use the vibrator information from the vibrator that will be used
            // for the HapticGenerator.
            const media::AudioVibratorInfo* defaultVibratorInfo =
                    mAudioFlinger->getDefaultVibratorInfo_l();
            if (defaultVibratorInfo != nullptr) {
                // Only set the vibrator info when it is a valid one.
                effect->setVibratorInfo(defaultVibratorInfo);
            }
        }
        // create effect handle and connect it to effect module
        handle = new EffectHandle(effect, client, effectClient, priority);
        lStatus = handle->initCheck();
        if (lStatus == OK) {
            lStatus = effect->addHandle(handle.get());
        }
        if (enabled != NULL) {
            *enabled = (int)effect->isEnabled();
        }
    }

Exit:
    // Roll back anything created above on failure (ALREADY_EXISTS is benign).
    if (!probe && lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
        Mutex::Autolock _l(mLock);
        if (effectCreated) {
            chain->removeEffect_l(effect);
        }
        if (chainCreated) {
            removeEffectChain_l(chain);
        }
        // handle must be cleared by caller to avoid deadlock.
    }

    *status = lStatus;
    return handle;
}
| |
// Disconnects |handle| from its effect module. When this was the last handle
// (and the effect is unpinned or unpinIfLast is set), the effect is removed
// from the thread; orphan-chain bookkeeping and suspended-effect restoration
// are then performed OUTSIDE mLock to avoid lock-order issues.
void AudioFlinger::ThreadBase::disconnectEffectHandle(EffectHandle *handle,
                                                      bool unpinIfLast)
{
    bool remove = false;
    sp<EffectModule> effect;
    {
        Mutex::Autolock _l(mLock);
        // The handle holds only a weak reference; the effect may already be gone.
        sp<EffectBase> effectBase = handle->effect().promote();
        if (effectBase == nullptr) {
            return;
        }
        effect = effectBase->asEffectModule();
        if (effect == nullptr) {
            return;
        }
        // restore suspended effects if the disconnected handle was enabled and the last one.
        remove = (effect->removeHandle(handle) == 0) && (!effect->isPinned() || unpinIfLast);
        if (remove) {
            removeEffect_l(effect, true);
        }
    }
    // Post-removal work intentionally done after dropping mLock.
    if (remove) {
        mAudioFlinger->updateOrphanEffectChains(effect);
        if (handle->enabled()) {
            effect->checkSuspendOnEffectEnabled(false, false /*threadLocked*/);
        }
    }
}
| |
| void AudioFlinger::ThreadBase::onEffectEnable(const sp<EffectModule>& effect) { |
| if (isOffloadOrMmap()) { |
| Mutex::Autolock _l(mLock); |
| broadcast_l(); |
| } |
| if (!effect->isOffloadable()) { |
| if (mType == ThreadBase::OFFLOAD) { |
| PlaybackThread *t = (PlaybackThread *)this; |
| t->invalidateTracks(AUDIO_STREAM_MUSIC); |
| } |
| if (effect->sessionId() == AUDIO_SESSION_OUTPUT_MIX) { |
| mAudioFlinger->onNonOffloadableGlobalEffectEnable(); |
| } |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::onEffectDisable() { |
| if (isOffloadOrMmap()) { |
| Mutex::Autolock _l(mLock); |
| broadcast_l(); |
| } |
| } |
| |
| sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(audio_session_t sessionId, |
| int effectId) |
| { |
| Mutex::Autolock _l(mLock); |
| return getEffect_l(sessionId, effectId); |
| } |
| |
| sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(audio_session_t sessionId, |
| int effectId) |
| { |
| sp<EffectChain> chain = getEffectChain_l(sessionId); |
| return chain != 0 ? chain->getEffectFromId_l(effectId) : 0; |
| } |
| |
| std::vector<int> AudioFlinger::ThreadBase::getEffectIds_l(audio_session_t sessionId) |
| { |
| sp<EffectChain> chain = getEffectChain_l(sessionId); |
| return chain != nullptr ? chain->getEffectIds() : std::vector<int>{}; |
| } |
| |
// PlaybackThread::addEffect_l() must be called with AudioFlinger::mLock and
// PlaybackThread::mLock held
// Attaches an already-constructed effect module to this thread, creating the
// session's effect chain if needed. A chain created here is removed again if
// adding the effect to it fails. Returns BAD_VALUE if the effect id is already
// present on the chain.
status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
{
    // check for existing effect chain with the requested audio session
    audio_session_t sessionId = effect->sessionId();
    sp<EffectChain> chain = getEffectChain_l(sessionId);
    bool chainCreated = false;

    ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
             "addEffect_l() on offloaded thread %p: effect %s does not support offload flags %#x",
             this, effect->desc().name, effect->desc().flags);

    if (chain == 0) {
        // create a new chain for this session
        ALOGV("addEffect_l() new effect chain for session %d", sessionId);
        chain = new EffectChain(this, sessionId);
        addEffectChain_l(chain);
        chain->setStrategy(getStrategyForSession_l(sessionId));
        chainCreated = true;
    }
    ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());

    // Duplicate ids indicate a caller bug (e.g. moving an effect onto a thread
    // that already hosts it).
    if (chain->getEffectFromId_l(effect->id()) != 0) {
        ALOGW("addEffect_l() %p effect %s already present in chain %p",
                this, effect->desc().name, chain.get());
        return BAD_VALUE;
    }

    effect->setOffloaded(mType == OFFLOAD, mId);

    status_t status = chain->addEffect_l(effect);
    if (status != NO_ERROR) {
        // Roll back the chain we created above; pre-existing chains are kept.
        if (chainCreated) {
            removeEffectChain_l(chain);
        }
        return status;
    }

    // Refresh the effect's view of this thread's routing and mode.
    effect->setDevices(outDeviceTypeAddrs());
    effect->setInputDevice(inDeviceTypeAddr());
    effect->setMode(mAudioFlinger->getMode());
    effect->setAudioSource(mAudioSource);

    return NO_ERROR;
}
| |
| void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect, bool release) { |
| |
| ALOGV("%s %p effect %p", __FUNCTION__, this, effect.get()); |
| effect_descriptor_t desc = effect->desc(); |
| if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) { |
| detachAuxEffect_l(effect->id()); |
| } |
| |
| sp<EffectChain> chain = effect->getCallback()->chain().promote(); |
| if (chain != 0) { |
| // remove effect chain if removing last effect |
| if (chain->removeEffect_l(effect, release) == 0) { |
| removeEffectChain_l(chain); |
| } |
| } else { |
| ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get()); |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::lockEffectChains_l( |
| Vector< sp<AudioFlinger::EffectChain> >& effectChains) |
| { |
| effectChains = mEffectChains; |
| for (size_t i = 0; i < mEffectChains.size(); i++) { |
| mEffectChains[i]->lock(); |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::unlockEffectChains( |
| const Vector< sp<AudioFlinger::EffectChain> >& effectChains) |
| { |
| for (size_t i = 0; i < effectChains.size(); i++) { |
| effectChains[i]->unlock(); |
| } |
| } |
| |
| sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(audio_session_t sessionId) |
| { |
| Mutex::Autolock _l(mLock); |
| return getEffectChain_l(sessionId); |
| } |
| |
| sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(audio_session_t sessionId) |
| const |
| { |
| size_t size = mEffectChains.size(); |
| for (size_t i = 0; i < size; i++) { |
| if (mEffectChains[i]->sessionId() == sessionId) { |
| return mEffectChains[i]; |
| } |
| } |
| return 0; |
| } |
| |
| void AudioFlinger::ThreadBase::setMode(audio_mode_t mode) |
| { |
| Mutex::Autolock _l(mLock); |
| size_t size = mEffectChains.size(); |
| for (size_t i = 0; i < size; i++) { |
| mEffectChains[i]->setMode_l(mode); |
| } |
| } |
| |
| void AudioFlinger::ThreadBase::toAudioPortConfig(struct audio_port_config *config) |
| { |
| config->type = AUDIO_PORT_TYPE_MIX; |
| config->ext.mix.handle = mId; |
| config->sample_rate = mSampleRate; |
| config->format = mFormat; |
| config->channel_mask = mChannelMask; |
| config->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK| |
| AUDIO_PORT_CONFIG_FORMAT; |
| } |
| |
| void AudioFlinger::ThreadBase::systemReady() |
| { |
| Mutex::Autolock _l(mLock); |
| if (mSystemReady) { |
| return; |
| } |
| mSystemReady = true; |
| |
| for (size_t i = 0; i < mPendingConfigEvents.size(); i++) { |
| sendConfigEvent_l(mPendingConfigEvents.editItemAt(i)); |
| } |
| mPendingConfigEvents.clear(); |
| } |
| |
| template <typename T> |
| ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::add(const sp<T> &track) { |
| ssize_t index = mActiveTracks.indexOf(track); |
| if (index >= 0) { |
| ALOGW("ActiveTracks<T>::add track %p already there", track.get()); |
| return index; |
| } |
| logTrack("add", track); |
| mActiveTracksGeneration++; |
| mLatestActiveTrack = track; |
| ++mBatteryCounter[track->uid()].second; |
| mHasChanged = true; |
| return mActiveTracks.add(track); |
| } |
| |
| template <typename T> |
| ssize_t AudioFlinger::ThreadBase::ActiveTracks<T>::remove(const sp<T> &track) { |
| ssize_t index = mActiveTracks.remove(track); |
| if (index < 0) { |
| ALOGW("ActiveTracks<T>::remove nonexistent track %p", track.get()); |
| return index; |
| } |
| logTrack("remove", track); |
| mActiveTracksGeneration++; |
| --mBatteryCounter[track->uid()].second; |
| // mLatestActiveTrack is not cleared even if is the same as track. |
| mHasChanged = true; |
| #ifdef TEE_SINK |
| track->dumpTee(-1 /* fd */, "_REMOVE"); |
| #endif |
| track->logEndInterval(); // log to MediaMetrics |
| return index; |
| } |
| |
| template <typename T> |
| void AudioFlinger::ThreadBase::ActiveTracks<T>::clear() { |
| for (const sp<T> &track : mActiveTracks) { |
| BatteryNotifier::getInstance().noteStopAudio(track->uid()); |
| logTrack("clear", track); |
| } |
| mLastActiveTracksGeneration = mActiveTracksGeneration; |
| if (!mActiveTracks.empty()) { mHasChanged = true; } |
| mActiveTracks.clear(); |
| mLatestActiveTrack.clear(); |
| mBatteryCounter.clear(); |
| } |
| |
// Pushes the current set of active-track uids to the thread's wakelock (when
// the set changed since last call, or |force|), and reconciles the per-uid
// BatteryNotifier start/stop accounting with the battery counters.
template <typename T>
void AudioFlinger::ThreadBase::ActiveTracks<T>::updatePowerState(
        sp<ThreadBase> thread, bool force) {
    // Updates ActiveTracks client uids to the thread wakelock.
    if (mActiveTracksGeneration != mLastActiveTracksGeneration || force) {
        thread->updateWakeLockUids_l(getWakeLockUids());
        mLastActiveTracksGeneration = mActiveTracksGeneration;
    }

    // Updates BatteryNotifier uids
    // Counter semantics per uid: .second = number of currently active tracks,
    // .first = the count last reported to BatteryNotifier (0 = stopped).
    for (auto it = mBatteryCounter.begin(); it != mBatteryCounter.end();) {
        const uid_t uid = it->first;
        ssize_t &previous = it->second.first;
        ssize_t &current = it->second.second;
        if (current > 0) {
            // uid has active tracks: report "start" only on the 0 -> >0 edge.
            if (previous == 0) {
                BatteryNotifier::getInstance().noteStartAudio(uid);
            }
            previous = current;
            ++it;
        } else if (current == 0) {
            // uid has no active tracks: report "stop" on the >0 -> 0 edge,
            // then drop the entry entirely.
            if (previous > 0) {
                BatteryNotifier::getInstance().noteStopAudio(uid);
            }
            it = mBatteryCounter.erase(it); // std::map<> is stable on iterator erase.
        } else /* (current < 0) */ {
            // More removes than adds for a uid indicates corrupted bookkeeping.
            LOG_ALWAYS_FATAL("negative battery count %zd", current);
        }
    }
}
| |
| template <typename T> |
| bool AudioFlinger::ThreadBase::ActiveTracks<T>::readAndClearHasChanged() { |
| bool hasChanged = mHasChanged; |
| mHasChanged = false; |
| |
| for (const sp<T> &track : mActiveTracks) { |
| // Do not short-circuit as all hasChanged states must be reset |
| // as all the metadata are going to be sent |
| hasChanged |= track->readAndClearHasChanged(); |
| } |
| return hasChanged; |
| } |
| |
| template <typename T> |
| void AudioFlinger::ThreadBase::ActiveTracks<T>::logTrack( |
| const char *funcName, const sp<T> &track) const { |
| if (mLocalLog != nullptr) { |
| String8 result; |
| track->appendDump(result, false /* active */); |
| mLocalLog->log("AT::%-10s(%p) %s", funcName, track.get(), result.string()); |
| } |
| } |
| |
// Wakes the thread loop so it processes state changes immediately.
// Must be called with the thread's mLock held.
void AudioFlinger::ThreadBase::broadcast_l()
{
    // Thread could be blocked waiting for async
    // so signal it to handle state changes immediately
    // If threadLoop is currently unlocked a signal of mWaitWorkCV will
    // be lost so we also flag to prevent it blocking on mWaitWorkCV
    mSignalPending = true;   // must be set BEFORE broadcasting, see above
    mWaitWorkCV.broadcast();
}
| |
// Call only from threadLoop() or when it is idle.
// Do not call from high performance code as this may do binder rpc to the MediaMetrics service.
//
// Uploads this thread's configuration and accumulated performance statistics
// (I/O jitter, process time, timestamp jitter, latency) to MediaMetrics.
// Rate-limited to once per 12 hours unless 'force' is set.
void AudioFlinger::ThreadBase::sendStatistics(bool force)
{
    // Do not log if we have no stats.
    // We choose the timestamp verifier because it is the most likely item to be present.
    const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
    if (nstats == 0) {
        return;
    }

    // Don't log more frequently than once per 12 hours.
    // We use BOOTTIME to include suspend time.
    const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
    const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
    if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
        return;
    }

    // Record the new high-water marks before building the item.
    mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
    mLastRecordedTimeNs = timeNs;

    std::unique_ptr<mediametrics::Item> item(mediametrics::Item::create("audiothread"));

#define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.

    // thread configuration
    item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
    // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
    item->setCString(MM_PREFIX "type", threadTypeToString(mType));
    item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
    item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
    item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
    item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
    item->setCString(MM_PREFIX "outDevice", toString(outDeviceTypes()).c_str());
    item->setCString(MM_PREFIX "inDevice", toString(inDeviceType()).c_str());

    // thread statistics — only send a stat when it has at least one sample
    if (mIoJitterMs.getN() > 0) {
        item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
        item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
    }
    if (mProcessTimeMs.getN() > 0) {
        item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
        item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
    }
    const auto tsjitter = mTimestampVerifier.getJitterMs();
    if (tsjitter.getN() > 0) {
        item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
        item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
    }
    if (mLatencyMs.getN() > 0) {
        item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
        item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
    }

    item->selfrecord();
}
| |
| // ---------------------------------------------------------------------------- |
| // Playback |
| // ---------------------------------------------------------------------------- |
| |
// Constructs a playback thread bound to the given HAL output stream.
// Reads master volume/mute from AudioFlinger, output parameters from the HAL,
// and initializes per-stream-type volumes. Note: assumes the caller (AudioFlinger)
// holds its mLock — see comment below.
AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
                                             AudioStreamOut* output,
                                             audio_io_handle_t id,
                                             type_t type,
                                             bool systemReady)
    :   ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
        mNormalFrameCount(0), mSinkBuffer(NULL),
        mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
        mMixerBuffer(NULL),
        mMixerBufferSize(0),
        mMixerBufferFormat(AUDIO_FORMAT_INVALID),
        mMixerBufferValid(false),
        mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
        mEffectBuffer(NULL),
        mEffectBufferSize(0),
        mEffectBufferFormat(AUDIO_FORMAT_INVALID),
        mEffectBufferValid(false),
        mSuspended(0), mBytesWritten(0),
        mFramesWritten(0),
        mSuspendedFrames(0),
        mActiveTracks(&this->mLocalLog),
        // mStreamTypes[] initialized in constructor body
        mTracks(type == MIXER),
        mOutput(output),
        mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
        mMixerStatus(MIXER_IDLE),
        mMixerStatusIgnoringFastTracks(MIXER_IDLE),
        mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
        mBytesRemaining(0),
        mCurrentWriteLength(0),
        mUseAsyncWrite(false),
        mWriteAckSequence(0),
        mDrainSequence(0),
        mScreenState(AudioFlinger::mScreenState),
        // index 0 is reserved for normal mixer's submix
        mFastTrackAvailMask(((1 << FastMixerState::sMaxFastTracks) - 1) & ~1),
        mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
        mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
        mDownStreamPatch{}
{
    snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);

    // Assumes constructor is called by AudioFlinger with it's mLock held, but
    // it would be safer to explicitly pass initial masterVolume/masterMute as
    // parameter.
    //
    // If the HAL we are using has support for master volume or master mute,
    // then do not attenuate or mute during mixing (just leave the volume at 1.0
    // and the mute set to false).
    mMasterVolume = audioFlinger->masterVolume_l();
    mMasterMute = audioFlinger->masterMute_l();
    if (mOutput->audioHwDev) {
        if (mOutput->audioHwDev->canSetMasterVolume()) {
            mMasterVolume = 1.0;
        }

        if (mOutput->audioHwDev->canSetMasterMute()) {
            mMasterMute = false;
        }
        // MSD module detection enables timestamp correction by default below.
        mIsMsdDevice = strcmp(
                mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
    }

    readOutputParameters_l();

    // TODO: We may also match on address as well as device type for
    // AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
    if (type == MIXER || type == DIRECT || type == OFFLOAD) {
        // TODO: This property should be ensure that only contains one single device type.
        mTimestampCorrectedDevice = (audio_devices_t)property_get_int64(
                "audio.timestamp.corrected_output_device",
                (int64_t)(mIsMsdDevice ? AUDIO_DEVICE_OUT_BUS // turn on by default for MSD
                                       : AUDIO_DEVICE_NONE));
    }

    // Default every policy-visible stream type to silent; actual mute state
    // comes from AudioFlinger.
    for (int i = AUDIO_STREAM_MIN; i < AUDIO_STREAM_FOR_POLICY_CNT; ++i) {
        const audio_stream_type_t stream{static_cast<audio_stream_type_t>(i)};
        mStreamTypes[stream].volume = 0.0f;
        mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
    }
    // Audio patch and call assistant volume are always max
    mStreamTypes[AUDIO_STREAM_PATCH].volume = 1.0f;
    mStreamTypes[AUDIO_STREAM_PATCH].mute = false;
    mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].volume = 1.0f;
    mStreamTypes[AUDIO_STREAM_CALL_ASSISTANT].mute = false;
}
| |
// Releases the NBLog writer and the intermediate buffers allocated by
// readOutputParameters_l() / threadLoop (sink, mixer, effect).
AudioFlinger::PlaybackThread::~PlaybackThread()
{
    mAudioFlinger->unregisterWriter(mNBLogWriter);
    free(mSinkBuffer);
    free(mMixerBuffer);
    free(mEffectBuffer);
}
| |
| // Thread virtuals |
| |
| void AudioFlinger::PlaybackThread::onFirstRef() |
| { |
| if (!isStreamInitialized()) { |
| ALOGE("The stream is not open yet"); // This should not happen. |
| } else { |
| // setEventCallback will need a strong pointer as a parameter. Calling it |
| // here instead of constructor of PlaybackThread so that the onFirstRef |
| // callback would not be made on an incompletely constructed object. |
| if (mOutput->stream->setEventCallback(this) != OK) { |
| ALOGD("Failed to add event callback"); |
| } |
| } |
| run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO); |
| } |
| |
| // ThreadBase virtuals |
| void AudioFlinger::PlaybackThread::preExit() |
| { |
| ALOGV(" preExit()"); |
| // FIXME this is using hard-coded strings but in the future, this functionality will be |
| // converted to use audio HAL extensions required to support tunneling |
| status_t result = mOutput->stream->setParameters(String8("exiting=1")); |
| ALOGE_IF(result != OK, "Error when setting parameters on exit: %d", result); |
| } |
| |
// Dumps stream volumes, fast-mixer underrun counters, and per-track state to fd.
// Must be called with the thread's mLock held (hence the _l suffix).
void AudioFlinger::PlaybackThread::dumpTracks_l(int fd, const Vector<String16>& args __unused)
{
    String8 result;

    result.appendFormat("  Stream volumes in dB: ");
    for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
        const stream_type_t *st = &mStreamTypes[i];
        if (i > 0) {
            result.appendFormat(", ");
        }
        // 20*log10 converts linear volume to dB; "M" marks a muted stream.
        result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
        if (st->mute) {
            result.append("M");
        }
    }
    result.append("\n");
    write(fd, result.string(), result.length());
    result.clear();

    // These values are "raw"; they will wrap around.  See prepareTracks_l() for a better way.
    FastTrackUnderruns underruns = getFastTrackUnderruns(0);
    dprintf(fd, "  Normal mixer raw underrun counters: partial=%u empty=%u\n",
            underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);

    size_t numtracks = mTracks.size();
    size_t numactive = mActiveTracks.size();
    dprintf(fd, "  %zu Tracks", numtracks);
    size_t numactiveseen = 0;
    const char *prefix = "    ";
    if (numtracks) {
        dprintf(fd, " of which %zu are active\n", numactive);
        result.append(prefix);
        mTracks[0]->appendDumpHeader(result);
        for (size_t i = 0; i < numtracks; ++i) {
            sp<Track> track = mTracks[i];
            if (track != 0) {
                bool active = mActiveTracks.indexOf(track) >= 0;
                if (active) {
                    numactiveseen++;
                }
                result.append(prefix);
                track->appendDump(result, active);
            }
        }
    } else {
        result.append("\n");
    }
    // Sanity cross-check: any active track missing from mTracks is dumped separately.
    if (numactiveseen != numactive) {
        // some tracks in the active list were not in the tracks list
        result.append("  The following tracks are in the active list but"
                " not in the track list\n");
        result.append(prefix);
        mActiveTracks[0]->appendDumpHeader(result);
        for (size_t i = 0; i < numactive; ++i) {
            sp<Track> track = mActiveTracks[i];
            if (mTracks.indexOf(track) < 0) {
                result.append(prefix);
                track->appendDump(result, true /* active */);
            }
        }
    }

    write(fd, result.string(), result.size());
}
| |
// Dumps internal playback-thread state (volumes, buffers, write counters,
// HAL stream info) to fd. Must be called with the thread's mLock held.
void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
{
    dprintf(fd, "  Master volume: %f\n", mMasterVolume);
    dprintf(fd, "  Master mute: %s\n", mMasterMute ? "on" : "off");
    if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
        dprintf(fd, "  Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
                channelMaskToString(mHapticChannelMask, true /* output */).c_str());
    }
    dprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
    dprintf(fd, "  Total writes: %d\n", mNumWrites);
    dprintf(fd, "  Delayed writes: %d\n", mNumDelayedWrites);
    dprintf(fd, "  Blocked in write: %s\n", mInWrite ? "yes" : "no");
    dprintf(fd, "  Suspend count: %d\n", mSuspended);
    dprintf(fd, "  Sink buffer : %p\n", mSinkBuffer);
    dprintf(fd, "  Mixer buffer: %p\n", mMixerBuffer);
    dprintf(fd, "  Effect buffer: %p\n", mEffectBuffer);
    dprintf(fd, "  Fast track availMask=%#x\n", mFastTrackAvailMask);
    dprintf(fd, "  Standby delay ns=%lld\n", (long long)mStandbyDelayNs);
    AudioStreamOut *output = mOutput;
    audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
    dprintf(fd, "  AudioStreamOut: %p flags %#x (%s)\n",
            output, flags, toString(flags).c_str());
    dprintf(fd, "  Frames written: %lld\n", (long long)mFramesWritten);
    dprintf(fd, "  Suspended frames: %lld\n", (long long)mSuspendedFrames);
    if (mPipeSink.get() != nullptr) {
        dprintf(fd, "  PipeSink frames written: %lld\n", (long long)mPipeSink->framesWritten());
    }
    if (output != nullptr) {
        dprintf(fd, "  Hal stream dump:\n");
        (void)output->stream->dump(fd);
    }
}
| |
// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
//
// Creates a client Track on this thread after validating and adjusting the
// requested parameters. On success returns the track and sets *status to
// NO_ERROR; on failure returns a possibly-null track with *status set.
// In/out parameters: *pSampleRate (0 means thread rate), *pFrameCount,
// *pNotificationFrameCount and *flags are updated to the granted values.
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
        const sp<AudioFlinger::Client>& client,
        audio_stream_type_t streamType,
        const audio_attributes_t& attr,
        uint32_t *pSampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *pFrameCount,
        size_t *pNotificationFrameCount,
        uint32_t notificationsPerBuffer,
        float speed,
        const sp<IMemory>& sharedBuffer,
        audio_session_t sessionId,
        audio_output_flags_t *flags,
        pid_t creatorPid,
        const AttributionSourceState& attributionSource,
        pid_t tid,
        status_t *status,
        audio_port_handle_t portId,
        const sp<media::IAudioTrackCallback>& callback)
{
    size_t frameCount = *pFrameCount;
    size_t notificationFrameCount = *pNotificationFrameCount;
    sp<Track> track;
    status_t lStatus;
    audio_output_flags_t outputFlags = mOutput->flags;
    audio_output_flags_t requestedFlags = *flags;
    uint32_t sampleRate;

    // Reject a static (shared-memory) buffer that fails validation.
    if (sharedBuffer != 0 && checkIMemory(sharedBuffer) != NO_ERROR) {
        lStatus = BAD_VALUE;
        goto Exit;
    }

    // Sample rate 0 means "use the thread's native rate".
    if (*pSampleRate == 0) {
        *pSampleRate = mSampleRate;
    }
    sampleRate = *pSampleRate;

    // special case for FAST flag considered OK if fast mixer is present
    if (hasFastMixer()) {
        outputFlags = (audio_output_flags_t)(outputFlags | AUDIO_OUTPUT_FLAG_FAST);
    }

    // Check if requested flags are compatible with output stream flags
    if ((*flags & outputFlags) != *flags) {
        ALOGW("createTrack_l(): mismatch between requested flags (%08x) and output flags (%08x)",
                *flags, outputFlags);
        *flags = (audio_output_flags_t)(*flags & outputFlags);
    }

    // client expresses a preference for FAST, but we get the final say
    if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
      if (
            // PCM data
            audio_is_linear_pcm(format) &&
            // TODO: extract as a data library function that checks that a computationally
            // expensive downmixer is not required: isFastOutputChannelConversion()
            (channelMask == (mChannelMask | mHapticChannelMask) ||
                    mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
                    (channelMask == AUDIO_CHANNEL_OUT_MONO
                            /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
            // hardware sample rate
            (sampleRate == mSampleRate) &&
            // normal mixer has an associated fast mixer
            hasFastMixer() &&
            // there are sufficient fast track slots available
            (mFastTrackAvailMask != 0)
            // FIXME test that MixerThread for this fast track has a capable output HAL
            // FIXME add a permission test also?
        ) {
        // static tracks can have any nonzero framecount, streaming tracks check against minimum.
        if (sharedBuffer == 0) {
            // read the fast track multiplier property the first time it is needed
            int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
            if (ok != 0) {
                ALOGE("%s pthread_once failed: %d", __func__, ok);
            }
            frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
        }

        // check compatibility with audio effects.
        { // scope for mLock
            Mutex::Autolock _l(mLock);
            // Do not accept FAST flag if any effect chain on a relevant session
            // is incompatible with it.
            for (audio_session_t session : {
                    AUDIO_SESSION_DEVICE,
                    AUDIO_SESSION_OUTPUT_STAGE,
                    AUDIO_SESSION_OUTPUT_MIX,
                    sessionId,
                }) {
                sp<EffectChain> chain = getEffectChain_l(session);
                if (chain.get() != nullptr) {
                    audio_output_flags_t old = *flags;
                    chain->checkOutputFlagCompatibility(flags);
                    if (old != *flags) {
                        ALOGV("AUDIO_OUTPUT_FLAGS denied by effect, session=%d old=%#x new=%#x",
                                (int)session, (int)old, (int)*flags);
                    }
                }
            }
        }
        ALOGV_IF((*flags & AUDIO_OUTPUT_FLAG_FAST) != 0,
                 "AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
                 frameCount, mFrameCount);
      } else {
        ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
                "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
                "sampleRate=%u mSampleRate=%u "
                "hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
                sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
                audio_is_linear_pcm(format), channelMask, sampleRate,
                mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
        *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_FAST);
      }
    }

    // Resolve the effective frame count depending on format and buffer type.
    if (!audio_has_proportional_frames(format)) {
        if (sharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = sharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mNormalFrameCount;
        }
        if (notificationFrameCount != frameCount) {
            notificationFrameCount = frameCount;
        }
    } else if (sharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(format);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
        size_t frameSize = channelCount * audio_bytes_per_sample(format);
        if (channelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)sharedBuffer->unsecurePointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    sharedBuffer->unsecurePointer(), channelCount);
            lStatus = BAD_VALUE;
            goto Exit;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = sharedBuffer->size() / frameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks we try to respect the application's request for notifications per buffer.
        if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
            if (notificationsPerBuffer > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                if (notificationsPerBuffer > SIZE_MAX / mFrameCount) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                          notificationsPerBuffer, mFrameCount);
                } else {
                    minFrameCount = mFrameCount * notificationsPerBuffer;
                }
            }
        } else {
            // For normal PCM streaming tracks, update minimum frame count.
            // Buffer depth is forced to be at least 2 x the normal mixer frame count and
            // cover audio hardware latency.
            // This is probably too conservative, but legacy application code may depend on it.
            // If you change this calculation, also review the start threshold which is related.
            uint32_t latencyMs = latency_l();
            if (latencyMs == 0) {
                ALOGE("Error when retrieving output stream latency");
                lStatus = UNKNOWN_ERROR;
                goto Exit;
            }

            minFrameCount = AudioSystem::calculateMinFrameCount(latencyMs, mNormalFrameCount,
                                mSampleRate, sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);

        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (sharedBuffer == 0 && audio_is_linear_pcm(format)) {
        size_t maxNotificationFrames;
        if (*flags & AUDIO_OUTPUT_FLAG_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = mFrameCount;
        } else {
            // Triple buffer the notification period for a triple buffered mixer period;
            // otherwise, double buffering for the notification period is fine.
            //
            // TODO: This should be moved to AudioTrack to modify the notification period
            // on AudioTrack::setBufferSizeInFrames() changes.
            const int nBuffering =
                    (uint64_t{frameCount} * mSampleRate)
                            / (uint64_t{mNormalFrameCount} * sampleRate) == 3 ? 3 : 2;

            maxNotificationFrames = frameCount / nBuffering;
            // If client requested a fast track but this was denied, then use the smaller maximum.
            if (requestedFlags & AUDIO_OUTPUT_FLAG_FAST) {
                size_t maxNotificationFramesFastDenied = FMS_20 * sampleRate / 1000;
                if (maxNotificationFrames > maxNotificationFramesFastDenied) {
                    maxNotificationFrames = maxNotificationFramesFastDenied;
                }
            }
        }
        if (notificationFrameCount == 0 || notificationFrameCount > maxNotificationFrames) {
            if (notificationFrameCount == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                    maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %zu to %zu for frameCount %zu",
                      notificationFrameCount, maxNotificationFrames, frameCount);
            }
            notificationFrameCount = maxNotificationFrames;
        }
    }

    // Report the granted counts back to the caller.
    *pFrameCount = frameCount;
    *pNotificationFrameCount = notificationFrameCount;

    // Per-thread-type parameter validation: DIRECT/OFFLOAD require an exact
    // match with the output; mixing threads require linear PCM and a bounded rate.
    switch (mType) {

    case DIRECT:
        if (audio_is_linear_pcm(format)) { // TODO maybe use audio_has_proportional_frames()?
            if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
                ALOGE("createTrack_l() Bad parameter: sampleRate %u format %#x, channelMask 0x%08x "
                        "for output %p with format %#x",
                        sampleRate, format, channelMask, mOutput, mFormat);
                lStatus = BAD_VALUE;
                goto Exit;
            }
        }
        break;

    case OFFLOAD:
        if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
            ALOGE("createTrack_l() Bad parameter: sampleRate %d format %#x, channelMask 0x%08x \""
                    "for output %p with format %#x",
                    sampleRate, format, channelMask, mOutput, mFormat);
            lStatus = BAD_VALUE;
            goto Exit;
        }
        break;

    default:
        if (!audio_is_linear_pcm(format)) {
                ALOGE("createTrack_l() Bad parameter: format %#x \""
                        "for output %p with format %#x",
                        format, mOutput, mFormat);
                lStatus = BAD_VALUE;
                goto Exit;
        }
        if (sampleRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
            ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
            lStatus = BAD_VALUE;
            goto Exit;
        }
        break;

    }

    lStatus = initCheck();
    if (lStatus != NO_ERROR) {
        ALOGE("createTrack_l() audio driver not initialized");
        goto Exit;
    }

    { // scope for mLock
        Mutex::Autolock _l(mLock);

        // all tracks in same audio session must share the same routing strategy otherwise
        // conflicts will happen when tracks are moved from one output to another by audio policy
        // manager
        product_strategy_t strategy = AudioSystem::getStrategyForStream(streamType);
        for (size_t i = 0; i < mTracks.size(); ++i) {
            sp<Track> t = mTracks[i];
            if (t != 0 && t->isExternalTrack()) {
                product_strategy_t actual = AudioSystem::getStrategyForStream(t->streamType());
                if (sessionId == t->sessionId() && strategy != actual) {
                    ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
                            strategy, actual);
                    lStatus = BAD_VALUE;
                    goto Exit;
                }
            }
        }

        // Set DIRECT flag if current thread is DirectOutputThread. This can
        // happen when the playback is rerouted to direct output thread by
        // dynamic audio policy.
        // Do NOT report the flag changes back to client, since the client
        // doesn't explicitly request a direct flag.
        audio_output_flags_t trackFlags = *flags;
        if (mType == DIRECT) {
            trackFlags = static_cast<audio_output_flags_t>(trackFlags | AUDIO_OUTPUT_FLAG_DIRECT);
        }

        track = new Track(this, client, streamType, attr, sampleRate, format,
                          channelMask, frameCount,
                          nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
                          sessionId, creatorPid, attributionSource, trackFlags,
                          TrackBase::TYPE_DEFAULT, portId, SIZE_MAX /*frameCountToBeReady*/, speed);

        lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
        if (lStatus != NO_ERROR) {
            ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
            // track must be cleared from the caller as the caller has the AF lock
            goto Exit;
        }
        mTracks.add(track);
        {
            // Register the client callback under its own lock.
            Mutex::Autolock _atCbL(mAudioTrackCbLock);
            if (callback.get() != nullptr) {
                mAudioTrackCallbacks.emplace(track, callback);
            }
        }

        // Route the track's output into any effect chain on its session.
        sp<EffectChain> chain = getEffectChain_l(sessionId);
        if (chain != 0) {
            ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
            track->setMainBuffer(chain->inBuffer());
            chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
            chain->incTrackCnt();
        }

        if ((*flags & AUDIO_OUTPUT_FLAG_FAST) && (tid != -1)) {
            pid_t callingPid = IPCThreadState::self()->getCallingPid();
            // we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
            // so ask activity manager to do this on our behalf
            sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp, true /*forApp*/);
        }
    }

    lStatus = NO_ERROR;

Exit:
    *status = lStatus;
    return track;
}
| |
| template<typename T> |
| ssize_t AudioFlinger::PlaybackThread::Tracks<T>::remove(const sp<T> &track) |
| { |
| const int trackId = track->id(); |
| const ssize_t index = mTracks.remove(track); |
| if (index >= 0) { |
| if (mSaveDeletedTrackIds) { |
| // We can't directly access mAudioMixer since the caller may be outside of threadLoop. |
| // Instead, we add to mDeletedTrackIds which is solely used for mAudioMixer update, |
| // to be handled when MixerThread::prepareTracks_l() next changes mAudioMixer. |
| mDeletedTrackIds.emplace(trackId); |
| } |
| } |
| return index; |
| } |
| |
// Returns the given HAL latency unchanged; hook point for latency adjustment
// (this base implementation applies no correction).
uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
{
    return latency;
}
| |
// Thread-safe wrapper: acquires mLock and delegates to latency_l().
uint32_t AudioFlinger::PlaybackThread::latency() const
{
    Mutex::Autolock _l(mLock);
    return latency_l();
}
| uint32_t AudioFlinger::PlaybackThread::latency_l() const |
| { |
| uint32_t latency; |
| if (initCheck() == NO_ERROR && mOutput->stream->getLatency(&latency) == OK) { |
| return correctLatency_l(latency); |
| } |
| return 0; |
| } |
| |
| void AudioFlinger::PlaybackThread::setMasterVolume(float value) |
| { |
| Mutex::Autolock _l(mLock); |
| // Don't apply master volume in SW if our HAL can do it for us. |
| if (mOutput && mOutput->audioHwDev && |
| mOutput->audioHwDev->canSetMasterVolume()) { |
| mMasterVolume = 1.0; |
| } else { |
| mMasterVolume = value; |
| } |
| } |
| |
// Stores the master balance atomically — no mLock required.
void AudioFlinger::PlaybackThread::setMasterBalance(float balance)
{
    mMasterBalance.store(balance);
}
| |
| void AudioFlinger::PlaybackThread::setMasterMute(bool muted) |
| { |
| if (isDuplicating()) { |
| return; |
| } |
| Mutex::Autolock _l(mLock); |
| // Don't apply master mute in SW if our HAL can do it for us. |
| if (mOutput && mOutput->audioHwDev && |
| mOutput->audioHwDev->canSetMasterMute()) { |
| mMasterMute = false; |
| } else { |
| mMasterMute = muted; |
| } |
| } |
| |
// Sets the volume for one stream type and wakes the thread loop so the
// change is picked up promptly.
void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
{
    Mutex::Autolock _l(mLock);
    mStreamTypes[stream].volume = value;
    broadcast_l();
}
| |
// Sets the mute state for one stream type and wakes the thread loop so the
// change is picked up promptly.
void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
{
    Mutex::Autolock _l(mLock);
    mStreamTypes[stream].mute = muted;
    broadcast_l();
}
| |
// Returns the current volume for one stream type (thread-safe read).
float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
{
    Mutex::Autolock _l(mLock);
    return mStreamTypes[stream].volume;
}
| |
// Forwards left/right volume directly to the HAL output stream.
// Must be called with the thread's mLock held. Return value of the HAL
// call is intentionally ignored.
void AudioFlinger::PlaybackThread::setVolumeForOutput_l(float left, float right) const
{
    mOutput->stream->setVolume(left, right);
}
| |
// addTrack_l() must be called with ThreadBase::mLock held
//
// Moves a track into the active list, notifying audio policy for external
// tracks and handling haptic-playback arbitration. Returns NO_ERROR when the
// track was newly activated, ALREADY_EXISTS if it was active already, or an
// error if audio policy rejects the start. NOTE: mLock is temporarily
// released around binder calls (startOutput / external vibration) — the
// track state is re-validated after reacquiring it.
status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
    status_t status = ALREADY_EXISTS;

    if (mActiveTracks.indexOf(track) < 0) {
        // the track is newly added, make sure it fills up all its
        // buffers before playing. This is to ensure the client will
        // effectively get the latency it requested.
        if (track->isExternalTrack()) {
            TrackBase::track_state state = track->mState;
            // Release the lock for the audio policy binder call.
            mLock.unlock();
            status = AudioSystem::startOutput(track->portId());
            mLock.lock();
            // abort track was stopped/paused while we released the lock
            if (state != track->mState) {
                if (status == NO_ERROR) {
                    // Undo the successful start since we will not proceed.
                    mLock.unlock();
                    AudioSystem::stopOutput(track->portId());
                    mLock.lock();
                }
                return INVALID_OPERATION;
            }
            // abort if start is rejected by audio policy manager
            if (status != NO_ERROR) {
                return PERMISSION_DENIED;
            }
#ifdef ADD_BATTERY_DATA
            // to track the speaker usage
            addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
#endif
            sendIoConfigEvent_l(AUDIO_CLIENT_STARTED, track->creatorPid(), track->portId());
        }

        // set retry count for buffer fill
        if (track->isOffloaded()) {
            if (track->isStopping_1()) {
                track->mRetryCount = kMaxTrackStopRetriesOffload;
            } else {
                track->mRetryCount = kMaxTrackStartupRetriesOffload;
            }
            track->mFillingUpStatus = mStandby ? Track::FS_FILLING : Track::FS_FILLED;
        } else {
            track->mRetryCount = kMaxTrackStartupRetries;
            track->mFillingUpStatus =
                    track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
        }

        sp<EffectChain> chain = getEffectChain_l(track->sessionId());
        if (mHapticChannelMask != AUDIO_CHANNEL_NONE
                && ((track->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
                        || (chain != nullptr && chain->containsHapticGeneratingEffect_l()))) {
            // Unlock due to VibratorService will lock for this call and will
            // call Tracks.mute/unmute which also require thread's lock.
            mLock.unlock();
            const int intensity = AudioFlinger::onExternalVibrationStart(
                    track->getExternalVibration());
            mLock.lock();
            track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
            // Haptic playback should be enabled by vibrator service.
            if (track->getHapticPlaybackEnabled()) {
                // Disable haptic playback of all active track to ensure only
                // one track playing haptic if current track should play haptic.
                for (const auto &t : mActiveTracks) {
                    t->setHapticPlaybackEnabled(false);
                }
            }

            // Set haptic intensity for effect
            if (chain != nullptr) {
                chain->setHapticIntensity_l(track->id(), intensity);
            }
        }

        track->mResetDone = false;
        track->resetPresentationComplete();
        mActiveTracks.add(track);
        if (chain != 0) {
            ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
                    track->sessionId());
            chain->incActiveTrackCnt();
        }

        track->logBeginInterval(patchSinksToString(&mPatch)); // log to MediaMetrics
        status = NO_ERROR;
    }

    onAddNewTrack_l();
    return status;
}
| |
| bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track) |
| { |
| track->terminate(); |
| // active tracks are removed by threadLoop() |
| bool trackActive = (mActiveTracks.indexOf(track) >= 0); |
| track->mState = TrackBase::STOPPED; |
| if (!trackActive) { |
| removeTrack_l(track); |
| } else if (track->isFastTrack() || track->isOffloaded() || track->isDirect()) { |
| track->mState = TrackBase::STOPPING_1; |
| } |
| |
| return trackActive; |
| } |
| |
| void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track) |
| { |
| track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE); |
| |
| String8 result; |
| track->appendDump(result, false /* active */); |
| mLocalLog.log("removeTrack_l (%p) %s", track.get(), result.string()); |
| |
| mTracks.remove(track); |
| { |
| Mutex::Autolock _atCbL(mAudioTrackCbLock); |
| mAudioTrackCallbacks.erase(track); |
| } |
| if (track->isFastTrack()) { |
| int index = track->mFastIndex; |
| ALOG_ASSERT(0 < index && index < (int)FastMixerState::sMaxFastTracks); |
| ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index))); |
| mFastTrackAvailMask |= 1 << index; |
| // redundant as track is about to be destroyed, for dumpsys only |
| track->mFastIndex = -1; |
| } |
| sp<EffectChain> chain = getEffectChain_l(track->sessionId()); |
| if (chain != 0) { |
| chain->decTrackCnt(); |
| } |
| } |
| |
| String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys) |
| { |
| Mutex::Autolock _l(mLock); |
| String8 out_s8; |
| if (initCheck() == NO_ERROR && mOutput->stream->getParameters(keys, &out_s8) == OK) { |
| return out_s8; |
| } |
| return String8(); |
| } |
| |
| status_t AudioFlinger::DirectOutputThread::selectPresentation(int presentationId, int programId) { |
| Mutex::Autolock _l(mLock); |
| if (!isStreamInitialized()) { |
| return NO_INIT; |
| } |
| return mOutput->stream->selectPresentation(presentationId, programId); |
| } |
| |
| void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid, |
| audio_port_handle_t portId) { |
| sp<AudioIoDescriptor> desc = new AudioIoDescriptor(); |
| ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event); |
| |
| desc->mIoHandle = mId; |
| struct audio_patch patch = mPatch; |
| if (isMsdDevice()) { |
| patch = mDownStreamPatch; |
| } |
| |
| switch (event) { |
| case AUDIO_OUTPUT_OPENED: |
| case AUDIO_OUTPUT_REGISTERED: |
| case AUDIO_OUTPUT_CONFIG_CHANGED: |
| desc->mPatch = patch; |
| desc->mChannelMask = mChannelMask; |
| desc->mSamplingRate = mSampleRate; |
| desc->mFormat = mFormat; |
| desc->mFrameCount = mNormalFrameCount; // FIXME see |
| // AudioFlinger::frameCount(audio_io_handle_t) |
| desc->mFrameCountHAL = mFrameCount; |
| desc->mLatency = latency_l(); |
| break; |
| case AUDIO_CLIENT_STARTED: |
| desc->mPatch = patch; |
| desc->mPortId = portId; |
| break; |
| case AUDIO_OUTPUT_CLOSED: |
| default: |
| break; |
| } |
| mAudioFlinger->ioConfigChanged(event, desc, pid); |
| } |
| |
// Stream callback: an async write completed; clear the write-blocked state
// on the callback thread so the playback loop can resume writing.
void AudioFlinger::PlaybackThread::onWriteReady()
{
    mCallbackThread->resetWriteBlocked();
}
| |
// Stream callback: the HAL finished draining; clear the draining state on
// the callback thread so the playback loop can proceed.
void AudioFlinger::PlaybackThread::onDrainReady()
{
    mCallbackThread->resetDraining();
}
| |
// Stream callback: the HAL reported an async error; flag it on the callback
// thread for the playback loop to handle.
void AudioFlinger::PlaybackThread::onError()
{
    mCallbackThread->setAsyncError();
}
| |
| void AudioFlinger::PlaybackThread::onCodecFormatChanged( |
| const std::basic_string<uint8_t>& metadataBs) |
| { |
| std::thread([this, metadataBs]() { |
| audio_utils::metadata::Data metadata = |
| audio_utils::metadata::dataFromByteString(metadataBs); |
| if (metadata.empty()) { |
| ALOGW("Can not transform the buffer to audio metadata, %s, %d", |
| reinterpret_cast<char*>(const_cast<uint8_t*>(metadataBs.data())), |
| (int)metadataBs.size()); |
| return; |
| } |
| |
| audio_utils::metadata::ByteString metaDataStr = |
| audio_utils::metadata::byteStringFromData(metadata); |
| std::vector metadataVec(metaDataStr.begin(), metaDataStr.end()); |
| Mutex::Autolock _l(mAudioTrackCbLock); |
| for (const auto& callbackPair : mAudioTrackCallbacks) { |
| callbackPair.second->onCodecFormatChanged(metadataVec); |
| } |
| }).detach(); |
| } |
| |
| void AudioFlinger::PlaybackThread::resetWriteBlocked(uint32_t sequence) |
| { |
| Mutex::Autolock _l(mLock); |
| // reject out of sequence requests |
| if ((mWriteAckSequence & 1) && (sequence == mWriteAckSequence)) { |
| mWriteAckSequence &= ~1; |
| mWaitWorkCV.signal(); |
| } |
| } |
| |
| void AudioFlinger::PlaybackThread::resetDraining(uint32_t sequence) |
| { |
| Mutex::Autolock _l(mLock); |
| // reject out of sequence requests |
| if ((mDrainSequence & 1) && (sequence == mDrainSequence)) { |
| // Register discontinuity when HW drain is completed because that can cause |
| // the timestamp frame position to reset to 0 for direct and offload threads. |
| // (Out of sequence requests are ignored, since the discontinuity would be handled |
| // elsewhere, e.g. in flush). |
| mTimestampVerifier.discontinuity(mTimestampVerifier.DISCONTINUITY_MODE_ZERO); |
| mDrainSequence &= ~1; |
| mWaitWorkCV.signal(); |
| } |
| } |
| |
| void AudioFlinger::PlaybackThread::readOutputParameters_l() |
| { |
| // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL |
| const audio_config_base_t audioConfig = mOutput->getAudioProperties(); |
| mSampleRate = audioConfig.sample_rate; |
| mChannelMask = audioConfig.channel_mask; |
| if (!audio_is_output_channel(mChannelMask)) { |
| LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask); |
| } |
| if ((mType == MIXER || mType == DUPLICATING) |
| && !isValidPcmSinkChannelMask(mChannelMask)) { |
| LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output", |
| mChannelMask); |
| } |
| mChannelCount = audio_channel_count_from_out_mask(mChannelMask); |
| mBalance.setChannelMask(mChannelMask); |
| |
| // Get actual HAL format. |
| status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat); |
| LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result); |
| // Get format from the shim, which will be different than the HAL format |
| // if playing compressed audio over HDMI passthrough. |
| mFormat = audioConfig.format; |
| if (!audio_is_valid_format(mFormat)) { |
| LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat); |
| } |
| if ((mType == MIXER || mType == DUPLICATING) |
| && !isValidPcmSinkFormat(mFormat)) { |
| LOG_FATAL("HAL format %#x not supported for mixed output", |
| mFormat); |
| } |
| mFrameSize = mOutput->getFrameSize(); |
| result = mOutput->stream->getBufferSize(&mBufferSize); |
| LOG_ALWAYS_FATAL_IF(result != OK, |
| "Error when retrieving output stream buffer size: %d", result); |
| mFrameCount = mBufferSize / mFrameSize |