/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0
#define ATRACE_TAG ATRACE_TAG_AUDIO
#include "Configuration.h"
#include <math.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <cutils/properties.h>
#include <media/AudioParameter.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <private/media/AudioTrackShared.h>
#include <hardware/audio.h>
#include <audio_effects/effect_ns.h>
#include <audio_effects/effect_aec.h>
#include <audio_utils/primitives.h>
// NBAIO implementations
#include <media/nbaio/AudioStreamOutSink.h>
#include <media/nbaio/MonoPipe.h>
#include <media/nbaio/MonoPipeReader.h>
#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>
#include <media/nbaio/SourceAudioBufferProvider.h>
#include <powermanager/PowerManager.h>
#include <common_time/cc_helper.h>
#include <common_time/local_clock.h>
#include "AudioFlinger.h"
#include "AudioMixer.h"
#include "FastMixer.h"
#include "ServiceUtilities.h"
#include "SchedulingPolicyService.h"
#ifdef ADD_BATTERY_DATA
#include <media/IMediaPlayerService.h>
#include <media/IMediaDeathNotifier.h>
#endif
#ifdef DEBUG_CPU_USAGE
#include <cpustats/CentralTendencyStatistics.h>
#include <cpustats/ThreadCpuUsage.h>
#endif
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging messages. In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all ALOGVs as well. Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on. Do not uncomment the #define below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif
namespace android {
// retry counts for buffer fill timeout
// 50 * ~20msecs = 1 second
static const int8_t kMaxTrackRetries = 50;
static const int8_t kMaxTrackStartupRetries = 50;
// allow fewer retry attempts on direct output threads.
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
static const int8_t kMaxTrackRetriesDirect = 2;
// don't warn about blocked writes or record buffer overflows more often than this
static const nsecs_t kWarningThrottleNs = seconds(5);
// RecordThread loop sleep time upon application overrun or audio HAL read error
static const int kRecordThreadSleepUs = 5000;
// maximum time to wait for setParameters to complete
static const nsecs_t kSetParametersTimeoutNs = seconds(2);
// minimum sleep time for the mixer thread loop when tracks are active but in underrun
static const uint32_t kMinThreadSleepTimeUs = 5000;
// maximum divider applied to the active sleep time in the mixer thread loop
static const uint32_t kMaxThreadSleepTimeShift = 2;
// minimum normal mix buffer size, expressed in milliseconds rather than frames
static const uint32_t kMinNormalMixBufferSizeMs = 20;
// maximum normal mix buffer size
static const uint32_t kMaxNormalMixBufferSizeMs = 24;
// Offloaded output thread standby delay: allows track transition without going to standby
static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
// Whether to use fast mixer
static const enum {
FastMixer_Never, // never initialize or use: for debugging only
FastMixer_Always, // always initialize and use, even if not needed: for debugging only
// normal mixer multiplier is 1
FastMixer_Static, // initialize if needed, then use all the time if initialized,
// multiplier is calculated based on min & max normal mixer buffer size
FastMixer_Dynamic, // initialize if needed, then use dynamically depending on track load,
// multiplier is calculated based on min & max normal mixer buffer size
// FIXME for FastMixer_Dynamic:
// Supporting this option will require fixing HALs that can't handle large writes.
// For example, one HAL implementation returns an error from a large write,
// and another HAL implementation corrupts memory, possibly in the sample rate converter.
// We could either fix the HAL implementations, or provide a wrapper that breaks
// up large writes into smaller ones, and the wrapper would need to deal with the scheduler.
} kUseFastMixer = FastMixer_Static;
// Priorities for requestPriority
static const int kPriorityAudioApp = 2;
static const int kPriorityFastMixer = 3;
// IAudioFlinger::createTrack() reports back to client the total size of shared memory area
// for the track. The client then sub-divides this into smaller buffers for its use.
// Currently the client uses double-buffering by default, but doesn't tell us about it.
// So for now we just assume that the client is double-buffered.
// FIXME It would be better for client to tell AudioFlinger whether it wants double-buffering or
// N-buffering, so AudioFlinger could allocate the right amount of memory.
// See the client's minBufCount and mNotificationFramesAct calculations for details.
static const int kFastTrackMultiplier = 1;
// ----------------------------------------------------------------------------
#ifdef ADD_BATTERY_DATA
// Collects amplifier usage data for battery statistics
static void addBatteryData(uint32_t params) {
sp<IMediaPlayerService> service = IMediaDeathNotifier::getMediaPlayerService();
if (service == NULL) {
// getMediaPlayerService() has already logged the failure
return;
}
service->addBatteryData(params);
}
#endif
// ----------------------------------------------------------------------------
// CPU Stats
// ----------------------------------------------------------------------------
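// CpuStats samples the calling thread's CPU usage and periodically logs summary statistics.
// All of its members and the body of sample() are compiled in only when DEBUG_CPU_USAGE is
// defined; otherwise sample() is a no-op.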
class CpuStats {
public:
CpuStats();
void sample(const String8 &title);
#ifdef DEBUG_CPU_USAGE
private:
ThreadCpuUsage mCpuUsage; // instantaneous thread CPU usage in wall clock ns
CentralTendencyStatistics mWcStats; // statistics on thread CPU usage in wall clock ns
CentralTendencyStatistics mHzStats; // statistics on thread CPU usage in cycles
int mCpuNum; // thread's current CPU number
int mCpukHz; // frequency of thread's current CPU in kHz
#endif
};
CpuStats::CpuStats()
#ifdef DEBUG_CPU_USAGE
: mCpuNum(-1), mCpukHz(-1)
#endif
{
}
void CpuStats::sample(const String8 &title) {
#ifdef DEBUG_CPU_USAGE
// get current thread's delta CPU time in wall clock ns
double wcNs;
bool valid = mCpuUsage.sampleAndEnable(wcNs);
// record sample for wall clock statistics
if (valid) {
mWcStats.sample(wcNs);
}
// get the current CPU number
int cpuNum = sched_getcpu();
// get the current CPU frequency in kHz
int cpukHz = mCpuUsage.getCpukHz(cpuNum);
// check if either CPU number or frequency changed
if (cpuNum != mCpuNum || cpukHz != mCpukHz) {
mCpuNum = cpuNum;
mCpukHz = cpukHz;
// ignore sample for purposes of cycles
valid = false;
}
// if no change in CPU number or frequency, then record sample for cycle statistics
if (valid && mCpukHz > 0) {
double cycles = wcNs * cpukHz * 0.000001;
mHzStats.sample(cycles);
}
unsigned n = mWcStats.n();
// mCpuUsage.elapsed() is expensive, so don't call it every loop
if ((n & 127) == 1) {
long long elapsed = mCpuUsage.elapsed();
if (elapsed >= DEBUG_CPU_USAGE * 1000000000LL) {
double perLoop = elapsed / (double) n;
double perLoop100 = perLoop * 0.01;
double perLoop1k = perLoop * 0.001;
double mean = mWcStats.mean();
double stddev = mWcStats.stddev();
double minimum = mWcStats.minimum();
double maximum = mWcStats.maximum();
double meanCycles = mHzStats.mean();
double stddevCycles = mHzStats.stddev();
double minCycles = mHzStats.minimum();
double maxCycles = mHzStats.maximum();
mCpuUsage.resetElapsed();
mWcStats.reset();
mHzStats.reset();
ALOGD("CPU usage for %s over past %.1f secs\n"
" (%u mixer loops at %.1f mean ms per loop):\n"
" us per mix loop: mean=%.0f stddev=%.0f min=%.0f max=%.0f\n"
" %% of wall: mean=%.1f stddev=%.1f min=%.1f max=%.1f\n"
" MHz: mean=%.1f, stddev=%.1f, min=%.1f max=%.1f",
title.string(),
elapsed * .000000001, n, perLoop * .000001,
mean * .001,
stddev * .001,
minimum * .001,
maximum * .001,
mean / perLoop100,
stddev / perLoop100,
minimum / perLoop100,
maximum / perLoop100,
meanCycles / perLoop1k,
stddevCycles / perLoop1k,
minCycles / perLoop1k,
maxCycles / perLoop1k);
}
}
#endif
};
// ----------------------------------------------------------------------------
// ThreadBase
// ----------------------------------------------------------------------------
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
: Thread(false /*canCallJava*/),
mType(type),
mAudioFlinger(audioFlinger),
// mSampleRate, mFrameCount, mChannelMask, mChannelCount, mFrameSize, and mFormat are
// set by PlaybackThread::readOutputParameters() or RecordThread::readInputParameters()
mParamStatus(NO_ERROR),
//FIXME: mStandby should be true here. Is this some kind of hack?
mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
// mName will be set by concrete (non-virtual) subclass
mDeathRecipient(new PMDeathRecipient(this))
{
}
AudioFlinger::ThreadBase::~ThreadBase()
{
// mConfigEvents should be empty, but just in case it isn't, free the memory it owns
for (size_t i = 0; i < mConfigEvents.size(); i++) {
delete mConfigEvents[i];
}
mConfigEvents.clear();
mParamCond.broadcast();
// do not lock the mutex in destructor
releaseWakeLock_l();
if (mPowerManager != 0) {
sp<IBinder> binder = mPowerManager->asBinder();
binder->unlinkToDeath(mDeathRecipient);
}
}
void AudioFlinger::ThreadBase::exit()
{
ALOGV("ThreadBase::exit");
// do any cleanup required for exit to succeed
preExit();
{
// This lock prevents the following race in thread (uniprocessor for illustration):
// if (!exitPending()) {
// // context switch from here to exit()
// // exit() calls requestExit(), which is what exitPending() observes
// // exit() calls signal(), which is dropped since no waiters
// // context switch back from exit() to here
// mWaitWorkCV.wait(...);
// // now thread is hung
// }
AutoMutex lock(mLock);
requestExit();
mWaitWorkCV.broadcast();
}
// When Thread::requestExitAndWait is made virtual and this method is renamed to
// "virtual status_t requestExitAndWait()", replace by "return Thread::requestExitAndWait();"
requestExitAndWait();
}
status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
{
status_t status;
ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
Mutex::Autolock _l(mLock);
mNewParameters.add(keyValuePairs);
mWaitWorkCV.signal();
// wait condition with timeout in case the thread loop has exited
// before the request could be processed
if (mParamCond.waitRelative(mLock, kSetParametersTimeoutNs) == NO_ERROR) {
status = mParamStatus;
mWaitWorkCV.signal();
} else {
status = TIMED_OUT;
}
return status;
}
void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
{
Mutex::Autolock _l(mLock);
sendIoConfigEvent_l(event, param);
}
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
{
IoConfigEvent *ioEvent = new IoConfigEvent(event, param);
mConfigEvents.add(static_cast<ConfigEvent *>(ioEvent));
ALOGV("sendIoConfigEvent() num events %d event %d, param %d", mConfigEvents.size(), event,
param);
mWaitWorkCV.signal();
}
// sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
{
PrioConfigEvent *prioEvent = new PrioConfigEvent(pid, tid, prio);
mConfigEvents.add(static_cast<ConfigEvent *>(prioEvent));
ALOGV("sendPrioConfigEvent_l() num events %d pid %d, tid %d prio %d",
mConfigEvents.size(), pid, tid, prio);
mWaitWorkCV.signal();
}
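// processConfigEvents() acquires and releases mLock internally, so it must be called without
// ThreadBase::mLock held. Queued events are handled one at a time, and mLock is dropped
// around each event to respect the AudioFlinger -> ThreadBase lock order.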
void AudioFlinger::ThreadBase::processConfigEvents()
{
mLock.lock();
while (!mConfigEvents.isEmpty()) {
ALOGV("processConfigEvents() remaining events %d", mConfigEvents.size());
ConfigEvent *event = mConfigEvents[0];
mConfigEvents.removeAt(0);
// release mLock before locking AudioFlinger mLock: lock order is always
// AudioFlinger then ThreadBase to avoid cross deadlock
mLock.unlock();
switch(event->type()) {
case CFG_EVENT_PRIO: {
PrioConfigEvent *prioEvent = static_cast<PrioConfigEvent *>(event);
// FIXME Need to understand why this has to be done asynchronously
int err = requestPriority(prioEvent->pid(), prioEvent->tid(), prioEvent->prio(),
true /*asynchronous*/);
if (err != 0) {
ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; "
"error %d",
prioEvent->prio(), prioEvent->pid(), prioEvent->tid(), err);
}
} break;
case CFG_EVENT_IO: {
IoConfigEvent *ioEvent = static_cast<IoConfigEvent *>(event);
mAudioFlinger->mLock.lock();
audioConfigChanged_l(ioEvent->event(), ioEvent->param());
mAudioFlinger->mLock.unlock();
} break;
default:
ALOGE("processConfigEvents() unknown event type %d", event->type());
break;
}
delete event;
mLock.lock();
}
mLock.unlock();
}
void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
bool locked = AudioFlinger::dumpTryLock(mLock);
if (!locked) {
snprintf(buffer, SIZE, "thread %p maybe dead locked\n", this);
write(fd, buffer, strlen(buffer));
}
snprintf(buffer, SIZE, "io handle: %d\n", mId);
result.append(buffer);
snprintf(buffer, SIZE, "TID: %d\n", getTid());
result.append(buffer);
snprintf(buffer, SIZE, "standby: %d\n", mStandby);
result.append(buffer);
snprintf(buffer, SIZE, "Sample rate: %u\n", mSampleRate);
result.append(buffer);
snprintf(buffer, SIZE, "HAL frame count: %d\n", mFrameCount);
result.append(buffer);
snprintf(buffer, SIZE, "Channel Count: %u\n", mChannelCount);
result.append(buffer);
snprintf(buffer, SIZE, "Channel Mask: 0x%08x\n", mChannelMask);
result.append(buffer);
snprintf(buffer, SIZE, "Format: %d\n", mFormat);
result.append(buffer);
snprintf(buffer, SIZE, "Frame size: %u\n", mFrameSize);
result.append(buffer);
snprintf(buffer, SIZE, "\nPending setParameters commands: \n");
result.append(buffer);
result.append(" Index Command");
for (size_t i = 0; i < mNewParameters.size(); ++i) {
snprintf(buffer, SIZE, "\n %02d ", i);
result.append(buffer);
result.append(mNewParameters[i]);
}
snprintf(buffer, SIZE, "\n\nPending config events: \n");
result.append(buffer);
for (size_t i = 0; i < mConfigEvents.size(); i++) {
mConfigEvents[i]->dump(buffer, SIZE);
result.append(buffer);
}
result.append("\n");
write(fd, result.string(), result.size());
if (locked) {
mLock.unlock();
}
}
void AudioFlinger::ThreadBase::dumpEffectChains(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, SIZE, "\n- %d Effect Chains:\n", mEffectChains.size());
write(fd, buffer, strlen(buffer));
for (size_t i = 0; i < mEffectChains.size(); ++i) {
sp<EffectChain> chain = mEffectChains[i];
if (chain != 0) {
chain->dump(fd, args);
}
}
}
void AudioFlinger::ThreadBase::acquireWakeLock(int uid)
{
Mutex::Autolock _l(mLock);
acquireWakeLock_l(uid);
}
String16 AudioFlinger::ThreadBase::getWakeLockTag()
{
switch (mType) {
case MIXER:
return String16("AudioMix");
case DIRECT:
return String16("AudioDirectOut");
case DUPLICATING:
return String16("AudioDup");
case RECORD:
return String16("AudioIn");
case OFFLOAD:
return String16("AudioOffload");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
}
}
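// acquireWakeLock_l() must be called with ThreadBase::mLock held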
void AudioFlinger::ThreadBase::acquireWakeLock_l(int uid)
{
getPowerManager_l();
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
status_t status;
if (uid >= 0) {
status = mPowerManager->acquireWakeLockWithUid(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
getWakeLockTag(),
String16("media"),
uid);
} else {
status = mPowerManager->acquireWakeLock(POWERMANAGER_PARTIAL_WAKE_LOCK,
binder,
getWakeLockTag(),
String16("media"));
}
if (status == NO_ERROR) {
mWakeLockToken = binder;
}
ALOGV("acquireWakeLock_l() %s status %d", mName, status);
}
}
void AudioFlinger::ThreadBase::releaseWakeLock()
{
Mutex::Autolock _l(mLock);
releaseWakeLock_l();
}
void AudioFlinger::ThreadBase::releaseWakeLock_l()
{
if (mWakeLockToken != 0) {
ALOGV("releaseWakeLock_l() %s", mName);
if (mPowerManager != 0) {
mPowerManager->releaseWakeLock(mWakeLockToken, 0);
}
mWakeLockToken.clear();
}
}
void AudioFlinger::ThreadBase::updateWakeLockUids(const SortedVector<int> &uids) {
Mutex::Autolock _l(mLock);
updateWakeLockUids_l(uids);
}
void AudioFlinger::ThreadBase::getPowerManager_l() {
if (mPowerManager == 0) {
// use checkService() to avoid blocking if power service is not up yet
sp<IBinder> binder =
defaultServiceManager()->checkService(String16("power"));
if (binder == 0) {
ALOGW("Thread %s cannot connect to the power manager service", mName);
} else {
mPowerManager = interface_cast<IPowerManager>(binder);
binder->linkToDeath(mDeathRecipient);
}
}
}
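// updateWakeLockUids_l() must be called with ThreadBase::mLock held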
void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
getPowerManager_l();
if (mWakeLockToken == NULL) {
ALOGE("no wake lock to update!");
return;
}
if (mPowerManager != 0) {
sp<IBinder> binder = new BBinder();
status_t status;
status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array());
ALOGV("acquireWakeLock_l() %s status %d", mName, status);
}
}
void AudioFlinger::ThreadBase::clearPowerManager()
{
Mutex::Autolock _l(mLock);
releaseWakeLock_l();
mPowerManager.clear();
}
void AudioFlinger::ThreadBase::PMDeathRecipient::binderDied(const wp<IBinder>& who)
{
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
thread->clearPowerManager();
}
ALOGW("power manager service died !!!");
}
void AudioFlinger::ThreadBase::setEffectSuspended(
const effect_uuid_t *type, bool suspend, int sessionId)
{
Mutex::Autolock _l(mLock);
setEffectSuspended_l(type, suspend, sessionId);
}
void AudioFlinger::ThreadBase::setEffectSuspended_l(
const effect_uuid_t *type, bool suspend, int sessionId)
{
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
if (type != NULL) {
chain->setEffectSuspended_l(type, suspend);
} else {
chain->setEffectSuspendedAll_l(suspend);
}
}
updateSuspendedSessions_l(type, suspend, sessionId);
}
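// checkSuspendOnAddEffectChain_l() is called with ThreadBase::mLock held when an effect chain
// is added, and re-applies any suspend requests previously recorded for that chain's session.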
void AudioFlinger::ThreadBase::checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain)
{
ssize_t index = mSuspendedSessions.indexOfKey(chain->sessionId());
if (index < 0) {
return;
}
const KeyedVector <int, sp<SuspendedSessionDesc> >& sessionEffects =
mSuspendedSessions.valueAt(index);
for (size_t i = 0; i < sessionEffects.size(); i++) {
sp<SuspendedSessionDesc> desc = sessionEffects.valueAt(i);
for (int j = 0; j < desc->mRefCount; j++) {
if (sessionEffects.keyAt(i) == EffectChain::kKeyForSuspendAll) {
chain->setEffectSuspendedAll_l(true);
} else {
ALOGV("checkSuspendOnAddEffectChain_l() suspending effects %08x",
desc->mType.timeLow);
chain->setEffectSuspended_l(&desc->mType, true);
}
}
}
}
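// updateSuspendedSessions_l() maintains the reference-counted table of suspend requests,
// keyed by session ID and then by effect type (or kKeyForSuspendAll), so that suspends can be
// re-applied when an effect chain is later re-created for the session.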
void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
bool suspend,
int sessionId)
{
ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
KeyedVector <int, sp<SuspendedSessionDesc> > sessionEffects;
if (suspend) {
if (index >= 0) {
sessionEffects = mSuspendedSessions.valueAt(index);
} else {
mSuspendedSessions.add(sessionId, sessionEffects);
}
} else {
if (index < 0) {
return;
}
sessionEffects = mSuspendedSessions.valueAt(index);
}
int key = EffectChain::kKeyForSuspendAll;
if (type != NULL) {
key = type->timeLow;
}
index = sessionEffects.indexOfKey(key);
sp<SuspendedSessionDesc> desc;
if (suspend) {
if (index >= 0) {
desc = sessionEffects.valueAt(index);
} else {
desc = new SuspendedSessionDesc();
if (type != NULL) {
desc->mType = *type;
}
sessionEffects.add(key, desc);
ALOGV("updateSuspendedSessions_l() suspend adding effect %08x", key);
}
desc->mRefCount++;
} else {
if (index < 0) {
return;
}
desc = sessionEffects.valueAt(index);
if (--desc->mRefCount == 0) {
ALOGV("updateSuspendedSessions_l() restore removing effect %08x", key);
sessionEffects.removeItemsAt(index);
if (sessionEffects.isEmpty()) {
ALOGV("updateSuspendedSessions_l() restore removing session %d",
sessionId);
mSuspendedSessions.removeItem(sessionId);
}
}
}
if (!sessionEffects.isEmpty()) {
mSuspendedSessions.replaceValueFor(sessionId, sessionEffects);
}
}
void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
bool enabled,
int sessionId)
{
Mutex::Autolock _l(mLock);
checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
}
void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
bool enabled,
int sessionId)
{
if (mType != RECORD) {
// suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
// another session. This gives priority to well-behaved effect control panels
// and applications not using global effects.
// Enabling post-processing in the AUDIO_SESSION_OUTPUT_STAGE session does not affect
// global effects
if ((sessionId != AUDIO_SESSION_OUTPUT_MIX) && (sessionId != AUDIO_SESSION_OUTPUT_STAGE)) {
setEffectSuspended_l(NULL, enabled, AUDIO_SESSION_OUTPUT_MIX);
}
}
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
chain->checkSuspendOnEffectEnabled(effect, enabled);
}
}
// ThreadBase::createEffect_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::EffectHandle> AudioFlinger::ThreadBase::createEffect_l(
const sp<AudioFlinger::Client>& client,
const sp<IEffectClient>& effectClient,
int32_t priority,
int sessionId,
effect_descriptor_t *desc,
int *enabled,
status_t *status
)
{
sp<EffectModule> effect;
sp<EffectHandle> handle;
status_t lStatus;
sp<EffectChain> chain;
bool chainCreated = false;
bool effectCreated = false;
bool effectRegistered = false;
lStatus = initCheck();
if (lStatus != NO_ERROR) {
ALOGW("createEffect_l() Audio driver not initialized.");
goto Exit;
}
// Allow global effects only on offloaded and mixer threads
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
switch (mType) {
case MIXER:
case OFFLOAD:
break;
case DIRECT:
case DUPLICATING:
case RECORD:
default:
ALOGW("createEffect_l() Cannot add global effect %s on thread %s", desc->name, mName);
lStatus = BAD_VALUE;
goto Exit;
}
}
// Only pre-processing effects are allowed on input threads, and pre-processing effects
// are allowed only on input threads
if ((mType == RECORD) != ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC)) {
ALOGW("createEffect_l() effect %s (flags %08x) created on wrong thread type %d",
desc->name, desc->flags, mType);
lStatus = BAD_VALUE;
goto Exit;
}
ALOGV("createEffect_l() thread %p effect %s on session %d", this, desc->name, sessionId);
{ // scope for mLock
Mutex::Autolock _l(mLock);
// check for existing effect chain with the requested audio session
chain = getEffectChain_l(sessionId);
if (chain == 0) {
// create a new chain for this session
ALOGV("createEffect_l() new effect chain for session %d", sessionId);
chain = new EffectChain(this, sessionId);
addEffectChain_l(chain);
chain->setStrategy(getStrategyForSession_l(sessionId));
chainCreated = true;
} else {
effect = chain->getEffectFromDesc_l(desc);
}
ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
if (effect == 0) {
int id = mAudioFlinger->nextUniqueId();
// Check CPU and memory usage
lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
if (lStatus != NO_ERROR) {
goto Exit;
}
effectRegistered = true;
// create a new effect module if none present in the chain
effect = new EffectModule(this, chain, desc, id, sessionId);
lStatus = effect->status();
if (lStatus != NO_ERROR) {
goto Exit;
}
effect->setOffloaded(mType == OFFLOAD, mId);
lStatus = chain->addEffect_l(effect);
if (lStatus != NO_ERROR) {
goto Exit;
}
effectCreated = true;
effect->setDevice(mOutDevice);
effect->setDevice(mInDevice);
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
}
// create effect handle and connect it to effect module
handle = new EffectHandle(effect, client, effectClient, priority);
lStatus = effect->addHandle(handle.get());
if (enabled != NULL) {
*enabled = (int)effect->isEnabled();
}
}
Exit:
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
Mutex::Autolock _l(mLock);
if (effectCreated) {
chain->removeEffect_l(effect);
}
if (effectRegistered) {
AudioSystem::unregisterEffect(effect->id());
}
if (chainCreated) {
removeEffectChain_l(chain);
}
handle.clear();
}
if (status != NULL) {
*status = lStatus;
}
return handle;
}
sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
{
Mutex::Autolock _l(mLock);
return getEffect_l(sessionId, effectId);
}
sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
{
sp<EffectChain> chain = getEffectChain_l(sessionId);
return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
}
// ThreadBase::addEffect_l() must be called with AudioFlinger::mLock and
// ThreadBase::mLock held
status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
{
// check for existing effect chain with the requested audio session
int sessionId = effect->sessionId();
sp<EffectChain> chain = getEffectChain_l(sessionId);
bool chainCreated = false;
ALOGD_IF((mType == OFFLOAD) && !effect->isOffloadable(),
"addEffect_l() on offloaded thread %p: effect %s does not support offload flags %x",
this, effect->desc().name, effect->desc().flags);
if (chain == 0) {
// create a new chain for this session
ALOGV("addEffect_l() new effect chain for session %d", sessionId);
chain = new EffectChain(this, sessionId);
addEffectChain_l(chain);
chain->setStrategy(getStrategyForSession_l(sessionId));
chainCreated = true;
}
ALOGV("addEffect_l() %p chain %p effect %p", this, chain.get(), effect.get());
if (chain->getEffectFromId_l(effect->id()) != 0) {
ALOGW("addEffect_l() %p effect %s already present in chain %p",
this, effect->desc().name, chain.get());
return BAD_VALUE;
}
effect->setOffloaded(mType == OFFLOAD, mId);
status_t status = chain->addEffect_l(effect);
if (status != NO_ERROR) {
if (chainCreated) {
removeEffectChain_l(chain);
}
return status;
}
effect->setDevice(mOutDevice);
effect->setDevice(mInDevice);
effect->setMode(mAudioFlinger->getMode());
effect->setAudioSource(mAudioSource);
return NO_ERROR;
}
void AudioFlinger::ThreadBase::removeEffect_l(const sp<EffectModule>& effect) {
ALOGV("removeEffect_l() %p effect %p", this, effect.get());
effect_descriptor_t desc = effect->desc();
if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
detachAuxEffect_l(effect->id());
}
sp<EffectChain> chain = effect->chain().promote();
if (chain != 0) {
// remove effect chain if removing last effect
if (chain->removeEffect_l(effect) == 0) {
removeEffectChain_l(chain);
}
} else {
ALOGW("removeEffect_l() %p cannot promote chain for effect %p", this, effect.get());
}
}
void AudioFlinger::ThreadBase::lockEffectChains_l(
Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
effectChains = mEffectChains;
for (size_t i = 0; i < mEffectChains.size(); i++) {
mEffectChains[i]->lock();
}
}
void AudioFlinger::ThreadBase::unlockEffectChains(
const Vector< sp<AudioFlinger::EffectChain> >& effectChains)
{
for (size_t i = 0; i < effectChains.size(); i++) {
effectChains[i]->unlock();
}
}
sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
{
Mutex::Autolock _l(mLock);
return getEffectChain_l(sessionId);
}
sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
{
size_t size = mEffectChains.size();
for (size_t i = 0; i < size; i++) {
if (mEffectChains[i]->sessionId() == sessionId) {
return mEffectChains[i];
}
}
return 0;
}
void AudioFlinger::ThreadBase::setMode(audio_mode_t mode)
{
Mutex::Autolock _l(mLock);
size_t size = mEffectChains.size();
for (size_t i = 0; i < size; i++) {
mEffectChains[i]->setMode_l(mode);
}
}
void AudioFlinger::ThreadBase::disconnectEffect(const sp<EffectModule>& effect,
EffectHandle *handle,
bool unpinIfLast) {
Mutex::Autolock _l(mLock);
ALOGV("disconnectEffect() %p effect %p", this, effect.get());
// delete the effect module if removing last handle on it
if (effect->removeHandle(handle) == 0) {
if (!effect->isPinned() || unpinIfLast) {
removeEffect_l(effect);
AudioSystem::unregisterEffect(effect->id());
}
}
}
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
AudioFlinger::PlaybackThread::PlaybackThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output,
audio_io_handle_t id,
audio_devices_t device,
type_t type)
: ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
mNormalFrameCount(0), mMixBuffer(NULL),
mAllocMixBuffer(NULL), mSuspended(0), mBytesWritten(0),
mActiveTracksGeneration(0),
// mStreamTypes[] initialized in constructor body
mOutput(output),
mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
mMixerStatusIgnoringFastTracks(MIXER_IDLE),
standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
mBytesRemaining(0),
mCurrentWriteLength(0),
mUseAsyncWrite(false),
mWriteAckSequence(0),
mDrainSequence(0),
mSignalPending(false),
mScreenState(AudioFlinger::mScreenState),
// index 0 is reserved for normal mixer's submix
mFastTrackAvailMask(((1 << FastMixerState::kMaxFastTracks) - 1) & ~1),
// mLatchD, mLatchQ,
mLatchDValid(false), mLatchQValid(false)
{
snprintf(mName, kNameLength, "AudioOut_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
// Assumes constructor is called by AudioFlinger with its mLock held, but
// it would be safer to explicitly pass initial masterVolume/masterMute as
// parameters.
//
// If the HAL we are using has support for master volume or master mute,
// then do not attenuate or mute during mixing (just leave the volume at 1.0
// and the mute set to false).
mMasterVolume = audioFlinger->masterVolume_l();
mMasterMute = audioFlinger->masterMute_l();
if (mOutput && mOutput->audioHwDev) {
if (mOutput->audioHwDev->canSetMasterVolume()) {
mMasterVolume = 1.0;
}
if (mOutput->audioHwDev->canSetMasterMute()) {
mMasterMute = false;
}
}
readOutputParameters();
// mStreamTypes[AUDIO_STREAM_CNT] is initialized by stream_type_t default constructor
// There is no AUDIO_STREAM_MIN, and ++ operator does not compile
for (audio_stream_type_t stream = (audio_stream_type_t) 0; stream < AUDIO_STREAM_CNT;
stream = (audio_stream_type_t) (stream + 1)) {
mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
}
// mStreamTypes[AUDIO_STREAM_CNT] exists but isn't explicitly initialized here,
// because mAudioFlinger doesn't have one to copy from
}
AudioFlinger::PlaybackThread::~PlaybackThread()
{
mAudioFlinger->unregisterWriter(mNBLogWriter);
delete [] mAllocMixBuffer;
}
void AudioFlinger::PlaybackThread::dump(int fd, const Vector<String16>& args)
{
dumpInternals(fd, args);
dumpTracks(fd, args);
dumpEffectChains(fd, args);
}
void AudioFlinger::PlaybackThread::dumpTracks(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
result.appendFormat("Output thread %p stream volumes in dB:\n ", this);
for (int i = 0; i < AUDIO_STREAM_CNT; ++i) {
const stream_type_t *st = &mStreamTypes[i];
if (i > 0) {
result.appendFormat(", ");
}
result.appendFormat("%d:%.2g", i, 20.0 * log10(st->volume));
if (st->mute) {
result.append("M");
}
}
result.append("\n");
write(fd, result.string(), result.length());
result.clear();
snprintf(buffer, SIZE, "Output thread %p tracks\n", this);
result.append(buffer);
Track::appendDumpHeader(result);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (track != 0) {
track->dump(buffer, SIZE);
result.append(buffer);
}
}
snprintf(buffer, SIZE, "Output thread %p active tracks\n", this);
result.append(buffer);
Track::appendDumpHeader(result);
for (size_t i = 0; i < mActiveTracks.size(); ++i) {
sp<Track> track = mActiveTracks[i].promote();
if (track != 0) {
track->dump(buffer, SIZE);
result.append(buffer);
}
}
write(fd, result.string(), result.size());
// These values are "raw"; they will wrap around. See prepareTracks_l() for a better way.
FastTrackUnderruns underruns = getFastTrackUnderruns(0);
fdprintf(fd, "Normal mixer raw underrun counters: partial=%u empty=%u\n",
underruns.mBitFields.mPartial, underruns.mBitFields.mEmpty);
}
void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, SIZE, "\nOutput thread %p internals\n", this);
result.append(buffer);
snprintf(buffer, SIZE, "Normal frame count: %d\n", mNormalFrameCount);
result.append(buffer);
snprintf(buffer, SIZE, "last write occurred (msecs): %llu\n",
ns2ms(systemTime() - mLastWriteTime));
result.append(buffer);
snprintf(buffer, SIZE, "total writes: %d\n", mNumWrites);
result.append(buffer);
snprintf(buffer, SIZE, "delayed writes: %d\n", mNumDelayedWrites);
result.append(buffer);
snprintf(buffer, SIZE, "blocked in write: %d\n", mInWrite);
result.append(buffer);
snprintf(buffer, SIZE, "suspend count: %d\n", mSuspended);
result.append(buffer);
snprintf(buffer, SIZE, "mix buffer : %p\n", mMixBuffer);
result.append(buffer);
write(fd, result.string(), result.size());
fdprintf(fd, "Fast track availMask=%#x\n", mFastTrackAvailMask);
dumpBase(fd, args);
}
// Thread virtuals
status_t AudioFlinger::PlaybackThread::readyToRun()
{
status_t status = initCheck();
if (status == NO_ERROR) {
ALOGI("AudioFlinger's thread %p ready to run", this);
} else {
ALOGE("No working audio driver found.");
}
return status;
}
void AudioFlinger::PlaybackThread::onFirstRef()
{
run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
}
// ThreadBase virtuals
void AudioFlinger::PlaybackThread::preExit()
{
ALOGV(" preExit()");
// FIXME this is using hard-coded strings but in the future, this functionality will be
// converted to use audio HAL extensions required to support tunneling
mOutput->stream->common.set_parameters(&mOutput->stream->common, "exiting=1");
}
// PlaybackThread::createTrack_l() must be called with AudioFlinger::mLock held
sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
const sp<AudioFlinger::Client>& client,
audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
size_t frameCount,
const sp<IMemory>& sharedBuffer,
int sessionId,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int uid,
status_t *status)
{
sp<Track> track;
status_t lStatus;
bool isTimed = (*flags & IAudioFlinger::TRACK_TIMED) != 0;
// client expresses a preference for FAST, but we get the final say
if (*flags & IAudioFlinger::TRACK_FAST) {
if (
// not timed
(!isTimed) &&
// either of these use cases:
(
// use case 1: shared buffer with any frame count
(
(sharedBuffer != 0)
) ||
// use case 2: callback handler and frame count is default or at least as large as HAL
(
(tid != -1) &&
((frameCount == 0) ||
(frameCount >= (mFrameCount * kFastTrackMultiplier)))
)
) &&
// PCM data
audio_is_linear_pcm(format) &&
// mono or stereo
( (channelMask == AUDIO_CHANNEL_OUT_MONO) ||
(channelMask == AUDIO_CHANNEL_OUT_STEREO) ) &&
#ifndef FAST_TRACKS_AT_NON_NATIVE_SAMPLE_RATE
// hardware sample rate
(sampleRate == mSampleRate) &&
#endif
// normal mixer has an associated fast mixer
hasFastMixer() &&
// there are sufficient fast track slots available
(mFastTrackAvailMask != 0)
// FIXME test that MixerThread for this fast track has a capable output HAL
// FIXME add a permission test also?
) {
// if frameCount is not specified, then it defaults to the fast mixer (HAL) frame count
if (frameCount == 0) {
frameCount = mFrameCount * kFastTrackMultiplier;
}
ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
frameCount, mFrameCount);
} else {
ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: isTimed=%d sharedBuffer=%p frameCount=%d "
"mFrameCount=%d format=%d isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
isTimed, sharedBuffer.get(), frameCount, mFrameCount, format,
audio_is_linear_pcm(format),
channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
*flags &= ~IAudioFlinger::TRACK_FAST;
// For compatibility with AudioTrack calculation, buffer depth is forced
// to be at least 2 x the normal mixer frame count and cover audio hardware latency.
// This is probably too conservative, but legacy application code may depend on it.
// If you change this calculation, also review the start threshold which is related.
uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
if (minBufCount < 2) {
minBufCount = 2;
}
size_t minFrameCount = mNormalFrameCount * minBufCount;
if (frameCount < minFrameCount) {
frameCount = minFrameCount;
}
}
}
if (mType == DIRECT) {
if ((format & AUDIO_FORMAT_MAIN_MASK) == AUDIO_FORMAT_PCM) {
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
ALOGE("createTrack_l() Bad parameter: sampleRate %u format %d, channelMask 0x%08x "
"for output %p with format %d",
sampleRate, format, channelMask, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
}
} else if (mType == OFFLOAD) {
if (sampleRate != mSampleRate || format != mFormat || channelMask != mChannelMask) {
ALOGE("createTrack_l() Bad parameter: sampleRate %d format %d, channelMask 0x%08x \""
"for output %p with format %d",
sampleRate, format, channelMask, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
} else {
if ((format & AUDIO_FORMAT_MAIN_MASK) != AUDIO_FORMAT_PCM) {
ALOGE("createTrack_l() Bad parameter: format %d \""
"for output %p with format %d",
format, mOutput, mFormat);
lStatus = BAD_VALUE;
goto Exit;
}
// Resampler implementation limits input sampling rate to 2 x output sampling rate.
if (sampleRate > mSampleRate*2) {
ALOGE("Sample rate out of range: %u mSampleRate %u", sampleRate, mSampleRate);
lStatus = BAD_VALUE;
goto Exit;
}
}
lStatus = initCheck();
if (lStatus != NO_ERROR) {
ALOGE("Audio driver not initialized.");
goto Exit;
}
{ // scope for mLock
Mutex::Autolock _l(mLock);
// all tracks in the same audio session must share the same routing strategy, otherwise
// conflicts will happen when tracks are moved from one output to another by the audio
// policy manager
uint32_t strategy = AudioSystem::getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0 && !t->isOutputTrack()) {
uint32_t actual = AudioSystem::getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
lStatus = BAD_VALUE;
goto Exit;
}
}
}
if (!isTimed) {
track = new Track(this, client, streamType, sampleRate, format,
channelMask, frameCount, sharedBuffer, sessionId, uid, *flags);
} else {
track = TimedTrack::create(this, client, streamType, sampleRate, format,
channelMask, frameCount, sharedBuffer, sessionId, uid);
}
if (track == 0 || track->getCblk() == NULL || track->name() < 0) {
lStatus = NO_MEMORY;
// track must be cleared from the caller as the caller has the AF lock
goto Exit;
}
mTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
if ((*flags & IAudioFlinger::TRACK_FAST) && (tid != -1)) {
pid_t callingPid = IPCThreadState::self()->getCallingPid();
// we don't have CAP_SYS_NICE, nor do we want to have it as it's too powerful,
// so ask activity manager to do this on our behalf
sendPrioConfigEvent_l(callingPid, tid, kPriorityAudioApp);
}
}
lStatus = NO_ERROR;
Exit:
if (status) {
*status = lStatus;
}
return track;
}
uint32_t AudioFlinger::PlaybackThread::correctLatency_l(uint32_t latency) const
{
return latency;
}
uint32_t AudioFlinger::PlaybackThread::latency() const
{
Mutex::Autolock _l(mLock);
return latency_l();
}
uint32_t AudioFlinger::PlaybackThread::latency_l() const
{
if (initCheck() == NO_ERROR) {
return correctLatency_l(mOutput->stream->get_latency(mOutput->stream));
} else {
return 0;
}
}
void AudioFlinger::PlaybackThread::setMasterVolume(float value)
{
Mutex::Autolock _l(mLock);
// Don't apply master volume in SW if our HAL can do it for us.
if (mOutput && mOutput->audioHwDev &&
mOutput->audioHwDev->canSetMasterVolume()) {
mMasterVolume = 1.0;
} else {
mMasterVolume = value;
}
}
void AudioFlinger::PlaybackThread::setMasterMute(bool muted)
{
Mutex::Autolock _l(mLock);
// Don't apply master mute in SW if our HAL can do it for us.
if (mOutput && mOutput->audioHwDev &&
mOutput->audioHwDev->canSetMasterMute()) {
mMasterMute = false;
} else {
mMasterMute = muted;
}
}
void AudioFlinger::PlaybackThread::setStreamVolume(audio_stream_type_t stream, float value)
{
Mutex::Autolock _l(mLock);
mStreamTypes[stream].volume = value;
broadcast_l();
}
void AudioFlinger::PlaybackThread::setStreamMute(audio_stream_type_t stream, bool muted)
{
Mutex::Autolock _l(mLock);
mStreamTypes[stream].mute = muted;
broadcast_l();
}
float AudioFlinger::PlaybackThread::streamVolume(audio_stream_type_t stream) const
{
Mutex::Autolock _l(mLock);
return mStreamTypes[stream].volume;
}
// addTrack_l() must be called with ThreadBase::mLock held
status_t AudioFlinger::PlaybackThread::addTrack_l(const sp<Track>& track)
{
status_t status = ALREADY_EXISTS;
// set retry count for buffer fill
track->mRetryCount = kMaxTrackStartupRetries;
if (mActiveTracks.indexOf(track) < 0) {
// the track is newly added, make sure it fills up all its
// buffers before playing. This is to ensure the client will
// effectively get the latency it requested.
if (!track->isOutputTrack()) {
TrackBase::track_state state = track->mState;
mLock.unlock();
status = AudioSystem::startOutput(mId, track->streamType(), track->sessionId());
mLock.lock();
// abort if the track was stopped/paused while we released the lock
if (state != track->mState) {
if (status == NO_ERROR) {
mLock.unlock();
AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
mLock.lock();
}
return INVALID_OPERATION;
}
// abort if start is rejected by audio policy manager
if (status != NO_ERROR) {
return PERMISSION_DENIED;
}
#ifdef ADD_BATTERY_DATA
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStart);
#endif
}
track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
mActiveTracks.add(track);
mWakeLockUids.add(track->uid());
mActiveTracksGeneration++;
mLatestActiveTrack = track;
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
ALOGV("addTrack_l() starting track on chain %p for session %d", chain.get(),
track->sessionId());
chain->incActiveTrackCnt();
}
status = NO_ERROR;
}
ALOGV("signal playback thread");
broadcast_l();
return status;
}
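// destroyTrack_l() must be called with ThreadBase::mLock held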
bool AudioFlinger::PlaybackThread::destroyTrack_l(const sp<Track>& track)
{
track->terminate();
// active tracks are removed by threadLoop()
bool trackActive = (mActiveTracks.indexOf(track) >= 0);
track->mState = TrackBase::STOPPED;
if (!trackActive) {
removeTrack_l(track);
} else if (track->isFastTrack() || track->isOffloaded()) {
track->mState = TrackBase::STOPPING_1;
}
return trackActive;
}
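// removeTrack_l() must be called with ThreadBase::mLock held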
void AudioFlinger::PlaybackThread::removeTrack_l(const sp<Track>& track)
{
track->triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
mTracks.remove(track);
deleteTrackName_l(track->name());
// redundant as track is about to be destroyed, for dumpsys only
track->mName = -1;
if (track->isFastTrack()) {
int index = track->mFastIndex;
ALOG_ASSERT(0 < index && index < (int)FastMixerState::kMaxFastTracks);
ALOG_ASSERT(!(mFastTrackAvailMask & (1 << index)));
mFastTrackAvailMask |= 1 << index;
// redundant as track is about to be destroyed, for dumpsys only
track->mFastIndex = -1;
}
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
chain->decTrackCnt();
}
}
void AudioFlinger::PlaybackThread::broadcast_l()
{
// Thread could be blocked waiting for an async callback, so signal it to handle
// state changes immediately.
// If threadLoop is currently unlocked, a signal of mWaitWorkCV will be lost, so we
// also set a flag to prevent it from blocking on mWaitWorkCV
mSignalPending = true;
mWaitWorkCV.broadcast();
}
String8 AudioFlinger::PlaybackThread::getParameters(const String8& keys)
{
Mutex::Autolock _l(mLock);
if (initCheck() != NO_ERROR) {
return String8();
}
char *s = mOutput->stream->common.get_parameters(&mOutput->stream->common, keys.string());
const String8 out_s8(s);
free(s);
return out_s8;
}
// audioConfigChanged_l() must be called with AudioFlinger::mLock held
void AudioFlinger::PlaybackThread::audioConfigChanged_l(int event, int param) {
AudioSystem::OutputDescriptor desc;
void *param2 = NULL;
ALOGV("PlaybackThread::audioConfigChanged_l, thread %p, event %d, param %d", this, event,
param);
switch (event) {
case AudioSystem::OUTPUT_OPENED:
case AudioSystem::OUTPUT_CONFIG_CHANGED:
desc.channelMask = mChannelMask;
desc.samplingRate = mSampleRate;
desc.format = mFormat;
desc.frameCount = mNormalFrameCount; // FIXME see
// AudioFlinger::frameCount(audio_io_handle_t)
desc.latency = latency();
param2 = &desc;
break;
case AudioSystem::STREAM_CONFIG_CHANGED:
param2 = &param;
case AudioSystem::OUTPUT_CLOSED:
default:
break;
}
mAudioFlinger->audioConfigChanged_l(event, mId, param2);
}
void AudioFlinger::PlaybackThread::writeCallback()
{
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->resetWriteBlocked();
}
void AudioFlinger::PlaybackThread::drainCallback()
{
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->resetDraining();
}
void AudioFlinger::PlaybackThread::resetWriteBlocked(uint32_t sequence)
{
Mutex::Autolock _l(mLock);
// reject out of sequence requests
if ((mWriteAckSequence & 1) && (sequence == mWriteAckSequence)) {
mWriteAckSequence &= ~1;
mWaitWorkCV.signal();
}
}
void AudioFlinger::PlaybackThread::resetDraining(uint32_t sequence)
{
Mutex::Autolock _l(mLock);
// reject out of sequence requests
if ((mDrainSequence & 1) && (sequence == mDrainSequence)) {
mDrainSequence &= ~1;
mWaitWorkCV.signal();
}
}
// static
int AudioFlinger::PlaybackThread::asyncCallback(stream_callback_event_t event,
void *param,
void *cookie)
{
AudioFlinger::PlaybackThread *me = (AudioFlinger::PlaybackThread *)cookie;
ALOGV("asyncCallback() event %d", event);
switch (event) {
case STREAM_CBK_EVENT_WRITE_READY:
me->writeCallback();
break;
case STREAM_CBK_EVENT_DRAIN_READY:
me->drainCallback();
break;
default:
ALOGW("asyncCallback() unknown event %d", event);
break;
}
return 0;
}
void AudioFlinger::PlaybackThread::readOutputParameters()
{
// unfortunately we have no way of recovering from errors here, hence the LOG_FATAL
mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
if (!audio_is_output_channel(mChannelMask)) {
LOG_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
if ((mType == MIXER || mType == DUPLICATING) && mChannelMask != AUDIO_CHANNEL_OUT_STEREO) {
LOG_FATAL("HAL channel mask %#x not supported for mixed output; "
"must be AUDIO_CHANNEL_OUT_STEREO", mChannelMask);
}
mChannelCount = popcount(mChannelMask);
mFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
if (!audio_is_valid_format(mFormat)) {
LOG_FATAL("HAL format %d not valid for output", mFormat);
}
if ((mType == MIXER || mType == DUPLICATING) && mFormat != AUDIO_FORMAT_PCM_16_BIT) {
LOG_FATAL("HAL format %d not supported for mixed output; must be AUDIO_FORMAT_PCM_16_BIT",
mFormat);
}
mFrameSize = audio_stream_frame_size(&mOutput->stream->common);
mFrameCount = mOutput->stream->common.get_buffer_size(&mOutput->stream->common) / mFrameSize;
if (mFrameCount & 15) {
ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
if ((mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) &&
(mOutput->stream->set_callback != NULL)) {
if (mOutput->stream->set_callback(mOutput->stream,
AudioFlinger::PlaybackThread::asyncCallback, this) == 0) {
mUseAsyncWrite = true;
mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
}
}
// Calculate size of normal mix buffer relative to the HAL output buffer size
double multiplier = 1.0;
if (mType == MIXER && (kUseFastMixer == FastMixer_Static ||
kUseFastMixer == FastMixer_Dynamic)) {
size_t minNormalFrameCount = (kMinNormalMixBufferSizeMs * mSampleRate) / 1000;
size_t maxNormalFrameCount = (kMaxNormalMixBufferSizeMs * mSampleRate) / 1000;
// round up minimum and round down maximum to nearest 16 frames to satisfy AudioMixer
minNormalFrameCount = (minNormalFrameCount + 15) & ~15;
maxNormalFrameCount = maxNormalFrameCount & ~15;
if (maxNormalFrameCount < minNormalFrameCount) {
maxNormalFrameCount = minNormalFrameCount;
}
multiplier = (double) minNormalFrameCount / (double) mFrameCount;
if (multiplier <= 1.0) {
multiplier = 1.0;
} else if (multiplier <= 2.0) {
if (2 * mFrameCount <= maxNormalFrameCount) {
multiplier = 2.0;
} else {
multiplier = (double) maxNormalFrameCount / (double) mFrameCount;
}
} else {
// prefer an even multiplier, for compatibility with doubling of fast tracks due to HAL
// SRC (it would be unusual for the normal mix buffer size to not be a multiple of the
// fast track frame count, but we sometimes have to do this to satisfy the maximum
// frame count constraint)
// FIXME this rounding up should not be done if no HAL SRC
uint32_t truncMult = (uint32_t) multiplier;
if ((truncMult & 1)) {
if ((truncMult + 1) * mFrameCount <= maxNormalFrameCount) {
++truncMult;
}
}
multiplier = (double) truncMult;
}
}
mNormalFrameCount = multiplier * mFrameCount;
// round up to nearest 16 frames to satisfy AudioMixer
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
ALOGI("HAL output buffer size %u frames, normal mix buffer size %u frames", mFrameCount,
mNormalFrameCount);
delete[] mAllocMixBuffer;
size_t align = (mFrameSize < sizeof(int16_t)) ? sizeof(int16_t) : mFrameSize;
mAllocMixBuffer = new int8_t[mNormalFrameCount * mFrameSize + align - 1];
mMixBuffer = (int16_t *) ((((size_t)mAllocMixBuffer + align - 1) / align) * align);
memset(mMixBuffer, 0, mNormalFrameCount * mFrameSize);
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
// Note that mLock is not held when readOutputParameters() is called from the constructor
// but in this case nothing is done below as no audio sessions have effects yet, so it
// doesn't matter.
// create a copy of mEffectChains as calling moveEffectChain_l() can reorder some effect chains
Vector< sp<EffectChain> > effectChains = mEffectChains;
for (size_t i = 0; i < effectChains.size(); i ++) {
mAudioFlinger->moveEffectChain_l(effectChains[i]->sessionId(), this, this, false);
}
}
status_t AudioFlinger::PlaybackThread::getRenderPosition(size_t *halFrames, size_t *dspFrames)
{
if (halFrames == NULL || dspFrames == NULL) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
if (initCheck() != NO_ERROR) {
return INVALID_OPERATION;
}
size_t framesWritten = mBytesWritten / mFrameSize;
*halFrames = framesWritten;
if (isSuspended()) {
// return an estimation of rendered frames when the output is suspended
size_t latencyFrames = (latency_l() * mSampleRate) / 1000;
*dspFrames = framesWritten >= latencyFrames ? framesWritten - latencyFrames : 0;
return NO_ERROR;
} else {
return mOutput->stream->get_render_position(mOutput->stream, dspFrames);
}
}
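// hasAudioSession() returns a bitmask: EFFECT_SESSION if an effect chain exists for the
// session, and TRACK_SESSION if at least one valid track belongs to the session.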
uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const
{
Mutex::Autolock _l(mLock);
uint32_t result = 0;
if (getEffectChain_l(sessionId) != 0) {
result = EFFECT_SESSION;
}
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
result |= TRACK_SESSION;
break;
}
}
return result;
}
uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
{
// session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
for (size_t i = 0; i < mTracks.size(); i++) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
return AudioSystem::getStrategyForStream(track->streamType());
}
}
return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
}
AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
{
Mutex::Autolock _l(mLock);
return mOutput;
}
AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
{
Mutex::Autolock _l(mLock);
AudioStreamOut *output = mOutput;
mOutput = NULL;
// FIXME FastMixer might also have a raw ptr to mOutputSink;
// must push a NULL and wait for ack
mOutputSink.clear();
mPipeSink.clear();
mNormalSink.clear();
return output;
}
// this method must always be called either with ThreadBase mLock held or inside the thread loop
audio_stream_t* AudioFlinger::PlaybackThread::stream() const
{
if (mOutput == NULL) {
return NULL;
}
return &mOutput->stream->common;
}
uint32_t AudioFlinger::PlaybackThread::activeSleepTimeUs() const
{
return (uint32_t)((uint32_t)((mNormalFrameCount * 1000) / mSampleRate) * 1000);
}
status_t AudioFlinger::PlaybackThread::setSyncEvent(const sp<SyncEvent>& event)
{
if (!isValidSyncEvent(event)) {
return BAD_VALUE;
}
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (event->triggerSession() == track->sessionId()) {
(void) track->setSyncEvent(event);
return NO_ERROR;
}
}
return NAME_NOT_FOUND;
}
bool AudioFlinger::PlaybackThread::isValidSyncEvent(const sp<SyncEvent>& event) const
{
return event->type() == AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE;
}
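// threadLoop_removeTracks() notifies audio policy (stopOutput) for each removed track that
// is not an output track, and releases the output for tracks that have been terminated.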
void AudioFlinger::PlaybackThread::threadLoop_removeTracks(
const Vector< sp<Track> >& tracksToRemove)
{
size_t count = tracksToRemove.size();
if (count) {
for (size_t i = 0 ; i < count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
if (!track->isOutputTrack()) {
AudioSystem::stopOutput(mId, track->streamType(), track->sessionId());
#ifdef ADD_BATTERY_DATA
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
#endif
if (track->isTerminated()) {
AudioSystem::releaseOutput(mId);
}
}
}
}
}
void AudioFlinger::PlaybackThread::checkSilentMode_l()
{
if (!mMasterMute) {
char value[PROPERTY_VALUE_MAX];
if (property_get("ro.audio.silent", value, "0") > 0) {
char *endptr;
unsigned long ul = strtoul(value, &endptr, 0);
if (*endptr == '\0' && ul != 0) {
ALOGD("Silence is golden");
// The setprop command will not allow a property to be changed after
// the first time it is set, so we don't have to worry about un-muting.
setMasterMute_l(true);
}
}
}
}
// shared by MIXER and DIRECT, overridden by DUPLICATING
ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
{
// FIXME rewrite to reduce number of system calls
mLastWriteTime = systemTime();
mInWrite = true;
ssize_t bytesWritten;
// If an NBAIO sink is present, use it to write the normal mixer's submix
if (mNormalSink != 0) {
#define mBitShift 2 // FIXME
size_t count = mBytesRemaining >> mBitShift;
size_t offset = (mCurrentWriteLength - mBytesRemaining) >> 1;
ATRACE_BEGIN("write");
// update the setpoint when AudioFlinger::mScreenState changes
uint32_t screenState = AudioFlinger::mScreenState;
if (screenState != mScreenState) {
mScreenState = screenState;
MonoPipe *pipe = (MonoPipe *)mPipeSink.get();
if (pipe != NULL) {
pipe->setAvgFrames((mScreenState & 1) ?
(pipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
}
}
ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count);
ATRACE_END();
if (framesWritten > 0) {
bytesWritten = framesWritten << mBitShift;
} else {
bytesWritten = framesWritten;
}
status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
if (status == NO_ERROR) {
size_t totalFramesWritten = mNormalSink->framesWritten();
if (totalFramesWritten >= mLatchD.mTimestamp.mPosition) {
mLatchD.mUnpresentedFrames = totalFramesWritten - mLatchD.mTimestamp.mPosition;
mLatchDValid = true;
}
}
// otherwise use the HAL / AudioStreamOut directly
} else {
// Direct output and offload threads
size_t offset = (mCurrentWriteLength - mBytesRemaining) / sizeof(int16_t);
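        // With non-blocking (async) HAL writes, setting bit 0 of mWriteAckSequence marks a
        // write request as pending; it is cleared below if the write fails or completes fully.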
if (mUseAsyncWrite) {
ALOGW_IF(mWriteAckSequence & 1, "threadLoop_write(): out of sequence write request");
mWriteAckSequence += 2;
mWriteAckSequence |= 1;
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->setWriteBlocked(mWriteAckSequence);
}
// FIXME We should have an implementation of timestamps for direct output threads.
        // They are used e.g. for multichannel PCM playback over HDMI.
bytesWritten = mOutput->stream->write(mOutput->stream,
mMixBuffer + offset, mBytesRemaining);
if (mUseAsyncWrite &&
((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
            // do not wait for the async callback in case of an error or a full write
mWriteAckSequence &= ~1;
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->setWriteBlocked(mWriteAckSequence);
}
}
mNumWrites++;
mInWrite = false;
mStandby = false;
return bytesWritten;
}
void AudioFlinger::PlaybackThread::threadLoop_drain()
{
if (mOutput->stream->drain) {
ALOGV("draining %s", (mMixerStatus == MIXER_DRAIN_TRACK) ? "early" : "full");
if (mUseAsyncWrite) {
ALOGW_IF(mDrainSequence & 1, "threadLoop_drain(): out of sequence drain request");
mDrainSequence |= 1;
ALOG_ASSERT(mCallbackThread != 0);
mCallbackThread->setDraining(mDrainSequence);
}
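        // MIXER_DRAIN_TRACK requests an early-notify drain (notification shortly before the
        // current track's data has fully played, e.g. for gapless transitions),
        // otherwise drain all remaining data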
mOutput->stream->drain(mOutput->stream,
(mMixerStatus == MIXER_DRAIN_TRACK) ? AUDIO_DRAIN_EARLY_NOTIFY
: AUDIO_DRAIN_ALL);
}
}
void AudioFlinger::PlaybackThread::threadLoop_exit()
{
// Default implementation has nothing to do
}
/*
The derived values that are cached:
- mixBufferSize from frame count * frame size
- activeSleepTime from activeSleepTimeUs()
- idleSleepTime from idleSleepTimeUs()
- standbyDelay from mActiveSleepTimeUs (DIRECT only)
- maxPeriod from frame count and sample rate (MIXER only)
The parameters that affect these derived values are:
- frame count
- frame size
- sample rate
- device type: A2DP or not
- device latency
- format: PCM or not
- active sleep time
- idle sleep time
*/
void AudioFlinger::PlaybackThread::cacheParameters_l()
{
mixBufferSize = mNormalFrameCount * mFrameSize;
activeSleepTime = activeSleepTimeUs();
idleSleepTime = idleSleepTimeUs();
}
void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
{
ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
this, streamType, mTracks.size());
Mutex::Autolock _l(mLock);
size_t size = mTracks.size();
for (size_t i = 0; i < size; i++) {
sp<Track> t = mTracks[i];
if (t->streamType() == streamType) {
t->invalidate();
}
}
}
status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
{
int session = chain->sessionId();
int16_t *buffer = mMixBuffer;
bool ownsBuffer = false;
ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
if (session > 0) {
// Only one effect chain can be present in direct output thread and it uses
// the mix buffer as input
if (mType != DIRECT) {
size_t numSamples = mNormalFrameCount * mChannelCount;
buffer = new int16_t[numSamples];
memset(buffer, 0, numSamples * sizeof(int16_t));
ALOGV("addEffectChain_l() creating new input buffer %p session %d", buffer, session);
ownsBuffer = true;
}
// Attach all tracks with same session ID to this chain.
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
buffer);
track->setMainBuffer(buffer);
chain->incTrackCnt();
}
}
// indicate all active tracks in the chain
for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
sp<Track> track = mActiveTracks[i].promote();
if (track == 0) {
continue;
}
if (session == track->sessionId()) {
ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
chain->incActiveTrackCnt();
}
}
}
chain->setInBuffer(buffer, ownsBuffer);
chain->setOutBuffer(mMixBuffer);
// Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
// chains list in order to be processed last as it contains output stage effects
// Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
// session AUDIO_SESSION_OUTPUT_STAGE to be processed
// after track specific effects and before output stage
// It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
// that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX
    // Effect chains for other sessions are inserted at the beginning of the effect
// chains list to be processed before output mix effects. Relative order between other
// sessions is not important
size_t size = mEffectChains.size();
size_t i = 0;
for (i = 0; i < size; i++) {
if (mEffectChains[i]->sessionId() < session) {
break;
}
}
mEffectChains.insertAt(chain, i);
checkSuspendOnAddEffectChain_l(chain);
return NO_ERROR;
}
size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
{
int session = chain->sessionId();
ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
for (size_t i = 0; i < mEffectChains.size(); i++) {
if (chain == mEffectChains[i]) {
mEffectChains.removeAt(i);
// detach all active tracks from the chain
for (size_t i = 0 ; i < mActiveTracks.size() ; ++i) {
sp<Track> track = mActiveTracks[i].promote();
if (track == 0) {
continue;
}
if (session == track->sessionId()) {
ALOGV("removeEffectChain_l(): stopping track on chain %p for session Id: %d",
chain.get(), session);
chain->decActiveTrackCnt();
}
}
// detach all tracks with same session ID from this chain
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
track->setMainBuffer(mMixBuffer);
chain->decTrackCnt();
}
}
break;
}
}
return mEffectChains.size();
}
status_t AudioFlinger::PlaybackThread::attachAuxEffect(
const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
{
Mutex::Autolock _l(mLock);
return attachAuxEffect_l(track, EffectId);
}
status_t AudioFlinger::PlaybackThread::attachAuxEffect_l(
const sp<AudioFlinger::PlaybackThread::Track> track, int EffectId)
{
status_t status = NO_ERROR;
if (EffectId == 0) {
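        // an effect ID of 0 means detach: clear any auxiliary effect previously attached to the track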
track->setAuxBuffer(0, NULL);
} else {
// Auxiliary effects are always in audio session AUDIO_SESSION_OUTPUT_MIX
sp<EffectModule> effect = getEffect_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
if (effect != 0) {
if ((effect->desc().flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
track->setAuxBuffer(EffectId, (int32_t *)effect->inBuffer());
} else {
status = INVALID_OPERATION;
}
} else {
status = BAD_VALUE;
}
}
return status;
}
void AudioFlinger::PlaybackThread::detachAuxEffect_l(int effectId)
{
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (track->auxEffectId() == effectId) {
attachAuxEffect_l(track, 0);
}
}
}
bool AudioFlinger::PlaybackThread::threadLoop()
{
Vector< sp<Track> > tracksToRemove;
standbyTime = systemTime();
// MIXER
nsecs_t lastWarning = 0;
// DUPLICATING
// FIXME could this be made local to while loop?
writeFrames = 0;
int lastGeneration = 0;
cacheParameters_l();
sleepTime = idleSleepTime;
if (mType == MIXER) {
sleepTimeShift = 0;
}
CpuStats cpuStats;
const String8 myName(String8::format("thread %p type %d TID %d", this, mType, gettid()));
acquireWakeLock();
// mNBLogWriter->log can only be called while thread mutex mLock is held.
// So if you need to log when mutex is unlocked, set logString to a non-NULL string,
// and then that string will be logged at the next convenient opportunity.
const char *logString = NULL;
checkSilentMode_l();
while (!exitPending())
{
cpuStats.sample(myName);
Vector< sp<EffectChain> > effectChains;
processConfigEvents();
{ // scope for mLock
Mutex::Autolock _l(mLock);
if (logString != NULL) {
mNBLogWriter->logTimestamp();
mNBLogWriter->log(logString);
logString = NULL;
}
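            // publish the timestamp latched in threadLoop_write() (mLatchD) to mLatchQ,
            // the copy that timestamp queries read while holding mLock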
if (mLatchDValid) {
mLatchQ = mLatchD;
mLatchDValid = false;
mLatchQValid = true;
}
if (checkForNewParameters_l()) {
cacheParameters_l();
}
saveOutputTracks();
if (mSignalPending) {
// A signal was raised while we were unlocked
mSignalPending = false;
} else if (waitingAsyncCallback_l()) {
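                // an async (non-blocking) write or drain is still pending in the HAL:
                // nothing to mix yet, so drop the wake lock and wait to be signaled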
if (exitPending()) {
break;
}
releaseWakeLock_l();
mWakeLockUids.clear();
mActiveTracksGeneration++;
ALOGV("wait async completion");
mWaitWorkCV.wait(mLock);
ALOGV("async completion/wake");
acquireWakeLock_l();
standbyTime = systemTime() + standbyDelay;
sleepTime = 0;
continue;
}
if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
isSuspended()) {
// put audio hardware into standby after short delay
if (shouldStandby_l()) {
threadLoop_standby();
mStandby = true;
}
if (!mActiveTracks.size() && mConfigEvents.isEmpty()) {
// we're about to wait, flush the binder command buffer
IPCThreadState::self()->flushCommands();
clearOutputTracks();
if (exitPending()) {
break;
}
releaseWakeLock_l();
mWakeLockUids.clear();
mActiveTracksGeneration++;
// wait until we have something to do...
ALOGV("%s going to sleep", myName.string());
mWaitWorkCV.wait(mLock);
ALOGV("%s waking up", myName.string());
acquireWakeLock_l();
mMixerStatus = MIXER_IDLE;
mMixerStatusIgnoringFastTracks = MIXER_IDLE;
mBytesWritten = 0;
mBytesRemaining = 0;
checkSilentMode_l();
standbyTime = systemTime() + standbyDelay;
sleepTime = idleSleepTime;
if (mType == MIXER) {
sleepTimeShift = 0;
}
continue;
}
}
// mMixerStatusIgnoringFastTracks is also updated internally
mMixerStatus = prepareTracks_l(&tracksToRemove);
// compare with previously applied list
if (lastGeneration != mActiveTracksGeneration) {
// update wakelock
updateWakeLockUids_l(mWakeLockUids);
lastGeneration = mActiveTracksGeneration;
}
// prevent any changes in effect chain list and in each effect chain
            // during mixing and effects processing, as the audio buffers could be deleted
// or modified if an effect is created or deleted
lockEffectChains_l(effectChains);
} // mLock scope ends
if (mBytesRemaining == 0) {
mCurrentWriteLength = 0;
if (mMixerStatus == MIXER_TRACKS_READY) {
// threadLoop_mix() sets mCurrentWriteLength
threadLoop_mix();
} else if ((mMixerStatus != MIXER_DRAIN_TRACK)
&& (mMixerStatus != MIXER_DRAIN_ALL)) {
// threadLoop_sleepTime sets sleepTime to 0 if data
// must be written to HAL
threadLoop_sleepTime();
if (sleepTime == 0) {
mCurrentWriteLength = mixBufferSize;
}
}
mBytesRemaining = mCurrentWriteLength;
if (isSuspended()) {
sleepTime = suspendSleepTimeUs();
// simulate write to HAL when suspended
mBytesWritten += mixBufferSize;
mBytesRemaining = 0;
}
// only process effects if we're going to write
if (sleepTime == 0 && mType != OFFLOAD) {
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
}
}
// Process effect chains for offloaded thread even if no audio
// was read from audio track: process only updates effect state
        // and thus does not have to be synchronized with audio writes, but it may have
        // to be called while waiting for the async write callback
if (mType == OFFLOAD) {
for (size_t i = 0; i < effectChains.size(); i ++) {
effectChains[i]->process_l();
}
}
// enable changes in effect chain
unlockEffectChains(effectChains);
if (!waitingAsyncCallback()) {
// sleepTime == 0 means we must write to audio hardware
if (sleepTime == 0) {
if (mBytesRemaining) {
ssize_t ret = threadLoop_write();
if (ret < 0) {
mBytesRemaining = 0;
} else {
mBytesWritten += ret;
mBytesRemaining -= ret;
}
} else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
(mMixerStatus == MIXER_DRAIN_ALL)) {
threadLoop_drain();
}
if (mType == MIXER) {
// write blocked detection
nsecs_t now = systemTime();
nsecs_t delta = now - mLastWriteTime;
if (!mStandby && delta > maxPeriod) {
mNumDelayedWrites++;
if ((now - lastWarning) > kWarningThrottleNs) {
ATRACE_NAME("underrun");
ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
ns2ms(delta), mNumDelayedWrites, this);
lastWarning = now;
}
}
}
} else {
usleep(sleepTime);
}
}
// Finally let go of removed track(s), without the lock held
// since we can't guarantee the destructors won't acquire that
// same lock. This will also mutate and push a new fast mixer state.
threadLoop_removeTracks(tracksToRemove);
tracksToRemove.clear();
// FIXME I don't understand the need for this here;
// it was in the original code but maybe the
// assignment in saveOutputTracks() makes this unnecessary?
clearOutputTracks();
// Effect chains will be actually deleted here if they were removed from
// mEffectChains list during mixing or effects processing
effectChains.clear();
        // FIXME Note that the above .clear() is no longer necessary since effectChains
        // is now local to this block, but we keep it for now (at least until the merge is done).
}
threadLoop_exit();
// for DuplicatingThread, standby mode is handled by the outputTracks, otherwise ...
if (mType == MIXER || mType == DIRECT || mType == OFFLOAD) {
// put output stream into standby mode
if (!mStandby) {
mOutput->stream->common.standby(&mOutput->stream->common);
}
}
releaseWakeLock();
mWakeLockUids.clear();
mActiveTracksGeneration++;
ALOGV("Thread %p type %d exiting", this, mType);
return false;
}
// removeTracks_l() must be called with ThreadBase::mLock held
void AudioFlinger::PlaybackThread::removeTracks_l(const Vector< sp<Track> >& tracksToRemove)
{
size_t count = tracksToRemove.size();
if (count) {
for (size_t i=0 ; i<count ; i++) {
const sp<Track>& track = tracksToRemove.itemAt(i);
mActiveTracks.remove(track);
mWakeLockUids.remove(track->uid());
mActiveTracksGeneration++;
ALOGV("removeTracks_l removing track on session %d", track->sessionId());
sp<EffectChain> chain = getEffectChain_l(track->sessionId());
if (chain != 0) {
ALOGV("stopping track on chain %p for session Id: %d", chain.get(),
track->sessionId());
chain->decActiveTrackCnt();
}
if (track->isTerminated()) {
removeTrack_l(track);
}
}
}
}
status_t AudioFlinger::PlaybackThread::getTimestamp_l(AudioTimestamp& timestamp)
{
if (mNormalSink != 0) {
return mNormalSink->getTimestamp(timestamp);
}
if (mType == OFFLOAD && mOutput->stream->get_presentation_position) {
uint64_t position64;
int ret = mOutput->stream->get_presentation_position(
mOutput->stream, &position64, &timestamp.mTime);
if (ret == 0) {
timestamp.mPosition = (uint32_t)position64;
return NO_ERROR;
}
}
return INVALID_OPERATION;
}
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, audio_devices_t device, type_t type)
: PlaybackThread(audioFlinger, output, id, device, type),
// mAudioMixer below
// mFastMixer below
mFastMixerFutex(0)
// mOutputSink below
// mPipeSink below
// mNormalSink below
{
ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
"mFrameCount=%d, mNormalFrameCount=%d",
mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
mNormalFrameCount);
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
// FIXME - Current mixer implementation only supports stereo output
if (mChannelCount != FCC_2) {
ALOGE("Invalid audio hardware channel count %d", mChannelCount);
}
// create an NBAIO sink for the HAL output stream, and negotiate
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount)};
ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
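    // the sink is offered the HAL's own sample rate and channel count,
    // so the single offer is expected to be accepted (index 0)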
// initialize fast mixer depending on configuration
bool initFastMixer;
switch (kUseFastMixer) {
case FastMixer_Never:
initFastMixer = false;
break;
case FastMixer_Always:
initFastMixer = true;
break;
case FastMixer_Static:
case FastMixer_Dynamic:
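        // only use the fast mixer when the HAL period (mFrameCount) is shorter than
        // the normal mixer period (mNormalFrameCount)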
initFastMixer = mFrameCount < mNormalFrameCount;
break;
}
if (initFastMixer) {
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
// This pipe depth compensates for scheduling latency of the normal mixer thread.
// When it wakes up after a maximum latency, it runs a few cycles quickly before
// finally blocking. Note the pipe implementation rounds up the request to a power of 2.
MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
const NBAIO_Format offers[1] = {format};
size_t numCounterOffers = 0;
ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
monoPipe->setAvgFrames((mScreenState & 1) ?
(monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
mPipeSink = monoPipe;
#ifdef TEE_SINK
if (mTeeSinkOutputEnabled) {
// create a Pipe to archive a copy of FastMixer's output for dumpsys
Pipe *teeSink = new Pipe(mTeeSinkOutputFrames, format);
numCounterOffers = 0;
index = teeSink->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
mTeeSink = teeSink;
PipeReader *teeSource = new PipeReader(*teeSink);
numCounterOffers = 0;
index = teeSource->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
mTeeSource = teeSource;
}
#endif
// create fast mixer and configure it initially with just one fast track for our submix
mFastMixer = new FastMixer();
FastMixerStateQueue *sq = mFastMixer->sq();
#ifdef STATE_QUEUE_DUMP
sq->setObserverDump(&mStateQueueObserverDump);
sq->setMutatorDump(&mStateQueueMutatorDump);
#endif
FastMixerState *state = sq->begin();
FastTrack *fastTrack = &state->mFastTracks[0];
// wrap the source side of the MonoPipe to make it an AudioBufferProvider
fastTrack->mBufferProvider = new SourceAudioBufferProvider(new MonoPipeReader(monoPipe));
fastTrack->mVolumeProvider = NULL;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
// fast mixer will use the HAL output sink
state->mOutputSink = mOutputSink.get();
state->mOutputSinkGen++;
state->mFrameCount = mFrameCount;
state->mCommand = FastMixerState::COLD_IDLE;
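        // start in cold idle: the fast mixer sleeps on mFastMixerFutex until work arrives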
// already done in constructor initialization list
//mFastMixerFutex = 0;
state->mColdFutexAddr = &mFastMixerFutex;
state->mColdGen++;
state->mDumpState = &mFastMixerDumpState;
#ifdef TEE_SINK
state->mTeeSink = mTeeSink.get();
#endif
mFastMixerNBLogWriter = audioFlinger->newWriter_l(kFastMixerLogSize, "FastMixer");
state->mNBLogWriter = mFastMixerNBLogWriter.get();
sq->end();
sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
// start the fast mixer
mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
pid_t tid = mFastMixer->getTid();
int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
if (err != 0) {
ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
kPriorityFastMixer, getpid_cached, tid, err);
}
#ifdef AUDIO_WATCHDOG
// create and start the watchdog
mAudioWatchdog = new AudioWatchdog();
mAudioWatchdog->setDump(&mAudioWatchdogDump);
mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
tid = mAudioWatchdog->getTid();
err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
if (err != 0) {
ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
kPriorityFastMixer, getpid_cached, tid, err);
}
#endif
} else {
mFastMixer = NULL;
}
switch (kUseFastMixer) {
case FastMixer_Never:
case FastMixer_Dynamic:
mNormalSink = mOutputSink;
break;
case FastMixer_Always:
mNormalSink = mPipeSink;
break;
case FastMixer_Static:
mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
break;
}
}
AudioFlinger::MixerThread::~MixerThread()
{
if (mFastMixer != NULL) {
FastMixerStateQueue *sq = mFastMixer->sq();
FastMixerState *state = sq->begin();
if (state->mCommand == FastMixerState::COLD_IDLE) {
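            // the fast mixer is sleeping on the cold futex; wake it so it can observe the EXIT command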
int32_t old = android_atomic_inc(&mFastMixerFutex);
if (old == -1) {
__futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
}
}
state->mCommand = FastMixerState::EXIT;
sq->end();
sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
mFastMixer->join();
        // Though the fast mixer thread has exited, its state queue is still valid.
        // We'll use it to extract the final state, which contains the one remaining fast track
        // corresponding to our submix.
state = sq->begin();
ALOG_ASSERT(state->mTrackMask == 1);
FastTrack *fastTrack = &state->mFastTracks[0];
ALOG_ASSERT(fastTrack->mBufferProvider != NULL);
delete fastTrack->mBufferProvider;
sq->end(false /*didModify*/);
delete mFastMixer;
#ifdef AUDIO_WATCHDOG
if (mAudioWatchdog != 0) {
mAudioWatchdog->requestExit();
mAudioWatchdog->requestExitAndWait();
mAudioWatchdog.clear();
}
#endif
}
mAudioFlinger->unregisterWriter(mFastMixerNBLogWriter);
delete mAudioMixer;
}
uint32_t AudioFlinger::MixerThread::correctLatency_l(uint32_t