/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "APM_AudioPolicyManager"
//#define LOG_NDEBUG 0
//#define VERY_VERBOSE_LOGGING
#ifdef VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif
#define AUDIO_POLICY_XML_CONFIG_FILE "/system/etc/audio_policy_configuration.xml"
#include <inttypes.h>
#include <math.h>
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyEngineInstance.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <hardware/audio.h>
#include <hardware/audio_effect.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
#include <soundtrigger/SoundTrigger.h>
#include "AudioPolicyManager.h"
#ifndef USE_XML_AUDIO_POLICY_CONF
#include <ConfigParsingUtils.h>
#include <StreamDescriptor.h>
#endif
#include <Serializer.h>
#include "TypeConverter.h"
#include <policy.h>
namespace android {
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
#define TOUCH_SOUND_FIXED_DELAY_MS 100
// ----------------------------------------------------------------------------
// AudioPolicyInterface implementation
// ----------------------------------------------------------------------------
status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
const char *device_name)
{
return setDeviceConnectionStateInt(device, state, device_address, device_name);
}
status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
const char *device_name)
{
ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
device, state, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
sp<DeviceDescriptor> devDesc =
mHwModules.getDeviceDescriptor(device, device_address, device_name);
// handle output devices
if (audio_is_output_device(device)) {
SortedVector <audio_io_handle_t> outputs;
ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
// save a copy of the opened output descriptors before any output is opened or closed
// by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
mPreviousOutputs = mOutputs;
switch (state)
{
// handle output device connection
case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
if (index >= 0) {
ALOGW("setDeviceConnectionState() device already connected: %x", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() connecting device %x", device);
// register new device as available
index = mAvailableOutputDevices.add(devDesc);
if (index >= 0) {
sp<HwModule> module = mHwModules.getModuleForDevice(device);
if (module == 0) {
ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
device);
mAvailableOutputDevices.remove(devDesc);
return INVALID_OPERATION;
}
mAvailableOutputDevices[index]->attach(module);
} else {
return NO_MEMORY;
}
if (checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress) != NO_ERROR) {
mAvailableOutputDevices.remove(devDesc);
return INVALID_OPERATION;
}
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
// outputs should never be empty here
ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState(): "
"checkOutputsForDevice() returned no outputs but status OK");
ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
outputs.size());
// Send connect to HALs
AudioParameter param = AudioParameter(devDesc->mAddress);
param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
} break;
// handle output device disconnection
case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
if (index < 0) {
ALOGW("setDeviceConnectionState() device not connected: %x", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
// Send Disconnect to HALs
AudioParameter param = AudioParameter(devDesc->mAddress);
param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
// remove device from available output devices
mAvailableOutputDevices.remove(devDesc);
checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
default:
ALOGE("setDeviceConnectionState() invalid state: %x", state);
return BAD_VALUE;
}
// checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
// output is suspended before any tracks are moved to it
checkA2dpSuspend();
checkOutputForAllStrategies();
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
for (size_t i = 0; i < outputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
// close unused outputs after device disconnection or direct outputs that have been
// opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
closeOutput(outputs[i]);
}
}
// check again after closing A2DP output to reset mA2dpSuspended if needed
checkA2dpSuspend();
}
updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
// do not force device change on duplicated output because if device is 0, it will
// also force a device 0 for the two outputs it is duplicated to, which may override
// a valid device selection on those outputs.
bool force = !desc->isDuplicated()
&& (!device_distinguishes_on_address(device)
// always force when disconnecting (a non-duplicated device)
|| (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
setOutputDevice(desc, newDevice, force, 0);
}
}
if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
cleanUpForDevice(devDesc);
}
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
} // end if is output device
// handle input devices
if (audio_is_input_device(device)) {
SortedVector <audio_io_handle_t> inputs;
ssize_t index = mAvailableInputDevices.indexOf(devDesc);
switch (state)
{
// handle input device connection
case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
if (index >= 0) {
ALOGW("setDeviceConnectionState() device already connected: %d", device);
return INVALID_OPERATION;
}
sp<HwModule> module = mHwModules.getModuleForDevice(device);
if (module == NULL) {
ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
device);
return INVALID_OPERATION;
}
if (checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress) != NO_ERROR) {
return INVALID_OPERATION;
}
index = mAvailableInputDevices.add(devDesc);
if (index >= 0) {
mAvailableInputDevices[index]->attach(module);
} else {
return NO_MEMORY;
}
// Set connect to HALs
AudioParameter param = AudioParameter(devDesc->mAddress);
param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
// handle input device disconnection
case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
if (index < 0) {
ALOGW("setDeviceConnectionState() device not connected: %d", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
// Set Disconnect to HALs
AudioParameter param = AudioParameter(devDesc->mAddress);
param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress);
mAvailableInputDevices.remove(devDesc);
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
default:
ALOGE("setDeviceConnectionState() invalid state: %x", state);
return BAD_VALUE;
}
closeAllInputs();
// As the input device list can impact the output device selection, update
// getDeviceForStrategy() cache
updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
cleanUpForDevice(devDesc);
}
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
} // end if is input device
ALOGW("setDeviceConnectionState() invalid device: %x", device);
return BAD_VALUE;
}
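// Returns AUDIO_POLICY_DEVICE_STATE_AVAILABLE if a device of the given type (and address, when
// one is specified) is present in the available output or input device list,
// AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE otherwise.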
audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
const char *device_address)
{
sp<DeviceDescriptor> devDesc =
mHwModules.getDeviceDescriptor(device, device_address, "",
(strlen(device_address) != 0)/*matchAddress*/);
if (devDesc == 0) {
ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
device, device_address);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
DeviceVector *deviceVector;
if (audio_is_output_device(device)) {
deviceVector = &mAvailableOutputDevices;
} else if (audio_is_input_device(device)) {
deviceVector = &mAvailableInputDevices;
} else {
ALOGW("getDeviceConnectionState() invalid device type %08x", device);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
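// Routes the voice call RX and TX paths. When the RX device is reachable from the primary HW
// module, legacy routing via setOutputDevice() on the primary output is used; otherwise audio
// patches are created between the telephony devices and the selected RX/TX devices.
// Returns the mute wait time (in ms) reported by setOutputDevice(), or 0 when patches are used.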
uint32_t AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs)
{
bool createTxPatch = false;
status_t status;
audio_patch_handle_t afPatchHandle;
DeviceVector deviceList;
uint32_t muteWaitMs = 0;
if(!hasPrimaryOutput() || mPrimaryOutput->device() == AUDIO_DEVICE_OUT_STUB) {
return muteWaitMs;
}
audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
// release existing RX patch if any
if (mCallRxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
mCallRxPatch.clear();
}
// release TX patch if any
if (mCallTxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
mCallTxPatch.clear();
}
// If the RX device is on the primary HW module, then use legacy routing method for voice calls
// via setOutputDevice() on primary output.
// Otherwise, create two audio patches for TX and RX path.
if (availablePrimaryOutputDevices() & rxDevice) {
muteWaitMs = setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
// If the TX device is also on the primary HW module, setOutputDevice() will take care
// of it due to legacy implementation. If not, create a patch.
if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
== AUDIO_DEVICE_NONE) {
createTxPatch = true;
}
} else { // create RX path audio patch
struct audio_patch patch;
patch.num_sources = 1;
patch.num_sinks = 1;
deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
ALOG_ASSERT(!deviceList.isEmpty(),
"updateCallRouting() selected device not in output device list");
sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
ALOG_ASSERT(!deviceList.isEmpty(),
"updateCallRouting() no telephony RX device");
sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
// request to reuse existing output stream if one is already opened to reach the RX device
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(rxDevice, mOutputs);
audio_io_handle_t output = selectOutput(outputs,
AUDIO_OUTPUT_FLAG_NONE,
AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"updateCallRouting() RX device output is duplicated");
outputDesc->toAudioPortConfig(&patch.sources[1]);
patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
patch.num_sources = 2;
}
afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
status);
if (status == NO_ERROR) {
mCallRxPatch = new AudioPatch(&patch, mUidCached);
mCallRxPatch->mAfPatchHandle = afPatchHandle;
mCallRxPatch->mUid = mUidCached;
}
createTxPatch = true;
}
if (createTxPatch) { // create TX path audio patch
struct audio_patch patch;
patch.num_sources = 1;
patch.num_sinks = 1;
deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
ALOG_ASSERT(!deviceList.isEmpty(),
"updateCallRouting() selected device not in input device list");
sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
ALOG_ASSERT(!deviceList.isEmpty(),
"updateCallRouting() no telephony TX device");
sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
audio_io_handle_t output = selectOutput(outputs,
AUDIO_OUTPUT_FLAG_NONE,
AUDIO_FORMAT_INVALID);
// request to reuse existing output stream if one is already opened to reach the TX
// path output device
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"updateCallRouting() RX device output is duplicated");
outputDesc->toAudioPortConfig(&patch.sources[1]);
patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
patch.num_sources = 2;
}
// terminate active capture if on the same HW module as the call TX source device
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
audio_io_handle_t activeInput = mInputs.getActiveInput();
if (activeInput != 0) {
sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
if (activeDesc->getModuleHandle() == txSourceDeviceDesc->getModuleHandle()) {
//FIXME: consider all active sessions
AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
audio_session_t activeSession = activeSessions.keyAt(0);
stopInput(activeInput, activeSession);
releaseInput(activeInput, activeSession);
}
}
afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
status);
if (status == NO_ERROR) {
mCallTxPatch = new AudioPatch(&patch, mUidCached);
mCallTxPatch->mAfPatchHandle = afPatchHandle;
mCallTxPatch->mUid = mUidCached;
}
}
return muteWaitMs;
}
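// Handles audio mode transitions (normal, ringtone, in call, in communication): updates the
// engine state, mutes/unmutes sonification and media around the transition, and re-evaluates
// device selection and call routing for all outputs.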
void AudioPolicyManager::setPhoneState(audio_mode_t state)
{
ALOGV("setPhoneState() state %d", state);
// store previous phone state for management of sonification strategy below
int oldState = mEngine->getPhoneState();
if (mEngine->setPhoneState(state) != NO_ERROR) {
ALOGW("setPhoneState() invalid or same state %d", state);
return;
}
// Open question: can these lines be executed after the switch of volume curves?
// if leaving call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, false, true);
}
// force reevaluating accessibility routing when call stops
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
/**
* Switching to or from the in-call state, or switching between telephony and VoIP, leads to a
* forced routing command.
*/
bool force = ((is_state_in_call(oldState) != is_state_in_call(state))
|| (is_state_in_call(state) && (state != oldState)));
// check for device and output changes triggered by new phone state
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
int delayMs = 0;
if (isStateInCall(state)) {
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
// mute media and sonification strategies and delay device switch by the largest
// latency of any output where either strategy is active.
// This avoids sending the ringtone or music tail into the earpiece or headset.
if ((isStrategyActive(desc, STRATEGY_MEDIA,
SONIFICATION_HEADSET_MUSIC_DELAY,
sysTime) ||
isStrategyActive(desc, STRATEGY_SONIFICATION,
SONIFICATION_HEADSET_MUSIC_DELAY,
sysTime)) &&
(delayMs < (int)desc->latency()*2)) {
delayMs = desc->latency()*2;
}
setStrategyMute(STRATEGY_MEDIA, true, desc);
setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS,
getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
setStrategyMute(STRATEGY_SONIFICATION, true, desc);
setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS,
getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
}
}
if (hasPrimaryOutput()) {
// Note that despite the fact that getNewOutputDevice() is called on the primary output,
// the device returned is not necessarily reachable via this output
audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
// force routing command to audio hardware when ending call
// even if no device change is needed
if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
rxDevice = mPrimaryOutput->device();
}
if (state == AUDIO_MODE_IN_CALL) {
updateCallRouting(rxDevice, delayMs);
} else if (oldState == AUDIO_MODE_IN_CALL) {
if (mCallRxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
mCallRxPatch.clear();
}
if (mCallTxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
mCallTxPatch.clear();
}
setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
} else {
setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
}
}
// if entering in call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, true, true);
}
// force reevaluating accessibility routing when call starts
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
// Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
if (state == AUDIO_MODE_RINGTONE &&
isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
mLimitRingtoneVolume = true;
} else {
mLimitRingtoneVolume = false;
}
}
audio_mode_t AudioPolicyManager::getPhoneState() {
return mEngine->getPhoneState();
}
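// Applies a forced device configuration for the given usage, then re-evaluates A2DP suspend
// state, output and input device selection, call routing and, when needed, stream volumes.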
void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config)
{
ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
if (mEngine->setForceUse(usage, config) != NO_ERROR) {
ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage);
return;
}
bool forceVolumeReeval = (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ||
(usage == AUDIO_POLICY_FORCE_FOR_DOCK) ||
(usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
// check for device and output changes triggered by new force usage
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
uint32_t delayMs = 0;
uint32_t waitMs = 0;
if (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) {
delayMs = TOUCH_SOUND_FIXED_DELAY_MS;
}
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
waitMs = updateCallRouting(newDevice, delayMs);
}
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
audio_devices_t newDevice = getNewOutputDevice(outputDesc, true /*fromCache*/);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
waitMs = setOutputDevice(outputDesc, newDevice, (newDevice != AUDIO_DEVICE_NONE),
delayMs);
}
if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
applyStreamVolumes(outputDesc, newDevice, waitMs, true);
}
}
audio_io_handle_t activeInput = mInputs.getActiveInput();
if (activeInput != 0) {
sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
audio_devices_t newDevice = getNewInputDevice(activeInput);
// Force new input selection if the new device cannot be reached via the current input
if (activeDesc->mProfile->getSupportedDevices().types() & (newDevice & ~AUDIO_DEVICE_BIT_IN)) {
setInputDevice(activeInput, newDevice);
} else {
closeInput(activeInput);
}
}
}
void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
{
ALOGV("setSystemProperty() property %s, value %s", property, value);
}
// Find a direct output profile compatible with the parameters passed, even if the input flags do
// not explicitly request a direct output
sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
audio_devices_t device,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags)
{
// only retain flags that will drive the direct output profile selection
// if explicitly requested
static const uint32_t kRelevantFlags =
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
flags =
(audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
sp<IOProfile> profile;
for (size_t i = 0; i < mHwModules.size(); i++) {
if (mHwModules[i]->mHandle == 0) {
continue;
}
for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
channelMask, NULL /*updatedChannelMask*/,
flags)) {
continue;
}
// reject profiles not corresponding to a device currently available
if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
continue;
}
// if several profiles are compatible, give priority to one with offload capability
if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
continue;
}
profile = curProfile;
if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
break;
}
}
}
return profile;
}
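// Legacy entry point: derives the routing strategy from the stream type and delegates to
// getOutputForDevice() for the device selected by that strategy.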
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo)
{
routing_strategy strategy = getStrategy(stream);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
device, stream, samplingRate, format, channelMask, flags);
return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE,
stream, samplingRate, format, channelMask,
flags, offloadInfo);
}
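// Selects an output for the given audio attributes: policy mixes and explicit routing requests
// are honored first, otherwise the device is selected by the routing strategy derived from the
// attributes and getOutputForDevice() is called.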
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags,
audio_port_handle_t selectedDeviceId,
const audio_offload_info_t *offloadInfo)
{
audio_attributes_t attributes;
if (attr != NULL) {
if (!isValidAttributes(attr)) {
ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
attr->usage, attr->content_type, attr->flags,
attr->tags);
return BAD_VALUE;
}
attributes = *attr;
} else {
if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("getOutputForAttr(): invalid stream type");
return BAD_VALUE;
}
stream_type_to_audio_attributes(*stream, &attributes);
}
sp<SwAudioOutputDescriptor> desc;
if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
if (!audio_has_proportional_frames(format)) {
return BAD_VALUE;
}
*stream = streamTypefromAttributesInt(&attributes);
*output = desc->mIoHandle;
ALOGV("getOutputForAttr() returns output %d", *output);
return NO_ERROR;
}
if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
return BAD_VALUE;
}
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
session, selectedDeviceId);
*stream = streamTypefromAttributesInt(&attributes);
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
if (mAvailableOutputDevices[i]->getId() == selectedDeviceId) {
deviceDesc = mAvailableOutputDevices[i];
break;
}
}
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
device, samplingRate, format, channelMask, flags);
*output = getOutputForDevice(device, session, *stream,
samplingRate, format, channelMask,
flags, offloadInfo);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
return NO_ERROR;
}
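// Opens or reuses an output suitable for the requested device and configuration. A direct
// output is opened when required by the flags or by the configuration; otherwise an already
// opened mixed output is selected via selectOutput().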
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
audio_devices_t device,
audio_session_t session __unused,
audio_stream_type_t stream,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
#ifdef AUDIO_POLICY_TEST
if (mCurOutput != 0) {
ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
if (mTestOutputs[mCurOutput] == 0) {
ALOGV("getOutput() opening test output");
sp<AudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL,
mpClientInterface);
outputDesc->mDevice = mTestDevice;
outputDesc->mLatency = mTestLatencyMs;
outputDesc->mFlags =
(audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
outputDesc->mRefCount[stream] = 0;
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = mTestSamplingRate;
config.channel_mask = mTestChannels;
config.format = mTestFormat;
if (offloadInfo != NULL) {
config.offload_info = *offloadInfo;
}
status = mpClientInterface->openOutput(0,
&mTestOutputs[mCurOutput],
&config,
&outputDesc->mDevice,
String8(""),
&outputDesc->mLatency,
outputDesc->mFlags);
if (status == NO_ERROR) {
outputDesc->mSamplingRate = config.sample_rate;
outputDesc->mFormat = config.format;
outputDesc->mChannelMask = config.channel_mask;
AudioParameter outputCmd = AudioParameter();
outputCmd.addInt(String8("set_id"),mCurOutput);
mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
addOutput(mTestOutputs[mCurOutput], outputDesc);
}
}
return mTestOutputs[mCurOutput];
}
#endif //AUDIO_POLICY_TEST
// open a direct output if required by specified parameters
// force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
// this should normally be set appropriately in the policy configuration file
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
} else if (/* stream == AUDIO_STREAM_MUSIC && */
flags == AUDIO_OUTPUT_FLAG_NONE &&
property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
// use DEEP_BUFFER as default output for music stream type
flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
}
if (stream == AUDIO_STREAM_TTS) {
flags = AUDIO_OUTPUT_FLAG_TTS;
}
sp<IOProfile> profile;
// skip direct output selection if the request can obviously be attached to a mixed output
// and a direct output is not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
audio_channel_count_from_out_mask(channelMask) <= 2) {
goto non_direct_output;
}
// Do not allow offloading if one non offloadable effect is enabled or MasterMono is enabled.
// This prevents creating an offloaded track and tearing it down immediately after start
// when audioflinger detects there is an active non offloadable effect.
// FIXME: We should check the audio session here but we do not have it in this context.
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
samplingRate,
format,
channelMask,
(audio_output_flags_t)flags);
}
if (profile != 0) {
sp<SwAudioOutputDescriptor> outputDesc = NULL;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
outputDesc = desc;
// reuse direct output if currently open and configured with same parameters
if ((samplingRate == outputDesc->mSamplingRate) &&
audio_formats_match(format, outputDesc->mFormat) &&
(channelMask == outputDesc->mChannelMask)) {
outputDesc->mDirectOpenCount++;
ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
return mOutputs.keyAt(i);
}
}
}
// close direct output if currently open and configured with different parameters
if (outputDesc != NULL) {
closeOutput(outputDesc->mIoHandle);
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
defaultOffloadInfo.sample_rate = samplingRate;
defaultOffloadInfo.channel_mask = channelMask;
defaultOffloadInfo.format = format;
defaultOffloadInfo.stream_type = stream;
defaultOffloadInfo.bit_rate = 0;
defaultOffloadInfo.duration_us = -1;
defaultOffloadInfo.has_video = true; // conservative
defaultOffloadInfo.is_streaming = true; // likely
offloadInfo = &defaultOffloadInfo;
}
outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
outputDesc->mDevice = device;
outputDesc->mLatency = 0;
outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = samplingRate;
config.channel_mask = channelMask;
config.format = format;
if (offloadInfo != NULL) {
config.offload_info = *offloadInfo;
}
status = mpClientInterface->openOutput(profile->getModuleHandle(),
&output,
&config,
&outputDesc->mDevice,
String8(""),
&outputDesc->mLatency,
outputDesc->mFlags);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
(samplingRate != 0 && samplingRate != config.sample_rate) ||
(format != AUDIO_FORMAT_DEFAULT && !audio_formats_match(format, config.format)) ||
(channelMask != 0 && channelMask != config.channel_mask)) {
ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
"format %d %d, channelMask %04x %04x", output, samplingRate,
outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
mpClientInterface->closeOutput(output);
}
// fall back to mixer output if possible when the direct output could not be opened
if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
outputDesc->mSamplingRate = config.sample_rate;
outputDesc->mChannelMask = config.channel_mask;
outputDesc->mFormat = config.format;
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
audio_io_handle_t srcOutput = getOutputForEffect();
addOutput(output, outputDesc);
audio_io_handle_t dstOutput = getOutputForEffect();
if (dstOutput == output) {
mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
}
mPreviousOutputs = mOutputs;
ALOGV("getOutput() returns new direct output %d", output);
mpClientInterface->onAudioPortListUpdate();
return output;
}
non_direct_output:
// A request for HW A/V sync cannot fall back to a mixed output because timestamps are
// embedded in the audio data
if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
return AUDIO_IO_HANDLE_NONE;
}
// ignoring channel mask due to downmix capability in mixer
// open a non direct output
// for non direct outputs, only PCM is supported
if (audio_is_linear_pcm(format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
output = selectOutput(outputs, flags, format);
}
ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d, "
"format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
ALOGV(" getOutputForDevice() returns output %d", output);
return output;
}
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format)
{
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously built by getOutputsForDevice()).
// The priority is as follows:
// 1: the output with the highest number of requested policy flags
// 2: the output with the bit depth the closest to the requested one
// 3: the primary output
// 4: the first output in the list
if (outputs.size() == 0) {
return 0;
}
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
audio_io_handle_t outputForFlags = 0;
audio_io_handle_t outputForPrimary = 0;
audio_io_handle_t outputForFormat = 0;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
for (size_t i = 0; i < outputs.size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (!audio_formats_match(format, outputDesc->mFormat)) {
continue;
}
} else if (!audio_is_linear_pcm(format)) {
continue;
}
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
outputForFormat = outputs[i];
bestFormat = outputDesc->mFormat;
}
}
int commonFlags = popcount(outputDesc->mProfile->getFlags() & flags);
if (commonFlags >= maxCommonFlags) {
if (commonFlags == maxCommonFlags) {
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormatForFlags, format)) {
outputForFlags = outputs[i];
bestFormatForFlags = outputDesc->mFormat;
}
} else {
outputForFlags = outputs[i];
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
}
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
outputForPrimary = outputs[i];
}
}
}
if (outputForFlags != 0) {
return outputForFlags;
}
if (outputForFormat != 0) {
return outputForFormat;
}
if (outputForPrimary != 0) {
return outputForPrimary;
}
return outputs[0];
}
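// Marks the start of playback of a stream on the given output: updates the session route
// activity, starts the source and, for rerouting mixes of type MIX_TYPE_RECORDERS, enables the
// corresponding remote submix input.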
status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session)
{
ALOGV("startOutput() output %d, stream %d, session %d",
output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("startOutput() unknown output %d", output);
return BAD_VALUE;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
// Routing?
mOutputRoutes.incRouteActivity(session);
audio_devices_t newDevice;
AudioMix *policyMix = NULL;
const char *address = NULL;
if (outputDesc->mPolicyMix != NULL) {
policyMix = outputDesc->mPolicyMix;
address = policyMix->mDeviceAddress.string();
if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
newDevice = policyMix->mDeviceType;
} else {
newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
} else if (mOutputRoutes.hasRouteChanged(session)) {
newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
checkStrategyRoute(getStrategy(stream), output);
} else {
newDevice = AUDIO_DEVICE_NONE;
}
uint32_t delayMs = 0;
status_t status = startSource(outputDesc, stream, newDevice, address, &delayMs);
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
return status;
}
// Automatically enable the remote submix input when output is started on a rerouting mix
// of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(newDevice) && policyMix != NULL &&
policyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address,
"remote-submix");
}
if (delayMs != 0) {
usleep(delayMs * 1000);
}
return status;
}
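// Starts playback of a stream on an output: increments the stream usage count, applies device
// selection and volume, and reports the delay the caller should observe before starting audio.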
status_t AudioPolicyManager::startSource(sp<AudioOutputDescriptor> outputDesc,
audio_stream_type_t stream,
audio_devices_t device,
const char *address,
uint32_t *delayMs)
{
// cannot start playback of STREAM_TTS if any other output is being used
uint32_t beaconMuteLatency = 0;
*delayMs = 0;
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
}
} else {
// some playback other than beacon starts
beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
}
// force device change if the output is inactive and no audio patch is already present.
// check active before incrementing usage count
bool force = !outputDesc->isActive() &&
(outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
// increment usage count for this stream on the requested output:
// NOTE that the usage count is the same for duplicated output and hardware output which is
// necessary for a correct control of hardware output routing by startOutput() and stopOutput()
outputDesc->changeRefCount(stream, 1);
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
// starting an output being rerouted?
if (device == AUDIO_DEVICE_NONE) {
device = getNewOutputDevice(outputDesc, false /*fromCache*/);
}
routing_strategy strategy = getStrategy(stream);
bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
(strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
(beaconMuteLatency > 0);
uint32_t waitMs = beaconMuteLatency;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc) {
// force a device change if any other output is:
// - managed by the same hw module
// - has a current device selection that differs from selected device.
// - supports currently selected device
// - has an active audio patch
// In this case, the audio HAL must receive the new device selection so that it can
// change the device currently selected by the other active output.
if (outputDesc->sharesHwModuleWith(desc) &&
desc->device() != device &&
desc->supportedDevices() & device &&
desc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
force = true;
}
// wait for audio on other active outputs to be presented when starting a notification so
// that the audio focus effect can propagate, or when a mute/unmute event occurred for the
// beacon
uint32_t latency = desc->latency();
if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
waitMs = latency;
}
}
}
uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force, 0, NULL, address);
// handle special case for sonification while in call
if (isInCall()) {
handleIncallSonification(stream, true, false);
}
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
mVolumeCurves->getVolumeIndex(stream, device),
outputDesc,
device);
// update the outputs if starting an output with a stream that can affect notification
// routing
handleNotificationRoutingForStream(stream);
// force reevaluating accessibility routing when ringtone or alarm starts
if (strategy == STRATEGY_SONIFICATION) {
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
if (waitMs > muteWaitMs) {
*delayMs = waitMs - muteWaitMs;
}
}
return NO_ERROR;
}
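// Marks the end of playback of a stream on the given output: disables the remote submix input
// for rerouting mixes when the last reference to the stream is released, updates the session
// route activity and delegates to stopSource().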
status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session)
{
ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("stopOutput() unknown output %d", output);
return BAD_VALUE;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
if (outputDesc->mRefCount[stream] == 1) {
// Automatically disable the remote submix input when output is stopped on a
// rerouting mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(outputDesc->mDevice) &&
outputDesc->mPolicyMix != NULL &&
outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
outputDesc->mPolicyMix->mDeviceAddress,
"remote-submix");
}
}
// Routing?
bool forceDeviceUpdate = false;
if (outputDesc->mRefCount[stream] > 0) {
int activityCount = mOutputRoutes.decRouteActivity(session);
forceDeviceUpdate = (mOutputRoutes.hasRoute(session) && (activityCount == 0));
if (forceDeviceUpdate) {
checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
}
}
return stopSource(outputDesc, stream, forceDeviceUpdate);
}
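// Decrements the stream usage count on the output and, when it reaches zero or a device update
// is forced, re-evaluates device selection for this output and for other outputs sharing the
// same HW module, delaying the switch so that audio buffers can drain.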
status_t AudioPolicyManager::stopSource(sp<AudioOutputDescriptor> outputDesc,
audio_stream_type_t stream,
bool forceDeviceUpdate)
{
// always handle stream stop, check which stream type is stopping
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
// handle special case for sonification while in call
if (isInCall()) {
handleIncallSonification(stream, false, false);
}
if (outputDesc->mRefCount[stream] > 0) {
// decrement usage count of this stream on the output
outputDesc->changeRefCount(stream, -1);
// store time at which the stream was stopped - see isStreamActive()
if (outputDesc->mRefCount[stream] == 0 || forceDeviceUpdate) {
outputDesc->mStopTime[stream] = systemTime();
audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
// delay the device switch by twice the latency because stopOutput() is executed when
// the track stop() command is received and at that time the audio track buffer can
// still contain data that needs to be drained. The latency only covers the audio HAL
// and kernel buffers. Also the latency does not always include additional delay in the
// audio path (audio DSP, CODEC ...)
setOutputDevice(outputDesc, newDevice, false, outputDesc->latency()*2);
// force restoring the device selection on other active outputs if it differs from the
// one being selected for this output
uint32_t delayMs = outputDesc->latency()*2;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc &&
desc->isActive() &&
outputDesc->sharesHwModuleWith(desc) &&
(newDevice != desc->device())) {
audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
bool force = desc->device() != newDevice2;
setOutputDevice(desc,
newDevice2,
force,
delayMs);
// re-apply device specific volume if not done by setOutputDevice()
if (!force) {
applyStreamVolumes(desc, newDevice2, delayMs);
}
}
}
// update the outputs if stopping one with a stream that can affect notification routing
handleNotificationRoutingForStream(stream);
}
return NO_ERROR;
} else {
ALOGW("stopOutput() refcount is already 0");
return INVALID_OPERATION;
}
}
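// Releases an output obtained with getOutputForAttr(): removes the session route and, for
// direct outputs, closes the output when its open count reaches zero.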
void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
audio_stream_type_t stream __unused,
audio_session_t session __unused)
{
ALOGV("releaseOutput() %d", output);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("releaseOutput() releasing unknown output %d", output);
return;
}
#ifdef AUDIO_POLICY_TEST
int testIndex = testOutputIndex(output);
if (testIndex != 0) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
if (outputDesc->isActive()) {
mpClientInterface->closeOutput(output);
removeOutput(output);
mTestOutputs[testIndex] = 0;
}
return;
}
#endif //AUDIO_POLICY_TEST
// Routing
mOutputRoutes.removeRoute(session);
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (desc->mDirectOpenCount <= 0) {
ALOGW("releaseOutput() invalid open count %d for output %d",
desc->mDirectOpenCount, output);
return;
}
if (--desc->mDirectOpenCount == 0) {
closeOutput(output);
// If effects were present on the output, audioflinger moved them to the primary
// output by default: move them back to the appropriate output.
audio_io_handle_t dstOutput = getOutputForEffect();
if (hasPrimaryOutput() && dstOutput != mPrimaryOutput->mIoHandle) {
mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX,
mPrimaryOutput->mIoHandle, dstOutput);
}
mpClientInterface->onAudioPortListUpdate();
}
}
}
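// Selects an input for the given recording attributes: resolves the capture device (taking
// dynamic policy mixes and explicit routing into account), reports the input type to the
// caller and delegates to getInputForDevice().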
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
audio_port_handle_t selectedDeviceId,
input_type_t *inputType)
{
ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
"session %d, flags %#x",
attr->source, samplingRate, format, channelMask, session, flags);
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
audio_devices_t device;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
audio_source_t inputSource = attr->source;
audio_source_t halInputSource;
AudioMix *policyMix = NULL;
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
}
halInputSource = inputSource;
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
if (mAvailableInputDevices[i]->getId() == selectedDeviceId) {
deviceDesc = mAvailableInputDevices[i];
break;
}
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
if (ret != NO_ERROR) {
return ret;
}
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
address = String8(attr->tags + strlen("addr="));
} else {
device = getDeviceAndMixForInputSource(inputSource, &policyMix);
if (device == AUDIO_DEVICE_NONE) {
ALOGW("getInputForAttr() could not find device for source %d", inputSource);
return BAD_VALUE;
}
if (policyMix != NULL) {
address = policyMix->mDeviceAddress;
if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
// there is an external policy, but this input is attached to a mix of recorders,
// meaning it receives audio injected into the framework, so the recorder doesn't
// know about it and is therefore considered "legacy"
*inputType = API_INPUT_LEGACY;
} else {
// recording a mix of players defined by an external policy, we're rerouting for
// an external policy
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
}
} else if (audio_is_remote_submix_device(device)) {
address = String8("0");
*inputType = API_INPUT_MIX_CAPTURE;
} else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
*inputType = API_INPUT_TELEPHONY_RX;
} else {
*inputType = API_INPUT_LEGACY;
}
}
*input = getInputForDevice(device, address, session, uid, inputSource,
samplingRate, format, channelMask, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
mInputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
ALOGV("getInputForAttr() returns input type = %d", *inputType);
return NO_ERROR;
}
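// Opens an input on a profile compatible with the requested device and configuration, relaxing
// the requested flags if no profile matches. For AUDIO_SOURCE_HOTWORD captures attached to a
// sound trigger session, the input handle reserved for that session is reused.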
audio_io_handle_t AudioPolicyManager::getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_input_flags_t flags,
AudioMix *policyMix)
{
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
audio_source_t halInputSource = inputSource;
bool isSoundTrigger = false;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
ALOGV("SoundTrigger capture on session %d input %d", session, input);
} else {
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
// samplingRate and flags may be updated by getInputProfile
uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
audio_format_t profileFormat = format;
audio_channel_mask_t profileChannelMask = channelMask;
audio_input_flags_t profileFlags = flags;
for (;;) {
profile = getInputProfile(device, address,
profileSamplingRate, profileFormat, profileChannelMask,
profileFlags);
if (profile != 0) {
break; // success
} else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
} else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForDevice() could not find profile for device 0x%X,"
"samplingRate %u, format %#x, channelMask 0x%X, flags %#x",
device, samplingRate, format, channelMask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
if (profile->getModuleHandle() == 0) {
ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName());
return input;
}
sp<AudioSession> audioSession = new AudioSession(session,
inputSource,
format,
samplingRate,
channelMask,
flags,
uid,
isSoundTrigger,
policyMix, mpClientInterface);
// TODO enable input reuse
#if 0
// reuse an open input if possible
for (size_t i = 0; i < mInputs.size(); i++) {
sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
// reuse input if it shares the same profile and same sound trigger attribute
if (profile == desc->mProfile &&
isSoundTrigger == desc->isSoundTrigger()) {
sp<AudioSession> as = desc->getAudioSession(session);
if (as != 0) {
// do not allow mismatched properties on the same session
if (as->matches(audioSession)) {
as->changeOpenCount(1);
} else {
ALOGW("getInputForDevice() record with different attributes"
" exists for session %d", session);
return input;
}
} else {
desc->addAudioSession(session, audioSession);
}
ALOGV("getInputForDevice() reusing input %d", mInputs.keyAt(i));
return mInputs.keyAt(i);
}
}
#endif
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
config.sample_rate = profileSamplingRate;
config.channel_mask = profileChannelMask;
config.format = profileFormat;
status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
&input,
&config,
&device,
address,
halInputSource,
profileFlags);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
(profileSamplingRate != config.sample_rate) ||
!audio_formats_match(profileFormat, config.format) ||
(profileChannelMask != config.channel_mask)) {
ALOGW("getInputForAttr() failed opening input: samplingRate %d"
", format %d, channelMask %x",
samplingRate, format, channelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
mpClientInterface->closeInput(input);
}
return AUDIO_IO_HANDLE_NONE;
}
sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
inputDesc->mSamplingRate = profileSamplingRate;
inputDesc->mFormat = profileFormat;
inputDesc->mChannelMask = profileChannelMask;
inputDesc->mDevice = device;
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
addInput(input, inputDesc);
mpClientInterface->onAudioPortListUpdate();
return input;
}
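// Marks the start of capture on the given input and session: enforces concurrent capture rules
// (possibly preempting an active AUDIO_SOURCE_HOTWORD input), applies input routing and
// notifies the sound trigger service and dynamic policy mixes of the activity change.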
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
audio_session_t session)
{
ALOGV("startInput() input %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("startInput() unknown input %d", input);
return BAD_VALUE;
}
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("startInput() unknown session %d on input %d", session, input);
return BAD_VALUE;
}
// virtual input devices are compatible with other input devices
if (!is_virtual_input_device(inputDesc->mDevice)) {
// for a non-virtual input device, check if there is another (non-virtual) active input
audio_io_handle_t activeInput = mInputs.getActiveInput();
if (activeInput != 0 && activeInput != input) {
// If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
// otherwise the active input continues and the new input cannot be started.
sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
if ((activeDesc->inputSource() == AUDIO_SOURCE_HOTWORD) &&
!activeDesc->hasPreemptedSession(session)) {
ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
//FIXME: consider all active sessions
AudioSessionCollection activeSessions = activeDesc->getActiveAudioSessions();
audio_session_t activeSession = activeSessions.keyAt(0);
SortedVector<audio_session_t> sessions =
activeDesc->getPreemptedSessions();
sessions.add(activeSession);
inputDesc->setPreemptedSessions(sessions);
stopInput(activeInput, activeSession);
releaseInput(activeInput, activeSession);
} else {
ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
return INVALID_OPERATION;
}
}
// Do not allow capture if an active voice call is using a software patch and
// the call TX source device is on the same HW module.
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
return INVALID_OPERATION;
}
}
// Routing?
mInputRoutes.incRouteActivity(session);
if (!inputDesc->isActive() || mInputRoutes.hasRouteChanged(session)) {
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
// indicate active capture to sound trigger service if starting capture from a mic on
// primary HW module
audio_devices_t device = getNewInputDevice(input);
audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
SoundTrigger::setCaptureState(true);
}
setInputDevice(input, device, true /* force */);
// automatically enable the remote submix output when input is started if not
// used by a policy mix of type MIX_TYPE_RECORDERS
// For remote submix (a virtual device), we open only one input per capture request.
if (audio_is_remote_submix_device(inputDesc->mDevice)) {
String8 address = String8("");
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address, "remote-submix");
}
}
}
ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
audioSession->changeActiveCount(1);
return NO_ERROR;
}
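// Decrements the session's active count on the input. Once the input becomes fully inactive,
// notifies any dynamic policy mix, disconnects the remote submix output device auto-connected
// by startInput(), resets the input routing and updates the sound trigger capture state.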
status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
audio_session_t session)
{
ALOGV("stopInput() input %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("stopInput() unknown input %d", input);
return BAD_VALUE;
}
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("stopInput() unknown session %d on input %d", session, input);
return BAD_VALUE;
}
if (audioSession->activeCount() == 0) {
ALOGW("stopInput() input %d already stopped", input);
return INVALID_OPERATION;
}
audioSession->changeActiveCount(-1);
// Routing?
mInputRoutes.decRouteActivity(session);
if (!inputDesc->isActive()) {
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
// automatically disable the remote submix output when input is stopped if not
// used by a policy mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(inputDesc->mDevice)) {
String8 address = String8("");
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address, "remote-submix");
}
}
audio_devices_t device = inputDesc->mDevice;
resetInputDevice(input);
// indicate inactive capture to sound trigger service if stopping capture from a mic on
// primary HW module
audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
SoundTrigger::setCaptureState(false);
}
inputDesc->clearPreemptedSessions();
}
return NO_ERROR;
}
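// Removes the routing entry for the session and decrements its open count; the session is
// dropped when the count reaches zero and the input itself is closed only once no session
// keeps it open.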
void AudioPolicyManager::releaseInput(audio_io_handle_t input,
audio_session_t session)
{
ALOGV("releaseInput() %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("releaseInput() releasing unknown input %d", input);
return;
}
// Routing
mInputRoutes.removeRoute(session);
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
ALOG_ASSERT(inputDesc != 0);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("releaseInput() unknown session %d on input %d", session, input);
return;
}
if (audioSession->openCount() == 0) {
ALOGW("releaseInput() invalid open count %d on session %d",
audioSession->openCount(), session);
return;
}
if (audioSession->changeOpenCount(-1) == 0) {
inputDesc->removeAudioSession(session);
}
if (inputDesc->getOpenRefCount() > 0) {
ALOGV("releaseInput() exit > 0");
return;
}
closeInput(input);
mpClientInterface->onAudioPortListUpdate();
ALOGV("releaseInput() exit");
}
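// Closes every input currently opened by the policy manager, releasing any audio patch
// attached to them, and triggers an audio patch list update if at least one patch was removed.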
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
(void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
mpClientInterface->closeInput(mInputs.keyAt(input_index));
}
mInputs.clear();
SoundTrigger::setCaptureState(false);
nextAudioPortGeneration();
if (patchRemoved) {
mpClientInterface->onAudioPatchListUpdate();
}
}
void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax)
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
// initialize other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
}
}
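// Stores the new volume index for the stream (and all streams sharing its volume curves) on
// the given device, then applies it to every output matching the rules detailed below.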
status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device)
{
if ((index < mVolumeCurves->getVolumeIndexMin(stream)) ||
(index > mVolumeCurves->getVolumeIndexMax(stream))) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
// Force max volume if stream cannot be muted
if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
stream, device, index);
// update other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
}
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
// - The device (or devices) selected by the strategy corresponding to this stream includes
// the requested device
// - For non default requested device, currently selected device on the output is either the
// requested device or one of the devices selected by the strategy
// - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
// no specific device volume value exists for currently selected device.
status_t status = NO_ERROR;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
(isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, false /*fromCache*/);
if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
((curStreamDevice & device) == 0)) {
continue;
}
bool applyVolume;
if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
curStreamDevice |= device;
applyVolume = (curDevice & curStreamDevice) != 0;
} else {
applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
stream, Volume::getDeviceForVolume(curStreamDevice));
}
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
status_t volStatus =
checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
(stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
}
}
}
}
return status;
}
status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
int *index,
audio_devices_t device)
{
if (index == NULL) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
// if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device corresponding to
// the strategy the stream belongs to.
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
}
device = Volume::getDeviceForVolume(device);
*index = mVolumeCurves->getVolumeIndex(stream, device);
ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
return NO_ERROR;
}
audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
const SortedVector<audio_io_handle_t>& outputs)
{
// select one output among several suitable for global effects.
// The priority is as follows:
// 1: An offloaded output. If the effect ends up not being offloadable,
// AudioFlinger will invalidate the track and the offloaded output
// will be closed causing the effect to be moved to a PCM output.
// 2: A deep buffer output
// 3: the first output in the list
if (outputs.size() == 0) {
return 0;
}
audio_io_handle_t outputOffloaded = 0;
audio_io_handle_t outputDeepBuffer = 0;
for (size_t i = 0; i < outputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
outputOffloaded = outputs[i];
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
outputDeepBuffer = outputs[i];
}
}
ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
outputOffloaded, outputDeepBuffer);
if (outputOffloaded != 0) {
return outputOffloaded;
}
if (outputDeepBuffer != 0) {
return outputDeepBuffer;
}
return outputs[0];
}
audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
{
// apply simple rule where global effects are attached to the same output as MUSIC streams
routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
audio_io_handle_t output = selectOutputForEffects(dstOutputs);
ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
output, (desc == NULL) ? "unspecified" : desc->name, (desc == NULL) ? 0 : desc->flags);
return output;
}
status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
int session,
int id)
{
ssize_t index = mOutputs.indexOfKey(io);
if (index < 0) {
index = mInputs.indexOfKey(io);
if (index < 0) {
ALOGW("registerEffect() unknown io %d", io);
return INVALID_OPERATION;
}
}
return mEffects.registerEffect(desc, io, strategy, session, id);
}
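// A stream is reported active if any stream sharing its volume curves has been active on an
// output within the last inPastMs milliseconds.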
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
bool active = false;
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
}
return active;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
return mOutputs.isStreamActiveRemotely(stream, inPastMs);
}
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
{
for (size_t i = 0; i < mInputs.size(); i++) {
const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
if (inputDescriptor->isSourceActive(source)) {
return true;
}
}
return false;
}
// Register a list of custom mixes with their attributes and format.
// When a mix is registered, corresponding input and output profiles are
// added to the remote submix hw module. The profile contains only the
// parameters (sampling rate, format...) specified by the mix.
// The corresponding input remote submix device is also connected.
//
// When a remote submix device is connected, the address is checked to select the
// appropriate profile and the corresponding input or output stream is opened.
//
// When capture starts, getInputForAttr() will:
// - 1 look for a mix matching the address passed in attributes tags if any
// - 2 if none found, getDeviceForInputSource() will:
// - 2.1 look for a mix matching the attributes source
// - 2.2 if none found, default to device selection by policy rules
// At this time, the corresponding output remote submix device is also connected
// and active playback use cases can be transferred to this mix if needed when reconnecting
// after AudioTracks are invalidated
//
// When playback starts, getOutputForAttr() will:
// - 1 look for a mix matching the address passed in attributes tags if any
// - 2 if none found, look for a mix matching the attributes usage
// - 3 if none found, default to device and output selection by policy rules.
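// Illustrative sketch only (client-side types and call signatures are assumed, not defined
// in this file): a system component typically builds the mixes and hands them to the policy
// through AudioPolicyService, e.g.
//   Vector<AudioMix> mixes;
//   mixes.add(AudioMix(criteria, MIX_TYPE_PLAYERS, format,
//                      MIX_ROUTE_FLAG_LOOP_BACK, address, 0 /*callback flags*/));
//   AudioSystem::registerPolicyMixes(mixes, true /*register*/);
// which eventually lands in registerPolicyMixes() below.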
status_t AudioPolicyManager::registerPolicyMixes(Vector<AudioMix> mixes)
{
ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size());
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
for (size_t i = 0; i < mixes.size(); i++) {
// we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
res = INVALID_OPERATION;
break;
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
// Loop back through "remote submix"
if (rSubmixModule == 0) {
for (size_t j = 0; j < mHwModules.size(); j++) {
if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
&& mHwModules[j]->mHandle != 0) {
rSubmixModule = mHwModules[j];
break;
}
}
}
ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
if (rSubmixModule == 0) {
ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
res = INVALID_OPERATION;
break;
}
String8 address = mixes[i].mDeviceAddress;
if (mPolicyMixes.registerMix(address, mixes[i], 0 /*output desc*/) != NO_ERROR) {
ALOGE(" Error registering mix %zu for address %s", i, address.string());
res = INVALID_OPERATION;
break;
}
audio_config_t outputConfig = mixes[i].mFormat;
audio_config_t inputConfig = mixes[i].mFormat;
// NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
// stereo and let audio flinger do the channel conversion if needed.
outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
rSubmixModule->addOutputProfile(address, &outputConfig,
AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
rSubmixModule->addInputProfile(address, &inputConfig,
AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.string(), "remote-submix");
} else {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.string(), "remote-submix");
}
} else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
String8 address = mixes[i].mDeviceAddress;
audio_devices_t device = mixes[i].mDeviceType;
ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
i, mixes.size(), device, address.string());
bool foundOutput = false;
for (size_t j = 0 ; j < mOutputs.size() ; j++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
if ((patch != 0) && (patch->mPatch.num_sinks != 0)
&& (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
&& (patch->mPatch.sinks[0].ext.device.type == device)
&& (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
if (mPolicyMixes.registerMix(address, mixes[i], desc) != NO_ERROR) {
res = INVALID_OPERATION;
} else {
foundOutput = true;
}
break;
}
}
if (res != NO_ERROR) {
ALOGE(" Error registering mix %zu for device 0x%X addr %s",
i, device, address.string());
res = INVALID_OPERATION;
break;
} else if (!foundOutput) {
ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
i, device, address.string());
res = INVALID_OPERATION;
break;
}
}
}
if (res != NO_ERROR) {
unregisterPolicyMixes(mixes);
}
return res;
}
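// Reverses registerPolicyMixes(): for loopback mixes, the remote submix input/output devices
// are disconnected and the dynamically added profiles removed; for render mixes the
// registration is simply dropped. Processing continues across errors so every mix gets a
// chance to be unregistered, and an error status is returned if any of them failed.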
status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
{
ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
for (size_t i = 0; i < mixes.size(); i++) {
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
if (rSubmixModule == 0) {
for (size_t j = 0; j < mHwModules.size(); j++) {
if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
&& mHwModules[j]->mHandle != 0) {
rSubmixModule = mHwModules[j];
break;
}
}
}
if (rSubmixModule == 0) {
res = INVALID_OPERATION;
continue;
}
String8 address = mixes[i].mDeviceAddress;
if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
}
if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.string(), "remote-submix");
}
if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address.string(), "remote-submix");
}
rSubmixModule->removeOutputProfile(address);
rSubmixModule->removeInputProfile(address);
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
res = INVALID_OPERATION;
continue;
}
}
}
return res;
}
status_t AudioPolicyManager::dump(int fd)
{
const size_t SIZE = 256;
char buffer[SIZE];
String8 result;
snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
result.append(buffer);
snprintf(buffer, SIZE, " Primary Output: %d\n",
hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
result.append(buffer);
snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
result.append(buffer);
snprintf(buffer, SIZE, " Force use for communications %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
result.append(buffer);
snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
result.append(buffer);
snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
result.append(buffer);
snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
result.append(buffer);
write(fd, result.string(), result.size());
mAvailableOutputDevices.dump(fd, String8("Available output"));
mAvailableInputDevices.dump(fd, String8("Available input"));
mHwModules.dump(fd);
mOutputs.dump(fd);
mInputs.dump(fd);
mVolumeCurves->dump(fd);
mEffects.dump(fd);
mAudioPatches.dump(fd);
return NO_ERROR;
}
// This function checks for the parameters which can be offloaded.
// This can be enhanced depending on the capability of the DSP and policy
// of the system.
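// A caller typically fills an audio_offload_info_t with the fields logged below and queries
// support before opening an offloaded track (for instance through the client-side
// AudioSystem::isOffloadSupported() entry point, assumed here for illustration).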
bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo)
{
ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
" BitRate=%u, duration=%" PRId64 " us, has_video=%d",
offloadInfo.sample_rate, offloadInfo.channel_mask,
offloadInfo.format,
offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
offloadInfo.has_video);
if (mMasterMono) {
return false; // no offloading if mono is set.
}
// Check if offload has been disabled
char propValue[PROPERTY_VALUE_MAX];
if (property_get("audio.offload.disable", propValue, "0")) {
if (atoi(propValue) != 0) {
ALOGV("offload disabled by audio.offload.disable=%s", propValue );
return false;
}
}
// Check if stream type is music, then only allow offload as of now.
if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
{
ALOGV("isOffloadSupported: stream_type != MUSIC, returning false");
return false;
}
//TODO: enable audio offloading with video when ready
const bool allowOffloadWithVideo =
property_get_bool("audio.offload.video", false /* default_value */);
if (offloadInfo.has_video && !allowOffloadWithVideo) {
ALOGV("isOffloadSupported: has_video == true, returning false");
return false;
}
// If duration is less than minimum value defined in property, return false
if (property_get("audio.offload.min.duration.secs", propValue, NULL)) {
if (offloadInfo.duration_us < (atoi(propValue) * 1000000 )) {
ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%s)", propValue);
return false;
}
} else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS);
return false;
}
// Do not allow offloading if a non offloadable effect is enabled. This prevents creating
// an offloaded track that would be invalidated and torn down right after start when
// audioflinger detects the active non offloadable effect.
// FIXME: We should check the audio session here but we do not have it in this context.
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
if (mEffects.isNonOffloadableEffectEnabled()) {
return false;
}
// See if there is a profile to support this; the device is ignored (AUDIO_DEVICE_NONE).
sp<IOProfile> profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
offloadInfo.sample_rate,
offloadInfo.format,
offloadInfo.channel_mask,
AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
return (profile != 0);
}
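// Fills "ports" with at most the caller-supplied *num_ports entries matching the requested
// role and type, then returns the total number of matching ports in *num_ports (which may
// exceed the number actually written) and the current port list generation in *generation.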
status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
audio_port_type_t type,
unsigned int *num_ports,
struct audio_port *ports,
unsigned int *generation)
{
if (num_ports == NULL || (*num_ports != 0 && ports == NULL) ||
generation == NULL) {
return BAD_VALUE;
}
ALOGV("listAudioPorts() role %d type %d num_ports %d ports %p", role, type, *num_ports, ports);
if (ports == NULL) {
*num_ports = 0;
}
size_t portsWritten = 0;
size_t portsMax = *num_ports;
*num_ports = 0;
if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_DEVICE) {
// do not report devices with type AUDIO_DEVICE_IN_STUB or AUDIO_DEVICE_OUT_STUB
// as they are used by stub HALs by convention
if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
if (mAvailableOutputDevices[i]->type() == AUDIO_DEVICE_OUT_STUB) {
continue;
}
if (portsWritten < portsMax) {
mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
}
if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_STUB) {
continue;
}
if (portsWritten < portsMax) {
mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
}
(*num_ports)++;
}
}
}
if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_MIX) {
if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
for (size_t i = 0; i < mInputs.size() && portsWritten < portsMax; i++) {
mInputs[i]->toAudioPort(&ports[portsWritten++]);
}
*num_ports += mInputs.size();
}
if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
size_t numOutputs = 0;
for (size_t i = 0; i < mOutputs.size(); i++) {
if (!mOutputs[i]->isDuplicated()) {
numOutputs++;
if (portsWritten < portsMax) {
mOutputs[i]->toAudioPort(&ports[portsWritten++]);
}
}
}
*num_ports += numOutputs;
}
}
*generation = curAudioPortGeneration();
ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports);
return NO_ERROR;
}
status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused)
{
return NO_ERROR;
}
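// Creates or updates an audio patch on behalf of the given uid. Three topologies are
// supported: mix source to device sink(s) (output rerouting), device source to mix sink
// (input rerouting), and device source to device sink(s), the latter going through a
// software bridge in the PatchPanel when the devices sit on different HW modules or the
// audio HAL predates version 3.0.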
status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
uid_t uid)
{
ALOGV("createAudioPatch()");
if (handle == NULL || patch == NULL) {
return BAD_VALUE;
}
ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
return BAD_VALUE;
}
// only one source per audio patch supported for now
if (patch->num_sources > 1) {
return INVALID_OPERATION;
}
if (patch->sources[0].role != AUDIO_PORT_ROLE_SOURCE) {
return INVALID_OPERATION;
}
for (size_t i = 0; i < patch->num_sinks; i++) {
if (patch->sinks[i].role != AUDIO_PORT_ROLE_SINK) {
return INVALID_OPERATION;
}
}
sp<AudioPatch> patchDesc;
ssize_t index = mAudioPatches.indexOfKey(*handle);
ALOGV("createAudioPatch source id %d role %d type %d", patch->sources[0].id,
patch->sources[0].role,
patch->sources[0].type);
#if LOG_NDEBUG == 0
for (size_t i = 0; i < patch->num_sinks; i++) {
ALOGV("createAudioPatch sink %zu: id %d role %d type %d", i, patch->sinks[i].id,
patch->sinks[i].role,
patch->sinks[i].type);
}
#endif
if (index >= 0) {
patchDesc = mAudioPatches.valueAt(index);
ALOGV("createAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
mUidCached, patchDesc->mUid, uid);
if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
return INVALID_OPERATION;
}
} else {
*handle = AUDIO_PATCH_HANDLE_NONE;
}
if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
if (outputDesc == NULL) {
ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id);
return BAD_VALUE;
}
ALOG_ASSERT(!outputDesc->isDuplicated(), "duplicated output %d in patch sources",
outputDesc->mIoHandle);
if (patchDesc != 0) {
if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
ALOGV("createAudioPatch() source id differs for patch current id %d new id %d",
patchDesc->mPatch.sources[0].id, patch->sources[0].id);
return BAD_VALUE;
}
}
DeviceVector devices;
for (size_t i = 0; i < patch->num_sinks; i++) {
// Only support mix to devices connection
// TODO add support for mix to mix connection
if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
ALOGV("createAudioPatch() source mix but sink is not a device");
return INVALID_OPERATION;
}
sp<DeviceDescriptor> devDesc =
mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
if (devDesc == 0) {
ALOGV("createAudioPatch() out device not found for id %d", patch->sinks[i].id);
return BAD_VALUE;
}
if (!outputDesc->mProfile->isCompatibleProfile(devDesc->type(),
devDesc->mAddress,
patch->sources[0].sample_rate,
NULL, // updatedSamplingRate
patch->sources[0].format,
NULL, // updatedFormat
patch->sources[0].channel_mask,
NULL, // updatedChannelMask
AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
ALOGV("createAudioPatch() profile not supported for device %08x",
devDesc->type());
return INVALID_OPERATION;
}
devices.add(devDesc);
}
if (devices.size() == 0) {
return INVALID_OPERATION;
}
// TODO: reconfigure output format and channels here
ALOGV("createAudioPatch() setting device %08x on output %d",
devices.types(), outputDesc->mIoHandle);
setOutputDevice(outputDesc, devices.types(), true, 0, handle);
index = mAudioPatches.indexOfKey(*handle);
if (index >= 0) {
if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
ALOGW("createAudioPatch() setOutputDevice() did not reuse the patch provided");
}
patchDesc = mAudioPatches.valueAt(index);
patchDesc->mUid = uid;
ALOGV("createAudioPatch() success");
} else {
ALOGW("createAudioPatch() setOutputDevice() failed to create a patch");
return INVALID_OPERATION;
}
} else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
// input device to input mix connection
// only one sink supported when connecting an input device to a mix
if (patch->num_sinks > 1) {
return INVALID_OPERATION;
}
sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
if (inputDesc == NULL) {
return BAD_VALUE;
}
if (patchDesc != 0) {
if (patchDesc->mPatch.sinks[0].id != patch->sinks[0].id) {
return BAD_VALUE;
}
}
sp<DeviceDescriptor> devDesc =
mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
if (devDesc == 0) {
return BAD_VALUE;
}
if (!inputDesc->mProfile->isCompatibleProfile(devDesc->type(),
devDesc->mAddress,
patch->sinks[0].sample_rate,
NULL, /*updatedSampleRate*/
patch->sinks[0].format,
NULL, /*updatedFormat*/
patch->sinks[0].channel_mask,
NULL, /*updatedChannelMask*/
// FIXME for the parameter type,
// and the NONE
(audio_output_flags_t)
AUDIO_INPUT_FLAG_NONE)) {
return INVALID_OPERATION;
}
// TODO: reconfigure output format and channels here
ALOGV("createAudioPatch() setting device %08x on output %d",
devDesc->type(), inputDesc->mIoHandle);
setInputDevice(inputDesc->mIoHandle, devDesc->type(), true, handle);
index = mAudioPatches.indexOfKey(*handle);
if (index >= 0) {
if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
ALOGW("createAudioPatch() setInputDevice() did not reuse the patch provided");
}
patchDesc = mAudioPatches.valueAt(index);
patchDesc->mUid = uid;
ALOGV("createAudioPatch() success");
} else {
ALOGW("createAudioPatch() setInputDevice() failed to create a patch");
return INVALID_OPERATION;
}
} else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
// device to device connection
if (patchDesc != 0) {
if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
return BAD_VALUE;
}
}
sp<DeviceDescriptor> srcDeviceDesc =
mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
if (srcDeviceDesc == 0) {
return BAD_VALUE;
}
// update source and sink with our own data as the data passed in the patch may
// be incomplete.
struct audio_patch newPatch = *patch;
srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
for (size_t i = 0; i < patch->num_sinks; i++) {
if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
ALOGV("createAudioPatch() source device but one sink is not a device");
return INVALID_OPERATION;
}
sp<DeviceDescriptor> sinkDeviceDesc =
mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
if (sinkDeviceDesc == 0) {
return BAD_VALUE;
}
sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
// create a software bridge in PatchPanel if:
// - source and sink devices are on different HW modules OR
// - audio HAL version is < 3.0
if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
(srcDeviceDesc->mModule->getHalVersion() < AUDIO_DEVICE_API_VERSION_3_0)) {
// support only one sink device for now to simplify output selection logic
if (patch->num_sinks > 1) {
return INVALID_OPERATION;
}
SortedVector<audio_io_handle_t> outputs =
getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
// if the sink device is reachable via an opened output stream, request to go via
// this output stream by adding a second source to the patch description
audio_io_handle_t output = selectOutput(outputs,
AUDIO_OUTPUT_FLAG_NONE,
AUDIO_FORMAT_INVALID);
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (outputDesc->isDuplicated()) {
return INVALID_OPERATION;
}
outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
newPatch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
newPatch.num_sources = 2;
}
}
}
// TODO: check from routing capabilities in config file and other conflicting patches
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
if (index >= 0) {
afPatchHandle = patchDesc->mAfPatchHandle;
}
status_t status = mpClientInterface->createAudioPatch(&newPatch,
&afPatchHandle,
0);
ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
status, afPatchHandle);
if (status == NO_ERROR) {
if (index < 0) {
patchDesc = new AudioPatch(&newPatch, uid);
addAudioPatch(patchDesc->mHandle, patchDesc);
} else {
patchDesc->mPatch = newPatch;
}
patchDesc->mAfPatchHandle = afPatchHandle;
*handle = patchDesc->mHandle;
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
} else {
ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
status);
return INVALID_OPERATION;
}
} else {
return BAD_VALUE;
}
} else {
return BAD_VALUE;
}
return NO_ERROR;
}
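// Releases a patch previously created by createAudioPatch() after checking caller ownership:
// default routing is restored on the involved output or input, or the device-to-device
// bridge is torn down in the PatchPanel.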
status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle,
uid_t uid)
{
ALOGV("releaseAudioPatch() patch %d", handle);
ssize_t index = mAudioPatches.indexOfKey(handle);
if (index < 0) {
return BAD_VALUE;
}
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
ALOGV("releaseAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
mUidCached, patchDesc->mUid, uid);
if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
return INVALID_OPERATION;
}
struct audio_patch *patch = &patchDesc->mPatch;
patchDesc->mUid = mUidCached;
if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
if (outputDesc == NULL) {
ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id);
return BAD_VALUE;
}
setOutputDevice(outputDesc,
getNewOutputDevice(outputDesc, true /*fromCache*/),
true,
0,
NULL);
} else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
if (inputDesc == NULL) {
ALOGV("releaseAudioPatch() input not found for id %d", patch->sinks[0].id);
return BAD_VALUE;
}
setInputDevice(inputDesc->mIoHandle,
getNewInputDevice(inputDesc->mIoHandle),
true,
NULL);
} else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
status, patchDesc->mAfPatchHandle);
removeAudioPatch(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
} else {
return BAD_VALUE;
}
} else {
return BAD_VALUE;
}
return NO_ERROR;
}
status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches,
struct audio_patch *patches,
unsigned int *generation)
{