Adding support for OpenSL ES output in native WebRTC
BUG=4573,2982,2175,3590
TEST=modules_unittests --gtest_filter=AudioDevice*, AppRTCDemo and WebRTCDemo
Summary:
- Removes the dependency on the 'enable_android_opensl' compiler flag.
Instead, OpenSL ES is always supported, and it will be enabled for devices
that support low-latency output.
- WebRTC no longer supports OpenSL ES for the input/recording side.
- Removes old code and demos using OpenSL ES for audio input.
- Improves accuracy of total delay estimates (better AEC performance).
- Reduces round-trip audio latency, especially when OpenSL ES can be used.
Performance verified on: Nexus 5, 6, 7 and 9; Samsung Galaxy S4 and S6; and an
Android One device.
R=magjed@webrtc.org, phoglund@webrtc.org, tommi@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/51759004
Cr-Commit-Position: refs/heads/master@{#9208}
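
The selection and delay-reporting behavior described above can be summarized
with a minimal sketch. It is illustrative only: the enum values, constants and
the selection rule are taken from the diff below, while the namespace and free
functions are hypothetical helpers for exposition.

    // Illustrative sketch, not part of this CL.
    namespace webrtc_sketch {

    enum AudioLayer {
      kAndroidJavaAudio,
      kAndroidJavaInputAndOpenSLESOutputAudio,
    };

    enum {
      kLowLatencyModeDelayEstimateInMilliseconds = 50,
      kHighLatencyModeDelayEstimateInMilliseconds = 150,
    };

    // OpenSL ES is now used for the output side only, and only on devices
    // that report support for low-latency playout; Java audio is always used
    // for input.
    AudioLayer SelectDefaultAudioLayer(bool low_latency_playout_supported) {
      return low_latency_playout_supported
                 ? kAndroidJavaInputAndOpenSLESOutputAudio
                 : kAndroidJavaAudio;
    }

    // The selected layer determines the fixed total-delay estimate that feeds
    // the software AEC when no HW AEC is enabled.
    int DelayEstimateInMilliseconds(AudioLayer layer) {
      return layer == kAndroidJavaInputAndOpenSLESOutputAudio
                 ? kLowLatencyModeDelayEstimateInMilliseconds
                 : kHighLatencyModeDelayEstimateInMilliseconds;
    }

    }  // namespace webrtc_sketch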
diff --git a/talk/libjingle.gyp b/talk/libjingle.gyp
index 1c97f0f..29f2900 100755
--- a/talk/libjingle.gyp
+++ b/talk/libjingle.gyp
@@ -144,10 +144,10 @@
'app/webrtc/java/src/org/webrtc/MediaCodecVideoEncoder.java',
'app/webrtc/java/src/org/webrtc/MediaCodecVideoDecoder.java',
'app/webrtc/java/src/org/webrtc/VideoCapturerAndroid.java',
- '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViEAndroidGLES20.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViERenderer.java',
'<(webrtc_modules_dir)/video_render/android/java/src/org/webrtc/videoengine/ViESurfaceRenderer.java',
+ '<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java',
'<(webrtc_modules_dir)/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java',
diff --git a/webrtc/BUILD.gn b/webrtc/BUILD.gn
index 893dd9f..2c1fafb 100644
--- a/webrtc/BUILD.gn
+++ b/webrtc/BUILD.gn
@@ -55,9 +55,6 @@
"WEBRTC_LINUX",
"WEBRTC_ANDROID",
]
- if (rtc_enable_android_opensl) {
- defines += [ "WEBRTC_ANDROID_OPENSLES" ]
- }
}
}
diff --git a/webrtc/build/common.gypi b/webrtc/build/common.gypi
index 2d7d985..e1ef419 100644
--- a/webrtc/build/common.gypi
+++ b/webrtc/build/common.gypi
@@ -363,11 +363,6 @@
'WEBRTC_ANDROID',
],
'conditions': [
- ['enable_android_opensl==1', {
- 'defines': [
- 'WEBRTC_ANDROID_OPENSLES',
- ],
- }],
['clang!=1', {
# The Android NDK doesn't provide optimized versions of these
# functions. Ensure they are disabled for all compilers.
@@ -431,13 +426,6 @@
'WEBRTC_LINUX',
'WEBRTC_ANDROID',
],
- 'conditions': [
- ['enable_android_opensl==1', {
- 'defines': [
- 'WEBRTC_ANDROID_OPENSLES',
- ],
- }]
- ],
}],
['os_posix==1', {
# For access to standard POSIXish features, use WEBRTC_POSIX instead
diff --git a/webrtc/examples/android/opensl_loopback/AndroidManifest.xml b/webrtc/examples/android/opensl_loopback/AndroidManifest.xml
deleted file mode 100644
index 3d32a7a..0000000
--- a/webrtc/examples/android/opensl_loopback/AndroidManifest.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
- android:versionCode="1" package="org.webrtc.app" android:versionName="1.07">
- <application android:icon="@drawable/logo"
- android:label="@string/app_name"
- android:debuggable="true">
- <activity android:name=".OpenSlDemo"
- android:label="@string/app_name"
- android:screenOrientation="landscape"
- >
- <intent-filter>
- <action android:name="android.intent.action.MAIN" />
- <category android:name="android.intent.category.LAUNCHER" />
- <action android:name="android.intent.action.HEADSET_PLUG"/>
- </intent-filter>
- </activity>
- </application>
-
- <uses-sdk android:minSdkVersion="14" />
- <uses-permission android:name="android.permission.RECORD_AUDIO" />
- <uses-permission android:name="android.permission.WAKE_LOCK" />
-</manifest>
diff --git a/webrtc/examples/android/opensl_loopback/README b/webrtc/examples/android/opensl_loopback/README
deleted file mode 100644
index 59f6de9..0000000
--- a/webrtc/examples/android/opensl_loopback/README
+++ /dev/null
@@ -1,23 +0,0 @@
-This directory contains an app for measuring the total delay from the native
-OpenSL implementation. Note that it just loops audio back from mic to speakers.
-
-Prerequisites:
-- Make sure gclient is checking out tools necessary to target Android: your
- .gclient file should contain a line like:
- target_os = ['android']
- Make sure to re-run gclient sync after adding this to download the tools.
-- Env vars need to be set up to target Android; easiest way to do this is to run
- (from the libjingle trunk directory):
- . ./build/android/envsetup.sh
- Note that this clobbers any previously-set $GYP_DEFINES so it must be done
- before the next item.
-- Set up webrtc-related GYP variables:
- export GYP_DEFINES="$GYP_DEFINES java_home=</path/to/JDK>
- enable_android_opensl=1"
-- Finally, run "gclient runhooks" to generate Android-targeting .ninja files.
-
-Example of building & using the app:
-
-cd <path/to/repository>/trunk
-ninja -C out/Debug OpenSlDemo
-adb install -r out/Debug/OpenSlDemo-debug.apk
\ No newline at end of file
diff --git a/webrtc/examples/android/opensl_loopback/build.xml b/webrtc/examples/android/opensl_loopback/build.xml
deleted file mode 100644
index b6e033a..0000000
--- a/webrtc/examples/android/opensl_loopback/build.xml
+++ /dev/null
@@ -1,92 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project name="OpenSlDemo" default="help">
-
- <!-- The local.properties file is created and updated by the 'android' tool.
- It contains the path to the SDK. It should *NOT* be checked into
- Version Control Systems. -->
- <property file="local.properties" />
-
- <!-- The ant.properties file can be created by you. It is only edited by the
- 'android' tool to add properties to it.
- This is the place to change some Ant specific build properties.
- Here are some properties you may want to change/update:
-
- source.dir
- The name of the source directory. Default is 'src'.
- out.dir
- The name of the output directory. Default is 'bin'.
-
- For other overridable properties, look at the beginning of the rules
- files in the SDK, at tools/ant/build.xml
-
- Properties related to the SDK location or the project target should
- be updated using the 'android' tool with the 'update' action.
-
- This file is an integral part of the build system for your
- application and should be checked into Version Control Systems.
-
- -->
- <property file="ant.properties" />
-
- <!-- if sdk.dir was not set from one of the property file, then
- get it from the ANDROID_SDK_ROOT env var.
- This must be done before we load project.properties since
- the proguard config can use sdk.dir -->
- <property environment="env" />
- <condition property="sdk.dir" value="${env.ANDROID_SDK_ROOT}">
- <isset property="env.ANDROID_SDK_ROOT" />
- </condition>
-
- <!-- The project.properties file is created and updated by the 'android'
- tool, as well as ADT.
-
- This contains project specific properties such as project target, and library
- dependencies. Lower level build properties are stored in ant.properties
- (or in .classpath for Eclipse projects).
-
- This file is an integral part of the build system for your
- application and should be checked into Version Control Systems. -->
- <loadproperties srcFile="project.properties" />
-
- <!-- quick check on sdk.dir -->
- <fail
- message="sdk.dir is missing. Make sure to generate local.properties using 'android update project' or to inject it through the ANDROID_SDK_ROOT environment variable."
- unless="sdk.dir"
- />
-
- <!--
- Import per project custom build rules if present at the root of the project.
- This is the place to put custom intermediary targets such as:
- -pre-build
- -pre-compile
- -post-compile (This is typically used for code obfuscation.
- Compiled code location: ${out.classes.absolute.dir}
- If this is not done in place, override ${out.dex.input.absolute.dir})
- -post-package
- -post-build
- -pre-clean
- -->
- <import file="custom_rules.xml" optional="true" />
-
- <!-- Import the actual build file.
-
- To customize existing targets, there are two options:
- - Customize only one target:
- - copy/paste the target into this file, *before* the
- <import> task.
- - customize it to your needs.
- - Customize the whole content of build.xml
- - copy/paste the content of the rules files (minus the top node)
- into this file, replacing the <import> task.
- - customize to your needs.
-
- ***********************
- ****** IMPORTANT ******
- ***********************
- In all cases you must update the value of version-tag below to read 'custom' instead of an integer,
- in order to avoid having your file be overridden by tools such as "android update project"
- -->
- <!-- version-tag: 1 -->
- <import file="${sdk.dir}/tools/ant/build.xml" />
-
-</project>
diff --git a/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.cc b/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.cc
deleted file mode 100644
index 116521e..0000000
--- a/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.h"
-
-#include <assert.h>
-
-#include "webrtc/modules/audio_device/android/opensles_common.h"
-#include "webrtc/modules/audio_device/android/audio_common.h"
-
-namespace webrtc {
-
-FakeAudioDeviceBuffer::FakeAudioDeviceBuffer()
- : fifo_(kNumBuffers),
- next_available_buffer_(0),
- record_channels_(0),
- play_channels_(0) {
- buf_.reset(new rtc::scoped_ptr<int8_t[]>[kNumBuffers]);
- for (int i = 0; i < kNumBuffers; ++i) {
- buf_[i].reset(new int8_t[buffer_size_bytes()]);
- }
-}
-
-int32_t FakeAudioDeviceBuffer::SetRecordingSampleRate(uint32_t fsHz) {
- assert(static_cast<int>(fsHz) == sample_rate());
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::SetPlayoutSampleRate(uint32_t fsHz) {
- assert(static_cast<int>(fsHz) == sample_rate());
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::SetRecordingChannels(uint8_t channels) {
- assert(channels > 0);
- record_channels_ = channels;
- assert((play_channels_ == 0) ||
- (record_channels_ == play_channels_));
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::SetPlayoutChannels(uint8_t channels) {
- assert(channels > 0);
- play_channels_ = channels;
- assert((record_channels_ == 0) ||
- (record_channels_ == play_channels_));
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::SetRecordedBuffer(const void* audioBuffer,
- uint32_t nSamples) {
- assert(audioBuffer);
- assert(fifo_.size() < fifo_.capacity());
- assert(nSamples == kDefaultBufSizeInSamples);
- int8_t* buffer = buf_[next_available_buffer_].get();
- next_available_buffer_ = (next_available_buffer_ + 1) % kNumBuffers;
- memcpy(buffer, audioBuffer, nSamples * sizeof(int16_t));
- fifo_.Push(buffer);
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::RequestPlayoutData(uint32_t nSamples) {
- assert(nSamples == kDefaultBufSizeInSamples);
- return 0;
-}
-
-int32_t FakeAudioDeviceBuffer::GetPlayoutData(void* audioBuffer) {
- assert(audioBuffer);
- if (fifo_.size() < 1) {
- // Playout silence until there is data available.
- memset(audioBuffer, 0, buffer_size_bytes());
- return buffer_size_samples();
- }
- int8_t* buffer = fifo_.Pop();
- memcpy(audioBuffer, buffer, buffer_size_bytes());
- return buffer_size_samples();
-}
-
-int FakeAudioDeviceBuffer::sample_rate() const {
- return audio_manager_.low_latency_supported() ?
- audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
-}
-
-int FakeAudioDeviceBuffer::buffer_size_samples() const {
- return sample_rate() * 10 / 1000;
-}
-
-int FakeAudioDeviceBuffer::buffer_size_bytes() const {
- return buffer_size_samples() * kNumChannels * sizeof(int16_t);
-}
-
-
-void FakeAudioDeviceBuffer::ClearBuffer() {
- while (fifo_.size() != 0) {
- fifo_.Pop();
- }
- next_available_buffer_ = 0;
-}
-
-} // namespace webrtc
diff --git a/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.h b/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.h
deleted file mode 100644
index f5442ee..0000000
--- a/webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_EXAMPLES_ANDROID_OPENSL_LOOPBACK_FAKE_AUDIO_DEVICE_BUFFER_H_
-#define WEBRTC_EXAMPLES_ANDROID_OPENSL_LOOPBACK_FAKE_AUDIO_DEVICE_BUFFER_H_
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
-#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-#include "webrtc/modules/audio_device/audio_device_buffer.h"
-
-namespace webrtc {
-
-// Fake AudioDeviceBuffer implementation that returns audio data that is pushed
-// to it. It implements all APIs used by the OpenSL implementation.
-class FakeAudioDeviceBuffer : public AudioDeviceBuffer {
- public:
- FakeAudioDeviceBuffer();
- virtual ~FakeAudioDeviceBuffer() {}
-
- virtual int32_t SetRecordingSampleRate(uint32_t fsHz);
- virtual int32_t SetPlayoutSampleRate(uint32_t fsHz);
- virtual int32_t SetRecordingChannels(uint8_t channels);
- virtual int32_t SetPlayoutChannels(uint8_t channels);
- virtual int32_t SetRecordedBuffer(const void* audioBuffer,
- uint32_t nSamples);
- virtual void SetVQEData(int playDelayMS,
- int recDelayMS,
- int clockDrift) {}
- virtual int32_t DeliverRecordedData() { return 0; }
- virtual int32_t RequestPlayoutData(uint32_t nSamples);
- virtual int32_t GetPlayoutData(void* audioBuffer);
-
- void ClearBuffer();
-
- private:
- enum {
- // Each buffer contains 10 ms of data since that is what OpenSlesInput
- // delivers. Keep 7 buffers which would cover 70 ms of data. These buffers
- // are needed because of jitter between OpenSl recording and playing.
- kNumBuffers = 7,
- };
- int sample_rate() const;
- int buffer_size_samples() const;
- int buffer_size_bytes() const;
-
- // Java API handle
- AudioManagerJni audio_manager_;
-
- SingleRwFifo fifo_;
- rtc::scoped_ptr<rtc::scoped_ptr<int8_t[]>[]> buf_;
- int next_available_buffer_;
-
- uint8_t record_channels_;
- uint8_t play_channels_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_EXAMPLES_ANDROID_OPENSL_LOOPBACK_FAKE_AUDIO_DEVICE_BUFFER_H_
diff --git a/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc b/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc
deleted file mode 100644
index 3b201a7..0000000
--- a/webrtc/examples/android/opensl_loopback/jni/opensl_runner.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include <assert.h>
-#include <jni.h>
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/examples/android/opensl_loopback/fake_audio_device_buffer.h"
-#include "webrtc/modules/audio_device/android/audio_device_template.h"
-#include "webrtc/modules/audio_device/android/audio_record_jni.h"
-#include "webrtc/modules/audio_device/android/audio_track_jni.h"
-#include "webrtc/modules/audio_device/android/opensles_input.h"
-#include "webrtc/modules/audio_device/android/opensles_output.h"
-
-// Java globals
-static JavaVM* g_vm = NULL;
-static jclass g_osr = NULL;
-
-namespace webrtc {
-
-template <class InputType, class OutputType>
-class OpenSlRunnerTemplate {
- public:
- OpenSlRunnerTemplate()
- : output_(NULL), // TODO(henrika): inject proper audio manager.
- input_(&output_, NULL) {
- output_.AttachAudioBuffer(&audio_buffer_);
- if (output_.Init() != 0) {
- assert(false);
- }
- if (output_.InitPlayout() != 0) {
- assert(false);
- }
- input_.AttachAudioBuffer(&audio_buffer_);
- if (input_.Init() != 0) {
- assert(false);
- }
- if (input_.InitRecording() != 0) {
- assert(false);
- }
- }
-
- ~OpenSlRunnerTemplate() {}
-
- void StartPlayRecord() {
- output_.StartPlayout();
- input_.StartRecording();
- }
-
- void StopPlayRecord() {
- // There are large enough buffers to compensate for recording and playing
- // jitter such that the timing of stopping playing or recording should not
- // result in over or underrun.
- input_.StopRecording();
- output_.StopPlayout();
- audio_buffer_.ClearBuffer();
- }
-
- private:
- OutputType output_;
- InputType input_;
- FakeAudioDeviceBuffer audio_buffer_;
-};
-
-class OpenSlRunner
- : public OpenSlRunnerTemplate<OpenSlesInput, OpenSlesOutput> {
- public:
- // Global class implementing native code.
- static OpenSlRunner* g_runner;
-
-
- OpenSlRunner() {}
- virtual ~OpenSlRunner() {}
-
- static JNIEXPORT void JNICALL RegisterApplicationContext(
- JNIEnv* env,
- jobject obj,
- jobject context) {
- assert(!g_runner); // Should only be called once.
- OpenSlesInput::SetAndroidAudioDeviceObjects(g_vm, context);
- OpenSlesOutput::SetAndroidAudioDeviceObjects(g_vm, context);
- g_runner = new OpenSlRunner();
- }
-
- static JNIEXPORT void JNICALL Start(JNIEnv * env, jobject) {
- g_runner->StartPlayRecord();
- }
-
- static JNIEXPORT void JNICALL Stop(JNIEnv * env, jobject) {
- g_runner->StopPlayRecord();
- }
-};
-
-OpenSlRunner* OpenSlRunner::g_runner = NULL;
-
-} // namespace webrtc
-
-jint JNI_OnLoad(JavaVM* vm, void* reserved) {
- // Only called once.
- assert(!g_vm);
- JNIEnv* env;
- if (vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
- return -1;
- }
-
- jclass local_osr = env->FindClass("org/webrtc/app/OpenSlRunner");
- assert(local_osr != NULL);
- g_osr = static_cast<jclass>(env->NewGlobalRef(local_osr));
- JNINativeMethod nativeFunctions[] = {
- {"RegisterApplicationContext", "(Landroid/content/Context;)V",
- reinterpret_cast<void*>(
- &webrtc::OpenSlRunner::RegisterApplicationContext)},
- {"Start", "()V", reinterpret_cast<void*>(&webrtc::OpenSlRunner::Start)},
- {"Stop", "()V", reinterpret_cast<void*>(&webrtc::OpenSlRunner::Stop)}
- };
- int ret_val = env->RegisterNatives(g_osr, nativeFunctions, 3);
- if (ret_val != 0) {
- assert(false);
- }
- g_vm = vm;
- return JNI_VERSION_1_6;
-}
diff --git a/webrtc/examples/android/opensl_loopback/project.properties b/webrtc/examples/android/opensl_loopback/project.properties
deleted file mode 100644
index a6ca533..0000000
--- a/webrtc/examples/android/opensl_loopback/project.properties
+++ /dev/null
@@ -1,16 +0,0 @@
-# This file is automatically generated by Android Tools.
-# Do not modify this file -- YOUR CHANGES WILL BE ERASED!
-#
-# This file must be checked in Version Control Systems.
-#
-# To customize properties used by the Ant build system edit
-# "ant.properties", and override values to adapt the script to your
-# project structure.
-#
-# To enable ProGuard to shrink and obfuscate your code, uncomment this (available properties: sdk.dir, user.home):
-#proguard.config=${sdk.dir}/tools/proguard/proguard-android.txt:proguard-project.txt
-
-# Project target.
-target=android-22
-
-java.compilerargs=-Xlint:all -Werror
diff --git a/webrtc/examples/android/opensl_loopback/res/drawable/logo.png b/webrtc/examples/android/opensl_loopback/res/drawable/logo.png
deleted file mode 100644
index a07c69f..0000000
--- a/webrtc/examples/android/opensl_loopback/res/drawable/logo.png
+++ /dev/null
Binary files differ
diff --git a/webrtc/examples/android/opensl_loopback/res/layout/open_sl_demo.xml b/webrtc/examples/android/opensl_loopback/res/layout/open_sl_demo.xml
deleted file mode 100644
index 1efad73..0000000
--- a/webrtc/examples/android/opensl_loopback/res/layout/open_sl_demo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
- android:orientation="vertical"
- android:layout_width="fill_parent"
- android:layout_height="fill_parent"
- android:gravity="bottom">
- <TextView android:layout_width="fill_parent"
- android:layout_height="fill_parent"
- android:layout_weight="1"
- android:layout_gravity="top"
- android:text="About: This application, when started, loops back audio as quickly as the native OpenSL implementation allows. Just starting it will lead to a feedback loop. It can be used to measure delay with the proper hardware. Using it as is has little utility." />
- <Button android:id="@+id/btStartStopCall"
- android:layout_width="100dip"
- android:layout_height="wrap_content"
- android:text="@string/startCall"
- android:layout_gravity="center"/>
- <Button android:id="@+id/btExit"
- android:layout_width="100dip"
- android:layout_height="wrap_content"
- android:layout_gravity="center"
- android:text="@string/exit"/>
-</LinearLayout >
diff --git a/webrtc/examples/android/opensl_loopback/res/values/strings.xml b/webrtc/examples/android/opensl_loopback/res/values/strings.xml
deleted file mode 100644
index f519806..0000000
--- a/webrtc/examples/android/opensl_loopback/res/values/strings.xml
+++ /dev/null
@@ -1,7 +0,0 @@
-<?xml version="1.0" encoding="utf-8"?>
-<resources>
- <string name="app_name">WebRTCOpenSLLoopback</string>
- <string name="startCall">StartCall</string>
- <string name="stopCall">StopCall</string>
- <string name="exit">Exit</string>
-</resources>
diff --git a/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlDemo.java b/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlDemo.java
deleted file mode 100644
index 046b415..0000000
--- a/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlDemo.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-package org.webrtc.app;
-
-import android.app.Activity;
-import android.content.Context;
-import android.content.pm.ActivityInfo;
-import android.media.AudioManager;
-import android.os.Bundle;
-import android.util.Log;
-import android.view.View;
-import android.widget.Button;
-
-public class OpenSlDemo extends Activity implements View.OnClickListener {
- private static final String TAG = "WEBRTC";
-
- private Button btStartStopCall;
- private boolean isRunning = false;
-
- private OpenSlRunner runner;
-
- // Called when activity is created.
- @Override
- public void onCreate(Bundle savedInstanceState) {
- super.onCreate(savedInstanceState);
-
- setContentView(R.layout.open_sl_demo);
-
- // Direct hardware volume controls to affect the voice call audio stream.
- setVolumeControlStream(AudioManager.STREAM_VOICE_CALL);
-
- btStartStopCall = (Button) findViewById(R.id.btStartStopCall);
- btStartStopCall.setOnClickListener(this);
- findViewById(R.id.btExit).setOnClickListener(this);
-
- runner = new OpenSlRunner();
- // Native code calls back into JVM to be able to configure OpenSL to low
- // latency mode. Provide the context needed to do this.
- OpenSlRunner.RegisterApplicationContext(getApplicationContext());
- }
-
- // Called before activity is destroyed.
- @Override
- public void onDestroy() {
- Log.d(TAG, "onDestroy");
- super.onDestroy();
- }
-
- private void startOrStop() {
- if (isRunning) {
- OpenSlRunner.Stop();
- btStartStopCall.setText(R.string.startCall);
- isRunning = false;
- } else if (!isRunning){
- OpenSlRunner.Start();
- btStartStopCall.setText(R.string.stopCall);
- isRunning = true;
- }
- }
-
- public void onClick(View arg0) {
- switch (arg0.getId()) {
- case R.id.btStartStopCall:
- startOrStop();
- break;
- case R.id.btExit:
- finish();
- break;
- }
- }
-
-}
diff --git a/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlRunner.java b/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlRunner.java
deleted file mode 100644
index 489cb55..0000000
--- a/webrtc/examples/android/opensl_loopback/src/org/webrtc/app/OpenSlRunner.java
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-package org.webrtc.app;
-
-import android.content.Context;
-
-public class OpenSlRunner {
- public OpenSlRunner() {
- System.loadLibrary("opensl-demo-jni");
- }
-
- public static native void RegisterApplicationContext(Context context);
- public static native void Start();
- public static native void Stop();
-
-}
\ No newline at end of file
diff --git a/webrtc/modules/audio_device/BUILD.gn b/webrtc/modules/audio_device/BUILD.gn
index 91d031e..f3202df 100644
--- a/webrtc/modules/audio_device/BUILD.gn
+++ b/webrtc/modules/audio_device/BUILD.gn
@@ -91,24 +91,16 @@
"android/audio_device_utility_android.h",
"android/audio_manager.cc",
"android/audio_manager.h",
- "android/audio_manager_jni.cc",
- "android/audio_manager_jni.h",
"android/audio_record_jni.cc",
"android/audio_record_jni.h",
"android/audio_track_jni.cc",
"android/audio_track_jni.h",
"android/fine_audio_buffer.cc",
"android/fine_audio_buffer.h",
- "android/low_latency_event_posix.cc",
- "android/low_latency_event.h",
"android/opensles_common.cc",
"android/opensles_common.h",
- "android/opensles_input.cc",
- "android/opensles_input.h",
- "android/opensles_output.cc",
- "android/opensles_output.h",
- "android/single_rw_fifo.cc",
- "android/single_rw_fifo.h",
+ "android/opensles_player.cc",
+ "android/opensles_player.h",
]
if (is_linux) {
defines += [ "LINUX_ALSA" ]
diff --git a/webrtc/modules/audio_device/android/audio_common.h b/webrtc/modules/audio_device/android/audio_common.h
index 447f595..e987f8f 100644
--- a/webrtc/modules/audio_device/android/audio_common.h
+++ b/webrtc/modules/audio_device/android/audio_common.h
@@ -20,15 +20,15 @@
// Number of bytes per audio frame.
// Example: 16-bit PCM in mono => 1*(16/8)=2 [bytes/frame]
kBytesPerFrame = kNumChannels * (16 / 8),
-};
-
-class PlayoutDelayProvider {
- public:
- virtual int PlayoutDelayMs() = 0;
-
- protected:
- PlayoutDelayProvider() {}
- virtual ~PlayoutDelayProvider() {}
+ // Delay estimates for the two different supported modes. These values
+ // are based on real-time round-trip delay estimates on a large set of
+ // devices and they are lower bounds since the filter length is 128 ms,
+ // so the AEC works for delays in the range [50, ~170] ms and [150, ~270] ms.
+ // Note that, in most cases, the lowest delay estimate will not be utilized
+ // since devices that support low-latency output audio often support
+ // HW AEC as well.
+ kLowLatencyModeDelayEstimateInMilliseconds = 50,
+ kHighLatencyModeDelayEstimateInMilliseconds = 150,
};
} // namespace webrtc
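
As a quick check of the ranges quoted in the new comment above (a sketch; the
upper bounds given in the comment are rounded):

    // With a 128 ms AEC filter, a fixed delay estimate E lets the AEC handle
    // true delays of roughly [E, E + 128] ms (hence "lower bounds" above).
    constexpr int kAecFilterLengthMs = 128;
    constexpr int kLowLatencyUpperBoundMs = 50 + kAecFilterLengthMs;    // ~170 ms in the comment
    constexpr int kHighLatencyUpperBoundMs = 150 + kAecFilterLengthMs;  // ~270 ms in the comment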
diff --git a/webrtc/modules/audio_device/android/audio_device_template.h b/webrtc/modules/audio_device/android/audio_device_template.h
index d8f3ada..0dd0596 100644
--- a/webrtc/modules/audio_device/android/audio_device_template.h
+++ b/webrtc/modules/audio_device/android/audio_device_template.h
@@ -12,6 +12,7 @@
#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_DEVICE_TEMPLATE_H_
#include "webrtc/base/checks.h"
+#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/system_wrappers/interface/trace.h"
@@ -20,12 +21,19 @@
// InputType/OutputType can be any class that implements the capturing/rendering
// part of the AudioDeviceGeneric API.
+// Construction and destruction must be done on one and the same thread. Each
+// internal implementation of InputType and OutputType will DCHECK if that is
+// not the case. All implemented methods must also be called on the same thread.
+// See comments in each InputType/OutputType class for more details.
+// It is possible to call the two static methods (SetAndroidAudioDeviceObjects
+// and ClearAndroidAudioDeviceObjects) from a different thread but both will
+// CHECK that the calling thread is attached to a Java VM.
+
template <class InputType, class OutputType>
class AudioDeviceTemplate : public AudioDeviceGeneric {
public:
static void SetAndroidAudioDeviceObjects(void* javaVM,
void* context) {
- AudioManager::SetAndroidAudioDeviceObjects(javaVM, context);
OutputType::SetAndroidAudioDeviceObjects(javaVM, context);
InputType::SetAndroidAudioDeviceObjects(javaVM, context);
}
@@ -33,14 +41,17 @@
static void ClearAndroidAudioDeviceObjects() {
OutputType::ClearAndroidAudioDeviceObjects();
InputType::ClearAndroidAudioDeviceObjects();
- AudioManager::ClearAndroidAudioDeviceObjects();
}
- // TODO(henrika): remove id.
- explicit AudioDeviceTemplate(const int32_t id)
- : audio_manager_(),
- output_(&audio_manager_),
- input_(&output_, &audio_manager_) {
+ AudioDeviceTemplate(AudioDeviceModule::AudioLayer audio_layer,
+ AudioManager* audio_manager)
+ : audio_layer_(audio_layer),
+ audio_manager_(audio_manager),
+ output_(audio_manager_),
+ input_(audio_manager_),
+ initialized_(false) {
+ CHECK(audio_manager);
+ audio_manager_->SetActiveAudioLayer(audio_layer);
}
virtual ~AudioDeviceTemplate() {
@@ -48,20 +59,27 @@
int32_t ActiveAudioLayer(
AudioDeviceModule::AudioLayer& audioLayer) const override {
- audioLayer = AudioDeviceModule::kPlatformDefaultAudio;
+ audioLayer = audio_layer_;
return 0;
- };
+ }
int32_t Init() override {
- return audio_manager_.Init() | output_.Init() | input_.Init();
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!initialized_);
+ initialized_ = audio_manager_->Init() || output_.Init() || input_.Init();
+ return initialized_ ? 0 : -1;
}
int32_t Terminate() override {
- return output_.Terminate() | input_.Terminate() | audio_manager_.Close();
+ DCHECK(thread_checker_.CalledOnValidThread());
+ initialized_ =
+ !(output_.Terminate() || input_.Terminate() || audio_manager_->Close());
+ return !initialized_ ? 0 : -1;
}
bool Initialized() const override {
- return true;
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return initialized_;
}
int16_t PlayoutDevices() override {
@@ -118,7 +136,12 @@
}
int32_t InitPlayout() override {
- audio_manager_.SetCommunicationMode(true);
+ // Switches the Android audio mode to MODE_IN_COMMUNICATION to ensure that
+ // audio routing, volume control and echo performance are the best possible
+ // for VoIP. InitRecording() does the same type of call but only the first
+ // call has any effect.
+ // This call does nothing if MODE_IN_COMMUNICATION was already set.
+ audio_manager_->SetCommunicationMode(true);
return output_.InitPlayout();
}
@@ -132,7 +155,12 @@
}
int32_t InitRecording() override {
- audio_manager_.SetCommunicationMode(true);
+ // Switches the Android audio mode to MODE_IN_COMMUNICATION to ensure that
+ // audio routing, volume control and echo performance are the best possible
+ // for VoIP. InitPlayout() does the same type of call but only the first
+ // call has any effect.
+ // This call does nothing if MODE_IN_COMMUNICATION was already set.
+ audio_manager_->SetCommunicationMode(true);
return input_.InitRecording();
}
@@ -152,7 +180,7 @@
if (!Recording()) {
// Restore initial audio mode since all audio streaming is disabled.
// The default mode was stored in Init().
- audio_manager_.SetCommunicationMode(false);
+ audio_manager_->SetCommunicationMode(false);
}
return err;
}
@@ -173,7 +201,7 @@
if (!Playing()) {
// Restore initial audio mode since all audio streaming is disabled.
// The default mode was stored in Init().
- audio_manager_.SetCommunicationMode(false);
+ audio_manager_->SetCommunicationMode(false);
}
return err;
}
@@ -365,12 +393,18 @@
return -1;
}
- int32_t PlayoutDelay(uint16_t& delayMS) const override {
- return output_.PlayoutDelay(delayMS);
+ int32_t PlayoutDelay(uint16_t& delay_ms) const override {
+ // The best guess we can make is to use half of the estimated total delay.
+ delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+ DCHECK_GT(delay_ms, 0);
+ return 0;
}
- int32_t RecordingDelay(uint16_t& delayMS) const override {
- return input_.RecordingDelay(delayMS);
+ int32_t RecordingDelay(uint16_t& delay_ms) const override {
+ // The best guess we can make is to use half of the estimated total delay.
+ delay_ms = audio_manager_->GetDelayEstimateInMilliseconds() / 2;
+ DCHECK_GT(delay_ms, 0);
+ return 0;
}
int32_t CPULoad(uint16_t& load) const override {
@@ -423,18 +457,35 @@
return -1;
}
+ // Returns true if the device supports built-in AEC and is not blacklisted.
bool BuiltInAECIsAvailable() const override {
- return input_.BuiltInAECIsAvailable();
+ return audio_manager_->IsAcousticEchoCancelerSupported();
}
int32_t EnableBuiltInAEC(bool enable) override {
+ CHECK(BuiltInAECIsAvailable()) << "HW AEC is not available";
return input_.EnableBuiltInAEC(enable);
}
private:
- AudioManager audio_manager_;
+ rtc::ThreadChecker thread_checker_;
+
+ // Local copy of the audio layer set during construction of the
+ // AudioDeviceModuleImpl instance. Read only value.
+ const AudioDeviceModule::AudioLayer audio_layer_;
+
+ // Non-owning raw pointer to the AudioManager instance given to us at
+ // construction. The real object is owned by AudioDeviceModuleImpl and its
+ // lifetime is the same as that of AudioDeviceModuleImpl, hence there
+ // is no risk of reading a NULL pointer at any time in this class.
+ AudioManager* const audio_manager_;
+
OutputType output_;
+
InputType input_;
+
+ bool initialized_;
};
} // namespace webrtc
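
For context, a minimal sketch of how the refactored template is expected to be
wired up. AudioRecordJni and OpenSLESPlayer match the file names in the
BUILD.gn change above; the exact factory code inside AudioDeviceModuleImpl is
assumed here, not shown in this diff.

    #include "webrtc/modules/audio_device/android/audio_device_template.h"
    #include "webrtc/modules/audio_device/android/audio_manager.h"
    #include "webrtc/modules/audio_device/android/audio_record_jni.h"
    #include "webrtc/modules/audio_device/android/opensles_player.h"

    // Hypothetical construction sequence: AudioDeviceModuleImpl owns the
    // AudioManager and hands a raw pointer down to the template.
    int CreateAndInitAdm() {
      webrtc::AudioManager audio_manager;
      webrtc::AudioDeviceTemplate<webrtc::AudioRecordJni, webrtc::OpenSLESPlayer>
          adm(webrtc::AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio,
              &audio_manager);
      return adm.Init();  // 0 on success, -1 on failure.
    }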
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/android/audio_device_unittest.cc
index c79b925..44532f5 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/android/audio_device_unittest.cc
@@ -13,8 +13,11 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/arraysize.h"
#include "webrtc/base/criticalsection.h"
#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -45,14 +48,6 @@
namespace webrtc {
-// Perform all tests for the different audio layers listed in this array.
-// See the INSTANTIATE_TEST_CASE_P statement for details.
-// TODO(henrika): the test framework supports both Java and OpenSL ES based
-// audio backends but there are currently some issues (crashes) in the
-// OpenSL ES implementation, hence it is not added to kAudioLayers yet.
-static const AudioDeviceModule::AudioLayer kAudioLayers[] = {
- AudioDeviceModule::kAndroidJavaAudio
- /*, AudioDeviceModule::kAndroidOpenSLESAudio */};
// Number of callbacks (input or output) the test waits for before we set
// an event indicating that the test was OK.
static const int kNumCallbacks = 10;
@@ -62,9 +57,6 @@
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
static const int kFilePlayTimeInSec = 5;
-// Fixed value for the recording delay using Java based audio backend.
-// TODO(henrika): harmonize with OpenSL ES and look for possible improvements.
-static const uint32_t kFixedRecordingDelay = 100;
static const int kBitsPerSample = 16;
static const int kBytesPerSample = kBitsPerSample / 8;
// Run the full-duplex test during this time (unit is in seconds).
@@ -80,7 +72,7 @@
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
static const int kMeasureLatencyTimeInSec = 11;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
-static const int kImpulseThreshold = 500;
+static const int kImpulseThreshold = 1000;
static const char kTag[] = "[..........] ";
enum TransportType {
@@ -88,20 +80,6 @@
kRecording = 0x2,
};
-// Simple helper struct for device specific audio parameters.
-struct AudioParameters {
- int playout_frames_per_buffer() const {
- return playout_sample_rate / 100; // WebRTC uses 10 ms as buffer size.
- }
- int recording_frames_per_buffer() const {
- return recording_sample_rate / 100;
- }
- int playout_sample_rate;
- int recording_sample_rate;
- int playout_channels;
- int recording_channels;
-};
-
// Interface for processing the audio stream. Real implementations can e.g.
// run audio in loopback, read audio from a file or perform latency
// measurements.
@@ -185,7 +163,6 @@
~FifoAudioStream() {
Flush();
- PRINTD("[%4.3f]\n", average_size());
}
// Allocate new memory, copy |num_frames| samples from |source| into memory
@@ -510,9 +487,8 @@
rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
-// AudioDeviceTest is a value-parameterized test.
-class AudioDeviceTest
- : public testing::TestWithParam<AudioDeviceModule::AudioLayer> {
+// AudioDeviceTest test fixture.
+class AudioDeviceTest : public ::testing::Test {
protected:
AudioDeviceTest()
: test_is_done_(EventWrapper::Create()) {
@@ -520,54 +496,66 @@
// can do calls between C++ and Java. Initializes both Java and OpenSL ES
// implementations.
webrtc::audiodevicemodule::EnsureInitialized();
- // Creates an audio device based on the test parameter. See
- // INSTANTIATE_TEST_CASE_P() for details.
- audio_device_ = CreateAudioDevice();
+ // Creates an audio device using a default audio layer.
+ audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
EXPECT_NE(audio_device_.get(), nullptr);
EXPECT_EQ(0, audio_device_->Init());
- CacheAudioParameters();
+ playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+ record_parameters_ = audio_manager()->GetRecordAudioParameters();
}
virtual ~AudioDeviceTest() {
EXPECT_EQ(0, audio_device_->Terminate());
}
int playout_sample_rate() const {
- return parameters_.playout_sample_rate;
+ return playout_parameters_.sample_rate();
}
- int recording_sample_rate() const {
- return parameters_.recording_sample_rate;
+ int record_sample_rate() const {
+ return record_parameters_.sample_rate();
}
int playout_channels() const {
- return parameters_.playout_channels;
+ return playout_parameters_.channels();
}
- int recording_channels() const {
- return parameters_.playout_channels;
+ int record_channels() const {
+ return record_parameters_.channels();
}
- int playout_frames_per_buffer() const {
- return parameters_.playout_frames_per_buffer();
+ int playout_frames_per_10ms_buffer() const {
+ return playout_parameters_.frames_per_10ms_buffer();
}
- int recording_frames_per_buffer() const {
- return parameters_.recording_frames_per_buffer();
+ int record_frames_per_10ms_buffer() const {
+ return record_parameters_.frames_per_10ms_buffer();
+ }
+
+ int total_delay_ms() const {
+ return audio_manager()->GetDelayEstimateInMilliseconds();
}
scoped_refptr<AudioDeviceModule> audio_device() const {
return audio_device_;
}
- scoped_refptr<AudioDeviceModule> CreateAudioDevice() {
- scoped_refptr<AudioDeviceModule> module(
- AudioDeviceModuleImpl::Create(0, GetParam()));
- return module;
+ AudioDeviceModuleImpl* audio_device_impl() const {
+ return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
}
- void CacheAudioParameters() {
- AudioDeviceBuffer* audio_buffer =
- static_cast<AudioDeviceModuleImpl*> (
- audio_device_.get())->GetAudioDeviceBuffer();
- parameters_.playout_sample_rate = audio_buffer->PlayoutSampleRate();
- parameters_.recording_sample_rate = audio_buffer->RecordingSampleRate();
- parameters_.playout_channels = audio_buffer->PlayoutChannels();
- parameters_.recording_channels = audio_buffer->RecordingChannels();
+ AudioManager* audio_manager() const {
+ return audio_device_impl()->GetAndroidAudioManagerForTest();
+ }
+
+ AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
+ return static_cast<AudioDeviceModuleImpl*>(adm)->
+ GetAndroidAudioManagerForTest();
+ }
+
+ AudioDeviceBuffer* audio_device_buffer() const {
+ return audio_device_impl()->GetAudioDeviceBuffer();
+ }
+
+ scoped_refptr<AudioDeviceModule> CreateAudioDevice(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ scoped_refptr<AudioDeviceModule> module(
+ AudioDeviceModuleImpl::Create(0, audio_layer));
+ return module;
}
// Returns file name relative to the resource root given a sample rate.
@@ -592,12 +580,53 @@
return file_name;
}
+ AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
+ AudioDeviceModule::AudioLayer audio_layer;
+ EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
+ return audio_layer;
+ }
+
+ int TestDelayOnAudioLayer(
+ const AudioDeviceModule::AudioLayer& layer_to_test) {
+ scoped_refptr<AudioDeviceModule> audio_device;
+ audio_device = CreateAudioDevice(layer_to_test);
+ EXPECT_NE(audio_device.get(), nullptr);
+ AudioManager* audio_manager = GetAudioManager(audio_device.get());
+ EXPECT_NE(audio_manager, nullptr);
+ return audio_manager->GetDelayEstimateInMilliseconds();
+ }
+
+ AudioDeviceModule::AudioLayer TestActiveAudioLayer(
+ const AudioDeviceModule::AudioLayer& layer_to_test) {
+ scoped_refptr<AudioDeviceModule> audio_device;
+ audio_device = CreateAudioDevice(layer_to_test);
+ EXPECT_NE(audio_device.get(), nullptr);
+ AudioDeviceModule::AudioLayer active;
+ EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
+ return active;
+ }
+
+ // Volume control is currently only supported for the Java output audio layer.
+ // For OpenSL ES, the internal stream volume is always at its maximum level,
+ // so there is no need for this test to set it to max.
+ bool AudioLayerSupportsVolumeControl() const {
+ return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
+ }
+
void SetMaxPlayoutVolume() {
+ if (!AudioLayerSupportsVolumeControl())
+ return;
uint32_t max_volume;
EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
}
+ void DisableBuiltInAECIfAvailable() {
+ if (audio_device()->BuiltInAECIsAvailable()) {
+ EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
+ }
+ }
+
void StartPlayout() {
EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
EXPECT_FALSE(audio_device()->Playing());
@@ -610,6 +639,7 @@
void StopPlayout() {
EXPECT_EQ(0, audio_device()->StopPlayout());
EXPECT_FALSE(audio_device()->Playing());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
}
void StartRecording() {
@@ -646,64 +676,129 @@
rtc::scoped_ptr<EventWrapper> test_is_done_;
scoped_refptr<AudioDeviceModule> audio_device_;
- AudioParameters parameters_;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
};
-TEST_P(AudioDeviceTest, ConstructDestruct) {
+TEST_F(AudioDeviceTest, ConstructDestruct) {
// Using the test fixture to create and destruct the audio device module.
}
-// Create an audio device instance and print out the native audio parameters.
-TEST_P(AudioDeviceTest, AudioParameters) {
- EXPECT_NE(0, playout_sample_rate());
- PRINT("%splayout_sample_rate: %d\n", kTag, playout_sample_rate());
- EXPECT_NE(0, recording_sample_rate());
- PRINT("%srecording_sample_rate: %d\n", kTag, recording_sample_rate());
- EXPECT_NE(0, playout_channels());
- PRINT("%splayout_channels: %d\n", kTag, playout_channels());
- EXPECT_NE(0, recording_channels());
- PRINT("%srecording_channels: %d\n", kTag, recording_channels());
+// We always ask for a default audio layer when the ADM is constructed, but
+// the ADM will then internally select the most suitable combination of audio
+// layers for input and output, based on whether low-latency output audio in
+// combination with OpenSL ES is supported. This test ensures that the correct
+// selection is made.
+TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
+ const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
+ bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
+ AudioDeviceModule::AudioLayer expected_audio_layer = low_latency_output ?
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio :
+ AudioDeviceModule::kAndroidJavaAudio;
+ EXPECT_EQ(expected_audio_layer, audio_layer);
}
-TEST_P(AudioDeviceTest, InitTerminate) {
+// Verify that it is possible to explicitly create the two types of supported
+// ADMs. These two tests override the default selection of the native audio
+// layer, ignoring whether the device supports low-latency output.
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
+ AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
+ expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
+ AudioDeviceModule::AudioLayer expected_layer =
+ AudioDeviceModule::kAndroidJavaAudio;
+ AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
+ expected_layer);
+ EXPECT_EQ(expected_layer, active_layer);
+}
+
+// The Android ADM supports two different delay reporting modes: one for the
+// low-latency output path (in combination with OpenSL ES), and one for the
+// high-latency output path (Java backends in both directions). These two
+// tests verify that the audio manager reports the correct delay estimate
+// given the selected audio layer. Note that this delay estimate will only
+// be utilized if the HW AEC is disabled.
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
+ EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
+ TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
+}
+
+TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
+ EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
+ TestDelayOnAudioLayer(
+ AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
+}
+
+// Ensure that the ADM internal audio device buffer is configured to use the
+// correct set of parameters.
+TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
+ EXPECT_EQ(playout_parameters_.sample_rate(),
+ audio_device_buffer()->PlayoutSampleRate());
+ EXPECT_EQ(record_parameters_.sample_rate(),
+ audio_device_buffer()->RecordingSampleRate());
+ EXPECT_EQ(playout_parameters_.channels(),
+ audio_device_buffer()->PlayoutChannels());
+ EXPECT_EQ(record_parameters_.channels(),
+ audio_device_buffer()->RecordingChannels());
+}
+
+
+TEST_F(AudioDeviceTest, InitTerminate) {
// Initialization is part of the test fixture.
EXPECT_TRUE(audio_device()->Initialized());
EXPECT_EQ(0, audio_device()->Terminate());
EXPECT_FALSE(audio_device()->Initialized());
}
-TEST_P(AudioDeviceTest, Devices) {
+TEST_F(AudioDeviceTest, Devices) {
// Device enumeration is not supported. Verify fixed values only.
EXPECT_EQ(1, audio_device()->PlayoutDevices());
EXPECT_EQ(1, audio_device()->RecordingDevices());
}
-TEST_P(AudioDeviceTest, BuiltInAECIsAvailable) {
- PRINT("%sBuiltInAECIsAvailable: %s\n",
- kTag, audio_device()->BuiltInAECIsAvailable() ? "true" : "false");
-}
-
-TEST_P(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
+TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
bool available;
EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
EXPECT_TRUE(available);
}
-TEST_P(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
+TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
+ StartPlayout();
EXPECT_GT(GetMaxSpeakerVolume(), 0);
+ StopPlayout();
}
-TEST_P(AudioDeviceTest, MinSpeakerVolumeIsZero) {
+TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
EXPECT_EQ(GetMinSpeakerVolume(), 0);
}
-TEST_P(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
+TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
const int default_volume = GetSpeakerVolume();
EXPECT_GE(default_volume, GetMinSpeakerVolume());
EXPECT_LE(default_volume, GetMaxSpeakerVolume());
}
-TEST_P(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
+TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
+ // The OpenSL ES output audio path does not support volume control.
+ if (!AudioLayerSupportsVolumeControl())
+ return;
const int default_volume = GetSpeakerVolume();
const int max_volume = GetMaxSpeakerVolume();
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
@@ -712,18 +807,31 @@
EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
}
-// Tests that playout can be initiated, started and stopped.
-TEST_P(AudioDeviceTest, StartStopPlayout) {
+// Tests that playout can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopPlayout) {
+ StartPlayout();
+ StopPlayout();
StartPlayout();
StopPlayout();
}
+// Verify that calling StopPlayout() will leave us in an uninitialized state
+// which will require a new call to InitPlayout(). This test does not call
+// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
+TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestart) {
+ EXPECT_EQ(0, audio_device()->InitPlayout());
+ EXPECT_EQ(0, audio_device()->StartPlayout());
+ EXPECT_EQ(0, audio_device()->StopPlayout());
+ EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
+}
+
// Start playout and verify that the native audio layer starts asking for real
// audio samples to play out using the NeedMorePlayData callback.
-TEST_P(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
+TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
MockAudioTransport mock(kPlayout);
mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
- EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
kBytesPerSample,
playout_channels(),
playout_sample_rate(),
@@ -738,15 +846,15 @@
// Start recording and verify that the native audio layer starts feeding real
// audio samples via the RecordedDataIsAvailable callback.
-TEST_P(AudioDeviceTest, StartRecordingVerifyCallbacks) {
+TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
MockAudioTransport mock(kRecording);
mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
- recording_frames_per_buffer(),
+ record_frames_per_10ms_buffer(),
kBytesPerSample,
- recording_channels(),
- recording_sample_rate(),
- kFixedRecordingDelay,
+ record_channels(),
+ record_sample_rate(),
+ total_delay_ms(),
0,
0,
false,
@@ -762,10 +870,10 @@
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
-TEST_P(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
+TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
MockAudioTransport mock(kPlayout | kRecording);
mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
- EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_buffer(),
+ EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
kBytesPerSample,
playout_channels(),
playout_sample_rate(),
@@ -773,11 +881,11 @@
_, _, _))
.Times(AtLeast(kNumCallbacks));
EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
- recording_frames_per_buffer(),
+ record_frames_per_10ms_buffer(),
kBytesPerSample,
- recording_channels(),
- recording_sample_rate(),
- Gt(kFixedRecordingDelay),
+ record_channels(),
+ record_sample_rate(),
+ total_delay_ms(),
0,
0,
false,
@@ -794,7 +902,7 @@
// Start playout and read audio from an external PCM file when the audio layer
// asks for data to play out. Real audio is played out in this test but it does
// not contain any explicit verification that the audio quality is perfect.
-TEST_P(AudioDeviceTest, RunPlayoutWithFileAsSource) {
+TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
// TODO(henrika): extend test when mono output is supported.
EXPECT_EQ(1, playout_channels());
NiceMock<MockAudioTransport> mock(kPlayout);
@@ -805,7 +913,7 @@
mock.HandleCallbacks(test_is_done_.get(),
file_audio_stream.get(),
num_callbacks);
- SetMaxPlayoutVolume();
+ // SetMaxPlayoutVolume();
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
test_is_done_->Wait(kTestTimeOutInMilliseconds);
@@ -825,12 +933,12 @@
// recording side and decreased by the playout side.
// TODO(henrika): tune the final test parameters after running tests on several
// different devices.
-TEST_P(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
- EXPECT_EQ(recording_channels(), playout_channels());
- EXPECT_EQ(recording_sample_rate(), playout_sample_rate());
+TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
- new FifoAudioStream(playout_frames_per_buffer()));
+ new FifoAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(),
fifo_audio_stream.get(),
kFullDuplexTimeInSec * kNumCallbacksPerSecond);
@@ -855,17 +963,18 @@
// - Store time differences in a vector and calculate min, max and average.
// This test requires a special hardware called Audio Loopback Dongle.
// See http://source.android.com/devices/audio/loopback.html for details.
-TEST_P(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
- EXPECT_EQ(recording_channels(), playout_channels());
- EXPECT_EQ(recording_sample_rate(), playout_sample_rate());
+TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
+ EXPECT_EQ(record_channels(), playout_channels());
+ EXPECT_EQ(record_sample_rate(), playout_sample_rate());
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
- new LatencyMeasuringAudioStream(playout_frames_per_buffer()));
+ new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
mock.HandleCallbacks(test_is_done_.get(),
latency_audio_stream.get(),
kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
SetMaxPlayoutVolume();
+ DisableBuiltInAECIfAvailable();
StartRecording();
StartPlayout();
test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
@@ -878,7 +987,4 @@
latency_audio_stream->PrintResults();
}
-INSTANTIATE_TEST_CASE_P(AudioDeviceTest, AudioDeviceTest,
- ::testing::ValuesIn(kAudioLayers));
-
} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/audio_manager.cc b/webrtc/modules/audio_device/android/audio_manager.cc
index 717d164..b302cfe 100644
--- a/webrtc/modules/audio_device/android/audio_manager.cc
+++ b/webrtc/modules/audio_device/android/audio_manager.cc
@@ -14,6 +14,7 @@
#include "webrtc/base/arraysize.h"
#include "webrtc/base/checks.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
@@ -26,79 +27,87 @@
namespace webrtc {
-static JavaVM* g_jvm = NULL;
-static jobject g_context = NULL;
-static jclass g_audio_manager_class = NULL;
-
-void AudioManager::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
- ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
-
- CHECK(jvm);
- CHECK(context);
-
- g_jvm = reinterpret_cast<JavaVM*>(jvm);
- JNIEnv* jni = GetEnv(g_jvm);
- CHECK(jni) << "AttachCurrentThread must be called on this tread";
-
- g_context = NewGlobalRef(jni, reinterpret_cast<jobject>(context));
- jclass local_class = FindClass(
- jni, "org/webrtc/voiceengine/WebRtcAudioManager");
- g_audio_manager_class = reinterpret_cast<jclass>(
- NewGlobalRef(jni, local_class));
- CHECK_EXCEPTION(jni);
-
- // Register native methods with the WebRtcAudioManager class. These methods
- // are declared private native in WebRtcAudioManager.java.
- JNINativeMethod native_methods[] = {
- {"nativeCacheAudioParameters", "(IIJ)V",
- reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
- jni->RegisterNatives(g_audio_manager_class,
- native_methods, arraysize(native_methods));
- CHECK_EXCEPTION(jni) << "Error during RegisterNatives";
+// AudioManager::JavaAudioManager implementation
+AudioManager::JavaAudioManager::JavaAudioManager(
+ NativeRegistration* native_reg, rtc::scoped_ptr<GlobalRef> audio_manager)
+ : audio_manager_(audio_manager.Pass()),
+ init_(native_reg->GetMethodId("init", "()Z")),
+ dispose_(native_reg->GetMethodId("dispose", "()V")),
+ set_communication_mode_(
+ native_reg->GetMethodId("setCommunicationMode", "(Z)V")) {
+ ALOGD("JavaAudioManager::ctor%s", GetThreadInfo().c_str());
}
-void AudioManager::ClearAndroidAudioDeviceObjects() {
- ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
- JNIEnv* jni = GetEnv(g_jvm);
- CHECK(jni) << "AttachCurrentThread must be called on this tread";
- jni->UnregisterNatives(g_audio_manager_class);
- CHECK_EXCEPTION(jni) << "Error during UnregisterNatives";
- DeleteGlobalRef(jni, g_audio_manager_class);
- g_audio_manager_class = NULL;
- DeleteGlobalRef(jni, g_context);
- g_context = NULL;
- g_jvm = NULL;
+AudioManager::JavaAudioManager::~JavaAudioManager() {
+ ALOGD("JavaAudioManager::dtor%s", GetThreadInfo().c_str());
}
+bool AudioManager::JavaAudioManager::Init() {
+ return audio_manager_->CallBooleanMethod(init_);
+}
+
+void AudioManager::JavaAudioManager::Close() {
+ audio_manager_->CallVoidMethod(dispose_);
+}
+
+void AudioManager::JavaAudioManager::SetCommunicationMode(bool enable) {
+ audio_manager_->CallVoidMethod(set_communication_mode_, enable);
+}
+
+// AudioManager implementation
AudioManager::AudioManager()
- : j_audio_manager_(NULL),
- initialized_(false) {
+ : j_environment_(JVM::GetInstance()->environment()),
+ audio_layer_(AudioDeviceModule::kPlatformDefaultAudio),
+ initialized_(false),
+ hardware_aec_(false),
+ low_latency_playout_(false),
+ delay_estimate_in_milliseconds_(0) {
ALOGD("ctor%s", GetThreadInfo().c_str());
- CHECK(HasDeviceObjects());
- CreateJavaInstance();
+ CHECK(j_environment_);
+ JNINativeMethod native_methods[] = {
+ {"nativeCacheAudioParameters",
+ "(IIZZIIJ)V",
+ reinterpret_cast<void*>(&webrtc::AudioManager::CacheAudioParameters)}};
+ j_native_registration_ = j_environment_->RegisterNatives(
+ "org/webrtc/voiceengine/WebRtcAudioManager",
+ native_methods, arraysize(native_methods));
+ j_audio_manager_.reset(new JavaAudioManager(
+ j_native_registration_.get(),
+ j_native_registration_->NewObject(
+ "<init>", "(Landroid/content/Context;J)V",
+ JVM::GetInstance()->context(), PointerTojlong(this))));
}
AudioManager::~AudioManager() {
ALOGD("~dtor%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
Close();
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jni->DeleteGlobalRef(j_audio_manager_);
- j_audio_manager_ = NULL;
+}
+
+void AudioManager::SetActiveAudioLayer(
+ AudioDeviceModule::AudioLayer audio_layer) {
+ ALOGD("SetActiveAudioLayer(%d)%s", audio_layer, GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!initialized_);
+  // Store the currently utilized audio layer.
+ audio_layer_ = audio_layer;
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output or not. However, it is also
+  // possible that the user explicitly selects the high-latency audio path,
+  // hence we use the selected |audio_layer| here to set the delay estimate.
+ delay_estimate_in_milliseconds_ =
+ (audio_layer == AudioDeviceModule::kAndroidJavaAudio) ?
+ kHighLatencyModeDelayEstimateInMilliseconds :
+ kLowLatencyModeDelayEstimateInMilliseconds;
+ ALOGD("delay_estimate_in_milliseconds: %d", delay_estimate_in_milliseconds_);
}
bool AudioManager::Init() {
ALOGD("Init%s", GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(!initialized_);
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID initID = GetMethodID(jni, g_audio_manager_class, "init", "()Z");
- jboolean res = jni->CallBooleanMethod(j_audio_manager_, initID);
- CHECK_EXCEPTION(jni);
- if (!res) {
+ DCHECK_NE(audio_layer_, AudioDeviceModule::kPlatformDefaultAudio);
+ if (!j_audio_manager_->Init()) {
ALOGE("init failed!");
return false;
}
@@ -111,12 +120,7 @@
DCHECK(thread_checker_.CalledOnValidThread());
if (!initialized_)
return true;
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID disposeID = GetMethodID(
- jni, g_audio_manager_class, "dispose", "()V");
- jni->CallVoidMethod(j_audio_manager_, disposeID);
- CHECK_EXCEPTION(jni);
+ j_audio_manager_->Close();
initialized_ = false;
return true;
}
@@ -125,61 +129,72 @@
ALOGD("SetCommunicationMode(%d)%s", enable, GetThreadInfo().c_str());
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(initialized_);
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID setcommID = GetMethodID(
- jni, g_audio_manager_class, "setCommunicationMode", "(Z)V");
- jni->CallVoidMethod(j_audio_manager_, setcommID, enable);
- CHECK_EXCEPTION(jni);
+ j_audio_manager_->SetCommunicationMode(enable);
}
-void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env, jobject obj,
- jint sample_rate, jint channels, jlong nativeAudioManager) {
+bool AudioManager::IsAcousticEchoCancelerSupported() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return hardware_aec_;
+}
+
+bool AudioManager::IsLowLatencyPlayoutSupported() const {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ ALOGD("IsLowLatencyPlayoutSupported()");
+ return low_latency_playout_;
+}
+
+int AudioManager::GetDelayEstimateInMilliseconds() const {
+ return delay_estimate_in_milliseconds_;
+}
+
+void JNICALL AudioManager::CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint channels,
+ jboolean hardware_aec,
+ jboolean low_latency_output,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager) {
webrtc::AudioManager* this_object =
- reinterpret_cast<webrtc::AudioManager*> (nativeAudioManager);
- this_object->OnCacheAudioParameters(env, sample_rate, channels);
+ reinterpret_cast<webrtc::AudioManager*>(native_audio_manager);
+ this_object->OnCacheAudioParameters(
+ env, sample_rate, channels, hardware_aec, low_latency_output,
+ output_buffer_size, input_buffer_size);
}
-void AudioManager::OnCacheAudioParameters(
- JNIEnv* env, jint sample_rate, jint channels) {
+void AudioManager::OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint channels,
+ jboolean hardware_aec,
+ jboolean low_latency_output,
+ jint output_buffer_size,
+ jint input_buffer_size) {
ALOGD("OnCacheAudioParameters%s", GetThreadInfo().c_str());
+ ALOGD("hardware_aec: %d", hardware_aec);
+ ALOGD("low_latency_output: %d", low_latency_output);
ALOGD("sample_rate: %d", sample_rate);
ALOGD("channels: %d", channels);
+ ALOGD("output_buffer_size: %d", output_buffer_size);
+ ALOGD("input_buffer_size: %d", input_buffer_size);
DCHECK(thread_checker_.CalledOnValidThread());
- // TODO(henrika): add support stereo output.
- playout_parameters_.reset(sample_rate, channels);
- record_parameters_.reset(sample_rate, channels);
+ hardware_aec_ = hardware_aec;
+ low_latency_playout_ = low_latency_output;
+ // TODO(henrika): add support for stereo output.
+ playout_parameters_.reset(sample_rate, channels, output_buffer_size);
+ record_parameters_.reset(sample_rate, channels, input_buffer_size);
}
-AudioParameters AudioManager::GetPlayoutAudioParameters() const {
+const AudioParameters& AudioManager::GetPlayoutAudioParameters() {
CHECK(playout_parameters_.is_valid());
+ DCHECK(thread_checker_.CalledOnValidThread());
return playout_parameters_;
}
-AudioParameters AudioManager::GetRecordAudioParameters() const {
+const AudioParameters& AudioManager::GetRecordAudioParameters() {
CHECK(record_parameters_.is_valid());
+ DCHECK(thread_checker_.CalledOnValidThread());
return record_parameters_;
}
-bool AudioManager::HasDeviceObjects() {
- return (g_jvm && g_context && g_audio_manager_class);
-}
-
-void AudioManager::CreateJavaInstance() {
- ALOGD("CreateJavaInstance");
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID constructorID = GetMethodID(
- jni, g_audio_manager_class, "<init>", "(Landroid/content/Context;J)V");
- j_audio_manager_ = jni->NewObject(g_audio_manager_class,
- constructorID,
- g_context,
- reinterpret_cast<intptr_t>(this));
- CHECK_EXCEPTION(jni) << "Error during NewObject";
- CHECK(j_audio_manager_);
- j_audio_manager_ = jni->NewGlobalRef(j_audio_manager_);
- CHECK_EXCEPTION(jni) << "Error during NewGlobalRef";
- CHECK(j_audio_manager_);
-}
-
} // namespace webrtc
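The JavaAudioManager helper above replaces the old per-call GetMethodID lookups with method IDs resolved once at construction. A minimal sketch of that pattern in raw JNI terms (the real class goes through the NativeRegistration and GlobalRef helpers; the names below are illustrative):

#include <jni.h>

// Resolves the needed jmethodIDs once and reuses them for every call,
// instead of looking them up on each invocation.
class JavaObjectWrapper {
 public:
  JavaObjectWrapper(JNIEnv* env, jobject obj) : obj_(obj) {
    jclass cls = env->GetObjectClass(obj);
    init_ = env->GetMethodID(cls, "init", "()Z");
    dispose_ = env->GetMethodID(cls, "dispose", "()V");
    env->DeleteLocalRef(cls);
  }

  bool Init(JNIEnv* env) { return env->CallBooleanMethod(obj_, init_); }
  void Close(JNIEnv* env) { env->CallVoidMethod(obj_, dispose_); }

 private:
  jobject obj_;  // Assumed to be a global reference owned by the caller.
  jmethodID init_;
  jmethodID dispose_;
};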
diff --git a/webrtc/modules/audio_device/android/audio_manager.h b/webrtc/modules/audio_device/android/audio_manager.h
index 3ab29b7..5321f74 100644
--- a/webrtc/modules/audio_device/android/audio_manager.h
+++ b/webrtc/modules/audio_device/android/audio_manager.h
@@ -13,11 +13,13 @@
#include <jni.h>
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/thread_checker.h"
#include "webrtc/modules/audio_device/android/audio_common.h"
#include "webrtc/modules/audio_device/include/audio_device_defines.h"
#include "webrtc/modules/audio_device/audio_device_generic.h"
#include "webrtc/modules/utility/interface/helpers_android.h"
+#include "webrtc/modules/utility/interface/jvm_android.h"
namespace webrtc {
@@ -28,21 +30,25 @@
: sample_rate_(0),
channels_(0),
frames_per_buffer_(0),
+ frames_per_10ms_buffer_(0),
bits_per_sample_(kBitsPerSample) {}
- AudioParameters(int sample_rate, int channels)
+ AudioParameters(int sample_rate, int channels, int frames_per_buffer)
: sample_rate_(sample_rate),
channels_(channels),
- frames_per_buffer_(sample_rate / 100),
+ frames_per_buffer_(frames_per_buffer),
+ frames_per_10ms_buffer_(sample_rate / 100),
bits_per_sample_(kBitsPerSample) {}
- void reset(int sample_rate, int channels) {
+ void reset(int sample_rate, int channels, int frames_per_buffer) {
sample_rate_ = sample_rate;
channels_ = channels;
- // WebRTC uses a fixed buffer size equal to 10ms.
- frames_per_buffer_ = (sample_rate / 100);
+ frames_per_buffer_ = frames_per_buffer;
+ frames_per_10ms_buffer_ = (sample_rate / 100);
}
int sample_rate() const { return sample_rate_; }
int channels() const { return channels_; }
int frames_per_buffer() const { return frames_per_buffer_; }
+ int frames_per_10ms_buffer() const { return frames_per_10ms_buffer_; }
+ int bits_per_sample() const { return bits_per_sample_; }
bool is_valid() const {
return ((sample_rate_ > 0) && (channels_ > 0) && (frames_per_buffer_ > 0));
}
@@ -50,12 +56,25 @@
int GetBytesPerBuffer() const {
return frames_per_buffer_ * GetBytesPerFrame();
}
+ int GetBytesPer10msBuffer() const {
+ return frames_per_10ms_buffer_ * GetBytesPerFrame();
+ }
+ float GetBufferSizeInMilliseconds() const {
+ if (sample_rate_ == 0)
+ return 0.0f;
+ return frames_per_buffer_ / (sample_rate_ / 1000.0f);
+ }
private:
int sample_rate_;
int channels_;
+  // Lowest possible size of the native audio buffer, measured in number of
+  // frames. This size is injected into the OpenSL ES output implementation
+  // (since it does not "talk Java") but is currently not utilized by the
+  // Java implementation, since it acquires the same value internally.
int frames_per_buffer_;
- const int bits_per_sample_;
+ int frames_per_10ms_buffer_;
+ int bits_per_sample_;
};
// Implements support for functions in the WebRTC audio stack for Android that
@@ -64,23 +83,36 @@
// construction. This class does not make any audio-related modifications
// unless Init() is called. Caching audio parameters makes no changes but only
// reads data from the Java side.
-// TODO(henrika): expand this class when adding support for low-latency
-// OpenSL ES. Currently, it only contains very basic functionality.
class AudioManager {
public:
- // Use the invocation API to allow the native application to use the JNI
- // interface pointer to access VM features. |jvm| denotes the Java VM and
- // |context| corresponds to android.content.Context in Java.
- // This method also sets a global jclass object, |g_audio_manager_class| for
- // the "org/webrtc/voiceengine/WebRtcAudioManager"-class.
- static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
- // Always call this method after the object has been destructed. It deletes
- // existing global references and enables garbage collection.
- static void ClearAndroidAudioDeviceObjects();
+  // Wraps the Java-specific parts of the AudioManager into one helper class.
+ // Stores method IDs for all supported methods at construction and then
+ // allows calls like JavaAudioManager::Close() while hiding the Java/JNI
+ // parts that are associated with this call.
+ class JavaAudioManager {
+ public:
+ JavaAudioManager(NativeRegistration* native_registration,
+ rtc::scoped_ptr<GlobalRef> audio_manager);
+ ~JavaAudioManager();
+
+ bool Init();
+ void Close();
+ void SetCommunicationMode(bool enable);
+
+ private:
+ rtc::scoped_ptr<GlobalRef> audio_manager_;
+ jmethodID init_;
+ jmethodID dispose_;
+ jmethodID set_communication_mode_;
+ };
AudioManager();
~AudioManager();
+ // Sets the currently active audio layer combination. Must be called before
+ // Init().
+ void SetActiveAudioLayer(AudioDeviceModule::AudioLayer audio_layer);
+
// Initializes the audio manager and stores the current audio mode.
bool Init();
// Revert any setting done by Init().
@@ -91,37 +123,79 @@
void SetCommunicationMode(bool enable);
// Native audio parameters stored during construction.
- AudioParameters GetPlayoutAudioParameters() const;
- AudioParameters GetRecordAudioParameters() const;
+ const AudioParameters& GetPlayoutAudioParameters();
+ const AudioParameters& GetRecordAudioParameters();
- bool initialized() const { return initialized_; }
+ // Returns true if the device supports a built-in Acoustic Echo Canceler.
+ // Some devices can also be blacklisted for use in combination with an AEC
+ // and these devices will return false.
+  // Can currently only be used in combination with a Java-based audio backend
+  // for the recording side (i.e. using the android.media.AudioRecord API).
+ bool IsAcousticEchoCancelerSupported() const;
+
+ // Returns true if the device supports the low-latency audio paths in
+ // combination with OpenSL ES.
+ bool IsLowLatencyPlayoutSupported() const;
+
+  // Returns the estimated total delay of this device, in milliseconds.
+  // The value is set once at construction and never changes after that.
+ // Possible values are webrtc::kLowLatencyModeDelayEstimateInMilliseconds and
+ // webrtc::kHighLatencyModeDelayEstimateInMilliseconds.
+ int GetDelayEstimateInMilliseconds() const;
private:
// Called from Java side so we can cache the native audio parameters.
// This method will be called by the WebRtcAudioManager constructor, i.e.
// on the same thread that this object is created on.
- static void JNICALL CacheAudioParameters(JNIEnv* env, jobject obj,
- jint sample_rate, jint channels, jlong nativeAudioManager);
- void OnCacheAudioParameters(JNIEnv* env, jint sample_rate, jint channels);
-
- // Returns true if SetAndroidAudioDeviceObjects() has been called
- // successfully.
- bool HasDeviceObjects();
-
- // Called from the constructor. Defines the |j_audio_manager_| member.
- void CreateJavaInstance();
+ static void JNICALL CacheAudioParameters(JNIEnv* env,
+ jobject obj,
+ jint sample_rate,
+ jint channels,
+ jboolean hardware_aec,
+ jboolean low_latency_output,
+ jint output_buffer_size,
+ jint input_buffer_size,
+ jlong native_audio_manager);
+ void OnCacheAudioParameters(JNIEnv* env,
+ jint sample_rate,
+ jint channels,
+ jboolean hardware_aec,
+ jboolean low_latency_output,
+ jint output_buffer_size,
+ jint input_buffer_size);
// Stores thread ID in the constructor.
// We can then use ThreadChecker::CalledOnValidThread() to ensure that
// other methods are called from the same thread.
rtc::ThreadChecker thread_checker_;
- // The Java WebRtcAudioManager instance.
- jobject j_audio_manager_;
+ // Calls AttachCurrentThread() if this thread is not attached at construction.
+ // Also ensures that DetachCurrentThread() is called at destruction.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ rtc::scoped_ptr<JNIEnvironment> j_environment_;
+
+  // Wraps the result of registering the native methods in
+  // WebRtcAudioManager.java; used to look up method IDs and to create the
+  // Java-side audio manager instance.
+ rtc::scoped_ptr<NativeRegistration> j_native_registration_;
+
+  // Wraps the Java-side WebRtcAudioManager instance and hides the JNI
+  // details of calls like Init(), Close() and SetCommunicationMode().
+ rtc::scoped_ptr<AudioManager::JavaAudioManager> j_audio_manager_;
+
+ AudioDeviceModule::AudioLayer audio_layer_;
// Set to true by Init() and false by Close().
bool initialized_;
+ // True if device supports hardware (or built-in) AEC.
+ bool hardware_aec_;
+
+ // True if device supports the low-latency OpenSL ES audio path.
+ bool low_latency_playout_;
+
+  // The delay estimate can take one of two fixed values depending on whether
+  // the device supports low-latency output or not.
+ int delay_estimate_in_milliseconds_;
+
// Contains native parameters (e.g. sample rate, channel configuration).
// Set at construction in OnCacheAudioParameters() which is called from
// Java on the same thread as this object is created on.
diff --git a/webrtc/modules/audio_device/android/audio_manager_jni.cc b/webrtc/modules/audio_device/android/audio_manager_jni.cc
deleted file mode 100644
index 8a9b382..0000000
--- a/webrtc/modules/audio_device/android/audio_manager_jni.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
-
-#include <android/log.h>
-#include <assert.h>
-
-#include "webrtc/modules/utility/interface/helpers_android.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-#define TAG "AudioManagerJni"
-#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
-
-namespace webrtc {
-
-static JavaVM* g_jvm_ = NULL;
-static JNIEnv* g_jni_env_ = NULL;
-static jobject g_context_ = NULL;
-static jclass g_audio_manager_class_ = NULL;
-static jobject g_audio_manager_ = NULL;
-
-AudioManagerJni::AudioManagerJni()
- : low_latency_supported_(false),
- native_output_sample_rate_(0),
- native_buffer_size_(0) {
- if (!HasDeviceObjects()) {
- assert(false);
- }
- AttachThreadScoped ats(g_jvm_);
- JNIEnv* env = ats.env();
- assert(env && "Unsupported JNI version!");
- CreateInstance(env);
- // Pre-store device specific values.
- SetLowLatencySupported(env);
- SetNativeOutputSampleRate(env);
- SetNativeFrameSize(env);
-}
-
-void AudioManagerJni::SetAndroidAudioDeviceObjects(void* jvm, void* context) {
- ALOGD("SetAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
-
- assert(jvm);
- assert(context);
-
- // Store global Java VM variables to be accessed by API calls.
- g_jvm_ = reinterpret_cast<JavaVM*>(jvm);
- g_jni_env_ = GetEnv(g_jvm_);
- g_context_ = g_jni_env_->NewGlobalRef(reinterpret_cast<jobject>(context));
-
- // FindClass must be made in this function since this function's contract
- // requires it to be called by a Java thread.
- // See
- // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
- // as to why this is necessary.
- // Get the AudioManagerAndroid class object.
- jclass javaAmClassLocal = g_jni_env_->FindClass(
- "org/webrtc/voiceengine/AudioManagerAndroid");
- assert(javaAmClassLocal);
-
- // Create a global reference such that the class object is not recycled by
- // the garbage collector.
- g_audio_manager_class_ = reinterpret_cast<jclass>(
- g_jni_env_->NewGlobalRef(javaAmClassLocal));
- assert(g_audio_manager_class_);
-}
-
-void AudioManagerJni::ClearAndroidAudioDeviceObjects() {
- ALOGD("ClearAndroidAudioDeviceObjects%s", GetThreadInfo().c_str());
- g_jni_env_->DeleteGlobalRef(g_audio_manager_class_);
- g_audio_manager_class_ = NULL;
- g_jni_env_->DeleteGlobalRef(g_context_);
- g_context_ = NULL;
- g_jni_env_->DeleteGlobalRef(g_audio_manager_);
- g_audio_manager_ = NULL;
- g_jni_env_ = NULL;
- g_jvm_ = NULL;
-}
-
-void AudioManagerJni::SetLowLatencySupported(JNIEnv* env) {
- jmethodID id = LookUpMethodId(env, "isAudioLowLatencySupported", "()Z");
- low_latency_supported_ = env->CallBooleanMethod(g_audio_manager_, id);
-}
-
-void AudioManagerJni::SetNativeOutputSampleRate(JNIEnv* env) {
- jmethodID id = LookUpMethodId(env, "getNativeOutputSampleRate", "()I");
- native_output_sample_rate_ = env->CallIntMethod(g_audio_manager_, id);
-}
-
-void AudioManagerJni::SetNativeFrameSize(JNIEnv* env) {
- jmethodID id = LookUpMethodId(env,
- "getAudioLowLatencyOutputFrameSize", "()I");
- native_buffer_size_ = env->CallIntMethod(g_audio_manager_, id);
-}
-
-bool AudioManagerJni::HasDeviceObjects() {
- return g_jvm_ && g_jni_env_ && g_context_ && g_audio_manager_class_;
-}
-
-jmethodID AudioManagerJni::LookUpMethodId(JNIEnv* env,
- const char* method_name,
- const char* method_signature) {
- jmethodID ret_val = env->GetMethodID(g_audio_manager_class_, method_name,
- method_signature);
- assert(ret_val);
- return ret_val;
-}
-
-void AudioManagerJni::CreateInstance(JNIEnv* env) {
- // Get the method ID for the constructor taking Context.
- jmethodID id = LookUpMethodId(env, "<init>", "(Landroid/content/Context;)V");
- g_audio_manager_ = env->NewObject(g_audio_manager_class_, id, g_context_);
- // Create a global reference so that the instance is accessible until no
- // longer needed.
- g_audio_manager_ = env->NewGlobalRef(g_audio_manager_);
- assert(g_audio_manager_);
-}
-
-} // namespace webrtc
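One reason this helper is deleted: it cached a JNIEnv pointer in a global (g_jni_env_), but a JNIEnv is only valid on the thread it was obtained for. A minimal sketch of the safe per-thread lookup that the replacement JVM/AttachCurrentThreadIfNeeded utilities build on (illustrative, using only the standard invocation API):

#include <jni.h>

// Never cache a JNIEnv* across threads; ask the JavaVM on each thread.
JNIEnv* GetEnvForCurrentThread(JavaVM* jvm) {
  JNIEnv* env = nullptr;
  jint status = jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6);
  if (status == JNI_EDETACHED) {
    // Attach on demand; the caller must detach the thread before it exits.
    if (jvm->AttachCurrentThread(&env, nullptr) != JNI_OK)
      return nullptr;
  }
  return env;
}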
diff --git a/webrtc/modules/audio_device/android/audio_manager_jni.h b/webrtc/modules/audio_device/android/audio_manager_jni.h
deleted file mode 100644
index 5df2490..0000000
--- a/webrtc/modules/audio_device/android/audio_manager_jni.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// Android APIs used to access Java functionality needed to enable low latency
-// audio.
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
-
-#include <jni.h>
-
-namespace webrtc {
-
-class AudioManagerJni {
- public:
- AudioManagerJni();
- ~AudioManagerJni() {}
-
- // SetAndroidAudioDeviceObjects must only be called once unless there has
- // been a successive call to ClearAndroidAudioDeviceObjects. For each
- // call to ClearAndroidAudioDeviceObjects, SetAndroidAudioDeviceObjects may be
- // called once.
- // This function must be called by a Java thread as calling it from a thread
- // created by the native application will prevent FindClass from working. See
- // http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
- // for more details.
- // It has to be called for this class' APIs to be successful. Calling
- // ClearAndroidAudioDeviceObjects will prevent this class' APIs to be called
- // successfully if SetAndroidAudioDeviceObjects is not called after it.
- static void SetAndroidAudioDeviceObjects(void* jvm, void* context);
- // This function must be called when the AudioManagerJni class is no
- // longer needed. It frees up the global references acquired in
- // SetAndroidAudioDeviceObjects.
- static void ClearAndroidAudioDeviceObjects();
-
- bool low_latency_supported() const { return low_latency_supported_; }
- int native_output_sample_rate() const { return native_output_sample_rate_; }
- int native_buffer_size() const { return native_buffer_size_; }
-
- private:
- bool HasDeviceObjects();
-
- // Following functions assume that the calling thread has been attached.
- void SetLowLatencySupported(JNIEnv* env);
- void SetNativeOutputSampleRate(JNIEnv* env);
- void SetNativeFrameSize(JNIEnv* env);
-
- jmethodID LookUpMethodId(JNIEnv* env, const char* method_name,
- const char* method_signature);
-
- void CreateInstance(JNIEnv* env);
-
- // Whether or not low latency audio is supported, the native output sample
- // rate and the audio buffer size do not change. I.e the values might as well
- // just be cached when initializing.
- bool low_latency_supported_;
- int native_output_sample_rate_;
- int native_buffer_size_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_AUDIO_MANAGER_JNI_H_
diff --git a/webrtc/modules/audio_device/android/audio_manager_unittest.cc b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
new file mode 100644
index 0000000..f790e6a
--- /dev/null
+++ b/webrtc/modules/audio_device/android/audio_manager_unittest.cc
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/modules/audio_device/android/build_info.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/android/ensure_initialized.h"
+
+#define PRINT(...) fprintf(stderr, __VA_ARGS__);
+
+namespace webrtc {
+
+static const char kTag[] = " ";
+
+class AudioManagerTest : public ::testing::Test {
+ protected:
+ AudioManagerTest() {
+ // One-time initialization of JVM and application context. Ensures that we
+    // can make calls between C++ and Java.
+ webrtc::audiodevicemodule::EnsureInitialized();
+ audio_manager_.reset(new AudioManager());
+ SetActiveAudioLayer();
+ playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
+ record_parameters_ = audio_manager()->GetRecordAudioParameters();
+ }
+
+ AudioManager* audio_manager() const { return audio_manager_.get(); }
+
+ // A valid audio layer must always be set before calling Init(), hence we
+ // might as well make it a part of the test fixture.
+ void SetActiveAudioLayer() {
+ EXPECT_EQ(0, audio_manager()->GetDelayEstimateInMilliseconds());
+ audio_manager()->SetActiveAudioLayer(AudioDeviceModule::kAndroidJavaAudio);
+ EXPECT_NE(0, audio_manager()->GetDelayEstimateInMilliseconds());
+ }
+
+ rtc::scoped_ptr<AudioManager> audio_manager_;
+ AudioParameters playout_parameters_;
+ AudioParameters record_parameters_;
+};
+
+TEST_F(AudioManagerTest, ConstructDestruct) {
+}
+
+TEST_F(AudioManagerTest, InitClose) {
+ EXPECT_TRUE(audio_manager()->Init());
+ EXPECT_TRUE(audio_manager()->Close());
+}
+
+TEST_F(AudioManagerTest, IsAcousticEchoCancelerSupported) {
+ PRINT("%sAcoustic Echo Canceler support: %s\n", kTag,
+ audio_manager()->IsAcousticEchoCancelerSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, IsLowLatencyPlayoutSupported) {
+ PRINT("%sLow latency output support: %s\n", kTag,
+ audio_manager()->IsLowLatencyPlayoutSupported() ? "Yes" : "No");
+}
+
+TEST_F(AudioManagerTest, ShowAudioParameterInfo) {
+ const bool low_latency_out = audio_manager()->IsLowLatencyPlayoutSupported();
+ PRINT("PLAYOUT:\n");
+ PRINT("%saudio layer: %s\n", kTag,
+ low_latency_out ? "Low latency OpenSL" : "Java/JNI based AudioTrack");
+ PRINT("%ssample rate: %d Hz\n", kTag, playout_parameters_.sample_rate());
+ PRINT("%schannels: %d\n", kTag, playout_parameters_.channels());
+ PRINT("%sframes per buffer: %d <=> %.2f ms\n", kTag,
+ playout_parameters_.frames_per_buffer(),
+ playout_parameters_.GetBufferSizeInMilliseconds());
+ PRINT("RECORD: \n");
+ PRINT("%saudio layer: %s\n", kTag, "Java/JNI based AudioRecord");
+ PRINT("%ssample rate: %d Hz\n", kTag, record_parameters_.sample_rate());
+ PRINT("%schannels: %d\n", kTag, record_parameters_.channels());
+ PRINT("%sframes per buffer: %d <=> %.2f ms\n", kTag,
+ record_parameters_.frames_per_buffer(),
+ record_parameters_.GetBufferSizeInMilliseconds());
+}
+
+// Add device-specific information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowDeviceInfo) {
+ BuildInfo build_info;
+ PRINT("%smodel: %s\n", kTag, build_info.GetDeviceModel().c_str());
+ PRINT("%sbrand: %s\n", kTag, build_info.GetBrand().c_str());
+ PRINT("%smanufacturer: %s\n",
+ kTag, build_info.GetDeviceManufacturer().c_str());
+}
+
+// Add Android build information to the test for logging purposes.
+TEST_F(AudioManagerTest, ShowBuildInfo) {
+ BuildInfo build_info;
+ PRINT("%sbuild release: %s\n", kTag, build_info.GetBuildRelease().c_str());
+ PRINT("%sbuild id: %s\n", kTag, build_info.GetAndroidBuildId().c_str());
+ PRINT("%sbuild type: %s\n", kTag, build_info.GetBuildType().c_str());
+ PRINT("%sSDK version: %s\n", kTag, build_info.GetSdkVersion().c_str());
+}
+
+// Basic test of the AudioParameters class using default construction where
+// all members are set to zero.
+TEST_F(AudioManagerTest, AudioParametersWithDefaultConstruction) {
+ AudioParameters params;
+ EXPECT_FALSE(params.is_valid());
+ EXPECT_EQ(0, params.sample_rate());
+ EXPECT_EQ(0, params.channels());
+ EXPECT_EQ(0, params.frames_per_buffer());
+ EXPECT_EQ(0, params.frames_per_10ms_buffer());
+ EXPECT_EQ(0, params.GetBytesPerFrame());
+ EXPECT_EQ(0, params.GetBytesPerBuffer());
+ EXPECT_EQ(0, params.GetBytesPer10msBuffer());
+ EXPECT_EQ(0.0f, params.GetBufferSizeInMilliseconds());
+}
+
+// Basic test of the AudioParameters class using non-default construction.
+TEST_F(AudioManagerTest, AudioParametersWithNonDefaultConstruction) {
+ const int kSampleRate = 48000;
+ const int kChannels = 1;
+ const int kFramesPerBuffer = 480;
+ const int kFramesPer10msBuffer = 480;
+ const int kBytesPerFrame = 2;
+ const float kBufferSizeInMs = 10.0f;
+ AudioParameters params(kSampleRate, kChannels, kFramesPerBuffer);
+ EXPECT_TRUE(params.is_valid());
+ EXPECT_EQ(kSampleRate, params.sample_rate());
+ EXPECT_EQ(kChannels, params.channels());
+ EXPECT_EQ(kFramesPerBuffer, params.frames_per_buffer());
+ EXPECT_EQ(kSampleRate / 100, params.frames_per_10ms_buffer());
+ EXPECT_EQ(kBytesPerFrame, params.GetBytesPerFrame());
+ EXPECT_EQ(kBytesPerFrame * kFramesPerBuffer, params.GetBytesPerBuffer());
+ EXPECT_EQ(kBytesPerFrame * kFramesPer10msBuffer,
+ params.GetBytesPer10msBuffer());
+ EXPECT_EQ(kBufferSizeInMs, params.GetBufferSizeInMilliseconds());
+}
+
+} // namespace webrtc
+
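The new tests run in the same modules_unittests binary referenced in the commit message; presumably they can be selected with a gtest filter such as:

  modules_unittests --gtest_filter=AudioManagerTest*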
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.cc b/webrtc/modules/audio_device/android/audio_record_jni.cc
index 60f6df1..957efe5 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_record_jni.cc
@@ -25,11 +25,6 @@
namespace webrtc {
-// We are unable to obtain exact measurements of the hardware delay on Android.
-// Instead, a lower bound (based on measurements) is used.
-// TODO(henrika): is it possible to improve this?
-static const int kHardwareDelayInMilliseconds = 100;
-
static JavaVM* g_jvm = NULL;
static jobject g_context = NULL;
static jclass g_audio_record_class = NULL;
@@ -82,18 +77,17 @@
g_jvm = NULL;
}
-AudioRecordJni::AudioRecordJni(
- PlayoutDelayProvider* delay_provider, AudioManager* audio_manager)
- : delay_provider_(delay_provider),
+AudioRecordJni::AudioRecordJni(AudioManager* audio_manager)
+ : audio_manager_(audio_manager),
audio_parameters_(audio_manager->GetRecordAudioParameters()),
+ total_delay_in_milliseconds_(0),
j_audio_record_(NULL),
direct_buffer_address_(NULL),
direct_buffer_capacity_in_bytes_(0),
frames_per_buffer_(0),
initialized_(false),
recording_(false),
- audio_device_buffer_(NULL),
- playout_delay_in_milliseconds_(0) {
+ audio_device_buffer_(NULL) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
@@ -150,7 +144,7 @@
ALOGD("frames_per_buffer: %d", frames_per_buffer_);
CHECK_EQ(direct_buffer_capacity_in_bytes_,
frames_per_buffer_ * kBytesPerFrame);
- CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_buffer());
+ CHECK_EQ(frames_per_buffer_, audio_parameters_.frames_per_10ms_buffer());
initialized_ = true;
return 0;
}
@@ -201,12 +195,6 @@
return 0;
}
-int32_t AudioRecordJni::RecordingDelay(uint16_t& delayMS) const { // NOLINT
- // TODO(henrika): is it possible to improve this estimate?
- delayMS = kHardwareDelayInMilliseconds;
- return 0;
-}
-
void AudioRecordJni::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
ALOGD("AttachAudioBuffer");
DCHECK(thread_checker_.CalledOnValidThread());
@@ -217,20 +205,10 @@
const int channels = audio_parameters_.channels();
ALOGD("SetRecordingChannels(%d)", channels);
audio_device_buffer_->SetRecordingChannels(channels);
-}
-
-bool AudioRecordJni::BuiltInAECIsAvailable() const {
- ALOGD("BuiltInAECIsAvailable%s", GetThreadInfo().c_str());
- AttachThreadScoped ats(g_jvm);
- JNIEnv* jni = ats.env();
- jmethodID builtInAECIsAvailable = jni->GetStaticMethodID(
- g_audio_record_class, "BuiltInAECIsAvailable", "()Z");
- CHECK_EXCEPTION(jni);
- CHECK(builtInAECIsAvailable);
- jboolean hw_aec = jni->CallStaticBooleanMethod(g_audio_record_class,
- builtInAECIsAvailable);
- CHECK_EXCEPTION(jni);
- return hw_aec;
+ total_delay_in_milliseconds_ =
+ audio_manager_->GetDelayEstimateInMilliseconds();
+ DCHECK_GT(total_delay_in_milliseconds_, 0);
+ ALOGD("total_delay_in_milliseconds: %d", total_delay_in_milliseconds_);
}
int32_t AudioRecordJni::EnableBuiltInAEC(bool enable) {
@@ -283,16 +261,15 @@
ALOGE("AttachAudioBuffer has not been called!");
return;
}
- if (playout_delay_in_milliseconds_ == 0) {
- playout_delay_in_milliseconds_ = delay_provider_->PlayoutDelayMs();
- ALOGD("cached playout delay: %d", playout_delay_in_milliseconds_);
- }
audio_device_buffer_->SetRecordedBuffer(direct_buffer_address_,
frames_per_buffer_);
- audio_device_buffer_->SetVQEData(playout_delay_in_milliseconds_,
- kHardwareDelayInMilliseconds,
- 0 /* clockDrift */);
- if (audio_device_buffer_->DeliverRecordedData() == 1) {
+ // We provide one (combined) fixed delay estimate for the APM and use the
+  // |playDelayMs| parameter only. Components like the AEC only see the sum
+  // of |playDelayMs| and |recDelayMs|, hence the distribution does not matter.
+ audio_device_buffer_->SetVQEData(total_delay_in_milliseconds_,
+ 0, // recDelayMs
+                                   0);  // clockDrift
+ if (audio_device_buffer_->DeliverRecordedData() == -1) {
ALOGE("AudioDeviceBuffer::DeliverRecordedData failed!");
}
}
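The rewritten OnDataIsRecorded() relies on the fact that echo control only consumes the sum of the two delay legs, so folding everything into |playDelayMs| is safe. A tiny illustration of that invariant (the helper name is hypothetical):

// The AEC aligns far-end and near-end audio using only the total
// round-trip delay, so (100, 0) and (50, 50) are equivalent inputs.
int TotalDelayForEchoControl(int play_delay_ms, int rec_delay_ms) {
  return play_delay_ms + rec_delay_ms;
}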
diff --git a/webrtc/modules/audio_device/android/audio_record_jni.h b/webrtc/modules/audio_device/android/audio_record_jni.h
index 19629ed..18cb0c8 100644
--- a/webrtc/modules/audio_device/android/audio_record_jni.h
+++ b/webrtc/modules/audio_device/android/audio_record_jni.h
@@ -21,8 +21,6 @@
namespace webrtc {
-class PlayoutDelayProvider;
-
// Implements 16-bit mono PCM audio input support for Android using the Java
// AudioRecord interface. Most of the work is done by its Java counterpart in
// WebRtcAudioRecord.java. This class is created and lives on a thread in
@@ -58,8 +56,7 @@
// existing global references and enables garbage collection.
static void ClearAndroidAudioDeviceObjects();
- AudioRecordJni(
- PlayoutDelayProvider* delay_provider, AudioManager* audio_manager);
+ explicit AudioRecordJni(AudioManager* audio_manager);
~AudioRecordJni();
int32_t Init();
@@ -72,11 +69,8 @@
int32_t StopRecording ();
bool Recording() const { return recording_; }
- int32_t RecordingDelay(uint16_t& delayMS) const;
-
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
- bool BuiltInAECIsAvailable() const;
int32_t EnableBuiltInAEC(bool enable);
private:
@@ -116,16 +110,18 @@
// thread in Java. Detached during construction of this object.
rtc::ThreadChecker thread_checker_java_;
- // Returns the current playout delay.
- // TODO(henrika): this value is currently fixed since initial tests have
- // shown that the estimated delay varies very little over time. It might be
- // possible to make improvements in this area.
- PlayoutDelayProvider* delay_provider_;
+  // Raw pointer to the audio manager.
+ const AudioManager* audio_manager_;
// Contains audio parameters provided to this class at construction by the
// AudioManager.
const AudioParameters audio_parameters_;
+  // Estimate of the total round-trip delay (input + output).
+  // Fixed value set once in AttachAudioBuffer(); it can take one of two
+  // possible values. See audio_common.h for details.
+ int total_delay_in_milliseconds_;
+
// The Java WebRtcAudioRecord instance.
jobject j_audio_record_;
@@ -148,9 +144,6 @@
// Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
// AudioDeviceModuleImpl class and called by AudioDeviceModuleImpl::Create().
AudioDeviceBuffer* audio_device_buffer_;
-
- // Contains a delay estimate from the playout side given by |delay_provider_|.
- int playout_delay_in_milliseconds_;
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.cc b/webrtc/modules/audio_device/android/audio_track_jni.cc
index 6e89e36..ac8bdee 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.cc
+++ b/webrtc/modules/audio_device/android/audio_track_jni.cc
@@ -84,8 +84,7 @@
frames_per_buffer_(0),
initialized_(false),
playing_(false),
- audio_device_buffer_(NULL),
- delay_in_milliseconds_(0) {
+ audio_device_buffer_(NULL) {
ALOGD("ctor%s", GetThreadInfo().c_str());
DCHECK(audio_parameters_.is_valid());
CHECK(HasDeviceObjects());
@@ -129,17 +128,10 @@
AttachThreadScoped ats(g_jvm);
JNIEnv* jni = ats.env();
jmethodID initPlayoutID = GetMethodID(
- jni, g_audio_track_class, "InitPlayout", "(II)I");
- jint delay_in_milliseconds = jni->CallIntMethod(
- j_audio_track_, initPlayoutID, audio_parameters_.sample_rate(),
- audio_parameters_.channels());
+ jni, g_audio_track_class, "InitPlayout", "(II)V");
+ jni->CallVoidMethod(j_audio_track_, initPlayoutID,
+ audio_parameters_.sample_rate(), audio_parameters_.channels());
CHECK_EXCEPTION(jni);
- if (delay_in_milliseconds < 0) {
- ALOGE("InitPlayout failed!");
- return -1;
- }
- delay_in_milliseconds_ = delay_in_milliseconds;
- ALOGD("delay_in_milliseconds: %d", delay_in_milliseconds);
initialized_ = true;
return 0;
}
@@ -254,20 +246,6 @@
audio_device_buffer_->SetPlayoutChannels(channels);
}
-int32_t AudioTrackJni::PlayoutDelay(uint16_t& delayMS) const {
- // No need for thread check or locking since we set |delay_in_milliseconds_|
- // only once (on the creating thread) during initialization.
- delayMS = delay_in_milliseconds_;
- return 0;
-}
-
-int AudioTrackJni::PlayoutDelayMs() {
- // This method can be called from the Java based AudioRecordThread but we
- // don't need locking since it is only set once (on the main thread) during
- // initialization.
- return delay_in_milliseconds_;
-}
-
void JNICALL AudioTrackJni::CacheDirectBufferAddress(
JNIEnv* env, jobject obj, jobject byte_buffer, jlong nativeAudioTrack) {
webrtc::AudioTrackJni* this_object =
diff --git a/webrtc/modules/audio_device/android/audio_track_jni.h b/webrtc/modules/audio_device/android/audio_track_jni.h
index deb143b..82f5d19 100644
--- a/webrtc/modules/audio_device/android/audio_track_jni.h
+++ b/webrtc/modules/audio_device/android/audio_track_jni.h
@@ -39,7 +39,7 @@
// detach when the method goes out of scope. We do so because this class does
// not own the thread it is created and called on, and other objects on the same
// thread might put us in a detached state at any time.
-class AudioTrackJni : public PlayoutDelayProvider {
+class AudioTrackJni {
public:
// Use the invocation API to allow the native application to use the JNI
// interface pointer to access VM features.
@@ -71,13 +71,8 @@
int MaxSpeakerVolume(uint32_t& max_volume) const;
int MinSpeakerVolume(uint32_t& min_volume) const;
- int32_t PlayoutDelay(uint16_t& delayMS) const;
void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
- protected:
- // PlayoutDelayProvider implementation.
- virtual int PlayoutDelayMs();
-
private:
+  // Called from the Java side so we can cache the address of the Java-managed
// |byte_buffer| in |direct_buffer_address_|. The size of the buffer
@@ -140,12 +135,6 @@
// The AudioDeviceBuffer is a member of the AudioDeviceModuleImpl instance
// and therefore outlives this object.
AudioDeviceBuffer* audio_device_buffer_;
-
- // Estimated playout delay caused by buffering in the Java based audio track.
- // We are using a fixed value here since measurements have shown that the
- // variations are very small (~10ms) and it is not worth the extra complexity
- // to update this estimate on a continuous basis.
- int delay_in_milliseconds_;
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/build_info.cc b/webrtc/modules/audio_device/android/build_info.cc
new file mode 100644
index 0000000..cb5dc29
--- /dev/null
+++ b/webrtc/modules/audio_device/android/build_info.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/build_info.h"
+
+#include "webrtc/modules/utility/interface/helpers_android.h"
+
+namespace webrtc {
+
+BuildInfo::BuildInfo()
+ : j_environment_(JVM::GetInstance()->environment()),
+ j_build_info_(JVM::GetInstance()->GetClass(
+ "org/webrtc/voiceengine/BuildInfo")) {
+}
+
+std::string BuildInfo::GetStringFromJava(const char* name) {
+ jmethodID id = j_build_info_.GetStaticMethodId(name, "()Ljava/lang/String;");
+ jstring j_string = static_cast<jstring>(
+ j_build_info_.CallStaticObjectMethod(id));
+ return j_environment_->JavaToStdString(j_string);
+}
+
+std::string BuildInfo::GetDeviceModel() {
+ return GetStringFromJava("getDeviceModel");
+}
+
+std::string BuildInfo::GetBrand() {
+ return GetStringFromJava("getBrand");
+}
+
+std::string BuildInfo::GetDeviceManufacturer() {
+ return GetStringFromJava("getDeviceManufacturer");
+}
+
+std::string BuildInfo::GetAndroidBuildId() {
+ return GetStringFromJava("getAndroidBuildId");
+}
+
+std::string BuildInfo::GetBuildType() {
+ return GetStringFromJava("getBuildType");
+}
+
+std::string BuildInfo::GetBuildRelease() {
+ return GetStringFromJava("getBuildRelease");
+}
+
+std::string BuildInfo::GetSdkVersion() {
+ return GetStringFromJava("getSdkVersion");
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/build_info.h b/webrtc/modules/audio_device/android/build_info.h
new file mode 100644
index 0000000..aea71f7
--- /dev/null
+++ b/webrtc/modules/audio_device/android/build_info.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
+
+#include <jni.h>
+#include <string>
+
+#include "webrtc/modules/utility/interface/jvm_android.h"
+
+namespace webrtc {
+
+// Utility class used to query the Java class (org/webrtc/voiceengine/BuildInfo)
+// for device and Android build information.
+// The calling thread is attached to the JVM at construction if needed and a
+// valid Java environment object is also created.
+// All Get methods must be called on the creating thread. If not, the code will
+// hit DCHECKs when calling JNIEnvironment::JavaToStdString().
+class BuildInfo {
+ public:
+ BuildInfo();
+ ~BuildInfo() {}
+
+ // End-user-visible name for the end product (e.g. "Nexus 6").
+ std::string GetDeviceModel();
+ // Consumer-visible brand (e.g. "google").
+ std::string GetBrand();
+ // Manufacturer of the product/hardware (e.g. "motorola").
+ std::string GetDeviceManufacturer();
+ // Android build ID (e.g. LMY47D).
+ std::string GetAndroidBuildId();
+ // The type of build (e.g. "user" or "eng").
+ std::string GetBuildType();
+ // The user-visible version string (e.g. "5.1").
+ std::string GetBuildRelease();
+ // The user-visible SDK version of the framework (e.g. 21).
+ std::string GetSdkVersion();
+
+ private:
+ // Helper method which calls a static getter method with |name| and returns
+ // a string from Java.
+ std::string GetStringFromJava(const char* name);
+
+ // Ensures that this class can access a valid JNI interface pointer even
+ // if the creating thread was not attached to the JVM.
+ AttachCurrentThreadIfNeeded attach_thread_if_needed_;
+
+ // Provides access to the JNIEnv interface pointer and the JavaToStdString()
+ // method which is used to translate Java strings to std strings.
+ rtc::scoped_ptr<JNIEnvironment> j_environment_;
+
+ // Holds the jclass object and provides access to CallStaticObjectMethod().
+ // Used by GetStringFromJava() during construction only.
+ JavaClass j_build_info_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_BUILD_INFO_H_
diff --git a/webrtc/modules/audio_device/android/ensure_initialized.cc b/webrtc/modules/audio_device/android/ensure_initialized.cc
index b07c04a..bd28c45 100644
--- a/webrtc/modules/audio_device/android/ensure_initialized.cc
+++ b/webrtc/modules/audio_device/android/ensure_initialized.cc
@@ -17,8 +17,7 @@
#include "webrtc/modules/audio_device/android/audio_device_template.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
-#include "webrtc/modules/audio_device/android/opensles_input.h"
-#include "webrtc/modules/audio_device/android/opensles_output.h"
+#include "webrtc/modules/utility/interface/jvm_android.h"
namespace webrtc {
namespace audiodevicemodule {
@@ -32,15 +31,14 @@
CHECK_EQ(0, jni->GetJavaVM(&jvm));
jobject context = ::base::android::GetApplicationContext();
- // Provide JVM and context to Java and OpenSL ES implementations.
+ // Initialize the Java environment (currently only used by the audio manager).
+ webrtc::JVM::Initialize(jvm, context);
+ // TODO(henrika): remove this call when AudioRecordJni and AudioTrackJni
+ // are modified to use the same sort of Java initialization as the audio
+ // manager.
using AudioDeviceJava = AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>;
AudioDeviceJava::SetAndroidAudioDeviceObjects(jvm, context);
- // TODO(henrika): enable OpenSL ES when it has been refactored to avoid
- // crashes.
- // using AudioDeviceOpenSLES =
- // AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>;
- // AudioDeviceOpenSLES::SetAndroidAudioDeviceObjects(jvm, context);
}
void EnsureInitialized() {
diff --git a/webrtc/modules/audio_device/android/fine_audio_buffer.cc b/webrtc/modules/audio_device/android/fine_audio_buffer.cc
index ee56679..99f853a 100644
--- a/webrtc/modules/audio_device/android/fine_audio_buffer.cc
+++ b/webrtc/modules/audio_device/android/fine_audio_buffer.cc
@@ -14,6 +14,7 @@
#include <stdio.h>
#include <algorithm>
+#include "webrtc/base/checks.h"
#include "webrtc/modules/audio_device/audio_device_buffer.h"
namespace webrtc {
@@ -47,7 +48,7 @@
desired_frame_size_bytes_);
cached_buffer_start_ += desired_frame_size_bytes_;
cached_bytes_ -= desired_frame_size_bytes_;
- assert(cached_buffer_start_ + cached_bytes_ < bytes_per_10_ms_);
+ CHECK_LT(cached_buffer_start_ + cached_bytes_, bytes_per_10_ms_);
return;
}
memcpy(buffer, &cache_buffer_.get()[cached_buffer_start_], cached_bytes_);
@@ -62,15 +63,15 @@
device_buffer_->RequestPlayoutData(samples_per_10_ms_);
int num_out = device_buffer_->GetPlayoutData(unwritten_buffer);
if (num_out != samples_per_10_ms_) {
- assert(num_out == 0);
+ CHECK_EQ(num_out, 0);
cached_bytes_ = 0;
return;
}
unwritten_buffer += bytes_per_10_ms_;
- assert(bytes_left >= 0);
+ CHECK_GE(bytes_left, 0);
bytes_left -= bytes_per_10_ms_;
}
- assert(bytes_left <= 0);
+ CHECK_LE(bytes_left, 0);
// Put the samples that were written to |buffer| but are not used in the
// cache.
int cache_location = desired_frame_size_bytes_;
@@ -79,8 +80,8 @@
(desired_frame_size_bytes_ - cached_bytes_);
// If cached_bytes_ is larger than the cache buffer, uninitialized memory
// will be read.
- assert(cached_bytes_ <= bytes_per_10_ms_);
- assert(-bytes_left == cached_bytes_);
+ CHECK_LE(cached_bytes_, bytes_per_10_ms_);
+ CHECK_EQ(-bytes_left, cached_bytes_);
cached_buffer_start_ = 0;
memcpy(cache_buffer_.get(), cache_ptr, cached_bytes_);
}
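For context, FineAudioBuffer's job is to serve arbitrary native frame sizes out of WebRTC's fixed 10 ms chunks, caching the leftovers between callbacks. A self-contained sketch of the same repackaging idea (the real class additionally tracks byte offsets and pulls from AudioDeviceBuffer):

#include <cstdint>
#include <cstring>
#include <vector>

// Serves |frame_size| samples per call from a source that only produces
// fixed-size 10 ms chunks, caching whatever is left over for the next call.
class Reframer {
 public:
  Reframer(int frame_size, int samples_per_10ms)
      : frame_size_(frame_size), chunk_(samples_per_10ms) {}

  // |fill_10ms_chunk| is any callable that fills one 10 ms chunk on demand.
  template <typename Source>
  void GetFrame(int16_t* dest, Source fill_10ms_chunk) {
    while (static_cast<int>(cache_.size()) < frame_size_) {
      fill_10ms_chunk(chunk_.data());
      cache_.insert(cache_.end(), chunk_.begin(), chunk_.end());
    }
    std::memcpy(dest, cache_.data(), frame_size_ * sizeof(int16_t));
    cache_.erase(cache_.begin(), cache_.begin() + frame_size_);
  }

 private:
  const int frame_size_;
  std::vector<int16_t> chunk_;  // Scratch space for one 10 ms chunk.
  std::vector<int16_t> cache_;  // Fetched but not yet delivered samples.
};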
diff --git a/webrtc/modules/audio_device/android/fine_audio_buffer.h b/webrtc/modules/audio_device/android/fine_audio_buffer.h
index 812fe1f..dce40be 100644
--- a/webrtc/modules/audio_device/android/fine_audio_buffer.h
+++ b/webrtc/modules/audio_device/android/fine_audio_buffer.h
@@ -49,7 +49,8 @@
private:
// Device buffer that provides 10ms chunks of data.
AudioDeviceBuffer* device_buffer_;
- int desired_frame_size_bytes_; // Number of bytes delivered per GetBufferData
+  // Number of bytes delivered per GetBufferData() call.
+ int desired_frame_size_bytes_;
int sample_rate_;
int samples_per_10_ms_;
// Convenience parameter to avoid converting from samples
@@ -57,8 +58,10 @@
// Storage for samples that are not yet asked for.
rtc::scoped_ptr<int8_t[]> cache_buffer_;
- int cached_buffer_start_; // Location of first unread sample.
- int cached_bytes_; // Number of bytes stored in cache.
+ // Location of first unread sample.
+ int cached_buffer_start_;
+ // Number of bytes stored in cache.
+ int cached_bytes_;
};
} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
deleted file mode 100644
index 2783018..0000000
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/AudioManagerAndroid.java
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-// The functions in this file are called from native code. They can still be
-// accessed even though they are declared private.
-
-package org.webrtc.voiceengine;
-
-import android.content.Context;
-import android.content.pm.PackageManager;
-import android.media.AudioManager;
-
-class AudioManagerAndroid {
- // Most of Google lead devices use 44.1K as the default sampling rate, 44.1K
- // is also widely used on other android devices.
- private static final int DEFAULT_SAMPLING_RATE = 44100;
- // Randomly picked up frame size which is close to return value on N4.
- // Return this default value when
- // getProperty(PROPERTY_OUTPUT_FRAMES_PER_BUFFER) fails.
- private static final int DEFAULT_FRAMES_PER_BUFFER = 256;
-
- private int mNativeOutputSampleRate;
- private boolean mAudioLowLatencySupported;
- private int mAudioLowLatencyOutputFrameSize;
-
-
- @SuppressWarnings("unused")
- private AudioManagerAndroid(Context context) {
- AudioManager audioManager = (AudioManager)
- context.getSystemService(Context.AUDIO_SERVICE);
-
- mNativeOutputSampleRate = DEFAULT_SAMPLING_RATE;
- mAudioLowLatencyOutputFrameSize = DEFAULT_FRAMES_PER_BUFFER;
- if (android.os.Build.VERSION.SDK_INT >=
- android.os.Build.VERSION_CODES.JELLY_BEAN_MR1) {
- String sampleRateString = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- if (sampleRateString != null) {
- mNativeOutputSampleRate = Integer.parseInt(sampleRateString);
- }
- String framesPerBuffer = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
- if (framesPerBuffer != null) {
- mAudioLowLatencyOutputFrameSize = Integer.parseInt(framesPerBuffer);
- }
- }
- mAudioLowLatencySupported = context.getPackageManager().hasSystemFeature(
- PackageManager.FEATURE_AUDIO_LOW_LATENCY);
- }
-
- @SuppressWarnings("unused")
- private int getNativeOutputSampleRate() {
- return mNativeOutputSampleRate;
- }
-
- @SuppressWarnings("unused")
- private boolean isAudioLowLatencySupported() {
- return mAudioLowLatencySupported;
- }
-
- @SuppressWarnings("unused")
- private int getAudioLowLatencyOutputFrameSize() {
- return mAudioLowLatencyOutputFrameSize;
- }
-}
\ No newline at end of file
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
new file mode 100644
index 0000000..9f025c4
--- /dev/null
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/BuildInfo.java
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+package org.webrtc.voiceengine;
+
+import android.os.Build;
+import android.util.Log;
+
+public final class BuildInfo {
+ public static String getDevice() {
+ return Build.DEVICE;
+ }
+
+ public static String getDeviceModel() {
+ return Build.MODEL;
+ }
+
+ public static String getProduct() {
+ return Build.PRODUCT;
+ }
+
+ public static String getBrand() {
+ return Build.BRAND;
+ }
+
+ public static String getDeviceManufacturer() {
+ return Build.MANUFACTURER;
+ }
+
+ public static String getAndroidBuildId() {
+ return Build.ID;
+ }
+
+ public static String getBuildType() {
+ return Build.TYPE;
+ }
+
+ public static String getBuildRelease() {
+ return Build.VERSION.RELEASE;
+ }
+
+ public static String getSdkVersion() {
+ return Integer.toString(Build.VERSION.SDK_INT);
+ }
+}
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
index 562cd16..dcad439 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioManager.java
@@ -12,9 +12,15 @@
import android.content.Context;
import android.content.pm.PackageManager;
+import android.media.AudioFormat;
import android.media.AudioManager;
+import android.media.AudioRecord;
+import android.media.AudioTrack;
+import android.os.Build;
import android.util.Log;
+import java.lang.Math;
+
// WebRtcAudioManager handles tasks that use android.media.AudioManager.
// At construction, storeAudioParameters() is called and it retrieves
// fundamental audio parameters like native sample rate and number of channels.
@@ -31,6 +37,10 @@
private static final String TAG = "WebRtcAudioManager";
+  // Default audio data format is PCM, 16 bits per sample.
+ // Guaranteed to be supported by all devices.
+ private static final int BITS_PER_SAMPLE = 16;
+
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
@@ -43,7 +53,9 @@
"MODE_RINGTONE",
"MODE_IN_CALL",
"MODE_IN_COMMUNICATION",
- };
+ };
+
+ private static final int DEFAULT_FRAME_PER_BUFFER = 256;
private final long nativeAudioManager;
private final Context context;
@@ -55,6 +67,13 @@
private int nativeChannels;
private int savedAudioMode = AudioManager.MODE_INVALID;
+ private boolean hardwareAEC;
+ private boolean lowLatencyOutput;
+ private int sampleRate;
+ private int channels;
+ private int outputBufferSize;
+ private int inputBufferSize;
+
WebRtcAudioManager(Context context, long nativeAudioManager) {
Logd("ctor" + WebRtcAudioUtils.getThreadInfo());
this.context = context;
@@ -65,9 +84,9 @@
WebRtcAudioUtils.logDeviceInfo(TAG);
}
storeAudioParameters();
- // TODO(henrika): add stereo support for playout side.
nativeCacheAudioParameters(
- nativeSampleRate, nativeChannels, nativeAudioManager);
+ sampleRate, channels, hardwareAEC, lowLatencyOutput, outputBufferSize,
+ inputBufferSize, nativeAudioManager);
}
private boolean init() {
@@ -124,28 +143,115 @@
private void storeAudioParameters() {
// Only mono is supported currently (in both directions).
// TODO(henrika): add support for stereo playout.
- nativeChannels = CHANNELS;
- // Get native sample rate and store it in |nativeSampleRate|.
- // Most common rates are 44100 and 48000 Hz.
- if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
- nativeSampleRate = SAMPLE_RATE_HZ;
- } else {
- String sampleRateString = audioManager.getProperty(
- AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
- nativeSampleRate = (sampleRateString == null) ?
- SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
- }
- Logd("nativeSampleRate: " + nativeSampleRate);
- Logd("nativeChannels: " + nativeChannels);
+ channels = CHANNELS;
+ sampleRate = getNativeOutputSampleRate();
+ hardwareAEC = isAcousticEchoCancelerSupported();
+ lowLatencyOutput = isLowLatencyOutputSupported();
+ outputBufferSize = lowLatencyOutput ?
+ getLowLatencyOutputFramesPerBuffer() :
+ getMinOutputFrameSize(sampleRate, channels);
+ // TODO(henrika): add support for low-latency input.
+ inputBufferSize = getMinInputFrameSize(sampleRate, channels);
}
- /** Gets the current earpiece state. */
+ // Gets the current earpiece state.
private boolean hasEarpiece() {
return context.getPackageManager().hasSystemFeature(
PackageManager.FEATURE_TELEPHONY);
}
- /** Helper method which throws an exception when an assertion has failed. */
+ // Returns true if low-latency audio output is supported.
+ private boolean isLowLatencyOutputSupported() {
+ return isOpenSLESSupported() &&
+ context.getPackageManager().hasSystemFeature(
+ PackageManager.FEATURE_AUDIO_LOW_LATENCY);
+ }
+
+ // Returns true if low-latency audio input is supported.
+ public boolean isLowLatencyInputSupported() {
+ // TODO(henrika): investigate if some sort of device list is needed here
+ // as well. The NDK doc states that: "As of API level 21, lower latency
+ // audio input is supported on select devices. To take advantage of this
+ // feature, first confirm that lower latency output is available".
+ return WebRtcAudioUtils.runningOnLollipopOrHigher() &&
+ isLowLatencyOutputSupported();
+ }
+
+ // Returns the native output sample rate for this device's output stream.
+ private int getNativeOutputSampleRate() {
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ return SAMPLE_RATE_HZ;
+ }
+ String sampleRateString = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
+ return (sampleRateString == null) ?
+ SAMPLE_RATE_HZ : Integer.parseInt(sampleRateString);
+ }
+
+ // Returns the native output buffer size for low-latency output streams.
+ private int getLowLatencyOutputFramesPerBuffer() {
+ assertTrue(isLowLatencyOutputSupported());
+ if (!WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
+ return DEFAULT_FRAME_PER_BUFFER;
+ }
+ String framesPerBuffer = audioManager.getProperty(
+ AudioManager.PROPERTY_OUTPUT_FRAMES_PER_BUFFER);
+ return framesPerBuffer == null ?
+ DEFAULT_FRAME_PER_BUFFER : Integer.parseInt(framesPerBuffer);
+ }
+
+ // Returns true if the device supports Acoustic Echo Canceler (AEC).
+ // Also takes blacklisting into account.
+ private static boolean isAcousticEchoCancelerSupported() {
+ if (WebRtcAudioUtils.deviceIsBlacklistedForHwAecUsage()) {
+ Logd(Build.MODEL + " is blacklisted for HW AEC usage!");
+ return false;
+ }
+ return WebRtcAudioUtils.isAcousticEchoCancelerSupported();
+ }
+
+ // Returns the minimum output buffer size for Java-based audio (AudioTrack).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency output.
+ private static int getMinOutputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ final int channelConfig;
+ if (numChannels == 1) {
+ channelConfig = AudioFormat.CHANNEL_OUT_MONO;
+ } else if (numChannels == 2) {
+ channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
+ } else {
+ return -1;
+ }
+ return AudioTrack.getMinBufferSize(
+ sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT) /
+ bytesPerFrame;
+ }
+
+ // Returns the native input buffer size for low-latency input streams.
+ private int getLowLatencyInputFramesPerBuffer() {
+ assertTrue(isLowLatencyInputSupported());
+ return getLowLatencyOutputFramesPerBuffer();
+ }
+
+ // Returns the minimum input buffer size for Java-based audio (AudioRecord).
+ // This size can also be used for OpenSL ES implementations on devices that
+ // lack support for low-latency input.
+ private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
+ final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
+ assertTrue(numChannels == CHANNELS);
+ return AudioRecord.getMinBufferSize(sampleRateInHz,
+ AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT) /
+ bytesPerFrame;
+ }
+
+ // Returns true if OpenSL ES audio is supported.
+ private static boolean isOpenSLESSupported() {
+ // OpenSL ES requires API level 9 (Gingerbread) or higher.
+ return WebRtcAudioUtils.runningOnGingerBreadOrHigher();
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
@@ -161,5 +267,7 @@
}
private native void nativeCacheAudioParameters(
- int sampleRate, int channels, long nativeAudioManager);
+ int sampleRate, int channels, boolean hardwareAEC, boolean lowLatencyOutput,
+ int outputBufferSize, int inputBufferSize,
+ long nativeAudioManager);
}
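
The reworked storeAudioParameters() above now caches six values and hands them to native code in a single nativeCacheAudioParameters() call. The buffer sizes are expressed in frames: getMinOutputFrameSize() divides the byte count from AudioTrack.getMinBufferSize() by the bytes-per-frame figure (2 bytes for 16-bit mono). A minimal standalone sketch of that arithmetic follows; the FrameSizeExample class is illustrative, and the 3840-byte figure in the comment is only an example since the real minimum is device-dependent:

package org.webrtc.voiceengine;

import android.media.AudioFormat;
import android.media.AudioTrack;

// Illustrative only: mirrors the frames-from-bytes conversion used by
// WebRtcAudioManager.getMinOutputFrameSize() for 16-bit mono PCM.
public final class FrameSizeExample {
  private static final int BITS_PER_SAMPLE = 16;

  public static int minOutputFrames(int sampleRateInHz) {
    // Mono, 16-bit PCM => 1 * (16 / 8) = 2 bytes per frame.
    final int bytesPerFrame = 1 * (BITS_PER_SAMPLE / 8);
    final int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
        sampleRateInHz, AudioFormat.CHANNEL_OUT_MONO,
        AudioFormat.ENCODING_PCM_16BIT);
    // Example: 3840 bytes / 2 bytes per frame = 1920 frames,
    // i.e. 40 ms of audio at 48 kHz.
    return minBufferSizeInBytes / bytesPerFrame;
  }
}
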
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
index e062ba9..12e8dd0 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioRecord.java
@@ -127,23 +127,9 @@
}
}
- public static boolean BuiltInAECIsAvailable() {
- // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
- if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
- return false;
- }
- // TODO(henrika): add black-list based on device name. We could also
- // use uuid to exclude devices but that would require a session ID from
- // an existing AudioRecord object.
- return AcousticEchoCanceler.isAvailable();
- }
-
private boolean EnableBuiltInAEC(boolean enable) {
Logd("EnableBuiltInAEC(" + enable + ')');
- // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
- if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
- return false;
- }
+ assertTrue(WebRtcAudioUtils.isAcousticEchoCancelerApproved());
// Store the AEC state.
useBuiltInAEC = enable;
// Set AEC state if AEC has already been created.
@@ -206,11 +192,18 @@
"audio format: " + audioRecord.getAudioFormat() + ", " +
"channels: " + audioRecord.getChannelCount() + ", " +
"sample rate: " + audioRecord.getSampleRate());
- Logd("AcousticEchoCanceler.isAvailable: " + BuiltInAECIsAvailable());
- if (!BuiltInAECIsAvailable()) {
+ Logd("AcousticEchoCanceler.isAvailable: " + builtInAECIsAvailable());
+ if (!builtInAECIsAvailable()) {
return framesPerBuffer;
}
-
+ if (WebRtcAudioUtils.deviceIsBlacklistedForHwAecUsage()) {
+ // Just in case, ensure that no attempt has been made to enable the
+ // HW AEC on a blacklisted device.
+ assertTrue(!useBuiltInAEC);
+ }
+ // We create an AEC also for blacklisted devices since it is possible that
+ // the HW AEC is enabled by default. The AEC object is then needed to check
+ // the current state and to disable the AEC if it is enabled.
aec = AcousticEchoCanceler.create(audioRecord.getAudioSessionId());
if (aec == null) {
Loge("AcousticEchoCanceler.create failed");
@@ -253,7 +246,13 @@
return true;
}
- /** Helper method which throws an exception when an assertion has failed. */
+ // Returns true if built-in AEC is available. Does not take blacklisting
+ // into account.
+ private static boolean builtInAECIsAvailable() {
+ return WebRtcAudioUtils.isAcousticEchoCancelerSupported();
+ }
+
+ // Helper method which throws an exception when an assertion has failed.
private static void assertTrue(boolean condition) {
if (!condition) {
throw new AssertionError("Expected condition to be true");
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
index 23ad5fe..276045d 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioTrack.java
@@ -145,7 +145,7 @@
}
}
- private int InitPlayout(int sampleRate, int channels) {
+ private void InitPlayout(int sampleRate, int channels) {
Logd("InitPlayout(sampleRate=" + sampleRate + ", channels=" +
channels + ")");
final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
@@ -184,16 +184,11 @@
AudioTrack.MODE_STREAM);
} catch (IllegalArgumentException e) {
Logd(e.getMessage());
- return -1;
+ return;
}
assertTrue(audioTrack.getState() == AudioTrack.STATE_INITIALIZED);
assertTrue(audioTrack.getPlayState() == AudioTrack.PLAYSTATE_STOPPED);
assertTrue(audioTrack.getStreamType() == AudioManager.STREAM_VOICE_CALL);
-
- // Return a delay estimate in milliseconds given the minimum buffer size.
- // TODO(henrika): improve estimate and use real measurements of total
- // latency instead. We can most likely ignore this value.
- return (1000 * (minBufferSizeInBytes / bytesPerFrame) / sampleRate);
}
private boolean StartPlayout() {
diff --git a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
index 69d41e7..9f8cb56 100644
--- a/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
+++ b/webrtc/modules/audio_device/android/java/src/org/webrtc/voiceengine/WebRtcAudioUtils.java
@@ -10,35 +10,80 @@
package org.webrtc.voiceengine;
-import java.lang.Thread;
-
+import android.media.audiofx.AcousticEchoCanceler;
+import android.media.audiofx.AudioEffect;
+import android.media.audiofx.AudioEffect.Descriptor;
import android.media.AudioManager;
import android.os.Build;
import android.util.Log;
+import java.lang.Thread;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
public final class WebRtcAudioUtils {
+ // List of devices where it has been verified that the built-in AEC performs
+ // badly and where it makes sense to avoid using it and rely on WebRTC's
+ // software AEC instead. The device name is given by Build.MODEL.
+ private static final String[] BLACKLISTED_AEC_MODELS = new String[] {
+ "Nexus 5", // Nexus 5
+ };
+
// Use 44.1kHz as the default sampling rate.
private static final int SAMPLE_RATE_HZ = 44100;
+ public static boolean runningOnGingerBreadOrHigher() {
+ // November 2010: Android 2.3. API Level 9.
+ return Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD;
+ }
+
public static boolean runningOnJellyBeanOrHigher() {
+ // June 2012: Android 4.1. API Level 16.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN;
}
public static boolean runningOnJellyBeanMR1OrHigher() {
+ // November 2012: Android 4.2. API Level 17.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1;
}
public static boolean runningOnLollipopOrHigher() {
+ // November 2014: Android 5.0. API Level 21.
return Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP;
}
- /** Helper method for building a string of thread information.*/
+ // Helper method for building a string of thread information.
public static String getThreadInfo() {
return "@[name=" + Thread.currentThread().getName()
+ ", id=" + Thread.currentThread().getId() + "]";
}
- /** Information about the current build, taken from system properties. */
+ // Returns true if the device is blacklisted for HW AEC usage.
+ public static boolean deviceIsBlacklistedForHwAecUsage() {
+ List<String> blackListedModels = Arrays.asList(BLACKLISTED_AEC_MODELS);
+ return blackListedModels.contains(Build.MODEL);
+ }
+
+ // Returns true if the device supports Acoustic Echo Canceler (AEC).
+ public static boolean isAcousticEchoCancelerSupported() {
+ // AcousticEchoCanceler was added in API level 16 (Jelly Bean).
+ if (!WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
+ return false;
+ }
+ // Check if the device implements acoustic echo cancellation.
+ return AcousticEchoCanceler.isAvailable();
+ }
+
+ // Returns true if the device supports AEC and is not blacklisted.
+ public static boolean isAcousticEchoCancelerApproved() {
+ if (deviceIsBlacklistedForHwAecUsage())
+ return false;
+ return isAcousticEchoCancelerSupported();
+ }
+
+ // Information about the current build, taken from system properties.
public static void logDeviceInfo(String tag) {
Log.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
+ "Release: " + Build.VERSION.RELEASE + ", "
diff --git a/webrtc/modules/audio_device/android/low_latency_event.h b/webrtc/modules/audio_device/android/low_latency_event.h
deleted file mode 100644
index a19483d..0000000
--- a/webrtc/modules/audio_device/android/low_latency_event.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
-
-#include <errno.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-namespace webrtc {
-
-// Implementation of event for single waiter, single signal threads. Event
-// is sticky.
-class LowLatencyEvent {
- public:
- LowLatencyEvent();
- ~LowLatencyEvent();
-
- // Readies the event. Must be called before signaling or waiting for event.
- // Returns true on success.
- bool Start();
- // Shuts down the event and releases threads calling WaitOnEvent. Once
- // stopped SignalEvent and WaitOnEvent will have no effect. Start can be
- // called to re-enable the event.
- // Returns true on success.
- bool Stop();
-
- // Releases thread calling WaitOnEvent in a sticky fashion.
- void SignalEvent(int event_id, int event_msg);
- // Waits until SignalEvent or Stop is called.
- void WaitOnEvent(int* event_id, int* event_msg);
-
- private:
- typedef int Handle;
- static const Handle kInvalidHandle;
- static const int kReadHandle;
- static const int kWriteHandle;
-
- // Closes the handle. Returns true on success.
- static bool Close(Handle* handle);
-
- // SignalEvent and WaitOnEvent are actually read/write to file descriptors.
- // Write is signal.
- void WriteFd(int message_id, int message);
- // Read is wait.
- void ReadFd(int* message_id, int* message);
-
- Handle handles_[2];
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_LOW_LATENCY_EVENT_H_
diff --git a/webrtc/modules/audio_device/android/low_latency_event_posix.cc b/webrtc/modules/audio_device/android/low_latency_event_posix.cc
deleted file mode 100644
index f25b030..0000000
--- a/webrtc/modules/audio_device/android/low_latency_event_posix.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/low_latency_event.h"
-
-#include <assert.h>
-
-#define HANDLE_EINTR(x) ({ \
- typeof(x) eintr_wrapper_result; \
- do { \
- eintr_wrapper_result = (x); \
- } while (eintr_wrapper_result == -1 && errno == EINTR); \
- eintr_wrapper_result; \
- })
-
-#define IGNORE_EINTR(x) ({ \
- typeof(x) eintr_wrapper_result; \
- do { \
- eintr_wrapper_result = (x); \
- if (eintr_wrapper_result == -1 && errno == EINTR) { \
- eintr_wrapper_result = 0; \
- } \
- } while (0); \
- eintr_wrapper_result; \
- })
-
-namespace webrtc {
-
-const LowLatencyEvent::Handle LowLatencyEvent::kInvalidHandle = -1;
-const int LowLatencyEvent::kReadHandle = 0;
-const int LowLatencyEvent::kWriteHandle = 1;
-
-LowLatencyEvent::LowLatencyEvent() {
- handles_[kReadHandle] = kInvalidHandle;
- handles_[kWriteHandle] = kInvalidHandle;
-}
-
-LowLatencyEvent::~LowLatencyEvent() {
- Stop();
-}
-
-bool LowLatencyEvent::Start() {
- assert(handles_[kReadHandle] == kInvalidHandle);
- assert(handles_[kWriteHandle] == kInvalidHandle);
-
- return socketpair(AF_UNIX, SOCK_STREAM, 0, handles_) == 0;
-}
-
-bool LowLatencyEvent::Stop() {
- bool ret = Close(&handles_[kReadHandle]) && Close(&handles_[kWriteHandle]);
- handles_[kReadHandle] = kInvalidHandle;
- handles_[kWriteHandle] = kInvalidHandle;
- return ret;
-}
-
-void LowLatencyEvent::SignalEvent(int event_id, int event_msg) {
- WriteFd(event_id, event_msg);
-}
-
-void LowLatencyEvent::WaitOnEvent(int* event_id, int* event_msg) {
- ReadFd(event_id, event_msg);
-}
-
-bool LowLatencyEvent::Close(Handle* handle) {
- if (*handle == kInvalidHandle) {
- return false;
- }
- int retval = IGNORE_EINTR(close(*handle));
- *handle = kInvalidHandle;
- return retval == 0;
-}
-
-void LowLatencyEvent::WriteFd(int message_id, int message) {
- char buffer[sizeof(message_id) + sizeof(message)];
- size_t bytes = sizeof(buffer);
- memcpy(buffer, &message_id, sizeof(message_id));
- memcpy(&buffer[sizeof(message_id)], &message, sizeof(message));
- ssize_t bytes_written = HANDLE_EINTR(write(handles_[kWriteHandle], buffer,
- bytes));
- if (bytes_written != static_cast<ssize_t>(bytes)) {
- assert(false);
- }
-}
-
-void LowLatencyEvent::ReadFd(int* message_id, int* message) {
- char buffer[sizeof(message_id) + sizeof(message)];
- size_t bytes = sizeof(buffer);
- ssize_t bytes_read = HANDLE_EINTR(read(handles_[kReadHandle], buffer, bytes));
- if (bytes_read == 0) {
- *message_id = 0;
- *message = 0;
- return;
- } else if (bytes_read == static_cast<ssize_t>(bytes)) {
- memcpy(message_id, buffer, sizeof(*message_id));
- memcpy(message, &buffer[sizeof(*message_id)], sizeof(*message));
- } else {
- assert(false);
- }
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/low_latency_event_unittest.cc b/webrtc/modules/audio_device/android/low_latency_event_unittest.cc
deleted file mode 100644
index 2138f1f..0000000
--- a/webrtc/modules/audio_device/android/low_latency_event_unittest.cc
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/low_latency_event.h"
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/sleep.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-
-namespace webrtc {
-
-static const int kEventMsg = 1;
-
-class LowLatencyEventTest : public testing::Test {
- public:
- LowLatencyEventTest()
- : process_thread_(ThreadWrapper::CreateThread(
- CbThread, this, "test_thread")),
- terminated_(false),
- iteration_count_(0),
- allowed_iterations_(0) {
- EXPECT_TRUE(event_.Start());
- Start();
- }
- ~LowLatencyEventTest() {
- EXPECT_GE(allowed_iterations_, 1);
- EXPECT_GE(iteration_count_, 1);
- Stop();
- }
-
- void AllowOneIteration() {
- ++allowed_iterations_;
- event_.SignalEvent(allowed_iterations_, kEventMsg);
- }
-
- private:
- void Start() {
- EXPECT_TRUE(process_thread_->Start());
- process_thread_->SetPriority(kRealtimePriority);
- }
- void Stop() {
- terminated_ = true;
- event_.Stop();
- process_thread_->Stop();
- }
-
- static bool CbThread(void* context) {
- return reinterpret_cast<LowLatencyEventTest*>(context)->CbThreadImpl();
- }
- bool CbThreadImpl() {
- int allowed_iterations;
- int message;
- ++iteration_count_;
- event_.WaitOnEvent(&allowed_iterations, &message);
- EXPECT_EQ(iteration_count_, allowed_iterations);
- EXPECT_EQ(message, kEventMsg);
- return !terminated_;
- }
-
- LowLatencyEvent event_;
-
- rtc::scoped_ptr<ThreadWrapper> process_thread_;
- bool terminated_;
- int iteration_count_;
- int allowed_iterations_;
-};
-
-
-TEST_F(LowLatencyEventTest, TriggerEvent) {
- for (int i = 0; i < 3; ++i) {
- AllowOneIteration();
- }
-}
-
-// Events trigger in less than 3ms. Wait for 3 ms to ensure there are no
-// spurious wakeups.
-TEST_F(LowLatencyEventTest, NoTriggerEvent) {
- SleepMs(3);
- // If there were spurious wakeups either the wakeups would have triggered a
- // failure as we haven't allowed an iteration yet. Or the wakeup happened
- // to signal 0, 0 in which case the mismatch will be discovered when allowing
- // an iteration to happen.
- AllowOneIteration();
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/opensles_common.cc b/webrtc/modules/audio_device/android/opensles_common.cc
index 9a16f70..da1e254 100644
--- a/webrtc/modules/audio_device/android/opensles_common.cc
+++ b/webrtc/modules/audio_device/android/opensles_common.cc
@@ -16,7 +16,7 @@
using webrtc::kNumChannels;
-namespace webrtc_opensl {
+namespace webrtc {
SLDataFormat_PCM CreatePcmConfiguration(int sample_rate) {
SLDataFormat_PCM configuration;
diff --git a/webrtc/modules/audio_device/android/opensles_common.h b/webrtc/modules/audio_device/android/opensles_common.h
index daa51a2..75e4ff4 100644
--- a/webrtc/modules/audio_device/android/opensles_common.h
+++ b/webrtc/modules/audio_device/android/opensles_common.h
@@ -13,10 +13,42 @@
#include <SLES/OpenSLES.h>
-namespace webrtc_opensl {
+#include "webrtc/base/checks.h"
+
+namespace webrtc {
SLDataFormat_PCM CreatePcmConfiguration(int sample_rate);
+// Helper class for using SLObjectItf interfaces.
+template <typename SLType, typename SLDerefType>
+class ScopedSLObject {
+ public:
+ ScopedSLObject() : obj_(nullptr) {}
+
+ ~ScopedSLObject() { Reset(); }
+
+ SLType* Receive() {
+ DCHECK(!obj_);
+ return &obj_;
+ }
+
+ SLDerefType operator->() { return *obj_; }
+
+ SLType Get() const { return obj_; }
+
+ void Reset() {
+ if (obj_) {
+ (*obj_)->Destroy(obj_);
+ obj_ = nullptr;
+ }
+ }
+
+ private:
+ SLType obj_;
+};
+
+typedef ScopedSLObject<SLObjectItf, const SLObjectItf_*> ScopedSLObjectItf;
+
-} // namespace webrtc_opensl
+} // namespace webrtc
#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_COMMON_H_
diff --git a/webrtc/modules/audio_device/android/opensles_input.cc b/webrtc/modules/audio_device/android/opensles_input.cc
deleted file mode 100644
index 0fc7cc7..0000000
--- a/webrtc/modules/audio_device/android/opensles_input.cc
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/opensles_input.h"
-
-#include <assert.h>
-
-#include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/audio_device/android/opensles_common.h"
-#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-#include "webrtc/modules/audio_device/audio_device_buffer.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-#define VOID_RETURN
-#define OPENSL_RETURN_ON_FAILURE(op, ret_val) \
- do { \
- SLresult err = (op); \
- if (err != SL_RESULT_SUCCESS) { \
- assert(false); \
- return ret_val; \
- } \
- } while (0)
-
-static const SLEngineOption kOption[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
-};
-
-enum {
- kNoOverrun,
- kOverrun,
-};
-
-namespace webrtc {
-
-OpenSlesInput::OpenSlesInput(
- PlayoutDelayProvider* delay_provider, AudioManager* audio_manager)
- : delay_provider_(delay_provider),
- initialized_(false),
- mic_initialized_(false),
- rec_initialized_(false),
- crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- recording_(false),
- num_fifo_buffers_needed_(0),
- number_overruns_(0),
- sles_engine_(NULL),
- sles_engine_itf_(NULL),
- sles_recorder_(NULL),
- sles_recorder_itf_(NULL),
- sles_recorder_sbq_itf_(NULL),
- audio_buffer_(NULL),
- active_queue_(0),
- rec_sampling_rate_(0),
- agc_enabled_(false),
- recording_delay_(0) {
-}
-
-OpenSlesInput::~OpenSlesInput() {
-}
-
-int32_t OpenSlesInput::SetAndroidAudioDeviceObjects(void* javaVM,
- void* context) {
- return 0;
-}
-
-void OpenSlesInput::ClearAndroidAudioDeviceObjects() {
-}
-
-int32_t OpenSlesInput::Init() {
- assert(!initialized_);
-
- // Set up OpenSL engine.
- OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
- NULL, NULL),
- -1);
- OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
- SL_BOOLEAN_FALSE),
- -1);
- OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
- SL_IID_ENGINE,
- &sles_engine_itf_),
- -1);
-
- if (InitSampleRate() != 0) {
- return -1;
- }
- AllocateBuffers();
- initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesInput::Terminate() {
- // It is assumed that the caller has stopped recording before terminating.
- assert(!recording_);
- (*sles_engine_)->Destroy(sles_engine_);
- initialized_ = false;
- mic_initialized_ = false;
- rec_initialized_ = false;
- return 0;
-}
-
-int32_t OpenSlesInput::RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- assert(index == 0);
- // Empty strings.
- name[0] = '\0';
- guid[0] = '\0';
- return 0;
-}
-
-int32_t OpenSlesInput::SetRecordingDevice(uint16_t index) {
- assert(index == 0);
- return 0;
-}
-
-int32_t OpenSlesInput::RecordingIsAvailable(bool& available) { // NOLINT
- available = true;
- return 0;
-}
-
-int32_t OpenSlesInput::InitRecording() {
- assert(initialized_);
- rec_initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesInput::StartRecording() {
- assert(rec_initialized_);
- assert(!recording_);
- if (!CreateAudioRecorder()) {
- return -1;
- }
- // Setup to receive buffer queue event callbacks.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_sbq_itf_)->RegisterCallback(
- sles_recorder_sbq_itf_,
- RecorderSimpleBufferQueueCallback,
- this),
- -1);
-
- if (!EnqueueAllBuffers()) {
- return -1;
- }
-
- {
- // To prevent the compiler from e.g. optimizing the code to
- // recording_ = StartCbThreads() which wouldn't have been thread safe.
- CriticalSectionScoped lock(crit_sect_.get());
- recording_ = true;
- }
- if (!StartCbThreads()) {
- recording_ = false;
- return -1;
- }
- return 0;
-}
-
-int32_t OpenSlesInput::StopRecording() {
- StopCbThreads();
- DestroyAudioRecorder();
- recording_ = false;
- return 0;
-}
-
-int32_t OpenSlesInput::SetAGC(bool enable) {
- agc_enabled_ = enable;
- return 0;
-}
-
-int32_t OpenSlesInput::InitMicrophone() {
- assert(initialized_);
- assert(!recording_);
- mic_initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesInput::MicrophoneVolumeIsAvailable(bool& available) { // NOLINT
- available = false;
- return 0;
-}
-
-int32_t OpenSlesInput::MinMicrophoneVolume(
- uint32_t& minVolume) const { // NOLINT
- minVolume = 0;
- return 0;
-}
-
-int32_t OpenSlesInput::MicrophoneVolumeStepSize(
- uint16_t& stepSize) const {
- stepSize = 1;
- return 0;
-}
-
-int32_t OpenSlesInput::MicrophoneMuteIsAvailable(bool& available) { // NOLINT
- available = false; // Mic mute not supported on Android
- return 0;
-}
-
-int32_t OpenSlesInput::MicrophoneBoostIsAvailable(bool& available) { // NOLINT
- available = false; // Mic boost not supported on Android.
- return 0;
-}
-
-int32_t OpenSlesInput::SetMicrophoneBoost(bool enable) {
- assert(false);
- return -1; // Not supported
-}
-
-int32_t OpenSlesInput::MicrophoneBoost(bool& enabled) const { // NOLINT
- assert(false);
- return -1; // Not supported
-}
-
-int32_t OpenSlesInput::StereoRecordingIsAvailable(bool& available) { // NOLINT
- available = false; // Stereo recording not supported on Android.
- return 0;
-}
-
-int32_t OpenSlesInput::StereoRecording(bool& enabled) const { // NOLINT
- enabled = false;
- return 0;
-}
-
-int32_t OpenSlesInput::RecordingDelay(uint16_t& delayMS) const { // NOLINT
- delayMS = recording_delay_;
- return 0;
-}
-
-void OpenSlesInput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
- audio_buffer_ = audioBuffer;
-}
-
-int OpenSlesInput::InitSampleRate() {
- UpdateSampleRate();
- audio_buffer_->SetRecordingSampleRate(rec_sampling_rate_);
- audio_buffer_->SetRecordingChannels(kNumChannels);
- UpdateRecordingDelay();
- return 0;
-}
-
-int OpenSlesInput::buffer_size_samples() const {
- // Since there is no low latency recording, use buffer size corresponding to
- // 10ms of data since that's the framesize WebRTC uses. Getting any other
- // size would require patching together buffers somewhere before passing them
- // to WebRTC.
- return rec_sampling_rate_ * 10 / 1000;
-}
-
-int OpenSlesInput::buffer_size_bytes() const {
- return buffer_size_samples() * kNumChannels * sizeof(int16_t);
-}
-
-void OpenSlesInput::UpdateRecordingDelay() {
- // TODO(hellner): Add accurate delay estimate.
- // On average half the current buffer will have been filled with audio.
- int outstanding_samples =
- (TotalBuffersUsed() - 0.5) * buffer_size_samples();
- recording_delay_ = outstanding_samples / (rec_sampling_rate_ / 1000);
-}
-
-void OpenSlesInput::UpdateSampleRate() {
- rec_sampling_rate_ = audio_manager_.low_latency_supported() ?
- audio_manager_.native_output_sample_rate() : kDefaultSampleRate;
-}
-
-void OpenSlesInput::CalculateNumFifoBuffersNeeded() {
- // Buffer size is 10ms of data.
- num_fifo_buffers_needed_ = kNum10MsToBuffer;
-}
-
-void OpenSlesInput::AllocateBuffers() {
- // Allocate FIFO to handle passing buffers between processing and OpenSL
- // threads.
- CalculateNumFifoBuffersNeeded();
- assert(num_fifo_buffers_needed_ > 0);
- fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));
-
- // Allocate the memory area to be used.
- rec_buf_.reset(new rtc::scoped_ptr<int8_t[]>[TotalBuffersUsed()]);
- for (int i = 0; i < TotalBuffersUsed(); ++i) {
- rec_buf_[i].reset(new int8_t[buffer_size_bytes()]);
- }
-}
-
-int OpenSlesInput::TotalBuffersUsed() const {
- return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
-}
-
-bool OpenSlesInput::EnqueueAllBuffers() {
- active_queue_ = 0;
- number_overruns_ = 0;
- for (int i = 0; i < kNumOpenSlBuffers; ++i) {
- memset(rec_buf_[i].get(), 0, buffer_size_bytes());
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_sbq_itf_)->Enqueue(
- sles_recorder_sbq_itf_,
- reinterpret_cast<void*>(rec_buf_[i].get()),
- buffer_size_bytes()),
- false);
- }
- // In case of underrun the fifo will be at capacity. In case of first enqueue
- // no audio can have been returned yet meaning fifo must be empty. Any other
- // values are unexpected.
- assert(fifo_->size() == fifo_->capacity() ||
- fifo_->size() == 0);
- // OpenSL recording has been stopped. I.e. only this thread is touching
- // |fifo_|.
- while (fifo_->size() != 0) {
- // Clear the fifo.
- fifo_->Pop();
- }
- return true;
-}
-
-bool OpenSlesInput::CreateAudioRecorder() {
- if (!event_.Start()) {
- assert(false);
- return false;
- }
- SLDataLocator_IODevice micLocator = {
- SL_DATALOCATOR_IODEVICE, SL_IODEVICE_AUDIOINPUT,
- SL_DEFAULTDEVICEID_AUDIOINPUT, NULL };
- SLDataSource audio_source = { &micLocator, NULL };
-
- SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(TotalBuffersUsed())
- };
- SLDataFormat_PCM configuration =
- webrtc_opensl::CreatePcmConfiguration(rec_sampling_rate_);
- SLDataSink audio_sink = { &simple_buf_queue, &configuration };
-
- // Interfaces for recording android audio data and Android are needed.
- // Note the interfaces still need to be initialized. This only tells OpenSl
- // that the interfaces will be needed at some point.
- const SLInterfaceID id[kNumInterfaces] = {
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE, SL_IID_ANDROIDCONFIGURATION };
- const SLboolean req[kNumInterfaces] = {
- SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
- OPENSL_RETURN_ON_FAILURE(
- (*sles_engine_itf_)->CreateAudioRecorder(sles_engine_itf_,
- &sles_recorder_,
- &audio_source,
- &audio_sink,
- kNumInterfaces,
- id,
- req),
- false);
-
- SLAndroidConfigurationItf recorder_config;
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_)->GetInterface(sles_recorder_,
- SL_IID_ANDROIDCONFIGURATION,
- &recorder_config),
- false);
-
- // Set audio recorder configuration to
- // SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION which ensures that we
- // use the main microphone tuned for audio communications.
- SLint32 stream_type = SL_ANDROID_RECORDING_PRESET_VOICE_COMMUNICATION;
- OPENSL_RETURN_ON_FAILURE(
- (*recorder_config)->SetConfiguration(recorder_config,
- SL_ANDROID_KEY_RECORDING_PRESET,
- &stream_type,
- sizeof(SLint32)),
- false);
-
- // Realize the recorder in synchronous mode.
- OPENSL_RETURN_ON_FAILURE((*sles_recorder_)->Realize(sles_recorder_,
- SL_BOOLEAN_FALSE),
- false);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_)->GetInterface(sles_recorder_, SL_IID_RECORD,
- static_cast<void*>(&sles_recorder_itf_)),
- false);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_)->GetInterface(
- sles_recorder_,
- SL_IID_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<void*>(&sles_recorder_sbq_itf_)),
- false);
- return true;
-}
-
-void OpenSlesInput::DestroyAudioRecorder() {
- event_.Stop();
- if (sles_recorder_sbq_itf_) {
- // Release all buffers currently queued up.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_sbq_itf_)->Clear(sles_recorder_sbq_itf_),
- VOID_RETURN);
- sles_recorder_sbq_itf_ = NULL;
- }
- sles_recorder_itf_ = NULL;
-
- if (sles_recorder_) {
- (*sles_recorder_)->Destroy(sles_recorder_);
- sles_recorder_ = NULL;
- }
-}
-
-bool OpenSlesInput::HandleOverrun(int event_id, int event_msg) {
- if (!recording_) {
- return false;
- }
- if (event_id == kNoOverrun) {
- return false;
- }
- assert(event_id == kOverrun);
- assert(event_msg > 0);
- // Wait for all enqueued buffers be flushed.
- if (event_msg != kNumOpenSlBuffers) {
- return true;
- }
- // All buffers passed to OpenSL have been flushed. Restart the audio from
- // scratch.
- // No need to check sles_recorder_itf_ as recording_ would be false before it
- // is set to NULL.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
- SL_RECORDSTATE_STOPPED),
- true);
- EnqueueAllBuffers();
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
- SL_RECORDSTATE_RECORDING),
- true);
- return true;
-}
-
-void OpenSlesInput::RecorderSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queue_itf,
- void* context) {
- OpenSlesInput* audio_device = reinterpret_cast<OpenSlesInput*>(context);
- audio_device->RecorderSimpleBufferQueueCallbackHandler(queue_itf);
-}
-
-void OpenSlesInput::RecorderSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queue_itf) {
- if (fifo_->size() >= fifo_->capacity() || number_overruns_ > 0) {
- ++number_overruns_;
- event_.SignalEvent(kOverrun, number_overruns_);
- return;
- }
- int8_t* audio = rec_buf_[active_queue_].get();
- // There is at least one spot available in the fifo.
- fifo_->Push(audio);
- active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
- event_.SignalEvent(kNoOverrun, 0);
- // active_queue_ is indexing the next buffer to record to. Since the current
- // buffer has been recorded it means that the buffer index
- // kNumOpenSlBuffers - 1 past |active_queue_| contains the next free buffer.
- // Since |fifo_| wasn't at capacity, at least one buffer is free to be used.
- int next_free_buffer =
- (active_queue_ + kNumOpenSlBuffers - 1) % TotalBuffersUsed();
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_sbq_itf_)->Enqueue(
- sles_recorder_sbq_itf_,
- reinterpret_cast<void*>(rec_buf_[next_free_buffer].get()),
- buffer_size_bytes()),
- VOID_RETURN);
-}
-
-bool OpenSlesInput::StartCbThreads() {
- rec_thread_ = ThreadWrapper::CreateThread(CbThread, this,
- "opensl_rec_thread");
- assert(rec_thread_.get());
- if (!rec_thread_->Start()) {
- assert(false);
- return false;
- }
- rec_thread_->SetPriority(kRealtimePriority);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
- SL_RECORDSTATE_RECORDING),
- false);
- return true;
-}
-
-void OpenSlesInput::StopCbThreads() {
- {
- CriticalSectionScoped lock(crit_sect_.get());
- recording_ = false;
- }
- if (sles_recorder_itf_) {
- OPENSL_RETURN_ON_FAILURE(
- (*sles_recorder_itf_)->SetRecordState(sles_recorder_itf_,
- SL_RECORDSTATE_STOPPED),
- VOID_RETURN);
- }
- if (rec_thread_.get() == NULL) {
- return;
- }
- event_.Stop();
- if (rec_thread_->Stop()) {
- rec_thread_.reset();
- } else {
- assert(false);
- }
-}
-
-bool OpenSlesInput::CbThread(void* context) {
- return reinterpret_cast<OpenSlesInput*>(context)->CbThreadImpl();
-}
-
-bool OpenSlesInput::CbThreadImpl() {
- int event_id;
- int event_msg;
- // event_ must not be waited on while a lock has been taken.
- event_.WaitOnEvent(&event_id, &event_msg);
-
- CriticalSectionScoped lock(crit_sect_.get());
- if (HandleOverrun(event_id, event_msg)) {
- return recording_;
- }
- // If the fifo_ has audio data process it.
- while (fifo_->size() > 0 && recording_) {
- int8_t* audio = fifo_->Pop();
- audio_buffer_->SetRecordedBuffer(audio, buffer_size_samples());
- audio_buffer_->SetVQEData(delay_provider_->PlayoutDelayMs(),
- recording_delay_, 0);
- audio_buffer_->DeliverRecordedData();
- }
- return recording_;
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/opensles_input.h b/webrtc/modules/audio_device/android/opensles_input.h
deleted file mode 100644
index 1102543..0000000
--- a/webrtc/modules/audio_device/android/opensles_input.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
-
-#include <SLES/OpenSLES.h>
-#include <SLES/OpenSLES_Android.h>
-#include <SLES/OpenSLES_AndroidConfiguration.h>
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
-#include "webrtc/modules/audio_device/android/low_latency_event.h"
-#include "webrtc/modules/audio_device/include/audio_device.h"
-#include "webrtc/modules/audio_device/include/audio_device_defines.h"
-
-namespace webrtc {
-
-class AudioDeviceBuffer;
-class AudioManager;
-class CriticalSectionWrapper;
-class PlayoutDelayProvider;
-class SingleRwFifo;
-class ThreadWrapper;
-
-// OpenSL implementation that facilitate capturing PCM data from an android
-// device's microphone.
-// This class is Thread-compatible. I.e. Given an instance of this class, calls
-// to non-const methods require exclusive access to the object.
-class OpenSlesInput {
- public:
- OpenSlesInput(
- PlayoutDelayProvider* delay_provider, AudioManager* audio_manager);
- ~OpenSlesInput();
-
- static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
- void* context);
- static void ClearAndroidAudioDeviceObjects();
-
- // Main initializaton and termination
- int32_t Init();
- int32_t Terminate();
- bool Initialized() const { return initialized_; }
-
- // Device enumeration
- int16_t RecordingDevices() { return 1; }
- int32_t RecordingDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
-
- // Device selection
- int32_t SetRecordingDevice(uint16_t index);
- int32_t SetRecordingDevice(
- AudioDeviceModule::WindowsDeviceType device) { return -1; }
-
- // No-op
- int32_t SetRecordingSampleRate(uint32_t sample_rate_hz) { return 0; }
-
- // Audio transport initialization
- int32_t RecordingIsAvailable(bool& available); // NOLINT
- int32_t InitRecording();
- bool RecordingIsInitialized() const { return rec_initialized_; }
-
- // Audio transport control
- int32_t StartRecording();
- int32_t StopRecording();
- bool Recording() const { return recording_; }
-
- // Microphone Automatic Gain Control (AGC)
- int32_t SetAGC(bool enable);
- bool AGC() const { return agc_enabled_; }
-
- // Audio mixer initialization
- int32_t InitMicrophone();
- bool MicrophoneIsInitialized() const { return mic_initialized_; }
-
- // Microphone volume controls
- int32_t MicrophoneVolumeIsAvailable(bool& available); // NOLINT
- // TODO(leozwang): Add microphone volume control when OpenSL APIs
- // are available.
- int32_t SetMicrophoneVolume(uint32_t volume) { return 0; }
- int32_t MicrophoneVolume(uint32_t& volume) const { return -1; } // NOLINT
- int32_t MaxMicrophoneVolume(
- uint32_t& maxVolume) const { return 0; } // NOLINT
- int32_t MinMicrophoneVolume(uint32_t& minVolume) const; // NOLINT
- int32_t MicrophoneVolumeStepSize(
- uint16_t& stepSize) const; // NOLINT
-
- // Microphone mute control
- int32_t MicrophoneMuteIsAvailable(bool& available); // NOLINT
- int32_t SetMicrophoneMute(bool enable) { return -1; }
- int32_t MicrophoneMute(bool& enabled) const { return -1; } // NOLINT
-
- // Microphone boost control
- int32_t MicrophoneBoostIsAvailable(bool& available); // NOLINT
- int32_t SetMicrophoneBoost(bool enable);
- int32_t MicrophoneBoost(bool& enabled) const; // NOLINT
-
- // Stereo support
- int32_t StereoRecordingIsAvailable(bool& available); // NOLINT
- int32_t SetStereoRecording(bool enable) { return -1; }
- int32_t StereoRecording(bool& enabled) const; // NOLINT
-
- // Delay information and control
- int32_t RecordingDelay(uint16_t& delayMS) const; // NOLINT
-
- bool RecordingWarning() const { return false; }
- bool RecordingError() const { return false; }
- void ClearRecordingWarning() {}
- void ClearRecordingError() {}
-
- // Attach audio buffer
- void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
- // Built-in AEC is only supported in combination with Java/AudioRecord.
- bool BuiltInAECIsAvailable() const { return false; }
- int32_t EnableBuiltInAEC(bool enable) { return -1; }
-
- private:
- enum {
- kNumInterfaces = 2,
- // Keep as few OpenSL buffers as possible to avoid wasting memory. 2 is
- // minimum for playout. Keep 2 for recording as well.
- kNumOpenSlBuffers = 2,
- kNum10MsToBuffer = 3,
- };
-
- int InitSampleRate();
- int buffer_size_samples() const;
- int buffer_size_bytes() const;
- void UpdateRecordingDelay();
- void UpdateSampleRate();
- void CalculateNumFifoBuffersNeeded();
- void AllocateBuffers();
- int TotalBuffersUsed() const;
- bool EnqueueAllBuffers();
- // This function also configures the audio recorder, e.g. sample rate to use
- // etc, so it should be called when starting recording.
- bool CreateAudioRecorder();
- void DestroyAudioRecorder();
-
- // When overrun happens there will be more frames received from OpenSL than
- // the desired number of buffers. It is possible to expand the number of
- // buffers as you go but that would greatly increase the complexity of this
- // code. HandleOverrun gracefully handles the scenario by restarting playout,
- // throwing away all pending audio data. This will sound like a click. This
- // is also logged to identify these types of clicks.
- // This function returns true if there has been overrun. Further processing
- // of audio data should be avoided until this function returns false again.
- // The function needs to be protected by |crit_sect_|.
- bool HandleOverrun(int event_id, int event_msg);
-
- static void RecorderSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queueItf,
- void* pContext);
- // This function must not take any locks or do any heavy work. It is a
- // requirement for the OpenSL implementation to work as intended. The reason
- // for this is that taking locks exposes the OpenSL thread to the risk of
- // priority inversion.
- void RecorderSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queueItf);
-
- bool StartCbThreads();
- void StopCbThreads();
- static bool CbThread(void* context);
- // This function must be protected against data race with threads calling this
- // class' public functions. It is a requirement for this class to be
- // Thread-compatible.
- bool CbThreadImpl();
-
- PlayoutDelayProvider* delay_provider_;
-
- // Java API handle
- AudioManagerJni audio_manager_;
-
- // TODO(henrika): improve this area
- // PlayoutDelayProvider* delay_provider_;
-
- bool initialized_;
- bool mic_initialized_;
- bool rec_initialized_;
-
- // Members that are read/write accessed concurrently by the process thread and
- // threads calling public functions of this class.
- rtc::scoped_ptr<ThreadWrapper> rec_thread_; // Processing thread
- rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
- // This member controls the starting and stopping of recording audio to the
- // the device.
- bool recording_;
-
- // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
- // may not be the same thread as T2. T2 is the process thread and T1 is the
- // OpenSL thread.
- rtc::scoped_ptr<SingleRwFifo> fifo_;
- int num_fifo_buffers_needed_;
- LowLatencyEvent event_;
- int number_overruns_;
-
- // OpenSL handles
- SLObjectItf sles_engine_;
- SLEngineItf sles_engine_itf_;
- SLObjectItf sles_recorder_;
- SLRecordItf sles_recorder_itf_;
- SLAndroidSimpleBufferQueueItf sles_recorder_sbq_itf_;
-
- // Audio buffers
- AudioDeviceBuffer* audio_buffer_;
- // Holds all allocated memory such that it is deallocated properly.
- rtc::scoped_ptr<rtc::scoped_ptr<int8_t[]>[]> rec_buf_;
- // Index in |rec_buf_| pointing to the audio buffer that will be ready the
- // next time RecorderSimpleBufferQueueCallbackHandler is invoked.
- // Ready means buffer contains audio data from the device.
- int active_queue_;
-
- // Audio settings
- uint32_t rec_sampling_rate_;
- bool agc_enabled_;
-
- // Audio status
- uint16_t recording_delay_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_INPUT_H_
diff --git a/webrtc/modules/audio_device/android/opensles_output.cc b/webrtc/modules/audio_device/android/opensles_output.cc
deleted file mode 100644
index 9d34c69..0000000
--- a/webrtc/modules/audio_device/android/opensles_output.cc
+++ /dev/null
@@ -1,576 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/opensles_output.h"
-
-#include <assert.h>
-
-#include "webrtc/modules/audio_device/android/opensles_common.h"
-#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
-#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-#include "webrtc/modules/audio_device/audio_device_buffer.h"
-#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
-#include "webrtc/system_wrappers/interface/thread_wrapper.h"
-#include "webrtc/system_wrappers/interface/trace.h"
-
-#define VOID_RETURN
-#define OPENSL_RETURN_ON_FAILURE(op, ret_val) \
- do { \
- SLresult err = (op); \
- if (err != SL_RESULT_SUCCESS) { \
- assert(false); \
- return ret_val; \
- } \
- } while (0)
-
-static const SLEngineOption kOption[] = {
- { SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE) },
-};
-
-enum {
- kNoUnderrun,
- kUnderrun,
-};
-
-namespace webrtc {
-
-OpenSlesOutput::OpenSlesOutput(AudioManager* audio_manager)
- : initialized_(false),
- speaker_initialized_(false),
- play_initialized_(false),
- crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
- playing_(false),
- num_fifo_buffers_needed_(0),
- number_underruns_(0),
- sles_engine_(NULL),
- sles_engine_itf_(NULL),
- sles_player_(NULL),
- sles_player_itf_(NULL),
- sles_player_sbq_itf_(NULL),
- sles_output_mixer_(NULL),
- audio_buffer_(NULL),
- active_queue_(0),
- speaker_sampling_rate_(kDefaultSampleRate),
- buffer_size_samples_(0),
- buffer_size_bytes_(0),
- playout_delay_(0) {
-}
-
-OpenSlesOutput::~OpenSlesOutput() {
-}
-
-int32_t OpenSlesOutput::SetAndroidAudioDeviceObjects(void* javaVM,
- void* context) {
- AudioManagerJni::SetAndroidAudioDeviceObjects(javaVM, context);
- return 0;
-}
-
-void OpenSlesOutput::ClearAndroidAudioDeviceObjects() {
- AudioManagerJni::ClearAndroidAudioDeviceObjects();
-}
-
-int32_t OpenSlesOutput::Init() {
- assert(!initialized_);
-
- // Set up OpenSl engine.
- OPENSL_RETURN_ON_FAILURE(slCreateEngine(&sles_engine_, 1, kOption, 0,
- NULL, NULL),
- -1);
- OPENSL_RETURN_ON_FAILURE((*sles_engine_)->Realize(sles_engine_,
- SL_BOOLEAN_FALSE),
- -1);
- OPENSL_RETURN_ON_FAILURE((*sles_engine_)->GetInterface(sles_engine_,
- SL_IID_ENGINE,
- &sles_engine_itf_),
- -1);
- // Set up OpenSl output mix.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_engine_itf_)->CreateOutputMix(sles_engine_itf_,
- &sles_output_mixer_,
- 0,
- NULL,
- NULL),
- -1);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_output_mixer_)->Realize(sles_output_mixer_,
- SL_BOOLEAN_FALSE),
- -1);
-
- if (!InitSampleRate()) {
- return -1;
- }
- AllocateBuffers();
- initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesOutput::Terminate() {
- // It is assumed that the caller has stopped recording before terminating.
- assert(!playing_);
- (*sles_output_mixer_)->Destroy(sles_output_mixer_);
- (*sles_engine_)->Destroy(sles_engine_);
- initialized_ = false;
- speaker_initialized_ = false;
- play_initialized_ = false;
- return 0;
-}
-
-int32_t OpenSlesOutput::PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]) {
- assert(index == 0);
- // Empty strings.
- name[0] = '\0';
- guid[0] = '\0';
- return 0;
-}
-
-int32_t OpenSlesOutput::SetPlayoutDevice(uint16_t index) {
- assert(index == 0);
- return 0;
-}
-
-int32_t OpenSlesOutput::PlayoutIsAvailable(bool& available) { // NOLINT
- available = true;
- return 0;
-}
-
-int32_t OpenSlesOutput::InitPlayout() {
- assert(initialized_);
- play_initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesOutput::StartPlayout() {
- assert(play_initialized_);
- assert(!playing_);
- if (!CreateAudioPlayer()) {
- return -1;
- }
-
- // Register callback to receive enqueued buffers.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_sbq_itf_)->RegisterCallback(sles_player_sbq_itf_,
- PlayerSimpleBufferQueueCallback,
- this),
- -1);
- if (!EnqueueAllBuffers()) {
- return -1;
- }
-
- {
- // To prevent the compiler from e.g. optimizing the code to
- // playing_ = StartCbThreads() which wouldn't have been thread safe.
- CriticalSectionScoped lock(crit_sect_.get());
- playing_ = true;
- }
- if (!StartCbThreads()) {
- playing_ = false;
- }
- return 0;
-}
-
-int32_t OpenSlesOutput::StopPlayout() {
- StopCbThreads();
- DestroyAudioPlayer();
- playing_ = false;
- return 0;
-}
-
-int32_t OpenSlesOutput::InitSpeaker() {
- assert(!playing_);
- speaker_initialized_ = true;
- return 0;
-}
-
-int32_t OpenSlesOutput::SpeakerVolumeIsAvailable(bool& available) { // NOLINT
- available = true;
- return 0;
-}
-
-int32_t OpenSlesOutput::SetSpeakerVolume(uint32_t volume) {
- assert(speaker_initialized_);
- assert(initialized_);
- // TODO(hellner): implement.
- return 0;
-}
-
-int32_t OpenSlesOutput::MaxSpeakerVolume(uint32_t& maxVolume) const { // NOLINT
- assert(speaker_initialized_);
- assert(initialized_);
- // TODO(hellner): implement.
- maxVolume = 0;
- return 0;
-}
-
-int32_t OpenSlesOutput::MinSpeakerVolume(uint32_t& minVolume) const { // NOLINT
- assert(speaker_initialized_);
- assert(initialized_);
- // TODO(hellner): implement.
- minVolume = 0;
- return 0;
-}
-
-int32_t OpenSlesOutput::SpeakerVolumeStepSize(
- uint16_t& stepSize) const { // NOLINT
- assert(speaker_initialized_);
- stepSize = 1;
- return 0;
-}
-
-int32_t OpenSlesOutput::SpeakerMuteIsAvailable(bool& available) { // NOLINT
- available = false;
- return 0;
-}
-
-int32_t OpenSlesOutput::StereoPlayoutIsAvailable(bool& available) { // NOLINT
- available = false;
- return 0;
-}
-
-int32_t OpenSlesOutput::SetStereoPlayout(bool enable) {
- if (enable) {
- assert(false);
- return -1;
- }
- return 0;
-}
-
-int32_t OpenSlesOutput::StereoPlayout(bool& enabled) const { // NOLINT
- enabled = kNumChannels == 2;
- return 0;
-}
-
-int32_t OpenSlesOutput::PlayoutBuffer(
- AudioDeviceModule::BufferType& type, // NOLINT
- uint16_t& sizeMS) const { // NOLINT
- type = AudioDeviceModule::kAdaptiveBufferSize;
- sizeMS = playout_delay_;
- return 0;
-}
-
-int32_t OpenSlesOutput::PlayoutDelay(uint16_t& delayMS) const { // NOLINT
- delayMS = playout_delay_;
- return 0;
-}
-
-void OpenSlesOutput::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
- audio_buffer_ = audioBuffer;
-}
-
-int32_t OpenSlesOutput::SetLoudspeakerStatus(bool enable) {
- return 0;
-}
-
-int32_t OpenSlesOutput::GetLoudspeakerStatus(bool& enabled) const { // NOLINT
- enabled = true;
- return 0;
-}
-
-int OpenSlesOutput::PlayoutDelayMs() {
- return playout_delay_;
-}
-
-bool OpenSlesOutput::InitSampleRate() {
- if (!SetLowLatency()) {
- speaker_sampling_rate_ = kDefaultSampleRate;
- // Default is to use 10ms buffers.
- buffer_size_samples_ = speaker_sampling_rate_ * 10 / 1000;
- }
- if (audio_buffer_->SetPlayoutSampleRate(speaker_sampling_rate_) < 0) {
- return false;
- }
- if (audio_buffer_->SetPlayoutChannels(kNumChannels) < 0) {
- return false;
- }
- UpdatePlayoutDelay();
- return true;
-}
-
-void OpenSlesOutput::UpdatePlayoutDelay() {
- // TODO(hellner): Add accurate delay estimate.
- // On average half the current buffer will have been played out.
- int outstanding_samples = (TotalBuffersUsed() - 0.5) * buffer_size_samples_;
- playout_delay_ = outstanding_samples / (speaker_sampling_rate_ / 1000);
-}
-
-bool OpenSlesOutput::SetLowLatency() {
- if (!audio_manager_.low_latency_supported()) {
- return false;
- }
- buffer_size_samples_ = audio_manager_.native_buffer_size();
- assert(buffer_size_samples_ > 0);
- speaker_sampling_rate_ = audio_manager_.native_output_sample_rate();
- assert(speaker_sampling_rate_ > 0);
- return true;
-}
-
-void OpenSlesOutput::CalculateNumFifoBuffersNeeded() {
- int number_of_bytes_needed =
- (speaker_sampling_rate_ * kNumChannels * sizeof(int16_t)) * 10 / 1000;
-
- // Ceiling of integer division: 1 + ((x - 1) / y)
- int buffers_per_10_ms =
- 1 + ((number_of_bytes_needed - 1) / buffer_size_bytes_);
- // |num_fifo_buffers_needed_| is a multiple of 10ms of buffered up audio.
- num_fifo_buffers_needed_ = kNum10MsToBuffer * buffers_per_10_ms;
-}
-
-void OpenSlesOutput::AllocateBuffers() {
- // Allocate fine buffer to provide frames of the desired size.
- buffer_size_bytes_ = buffer_size_samples_ * kNumChannels * sizeof(int16_t);
- fine_buffer_.reset(new FineAudioBuffer(audio_buffer_, buffer_size_bytes_,
- speaker_sampling_rate_));
-
-  // Allocate FIFO to handle passing buffers between processing and OpenSL
-  // threads.
- CalculateNumFifoBuffersNeeded(); // Needs |buffer_size_bytes_| to be known
- assert(num_fifo_buffers_needed_ > 0);
- fifo_.reset(new SingleRwFifo(num_fifo_buffers_needed_));
-
- // Allocate the memory area to be used.
- play_buf_.reset(new rtc::scoped_ptr<int8_t[]>[TotalBuffersUsed()]);
- int required_buffer_size = fine_buffer_->RequiredBufferSizeBytes();
- for (int i = 0; i < TotalBuffersUsed(); ++i) {
- play_buf_[i].reset(new int8_t[required_buffer_size]);
- }
-}
-
-int OpenSlesOutput::TotalBuffersUsed() const {
- return num_fifo_buffers_needed_ + kNumOpenSlBuffers;
-}
-
-bool OpenSlesOutput::EnqueueAllBuffers() {
- active_queue_ = 0;
- number_underruns_ = 0;
- for (int i = 0; i < kNumOpenSlBuffers; ++i) {
- memset(play_buf_[i].get(), 0, buffer_size_bytes_);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_sbq_itf_)->Enqueue(
- sles_player_sbq_itf_,
- reinterpret_cast<void*>(play_buf_[i].get()),
- buffer_size_bytes_),
- false);
- }
- // OpenSL playing has been stopped. I.e. only this thread is touching
- // |fifo_|.
- while (fifo_->size() != 0) {
- // Underrun might have happened when pushing new buffers to the FIFO.
- fifo_->Pop();
- }
- for (int i = kNumOpenSlBuffers; i < TotalBuffersUsed(); ++i) {
- memset(play_buf_[i].get(), 0, buffer_size_bytes_);
- fifo_->Push(play_buf_[i].get());
- }
- return true;
-}
-
-bool OpenSlesOutput::CreateAudioPlayer() {
- if (!event_.Start()) {
- assert(false);
- return false;
- }
- SLDataLocator_AndroidSimpleBufferQueue simple_buf_queue = {
- SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
- static_cast<SLuint32>(kNumOpenSlBuffers)
- };
- SLDataFormat_PCM configuration =
- webrtc_opensl::CreatePcmConfiguration(speaker_sampling_rate_);
- SLDataSource audio_source = { &simple_buf_queue, &configuration };
-
- SLDataLocator_OutputMix locator_outputmix;
- // Setup the data sink structure.
- locator_outputmix.locatorType = SL_DATALOCATOR_OUTPUTMIX;
- locator_outputmix.outputMix = sles_output_mixer_;
- SLDataSink audio_sink = { &locator_outputmix, NULL };
-
-  // Interfaces for streaming audio data, setting volume and Android
-  // configuration are needed. Note the interfaces still need to be
-  // initialized. This only tells OpenSL that the interfaces will be needed.
- SLInterfaceID ids[kNumInterfaces] = {
- SL_IID_BUFFERQUEUE, SL_IID_VOLUME, SL_IID_ANDROIDCONFIGURATION };
- SLboolean req[kNumInterfaces] = {
- SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE };
- OPENSL_RETURN_ON_FAILURE(
- (*sles_engine_itf_)->CreateAudioPlayer(sles_engine_itf_, &sles_player_,
- &audio_source, &audio_sink,
- kNumInterfaces, ids, req),
- false);
-
- SLAndroidConfigurationItf player_config;
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_)->GetInterface(sles_player_,
- SL_IID_ANDROIDCONFIGURATION,
- &player_config),
- false);
-
- // Set audio player configuration to SL_ANDROID_STREAM_VOICE which corresponds
- // to android.media.AudioManager.STREAM_VOICE_CALL.
- SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
- OPENSL_RETURN_ON_FAILURE(
- (*player_config)->SetConfiguration(player_config,
- SL_ANDROID_KEY_STREAM_TYPE,
- &stream_type,
- sizeof(SLint32)),
- false);
-
- // Realize the player in synchronous mode.
- OPENSL_RETURN_ON_FAILURE((*sles_player_)->Realize(sles_player_,
- SL_BOOLEAN_FALSE),
- false);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_)->GetInterface(sles_player_, SL_IID_PLAY,
- &sles_player_itf_),
- false);
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_)->GetInterface(sles_player_, SL_IID_BUFFERQUEUE,
- &sles_player_sbq_itf_),
- false);
- return true;
-}
-
-void OpenSlesOutput::DestroyAudioPlayer() {
- SLAndroidSimpleBufferQueueItf sles_player_sbq_itf = sles_player_sbq_itf_;
- {
- CriticalSectionScoped lock(crit_sect_.get());
- sles_player_sbq_itf_ = NULL;
- sles_player_itf_ = NULL;
- }
- event_.Stop();
- if (sles_player_sbq_itf) {
- // Release all buffers currently queued up.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_sbq_itf)->Clear(sles_player_sbq_itf),
- VOID_RETURN);
- }
-
- if (sles_player_) {
- (*sles_player_)->Destroy(sles_player_);
- sles_player_ = NULL;
- }
-}
-
-bool OpenSlesOutput::HandleUnderrun(int event_id, int event_msg) {
- if (!playing_) {
- return false;
- }
- if (event_id == kNoUnderrun) {
- return false;
- }
- assert(event_id == kUnderrun);
- assert(event_msg > 0);
- // Wait for all enqueued buffers to be flushed.
- if (event_msg != kNumOpenSlBuffers) {
- return true;
- }
- // All buffers have been flushed. Restart the audio from scratch.
- // No need to check sles_player_itf_ as playing_ would be false before it is
- // set to NULL.
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_itf_)->SetPlayState(sles_player_itf_,
- SL_PLAYSTATE_STOPPED),
- true);
- EnqueueAllBuffers();
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_itf_)->SetPlayState(sles_player_itf_,
- SL_PLAYSTATE_PLAYING),
- true);
- return true;
-}
-
-void OpenSlesOutput::PlayerSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf sles_player_sbq_itf,
- void* p_context) {
- OpenSlesOutput* audio_device = reinterpret_cast<OpenSlesOutput*>(p_context);
- audio_device->PlayerSimpleBufferQueueCallbackHandler(sles_player_sbq_itf);
-}
-
-void OpenSlesOutput::PlayerSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf sles_player_sbq_itf) {
- if (fifo_->size() <= 0 || number_underruns_ > 0) {
- ++number_underruns_;
- event_.SignalEvent(kUnderrun, number_underruns_);
- return;
- }
- int8_t* audio = fifo_->Pop();
- if (audio)
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_sbq_itf)->Enqueue(sles_player_sbq_itf,
- audio,
- buffer_size_bytes_),
- VOID_RETURN);
- event_.SignalEvent(kNoUnderrun, 0);
-}
-
-bool OpenSlesOutput::StartCbThreads() {
- play_thread_ = ThreadWrapper::CreateThread(CbThread, this,
- "opensl_play_thread");
- assert(play_thread_.get());
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_itf_)->SetPlayState(sles_player_itf_,
- SL_PLAYSTATE_PLAYING),
- false);
-
- if (!play_thread_->Start()) {
- assert(false);
- return false;
- }
- play_thread_->SetPriority(kRealtimePriority);
- return true;
-}
-
-void OpenSlesOutput::StopCbThreads() {
- {
- CriticalSectionScoped lock(crit_sect_.get());
- playing_ = false;
- }
- if (sles_player_itf_) {
- OPENSL_RETURN_ON_FAILURE(
- (*sles_player_itf_)->SetPlayState(sles_player_itf_,
- SL_PLAYSTATE_STOPPED),
- VOID_RETURN);
- }
- if (play_thread_.get() == NULL) {
- return;
- }
- event_.Stop();
- if (play_thread_->Stop()) {
- play_thread_.reset();
- } else {
- assert(false);
- }
-}
-
-bool OpenSlesOutput::CbThread(void* context) {
- return reinterpret_cast<OpenSlesOutput*>(context)->CbThreadImpl();
-}
-
-bool OpenSlesOutput::CbThreadImpl() {
- assert(fine_buffer_.get() != NULL);
- int event_id;
- int event_msg;
- // event_ must not be waited on while a lock has been taken.
- event_.WaitOnEvent(&event_id, &event_msg);
-
- CriticalSectionScoped lock(crit_sect_.get());
- if (HandleUnderrun(event_id, event_msg)) {
- return playing_;
- }
-  // If fifo_ is not full it means the next item in memory must be free.
- while (fifo_->size() < num_fifo_buffers_needed_ && playing_) {
- int8_t* audio = play_buf_[active_queue_].get();
- fine_buffer_->GetBufferData(audio);
- fifo_->Push(audio);
- active_queue_ = (active_queue_ + 1) % TotalBuffersUsed();
- }
- return playing_;
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/opensles_output.h b/webrtc/modules/audio_device/android/opensles_output.h
deleted file mode 100644
index 8f63ecf..0000000
--- a/webrtc/modules/audio_device/android/opensles_output.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
-
-#include <SLES/OpenSLES.h>
-#include <SLES/OpenSLES_Android.h>
-#include <SLES/OpenSLES_AndroidConfiguration.h>
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/modules/audio_device/android/audio_manager.h"
-#include "webrtc/modules/audio_device/android/audio_manager_jni.h"
-#include "webrtc/modules/audio_device/android/low_latency_event.h"
-#include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/audio_device/include/audio_device_defines.h"
-#include "webrtc/modules/audio_device/include/audio_device.h"
-
-namespace webrtc {
-
-class AudioDeviceBuffer;
-class CriticalSectionWrapper;
-class FineAudioBuffer;
-class SingleRwFifo;
-class ThreadWrapper;
-
-// OpenSL implementation that facilitates playing PCM data to an Android
-// device. This class is Thread-compatible, i.e. given an instance of this
-// class, calls to non-const methods require exclusive access to the object.
-class OpenSlesOutput : public PlayoutDelayProvider {
- public:
- // TODO(henrika): use this new audio manager instead of old.
- explicit OpenSlesOutput(AudioManager* audio_manager);
- virtual ~OpenSlesOutput();
-
- static int32_t SetAndroidAudioDeviceObjects(void* javaVM,
- void* context);
- static void ClearAndroidAudioDeviceObjects();
-
-  // Main initialization and termination
- int32_t Init();
- int32_t Terminate();
- bool Initialized() const { return initialized_; }
-
- // Device enumeration
- int16_t PlayoutDevices() { return 1; }
-
- int32_t PlayoutDeviceName(uint16_t index,
- char name[kAdmMaxDeviceNameSize],
- char guid[kAdmMaxGuidSize]);
-
- // Device selection
- int32_t SetPlayoutDevice(uint16_t index);
- int32_t SetPlayoutDevice(
- AudioDeviceModule::WindowsDeviceType device) { return 0; }
-
- // No-op
- int32_t SetPlayoutSampleRate(uint32_t sample_rate_hz) { return 0; }
-
- // Audio transport initialization
- int32_t PlayoutIsAvailable(bool& available); // NOLINT
- int32_t InitPlayout();
- bool PlayoutIsInitialized() const { return play_initialized_; }
-
- // Audio transport control
- int32_t StartPlayout();
- int32_t StopPlayout();
- bool Playing() const { return playing_; }
-
- // Audio mixer initialization
- int32_t InitSpeaker();
- bool SpeakerIsInitialized() const { return speaker_initialized_; }
-
- // Speaker volume controls
- int32_t SpeakerVolumeIsAvailable(bool& available); // NOLINT
- int32_t SetSpeakerVolume(uint32_t volume);
- int32_t SpeakerVolume(uint32_t& volume) const { return 0; } // NOLINT
- int32_t MaxSpeakerVolume(uint32_t& maxVolume) const; // NOLINT
- int32_t MinSpeakerVolume(uint32_t& minVolume) const; // NOLINT
- int32_t SpeakerVolumeStepSize(uint16_t& stepSize) const; // NOLINT
-
- // Speaker mute control
- int32_t SpeakerMuteIsAvailable(bool& available); // NOLINT
- int32_t SetSpeakerMute(bool enable) { return -1; }
- int32_t SpeakerMute(bool& enabled) const { return -1; } // NOLINT
-
-
- // Stereo support
- int32_t StereoPlayoutIsAvailable(bool& available); // NOLINT
- int32_t SetStereoPlayout(bool enable);
- int32_t StereoPlayout(bool& enabled) const; // NOLINT
-
- // Delay information and control
- int32_t SetPlayoutBuffer(const AudioDeviceModule::BufferType type,
- uint16_t sizeMS) { return -1; }
- int32_t PlayoutBuffer(AudioDeviceModule::BufferType& type, // NOLINT
- uint16_t& sizeMS) const;
- int32_t PlayoutDelay(uint16_t& delayMS) const; // NOLINT
-
-
- // Error and warning information
- bool PlayoutWarning() const { return false; }
- bool PlayoutError() const { return false; }
- void ClearPlayoutWarning() {}
- void ClearPlayoutError() {}
-
- // Attach audio buffer
- void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
-
- // Speaker audio routing
- int32_t SetLoudspeakerStatus(bool enable);
- int32_t GetLoudspeakerStatus(bool& enable) const; // NOLINT
-
- protected:
- virtual int PlayoutDelayMs();
-
- private:
- enum {
- kNumInterfaces = 3,
-    // TODO(xians): Reduce the number of buffers to improve the latency.
- // Currently 30ms worth of buffers are needed due to audio
- // pipeline processing jitter. Note: kNumOpenSlBuffers must
- // not be changed.
-    // According to the OpenSL ES documentation in the NDK:
- // The lower output latency path is used only if the application requests a
- // buffer count of 2 or more. Use minimum number of buffers to keep delay
- // as low as possible.
- kNumOpenSlBuffers = 2,
- // NetEq delivers frames on a 10ms basis. This means that every 10ms there
- // will be a time consuming task. Keeping 10ms worth of buffers will ensure
- // that there is 10ms to perform the time consuming task without running
- // into underflow.
- // In addition to the 10ms that needs to be stored for NetEq processing
-    // there will be jitter in the audio pipeline due to the acquisition of locks.
- // Note: The buffers in the OpenSL queue do not count towards the 10ms of
- // frames needed since OpenSL needs to have them ready for playout.
- kNum10MsToBuffer = 6,
- };
-
- bool InitSampleRate();
- bool SetLowLatency();
- void UpdatePlayoutDelay();
- // It might be possible to dynamically add or remove buffers based on how
- // close to depletion the fifo is. Few buffers means low delay. Too few
- // buffers will cause underrun. Dynamically changing the number of buffer
- // will greatly increase code complexity.
- void CalculateNumFifoBuffersNeeded();
- void AllocateBuffers();
- int TotalBuffersUsed() const;
- bool EnqueueAllBuffers();
- // This function also configures the audio player, e.g. sample rate to use
- // etc, so it should be called when starting playout.
- bool CreateAudioPlayer();
- void DestroyAudioPlayer();
-
- // When underrun happens there won't be a new frame ready for playout that
- // can be retrieved yet. Since the OpenSL thread must return ASAP there will
- // be one less queue available to OpenSL. This function handles this case
- // gracefully by restarting the audio, pushing silent frames to OpenSL for
- // playout. This will sound like a click. Underruns are also logged to
- // make it possible to identify these types of audio artifacts.
- // This function returns true if there has been underrun. Further processing
- // of audio data should be avoided until this function returns false again.
- // The function needs to be protected by |crit_sect_|.
- bool HandleUnderrun(int event_id, int event_msg);
-
- static void PlayerSimpleBufferQueueCallback(
- SLAndroidSimpleBufferQueueItf queueItf,
- void* pContext);
- // This function must not take any locks or do any heavy work. It is a
- // requirement for the OpenSL implementation to work as intended. The reason
- // for this is that taking locks exposes the OpenSL thread to the risk of
- // priority inversion.
- void PlayerSimpleBufferQueueCallbackHandler(
- SLAndroidSimpleBufferQueueItf queueItf);
-
- bool StartCbThreads();
- void StopCbThreads();
- static bool CbThread(void* context);
-  // This function must be protected against data races with threads calling
-  // this class's public functions. It is a requirement for this class to be
- // Thread-compatible.
- bool CbThreadImpl();
-
- // Java API handle
- AudioManagerJni audio_manager_;
-
- bool initialized_;
- bool speaker_initialized_;
- bool play_initialized_;
-
- // Members that are read/write accessed concurrently by the process thread and
- // threads calling public functions of this class.
- rtc::scoped_ptr<ThreadWrapper> play_thread_; // Processing thread
- rtc::scoped_ptr<CriticalSectionWrapper> crit_sect_;
-  // This member controls the starting and stopping of playing audio to
-  // the device.
- bool playing_;
-
- // Only one thread, T1, may push and only one thread, T2, may pull. T1 may or
- // may not be the same thread as T2. T1 is the process thread and T2 is the
- // OpenSL thread.
- rtc::scoped_ptr<SingleRwFifo> fifo_;
- int num_fifo_buffers_needed_;
- LowLatencyEvent event_;
- int number_underruns_;
-
- // OpenSL handles
- SLObjectItf sles_engine_;
- SLEngineItf sles_engine_itf_;
- SLObjectItf sles_player_;
- SLPlayItf sles_player_itf_;
- SLAndroidSimpleBufferQueueItf sles_player_sbq_itf_;
- SLObjectItf sles_output_mixer_;
-
- // Audio buffers
- AudioDeviceBuffer* audio_buffer_;
- rtc::scoped_ptr<FineAudioBuffer> fine_buffer_;
- rtc::scoped_ptr<rtc::scoped_ptr<int8_t[]>[]> play_buf_;
-  // Index in |play_buf_| pointing to the audio buffer that will be ready the
- // next time PlayerSimpleBufferQueueCallbackHandler is invoked.
- // Ready means buffer is ready to be played out to device.
- int active_queue_;
-
- // Audio settings
- uint32_t speaker_sampling_rate_;
- int buffer_size_samples_;
- int buffer_size_bytes_;
-
- // Audio status
- uint16_t playout_delay_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_OUTPUT_H_
diff --git a/webrtc/modules/audio_device/android/opensles_player.cc b/webrtc/modules/audio_device/android/opensles_player.cc
new file mode 100644
index 0000000..0789ebf
--- /dev/null
+++ b/webrtc/modules/audio_device/android/opensles_player.cc
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "webrtc/modules/audio_device/android/opensles_player.h"
+
+#include <android/log.h>
+
+#include "webrtc/base/arraysize.h"
+#include "webrtc/base/checks.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/android/fine_audio_buffer.h"
+
+#define TAG "OpenSLESPlayer"
+#define ALOGV(...) __android_log_print(ANDROID_LOG_VERBOSE, TAG, __VA_ARGS__)
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+#define ALOGW(...) __android_log_print(ANDROID_LOG_WARN, TAG, __VA_ARGS__)
+#define ALOGI(...) __android_log_print(ANDROID_LOG_INFO, TAG, __VA_ARGS__)
+
+#define RETURN_ON_ERROR(op, ...) \
+ do { \
+ SLresult err = (op); \
+ if (err != SL_RESULT_SUCCESS) { \
+ ALOGE("%s failed: %d", #op, err); \
+ return __VA_ARGS__; \
+ } \
+ } while (0)
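+
+// Note: the do/while (0) wrapper makes RETURN_ON_ERROR() expand to a single
+// statement, so the macro composes safely with if/else without extra braces.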
+
+namespace webrtc {
+
+OpenSLESPlayer::OpenSLESPlayer(AudioManager* audio_manager)
+ : audio_parameters_(audio_manager->GetPlayoutAudioParameters()),
+ audio_device_buffer_(NULL),
+ initialized_(false),
+ playing_(false),
+ bytes_per_buffer_(0),
+ buffer_index_(0),
+ engine_(nullptr),
+ player_(nullptr),
+ simple_buffer_queue_(nullptr),
+ volume_(nullptr) {
+ ALOGD("ctor%s", GetThreadInfo().c_str());
+ // Use native audio output parameters provided by the audio manager and
+ // define the PCM format structure.
+ pcm_format_ = CreatePCMConfiguration(audio_parameters_.channels(),
+ audio_parameters_.sample_rate(),
+ audio_parameters_.bits_per_sample());
+ // Detach from this thread since we want to use the checker to verify calls
+ // from the internal audio thread.
+ thread_checker_opensles_.DetachFromThread();
+}
+
+OpenSLESPlayer::~OpenSLESPlayer() {
+ ALOGD("dtor%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ Terminate();
+ DestroyAudioPlayer();
+ DestroyMix();
+ DestroyEngine();
+ DCHECK(!engine_object_.Get());
+ DCHECK(!engine_);
+ DCHECK(!output_mix_.Get());
+ DCHECK(!player_);
+ DCHECK(!simple_buffer_queue_);
+ DCHECK(!volume_);
+}
+
+int OpenSLESPlayer::Init() {
+ ALOGD("Init%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return 0;
+}
+
+int OpenSLESPlayer::Terminate() {
+ ALOGD("Terminate%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ StopPlayout();
+ return 0;
+}
+
+int OpenSLESPlayer::InitPlayout() {
+ ALOGD("InitPlayout%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!initialized_);
+ DCHECK(!playing_);
+ CreateEngine();
+ CreateMix();
+ initialized_ = true;
+ buffer_index_ = 0;
+ return 0;
+}
+
+int OpenSLESPlayer::StartPlayout() {
+ ALOGD("StartPlayout%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(initialized_);
+ DCHECK(!playing_);
+ // The number of lower latency audio players is limited, hence we create the
+  // audio player in StartPlayout() and destroy it in StopPlayout().
+ CreateAudioPlayer();
+  // Fill up audio buffers to avoid an initial glitch and to ensure that
+  // playback
+ // starts when mode is later changed to SL_PLAYSTATE_PLAYING.
+ // TODO(henrika): we can save some delay by only making one call to
+ // EnqueuePlayoutData. Most likely not worth the risk of adding a glitch.
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ EnqueuePlayoutData();
+ }
+ // Start streaming data by setting the play state to SL_PLAYSTATE_PLAYING.
+ // For a player object, when the object is in the SL_PLAYSTATE_PLAYING
+ // state, adding buffers will implicitly start playback.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_PLAYING), -1);
+ playing_ = (GetPlayState() == SL_PLAYSTATE_PLAYING);
+ DCHECK(playing_);
+ return 0;
+}
+
+int OpenSLESPlayer::StopPlayout() {
+ ALOGD("StopPlayout%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!initialized_ || !playing_) {
+ return 0;
+ }
+ // Stop playing by setting the play state to SL_PLAYSTATE_STOPPED.
+ RETURN_ON_ERROR((*player_)->SetPlayState(player_, SL_PLAYSTATE_STOPPED), -1);
+ // Clear the buffer queue to flush out any remaining data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)->Clear(simple_buffer_queue_), -1);
+#ifndef NDEBUG
+  // Verify that the buffer queue is in fact cleared as it should be.
+ SLAndroidSimpleBufferQueueState buffer_queue_state;
+ (*simple_buffer_queue_)->GetState(simple_buffer_queue_, &buffer_queue_state);
+ DCHECK_EQ(0u, buffer_queue_state.count);
+ DCHECK_EQ(0u, buffer_queue_state.index);
+#endif
+ // The number of lower latency audio players is limited, hence we create the
+  // audio player in StartPlayout() and destroy it in StopPlayout().
+ DestroyAudioPlayer();
+ thread_checker_opensles_.DetachFromThread();
+ initialized_ = false;
+ playing_ = false;
+ return 0;
+}
+
+int OpenSLESPlayer::SpeakerVolumeIsAvailable(bool& available) {
+ available = false;
+ return 0;
+}
+
+int OpenSLESPlayer::MaxSpeakerVolume(uint32_t& maxVolume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::MinSpeakerVolume(uint32_t& minVolume) const {
+ return -1;
+}
+
+int OpenSLESPlayer::SetSpeakerVolume(uint32_t volume) {
+ return -1;
+}
+
+int OpenSLESPlayer::SpeakerVolume(uint32_t& volume) const {
+ return -1;
+}
+
+void OpenSLESPlayer::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
+ ALOGD("AttachAudioBuffer");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ audio_device_buffer_ = audioBuffer;
+ const int sample_rate_hz = audio_parameters_.sample_rate();
+ ALOGD("SetPlayoutSampleRate(%d)", sample_rate_hz);
+ audio_device_buffer_->SetPlayoutSampleRate(sample_rate_hz);
+ const int channels = audio_parameters_.channels();
+ ALOGD("SetPlayoutChannels(%d)", channels);
+ audio_device_buffer_->SetPlayoutChannels(channels);
+ CHECK(audio_device_buffer_);
+ AllocateDataBuffers();
+}
+
+SLDataFormat_PCM OpenSLESPlayer::CreatePCMConfiguration(int channels,
+ int sample_rate,
+ int bits_per_sample) {
+ ALOGD("CreatePCMConfiguration");
+ CHECK_EQ(bits_per_sample, SL_PCMSAMPLEFORMAT_FIXED_16);
+ SLDataFormat_PCM format;
+ format.formatType = SL_DATAFORMAT_PCM;
+ format.numChannels = static_cast<SLuint32>(channels);
+  // Note that the unit of the sample rate is actually milliHertz, not Hertz.
+ switch (sample_rate) {
+ case 8000:
+ format.samplesPerSec = SL_SAMPLINGRATE_8;
+ break;
+ case 16000:
+ format.samplesPerSec = SL_SAMPLINGRATE_16;
+ break;
+ case 22050:
+ format.samplesPerSec = SL_SAMPLINGRATE_22_05;
+ break;
+ case 32000:
+ format.samplesPerSec = SL_SAMPLINGRATE_32;
+ break;
+ case 44100:
+ format.samplesPerSec = SL_SAMPLINGRATE_44_1;
+ break;
+ case 48000:
+ format.samplesPerSec = SL_SAMPLINGRATE_48;
+ break;
+ default:
+ CHECK(false) << "Unsupported sample rate: " << sample_rate;
+ }
+ format.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.containerSize = SL_PCMSAMPLEFORMAT_FIXED_16;
+ format.endianness = SL_BYTEORDER_LITTLEENDIAN;
+ if (format.numChannels == 1)
+ format.channelMask = SL_SPEAKER_FRONT_CENTER;
+ else if (format.numChannels == 2)
+ format.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
+ else
+ CHECK(false) << "Unsupported number of channels: " << format.numChannels;
+ return format;
+}
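+
+// Illustrative outcome (just an example, no new code): calling
+// CreatePCMConfiguration(1, 48000, 16) yields numChannels == 1,
+// samplesPerSec == SL_SAMPLINGRATE_48 (48000000, i.e. milliHertz),
+// 16-bit samples in a 16-bit container, little-endian byte order and
+// channelMask == SL_SPEAKER_FRONT_CENTER.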
+
+void OpenSLESPlayer::AllocateDataBuffers() {
+ ALOGD("AllocateDataBuffers");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(!simple_buffer_queue_);
+ CHECK(audio_device_buffer_);
+ bytes_per_buffer_ = audio_parameters_.GetBytesPerBuffer();
+ ALOGD("native buffer size: %d", bytes_per_buffer_);
+  // Create a modified audio buffer class which allows us to ask for any number
+  // of samples (and not only multiples of 10ms) to match the native OpenSL ES
+ // buffer size.
+ fine_buffer_.reset(new FineAudioBuffer(audio_device_buffer_,
+ bytes_per_buffer_,
+ audio_parameters_.sample_rate()));
+ // Each buffer must be of this size to avoid unnecessary memcpy while caching
+ // data between successive callbacks.
+ const int required_buffer_size = fine_buffer_->RequiredBufferSizeBytes();
+ ALOGD("required buffer size: %d", required_buffer_size);
+ for (int i = 0; i < kNumOfOpenSLESBuffers; ++i) {
+ audio_buffers_[i].reset(new SLint8[required_buffer_size]);
+ }
+}
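+
+// Worked example (assuming a typical low-latency device, see the header
+// comments): with a native buffer size of 240 mono frames at 48 kHz,
+// bytes_per_buffer_ = 240 frames * 1 channel * 2 bytes = 480 bytes, and
+// kNumOfOpenSLESBuffers (2) buffers of RequiredBufferSizeBytes() each are
+// allocated for round-robin use by EnqueuePlayoutData().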
+
+bool OpenSLESPlayer::CreateEngine() {
+ ALOGD("CreateEngine");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (engine_object_.Get())
+ return true;
+ DCHECK(!engine_);
+ const SLEngineOption option[] = {
+ {SL_ENGINEOPTION_THREADSAFE, static_cast<SLuint32>(SL_BOOLEAN_TRUE)}};
+ RETURN_ON_ERROR(
+ slCreateEngine(engine_object_.Receive(), 1, option, 0, NULL, NULL),
+ false);
+ RETURN_ON_ERROR(
+ engine_object_->Realize(engine_object_.Get(), SL_BOOLEAN_FALSE), false);
+ RETURN_ON_ERROR(engine_object_->GetInterface(engine_object_.Get(),
+ SL_IID_ENGINE, &engine_),
+ false);
+ return true;
+}
+
+void OpenSLESPlayer::DestroyEngine() {
+ ALOGD("DestroyEngine");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!engine_object_.Get())
+ return;
+ engine_ = nullptr;
+ engine_object_.Reset();
+}
+
+bool OpenSLESPlayer::CreateMix() {
+ ALOGD("CreateMix");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(engine_);
+ if (output_mix_.Get())
+ return true;
+
+  // Create the output mix on the engine object. No interfaces will be used.
+ RETURN_ON_ERROR((*engine_)->CreateOutputMix(engine_, output_mix_.Receive(), 0,
+ NULL, NULL),
+ false);
+ RETURN_ON_ERROR(output_mix_->Realize(output_mix_.Get(), SL_BOOLEAN_FALSE),
+ false);
+ return true;
+}
+
+void OpenSLESPlayer::DestroyMix() {
+ ALOGD("DestroyMix");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!output_mix_.Get())
+ return;
+ output_mix_.Reset();
+}
+
+bool OpenSLESPlayer::CreateAudioPlayer() {
+ ALOGD("CreateAudioPlayer");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(engine_object_.Get());
+ DCHECK(output_mix_.Get());
+ if (player_object_.Get())
+ return true;
+ DCHECK(!player_);
+ DCHECK(!simple_buffer_queue_);
+ DCHECK(!volume_);
+
+  // Source: the audio originates from an Android simple buffer queue.
+ SLDataLocator_AndroidSimpleBufferQueue simple_buffer_queue = {
+ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE,
+ static_cast<SLuint32>(kNumOfOpenSLESBuffers)};
+ SLDataSource audio_source = {&simple_buffer_queue, &pcm_format_};
+
+  // Sink: the audio is rendered to an output mix.
+ SLDataLocator_OutputMix locator_output_mix = {SL_DATALOCATOR_OUTPUTMIX,
+ output_mix_.Get()};
+ SLDataSink audio_sink = {&locator_output_mix, NULL};
+
+  // Define interfaces that we intend to use and realize.
+ const SLInterfaceID interface_ids[] = {
+ SL_IID_ANDROIDCONFIGURATION, SL_IID_BUFFERQUEUE, SL_IID_VOLUME};
+ const SLboolean interface_required[] = {
+ SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};
+
+ // Create the audio player on the engine interface.
+ RETURN_ON_ERROR(
+ (*engine_)->CreateAudioPlayer(
+ engine_, player_object_.Receive(), &audio_source, &audio_sink,
+ arraysize(interface_ids), interface_ids, interface_required),
+ false);
+
+  // Use the Android configuration interface to set platform-specific
+  // parameters. This should be done before the player is realized.
+ SLAndroidConfigurationItf player_config;
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(),
+ SL_IID_ANDROIDCONFIGURATION, &player_config),
+ false);
+ // Set audio player configuration to SL_ANDROID_STREAM_VOICE which
+ // corresponds to android.media.AudioManager.STREAM_VOICE_CALL.
+ SLint32 stream_type = SL_ANDROID_STREAM_VOICE;
+ RETURN_ON_ERROR(
+ (*player_config)
+ ->SetConfiguration(player_config, SL_ANDROID_KEY_STREAM_TYPE,
+ &stream_type, sizeof(SLint32)),
+ false);
+
+ // Realize the audio player object after configuration has been set.
+ RETURN_ON_ERROR(
+ player_object_->Realize(player_object_.Get(), SL_BOOLEAN_FALSE), false);
+
+ // Get the SLPlayItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_PLAY, &player_),
+ false);
+
+ // Get the SLAndroidSimpleBufferQueueItf interface on the audio player.
+ RETURN_ON_ERROR(
+ player_object_->GetInterface(player_object_.Get(), SL_IID_BUFFERQUEUE,
+ &simple_buffer_queue_),
+ false);
+
+ // Register callback method for the Android Simple Buffer Queue interface.
+ // This method will be called when the native audio layer needs audio data.
+ RETURN_ON_ERROR((*simple_buffer_queue_)
+ ->RegisterCallback(simple_buffer_queue_,
+ SimpleBufferQueueCallback, this),
+ false);
+
+ // Get the SLVolumeItf interface on the audio player.
+ RETURN_ON_ERROR(player_object_->GetInterface(player_object_.Get(),
+ SL_IID_VOLUME, &volume_),
+ false);
+
+ // TODO(henrika): might not be required to set volume to max here since it
+ // seems to be default on most devices. Might be required for unit tests.
+ // RETURN_ON_ERROR((*volume_)->SetVolumeLevel(volume_, 0), false);
+
+ return true;
+}
+
+void OpenSLESPlayer::DestroyAudioPlayer() {
+ ALOGD("DestroyAudioPlayer");
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (!player_object_.Get())
+ return;
+ player_object_.Reset();
+ player_ = nullptr;
+ simple_buffer_queue_ = nullptr;
+ volume_ = nullptr;
+}
+
+// static
+void OpenSLESPlayer::SimpleBufferQueueCallback(
+ SLAndroidSimpleBufferQueueItf caller,
+ void* context) {
+ OpenSLESPlayer* stream = reinterpret_cast<OpenSLESPlayer*>(context);
+ stream->FillBufferQueue();
+}
+
+void OpenSLESPlayer::FillBufferQueue() {
+ DCHECK(thread_checker_opensles_.CalledOnValidThread());
+ SLuint32 state = GetPlayState();
+ if (state != SL_PLAYSTATE_PLAYING) {
+ ALOGW("Buffer callback in non-playing state!");
+ return;
+ }
+ EnqueuePlayoutData();
+}
+
+void OpenSLESPlayer::EnqueuePlayoutData() {
+ // Read audio data from the WebRTC source using the FineAudioBuffer object
+ // to adjust for differences in buffer size between WebRTC (10ms) and native
+ // OpenSL ES.
+ SLint8* audio_ptr = audio_buffers_[buffer_index_].get();
+ fine_buffer_->GetBufferData(audio_ptr);
+ // Enqueue the decoded audio buffer for playback.
+ SLresult err =
+ (*simple_buffer_queue_)
+ ->Enqueue(simple_buffer_queue_, audio_ptr, bytes_per_buffer_);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("Enqueue failed: %d", err);
+ }
+ buffer_index_ = (buffer_index_ + 1) % kNumOfOpenSLESBuffers;
+}
+
+SLuint32 OpenSLESPlayer::GetPlayState() const {
+ DCHECK(player_);
+ SLuint32 state;
+ SLresult err = (*player_)->GetPlayState(player_, &state);
+ if (SL_RESULT_SUCCESS != err) {
+ ALOGE("GetPlayState failed: %d", err);
+ }
+ return state;
+}
+
+} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/opensles_player.h b/webrtc/modules/audio_device/android/opensles_player.h
new file mode 100644
index 0000000..2217fa0
--- /dev/null
+++ b/webrtc/modules/audio_device/android/opensles_player.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
+
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#include <SLES/OpenSLES_AndroidConfiguration.h>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/audio_device/android/audio_common.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/android/opensles_common.h"
+#include "webrtc/modules/audio_device/include/audio_device_defines.h"
+#include "webrtc/modules/audio_device/audio_device_generic.h"
+#include "webrtc/modules/utility/interface/helpers_android.h"
+
+namespace webrtc {
+
+class FineAudioBuffer;
+
+// Implements 16-bit mono PCM audio output support for Android using the
+// C-based OpenSL ES API. No calls are made from C/C++ to Java using JNI.
+//
+// An instance must be created and destroyed on one and the same thread.
+// All public methods must also be called on the same thread. A thread checker
+// will DCHECK if any method is called on an invalid thread. Decoded audio
+// buffers are requested on a dedicated internal thread managed by the OpenSL
+// ES layer.
+//
+// The existing design forces the user to call InitPlayout() after StopPlayout()
+// to be able to call StartPlayout() again. This is in line with how the Java-
+// based implementation works.
+//
+// OpenSL ES is a native C API which has no Dalvik-related overhead such as
+// garbage collection pauses, and it supports reduced audio output latency.
+// If the device doesn't claim this feature but supports API level 9 (Android
+// platform version 2.3) or later, then we can still use the OpenSL ES APIs but
+// the output latency may be higher.
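+//
+// Minimal usage sketch (the pointers are illustrative; in practice the ADM
+// owns the AudioManager and attaches the AudioDeviceBuffer):
+//
+//   OpenSLESPlayer player(audio_manager);
+//   player.Init();
+//   player.AttachAudioBuffer(audio_device_buffer);
+//   player.InitPlayout();
+//   player.StartPlayout();
+//   ...
+//   player.StopPlayout();
+//   player.InitPlayout();   // Must be called again before the next
+//   player.StartPlayout();  // StartPlayout(); see the note above.
+//   player.Terminate();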
+class OpenSLESPlayer {
+ public:
+ // The lower output latency path is used only if the application requests a
+ // buffer count of 2 or more, and a buffer size and sample rate that are
+ // compatible with the device's native output configuration provided via the
+ // audio manager at construction.
+ static const int kNumOfOpenSLESBuffers = 2;
+
+ // There is no need for this class to use JNI.
+ static int32_t SetAndroidAudioDeviceObjects(void* javaVM, void* context) {
+ return 0;
+ }
+ static void ClearAndroidAudioDeviceObjects() {}
+
+ explicit OpenSLESPlayer(AudioManager* audio_manager);
+ ~OpenSLESPlayer();
+
+ int Init();
+ int Terminate();
+
+ int InitPlayout();
+ bool PlayoutIsInitialized() const { return initialized_; }
+
+ int StartPlayout();
+ int StopPlayout();
+ bool Playing() const { return playing_; }
+
+ int SpeakerVolumeIsAvailable(bool& available);
+ int SetSpeakerVolume(uint32_t volume);
+ int SpeakerVolume(uint32_t& volume) const;
+ int MaxSpeakerVolume(uint32_t& maxVolume) const;
+ int MinSpeakerVolume(uint32_t& minVolume) const;
+
+ void AttachAudioBuffer(AudioDeviceBuffer* audioBuffer);
+
+ private:
+ // These callback methods are called when data is required for playout.
+ // They are both called from an internal "OpenSL ES thread" which is not
+ // attached to the Dalvik VM.
+ static void SimpleBufferQueueCallback(SLAndroidSimpleBufferQueueItf caller,
+ void* context);
+ void FillBufferQueue();
+ // Reads audio data in PCM format using the AudioDeviceBuffer.
+  // Can be called both on the main thread (during StartPlayout()) and from the
+ // internal audio thread while output streaming is active.
+ void EnqueuePlayoutData();
+
+ // Configures the SL_DATAFORMAT_PCM structure.
+ SLDataFormat_PCM CreatePCMConfiguration(int channels,
+ int sample_rate,
+ int bits_per_sample);
+
+  // Allocates memory for audio buffers which will be used to render audio
+ // via the SLAndroidSimpleBufferQueueItf interface.
+ void AllocateDataBuffers();
+
+ // Creates/destroys the main engine object and the SLEngineItf interface.
+ bool CreateEngine();
+ void DestroyEngine();
+
+ // Creates/destroys the output mix object.
+ bool CreateMix();
+ void DestroyMix();
+
+  // Creates/destroys the audio player and the simple buffer queue object.
+ // Also creates the volume object.
+ bool CreateAudioPlayer();
+ void DestroyAudioPlayer();
+
+ SLuint32 GetPlayState() const;
+
+ // Ensures that methods are called from the same thread as this object is
+ // created on.
+ rtc::ThreadChecker thread_checker_;
+
+  // Stores the thread ID in the first call to SimpleBufferQueueCallback() from
+  // the internal non-application thread, which is not attached to the Dalvik
+  // JVM. Detached during construction of this object.
+ rtc::ThreadChecker thread_checker_opensles_;
+
+ // Contains audio parameters provided to this class at construction by the
+ // AudioManager.
+ const AudioParameters audio_parameters_;
+
+  // Raw pointer handle provided to us in AttachAudioBuffer(). Owned by the
+  // AudioDeviceModuleImpl class and attached when the ADM is created.
+ AudioDeviceBuffer* audio_device_buffer_;
+
+ bool initialized_;
+ bool playing_;
+
+ // PCM-type format definition.
+ // TODO(henrika): add support for SLAndroidDataFormat_PCM_EX (android-21) if
+ // 32-bit float representation is needed.
+ SLDataFormat_PCM pcm_format_;
+
+ // Number of bytes per audio buffer in each |audio_buffers_[i]|.
+ // Typical sizes are 480 or 512 bytes corresponding to native output buffer
+ // sizes of 240 or 256 audio frames respectively.
+ int bytes_per_buffer_;
+
+  // Queue of audio buffers to be used by the player object for rendering
+  // audio. They are used in a round-robin fashion and the size of each buffer
+  // is given by FineAudioBuffer::RequiredBufferSizeBytes().
+ rtc::scoped_ptr<SLint8[]> audio_buffers_[kNumOfOpenSLESBuffers];
+
+  // FineAudioBuffer takes an AudioDeviceBuffer which delivers audio data
+  // in chunks of 10ms. It then allows this data to be pulled at a finer or
+  // coarser granularity, i.e. by interacting with this class instead of
+  // directly with the AudioDeviceBuffer one can ask for any number of audio
+  // samples.
+  // Example: the native buffer size is 240 audio frames at a 48kHz sample
+  // rate. WebRTC will provide 480 audio frames per 10ms but OpenSL ES asks
+  // for 240 in each callback (one every 5ms). This class can then ask for 240
+  // and the FineAudioBuffer will ask WebRTC for new data only on every second
+  // callback, and it also caches non-utilized audio.
+ rtc::scoped_ptr<FineAudioBuffer> fine_buffer_;
+
+ // Keeps track of active audio buffer 'n' in the audio_buffers_[n] queue.
+ // Example (kNumOfOpenSLESBuffers = 2): counts 0, 1, 0, 1, ...
+ int buffer_index_;
+
+ // The engine object which provides the SLEngineItf interface.
+  // Created by the global OpenSL ES constructor slCreateEngine().
+ webrtc::ScopedSLObjectItf engine_object_;
+
+ // This interface exposes creation methods for all the OpenSL ES object types.
+ // It is the OpenSL ES API entry point.
+ SLEngineItf engine_;
+
+ // Output mix object to be used by the player object.
+ webrtc::ScopedSLObjectItf output_mix_;
+
+ // The audio player media object plays out audio to the speakers. It also
+ // supports volume control.
+ webrtc::ScopedSLObjectItf player_object_;
+
+ // This interface is supported on the audio player and it controls the state
+ // of the audio player.
+ SLPlayItf player_;
+
+ // The Android Simple Buffer Queue interface is supported on the audio player
+ // and it provides methods to send audio data from the source to the audio
+ // player for rendering.
+ SLAndroidSimpleBufferQueueItf simple_buffer_queue_;
+
+  // This interface exposes controls for manipulating the object's audio volume
+ // properties. This interface is supported on the Audio Player object.
+ SLVolumeItf volume_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_OPENSLES_PLAYER_H_
diff --git a/webrtc/modules/audio_device/android/single_rw_fifo.cc b/webrtc/modules/audio_device/android/single_rw_fifo.cc
deleted file mode 100644
index 883265a..0000000
--- a/webrtc/modules/audio_device/android/single_rw_fifo.cc
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-
-#include <assert.h>
-
-static int UpdatePos(int pos, int capacity) {
- return (pos + 1) % capacity;
-}
-
-namespace webrtc {
-
-namespace subtle {
-
-#if defined(__aarch64__)
-// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm64_gcc.h
-inline void MemoryBarrier() {
- __asm__ __volatile__ ("dmb ish" ::: "memory");
-}
-
-#elif defined(__ARMEL__)
-// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_arm_gcc.h
-inline void MemoryBarrier() {
- // Note: This is a function call, which is also an implicit compiler barrier.
- typedef void (*KernelMemoryBarrierFunc)();
- ((KernelMemoryBarrierFunc)0xffff0fa0)();
-}
-
-#elif defined(__x86_64__) || defined (__i386__)
-// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_x86_gcc.h
-// mfence exists on x64 and x86 platforms containing SSE2.
-// x86 platforms that don't have SSE2 will crash with SIGILL.
-// If this code needs to run on such platforms in the future,
-// add runtime CPU detection here.
-inline void MemoryBarrier() {
- __asm__ __volatile__("mfence" : : : "memory");
-}
-
-#elif defined(__MIPSEL__)
-// From http://src.chromium.org/viewvc/chrome/trunk/src/base/atomicops_internals_mips_gcc.h
-inline void MemoryBarrier() {
- __asm__ __volatile__("sync" : : : "memory");
-}
-
-#else
-#error Add an implementation of MemoryBarrier() for this platform!
-#endif
-
-} // namespace subtle
-
-SingleRwFifo::SingleRwFifo(int capacity)
- : capacity_(capacity),
- size_(0),
- read_pos_(0),
- write_pos_(0) {
- queue_.reset(new int8_t*[capacity_]);
-}
-
-SingleRwFifo::~SingleRwFifo() {
-}
-
-void SingleRwFifo::Push(int8_t* mem) {
- assert(mem);
-
- // Ensure that there is space for the new data in the FIFO.
- // Note there is only one writer meaning that the other thread is guaranteed
- // only to decrease the size.
- const int free_slots = capacity() - size();
- if (free_slots <= 0) {
- // Size can be queried outside of the Push function. The caller is assumed
- // to ensure that Push will be successful before calling it.
- assert(false);
- return;
- }
- queue_[write_pos_] = mem;
- // Memory barrier ensures that |size_| is updated after the size has changed.
- subtle::MemoryBarrier();
- ++size_;
- write_pos_ = UpdatePos(write_pos_, capacity());
-}
-
-int8_t* SingleRwFifo::Pop() {
- int8_t* ret_val = NULL;
- if (size() <= 0) {
- // Size can be queried outside of the Pop function. The caller is assumed
-    // to ensure that Pop will be successful before calling it.
- assert(false);
- return ret_val;
- }
- ret_val = queue_[read_pos_];
- // Memory barrier ensures that |size_| is updated after the size has changed.
- subtle::MemoryBarrier();
- --size_;
- read_pos_ = UpdatePos(read_pos_, capacity());
- return ret_val;
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/android/single_rw_fifo.h b/webrtc/modules/audio_device/android/single_rw_fifo.h
deleted file mode 100644
index e51ea5a..0000000
--- a/webrtc/modules/audio_device/android/single_rw_fifo.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#ifndef WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
-#define WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
-
-#include "webrtc/base/scoped_ptr.h"
-#include "webrtc/system_wrappers/interface/atomic32.h"
-#include "webrtc/typedefs.h"
-
-namespace webrtc {
-
-// Implements a lock-free FIFO loosely based on
-// http://src.chromium.org/viewvc/chrome/trunk/src/media/base/audio_fifo.cc
-// Note that this class assumes there is one producer (writer) and one
-// consumer (reader) thread.
-class SingleRwFifo {
- public:
- explicit SingleRwFifo(int capacity);
- ~SingleRwFifo();
-
- void Push(int8_t* mem);
- int8_t* Pop();
-
- void Clear();
-
- int size() { return size_.Value(); }
- int capacity() const { return capacity_; }
-
- private:
- rtc::scoped_ptr<int8_t* []> queue_;
- int capacity_;
-
- Atomic32 size_;
-
- int read_pos_;
- int write_pos_;
-};
-
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_AUDIO_DEVICE_ANDROID_SINGLE_RW_FIFO_H_
diff --git a/webrtc/modules/audio_device/android/single_rw_fifo_unittest.cc b/webrtc/modules/audio_device/android/single_rw_fifo_unittest.cc
deleted file mode 100644
index b53c9e4..0000000
--- a/webrtc/modules/audio_device/android/single_rw_fifo_unittest.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#include "webrtc/modules/audio_device/android/single_rw_fifo.h"
-
-#include <list>
-
-#include "testing/gtest/include/gtest/gtest.h"
-#include "webrtc/base/scoped_ptr.h"
-
-namespace webrtc {
-
-class SingleRwFifoTest : public testing::Test {
- public:
- enum {
- // Uninteresting as it does not affect test
- kBufferSize = 8,
- kCapacity = 6,
- };
-
- SingleRwFifoTest() : fifo_(kCapacity), pushed_(0), available_(0) {
- }
- virtual ~SingleRwFifoTest() {}
-
- void SetUp() {
- for (int8_t i = 0; i < kCapacity; ++i) {
- // Create memory area.
- buffer_[i].reset(new int8_t[kBufferSize]);
-      // Set the first byte in the buffer to the order in which it was created;
-      // this allows us to e.g. check that the buffers don't re-arrange.
- buffer_[i][0] = i;
- // Queue used by test.
- memory_queue_.push_back(buffer_[i].get());
- }
- available_ = kCapacity;
- VerifySizes();
- }
-
- void Push(int number_of_buffers) {
- for (int8_t i = 0; i < number_of_buffers; ++i) {
- int8_t* data = memory_queue_.front();
- memory_queue_.pop_front();
- fifo_.Push(data);
- --available_;
- ++pushed_;
- }
- VerifySizes();
- VerifyOrdering();
- }
- void Pop(int number_of_buffers) {
- for (int8_t i = 0; i < number_of_buffers; ++i) {
- int8_t* data = fifo_.Pop();
- memory_queue_.push_back(data);
- ++available_;
- --pushed_;
- }
- VerifySizes();
- VerifyOrdering();
- }
-
- void VerifyOrdering() const {
- std::list<int8_t*>::const_iterator iter = memory_queue_.begin();
- if (iter == memory_queue_.end()) {
- return;
- }
- int8_t previous_index = DataToElementIndex(*iter);
- ++iter;
- for (; iter != memory_queue_.end(); ++iter) {
- int8_t current_index = DataToElementIndex(*iter);
- EXPECT_EQ(current_index, ++previous_index % kCapacity);
- }
- }
-
- void VerifySizes() {
- EXPECT_EQ(available_, static_cast<int>(memory_queue_.size()));
- EXPECT_EQ(pushed_, fifo_.size());
- }
-
- int8_t DataToElementIndex(int8_t* data) const {
- return data[0];
- }
-
- protected:
- SingleRwFifo fifo_;
- // Memory area for proper de-allocation.
- rtc::scoped_ptr<int8_t[]> buffer_[kCapacity];
- std::list<int8_t*> memory_queue_;
-
- int pushed_;
- int available_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SingleRwFifoTest);
-};
-
-TEST_F(SingleRwFifoTest, Construct) {
- // All verifications are done in SetUp.
-}
-
-TEST_F(SingleRwFifoTest, Push) {
- Push(kCapacity);
-}
-
-TEST_F(SingleRwFifoTest, Pop) {
- // Push all available.
- Push(available_);
-
- // Test border cases:
- // At capacity
- Pop(1);
- Push(1);
-
- // At minimal capacity
- Pop(pushed_);
- Push(1);
- Pop(1);
-}
-
-} // namespace webrtc
diff --git a/webrtc/modules/audio_device/audio_device.gypi b/webrtc/modules/audio_device/audio_device.gypi
index ee8a3e4..d8e45ef 100644
--- a/webrtc/modules/audio_device/audio_device.gypi
+++ b/webrtc/modules/audio_device/audio_device.gypi
@@ -121,29 +121,23 @@
'win/audio_device_utility_win.h',
'win/audio_mixer_manager_win.cc',
'win/audio_mixer_manager_win.h',
+ 'android/build_info.cc',
+ 'android/build_info.h',
'android/audio_device_template.h',
'android/audio_device_utility_android.cc',
'android/audio_device_utility_android.h',
'android/audio_manager.cc',
'android/audio_manager.h',
- 'android/audio_manager_jni.cc',
- 'android/audio_manager_jni.h',
'android/audio_record_jni.cc',
'android/audio_record_jni.h',
'android/audio_track_jni.cc',
'android/audio_track_jni.h',
'android/fine_audio_buffer.cc',
'android/fine_audio_buffer.h',
- 'android/low_latency_event_posix.cc',
- 'android/low_latency_event.h',
'android/opensles_common.cc',
'android/opensles_common.h',
- 'android/opensles_input.cc',
- 'android/opensles_input.h',
- 'android/opensles_output.cc',
- 'android/opensles_output.h',
- 'android/single_rw_fifo.cc',
- 'android/single_rw_fifo.h',
+ 'android/opensles_player.cc',
+ 'android/opensles_player.h',
],
'conditions': [
['OS=="android"', {
@@ -272,28 +266,6 @@
},
],
}],
- ['OS=="android"', {
- 'targets': [
- {
- 'target_name': 'audio_device_unittest',
- 'type': 'executable',
- 'dependencies': [
- 'audio_device',
- 'webrtc_utility',
- '<(DEPTH)/testing/gmock.gyp:gmock',
- '<(DEPTH)/testing/gtest.gyp:gtest',
- '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
- '<(webrtc_root)/test/test.gyp:test_support_main',
- ],
- 'sources': [
- 'android/fine_audio_buffer_unittest.cc',
- 'android/low_latency_event_unittest.cc',
- 'android/single_rw_fifo_unittest.cc',
- 'mock/mock_audio_device_buffer.h',
- ],
- },
- ],
- }],
],
}], # include_tests
],
diff --git a/webrtc/modules/audio_device/audio_device_impl.cc b/webrtc/modules/audio_device/audio_device_impl.cc
index 0bcba98..6e31fc3 100644
--- a/webrtc/modules/audio_device/audio_device_impl.cc
+++ b/webrtc/modules/audio_device/audio_device_impl.cc
@@ -23,13 +23,13 @@
#include "audio_device_core_win.h"
#endif
#elif defined(WEBRTC_ANDROID)
- #include <stdlib.h>
- #include "audio_device_utility_android.h"
- #include "webrtc/modules/audio_device/android/audio_device_template.h"
- #include "webrtc/modules/audio_device/android/audio_record_jni.h"
- #include "webrtc/modules/audio_device/android/audio_track_jni.h"
- #include "webrtc/modules/audio_device/android/opensles_input.h"
- #include "webrtc/modules/audio_device/android/opensles_output.h"
+#include <stdlib.h>
+#include "audio_device_utility_android.h"
+#include "webrtc/modules/audio_device/android/audio_device_template.h"
+#include "webrtc/modules/audio_device/android/audio_manager.h"
+#include "webrtc/modules/audio_device/android/audio_record_jni.h"
+#include "webrtc/modules/audio_device/android/audio_track_jni.h"
+#include "webrtc/modules/audio_device/android/opensles_player.h"
#elif defined(WEBRTC_LINUX)
#include "audio_device_utility_linux.h"
#if defined(LINUX_ALSA)
@@ -269,24 +269,35 @@
}
#endif // #if defined(_WIN32)
- // Create the *Android OpenSLES* implementation of the Audio Device
- //
#if defined(WEBRTC_ANDROID)
-#ifdef WEBRTC_ANDROID_OPENSLES
- // Force default audio layer to OpenSL ES if the special compiler flag
- // (enable_android_opensl) has been set to one.
+ // Create an Android audio manager.
+ _audioManagerAndroid.reset(new AudioManager());
+  // Select the best possible combination of audio layers.
if (audioLayer == kPlatformDefaultAudio) {
- audioLayer = kAndroidOpenSLESAudio;
+ if (_audioManagerAndroid->IsLowLatencyPlayoutSupported()) {
+      // Always use OpenSL ES for output on devices that support the
+ // low-latency output audio path.
+ audioLayer = kAndroidJavaInputAndOpenSLESOutputAudio;
+ } else {
+ // Use Java-based audio in both directions when low-latency output
+ // is not supported.
+ audioLayer = kAndroidJavaAudio;
+ }
}
-#endif
- if (audioLayer == kPlatformDefaultAudio ||
- audioLayer == kAndroidJavaAudio) {
- ptrAudioDevice =
- new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(Id());
- } else if (audioLayer == kAndroidOpenSLESAudio) {
- // AudioRecordJni provides hardware AEC and OpenSlesOutput low latency.
- ptrAudioDevice =
- new AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>(Id());
+ AudioManager* audio_manager = _audioManagerAndroid.get();
+ if (audioLayer == kAndroidJavaAudio) {
+ // Java audio for both input and output audio.
+ ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>(
+ audioLayer, audio_manager);
+ } else if (audioLayer == kAndroidJavaInputAndOpenSLESOutputAudio) {
+ // Java audio for input and OpenSL ES for output audio (i.e. mixed APIs).
+ // This combination provides low-latency output audio and at the same
+ // time support for HW AEC using the AudioRecord Java API.
+ ptrAudioDevice = new AudioDeviceTemplate<AudioRecordJni, OpenSLESPlayer>(
+ audioLayer, audio_manager);
+ } else {
+ // Invalid audio layer.
+ ptrAudioDevice = NULL;
}
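+  // Illustrative outcome (assuming IsLowLatencyPlayoutSupported() reflects the
+  // "android.hardware.audio.low_latency" system feature): such devices end up
+  // with kAndroidJavaInputAndOpenSLESOutputAudio while all other devices fall
+  // back to kAndroidJavaAudio in both directions.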
if (ptrAudioDevice != NULL) {
@@ -549,36 +560,13 @@
// ActiveAudioLayer
// ----------------------------------------------------------------------------
-int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const
-{
-
- AudioLayer activeAudio;
-
- if (_ptrAudioDevice->ActiveAudioLayer(activeAudio) == -1)
- {
- return -1;
- }
-
- *audioLayer = activeAudio;
-
- if (*audioLayer == AudioDeviceModule::kWindowsWaveAudio)
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, "output: kWindowsWaveAudio");
- }
- else if (*audioLayer == AudioDeviceModule::kWindowsCoreAudio)
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, "output: kWindowsCoreAudio");
- }
- else if (*audioLayer == AudioDeviceModule::kLinuxAlsaAudio)
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, "output: kLinuxAlsaAudio");
- }
- else
- {
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id, "output: NOT_SUPPORTED");
- }
-
- return 0;
+int32_t AudioDeviceModuleImpl::ActiveAudioLayer(AudioLayer* audioLayer) const {
+ AudioLayer activeAudio;
+ if (_ptrAudioDevice->ActiveAudioLayer(activeAudio) == -1) {
+ return -1;
+ }
+ *audioLayer = activeAudio;
+ return 0;
}
// ----------------------------------------------------------------------------
@@ -2004,35 +1992,6 @@
AudioDeviceModule::AudioLayer AudioDeviceModuleImpl::PlatformAudioLayer() const
{
-
- switch (_platformAudioLayer)
- {
- case kPlatformDefaultAudio:
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
- "output: kPlatformDefaultAudio");
- break;
- case kWindowsWaveAudio:
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
- "output: kWindowsWaveAudio");
- break;
- case kWindowsCoreAudio:
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
- "output: kWindowsCoreAudio");
- break;
- case kLinuxAlsaAudio:
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
- "output: kLinuxAlsaAudio");
- break;
- case kDummyAudio:
- WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, _id,
- "output: kDummyAudio");
- break;
- default:
- WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
- "output: INVALID");
- break;
- }
-
return _platformAudioLayer;
}
diff --git a/webrtc/modules/audio_device/audio_device_impl.h b/webrtc/modules/audio_device/audio_device_impl.h
index 97f266a..010c537 100644
--- a/webrtc/modules/audio_device/audio_device_impl.h
+++ b/webrtc/modules/audio_device/audio_device_impl.h
@@ -11,6 +11,8 @@
#ifndef WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H
#define WEBRTC_AUDIO_DEVICE_AUDIO_DEVICE_IMPL_H
+#include "webrtc/base/checks.h"
+#include "webrtc/base/scoped_ptr.h"
#include "webrtc/modules/audio_device/audio_device_buffer.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
@@ -19,6 +21,7 @@
class AudioDeviceGeneric;
class AudioDeviceUtility;
+class AudioManager;
class CriticalSectionWrapper;
class AudioDeviceModuleImpl : public AudioDeviceModule
@@ -192,9 +195,17 @@
public:
int32_t Id() {return _id;}
-
+  // Only use this accessor for test purposes on Android.
+ AudioManager* GetAndroidAudioManagerForTest() {
+#if defined(WEBRTC_ANDROID)
+ return _audioManagerAndroid.get();
+#else
+ CHECK(false) << "Invalid usage of GetAndroidAudioManagerForTest";
+ return nullptr;
+#endif
+ }
AudioDeviceBuffer* GetAudioDeviceBuffer() {
- return &_audioDeviceBuffer;
+ return &_audioDeviceBuffer;
}
private:
@@ -202,23 +213,25 @@
AudioLayer PlatformAudioLayer() const;
private:
- CriticalSectionWrapper& _critSect;
- CriticalSectionWrapper& _critSectEventCb;
- CriticalSectionWrapper& _critSectAudioCb;
+ CriticalSectionWrapper& _critSect;
+ CriticalSectionWrapper& _critSectEventCb;
+ CriticalSectionWrapper& _critSectAudioCb;
- AudioDeviceObserver* _ptrCbAudioDeviceObserver;
+ AudioDeviceObserver* _ptrCbAudioDeviceObserver;
- AudioDeviceUtility* _ptrAudioDeviceUtility;
- AudioDeviceGeneric* _ptrAudioDevice;
+ AudioDeviceUtility* _ptrAudioDeviceUtility;
+ AudioDeviceGeneric* _ptrAudioDevice;
- AudioDeviceBuffer _audioDeviceBuffer;
-
- int32_t _id;
- AudioLayer _platformAudioLayer;
- uint32_t _lastProcessTime;
- PlatformType _platformType;
- bool _initialized;
- mutable ErrorCode _lastError;
+ AudioDeviceBuffer _audioDeviceBuffer;
+#if defined(WEBRTC_ANDROID)
+ rtc::scoped_ptr<AudioManager> _audioManagerAndroid;
+#endif
+ int32_t _id;
+ AudioLayer _platformAudioLayer;
+ uint32_t _lastProcessTime;
+ PlatformType _platformType;
+ bool _initialized;
+ mutable ErrorCode _lastError;
};
} // namespace webrtc
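A hypothetical test-side use of the new accessor, assuming |adm| wraps an AudioDeviceModuleImpl created on Android:

  webrtc::AudioDeviceModuleImpl* impl =
      static_cast<webrtc::AudioDeviceModuleImpl*>(adm.get());
  webrtc::AudioManager* audio_manager = impl->GetAndroidAudioManagerForTest();
  CHECK(audio_manager);  // On non-Android builds the accessor CHECK-fails.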
diff --git a/webrtc/modules/audio_device/include/audio_device.h b/webrtc/modules/audio_device/include/audio_device.h
index dc9a63f..2f0c6b5 100644
--- a/webrtc/modules/audio_device/include/audio_device.h
+++ b/webrtc/modules/audio_device/include/audio_device.h
@@ -30,8 +30,8 @@
kLinuxAlsaAudio = 3,
kLinuxPulseAudio = 4,
kAndroidJavaAudio = 5,
- kAndroidOpenSLESAudio = 6,
- kDummyAudio = 7
+ kAndroidJavaInputAndOpenSLESOutputAudio = 6,
+ kDummyAudio = 8
};
enum WindowsDeviceType {
diff --git a/webrtc/modules/modules.gyp b/webrtc/modules/modules.gyp
index 814ed84..db249b2 100644
--- a/webrtc/modules/modules.gyp
+++ b/webrtc/modules/modules.gyp
@@ -338,8 +338,10 @@
],
'sources': [
'audio_device/android/audio_device_unittest.cc',
+ 'audio_device/android/audio_manager_unittest.cc',
'audio_device/android/ensure_initialized.cc',
'audio_device/android/ensure_initialized.h',
+ 'audio_device/android/fine_audio_buffer_unittest.cc',
],
}],
],
diff --git a/webrtc/modules/utility/BUILD.gn b/webrtc/modules/utility/BUILD.gn
index 4503be6..3deed6f 100644
--- a/webrtc/modules/utility/BUILD.gn
+++ b/webrtc/modules/utility/BUILD.gn
@@ -14,6 +14,7 @@
"interface/file_player.h",
"interface/file_recorder.h",
"interface/helpers_android.h",
+ "interface/jvm_android.h",
"interface/process_thread.h",
"interface/rtp_dump.h",
"source/audio_frame_operations.cc",
@@ -24,6 +25,7 @@
"source/file_recorder_impl.cc",
"source/file_recorder_impl.h",
"source/helpers_android.cc",
+ "source/jvm_android.cc",
"source/process_thread_impl.cc",
"source/process_thread_impl.h",
"source/rtp_dump_impl.cc",
diff --git a/webrtc/modules/utility/interface/helpers_android.h b/webrtc/modules/utility/interface/helpers_android.h
index 3424e28..19ff098 100644
--- a/webrtc/modules/utility/interface/helpers_android.h
+++ b/webrtc/modules/utility/interface/helpers_android.h
@@ -25,13 +25,21 @@
// Return a |JNIEnv*| usable on this thread or NULL if this thread is detached.
JNIEnv* GetEnv(JavaVM* jvm);
+// Return a |jlong| that will correctly convert back to |ptr|. This is needed
+// because the alternative (of silently passing a 32-bit pointer to a vararg
+// function expecting a 64-bit param) picks up garbage in the high 32 bits.
+jlong PointerTojlong(void* ptr);
+
// JNIEnv helper methods that wrap the API which uses the JNI interface
// pointer (JNIEnv*). They allow us to CHECK success and verify that no Java
// exception is thrown while calling into Java.
-jmethodID GetMethodID (
- JNIEnv* jni, jclass c, const std::string& name, const char* signature);
+jmethodID GetMethodID(
+ JNIEnv* jni, jclass c, const char* name, const char* signature);
-jclass FindClass(JNIEnv* jni, const std::string& name);
+jmethodID GetStaticMethodID(
+ JNIEnv* jni, jclass c, const char* name, const char* signature);
+
+jclass FindClass(JNIEnv* jni, const char* name);
jobject NewGlobalRef(JNIEnv* jni, jobject o);
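Taken together these helpers give terse, CHECKed JNI lookups. A sketch, given a valid |jni| for the current thread (the BuildInfo method name and signature are assumptions for illustration):

  jclass clazz = webrtc::FindClass(jni, "org/webrtc/voiceengine/BuildInfo");
  jmethodID mid = webrtc::GetStaticMethodID(
      jni, clazz, "getDevice", "()Ljava/lang/String;");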
diff --git a/webrtc/modules/utility/interface/jvm_android.h b/webrtc/modules/utility/interface/jvm_android.h
new file mode 100644
index 0000000..543ffea
--- /dev/null
+++ b/webrtc/modules/utility/interface/jvm_android.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
+#define WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
+
+#include <jni.h>
+#include <string>
+
+#include "webrtc/base/scoped_ptr.h"
+#include "webrtc/base/thread_checker.h"
+#include "webrtc/modules/utility/interface/helpers_android.h"
+
+namespace webrtc {
+
+// The JNI interface pointer (JNIEnv) is valid only in the current thread.
+// Should another thread need to access the Java VM, it must first call
+// AttachCurrentThread() to attach itself to the VM and obtain a JNI interface
+// pointer. The native thread remains attached to the VM until it calls
+// DetachCurrentThread() to detach.
+class AttachCurrentThreadIfNeeded {
+ public:
+ AttachCurrentThreadIfNeeded();
+ ~AttachCurrentThreadIfNeeded();
+
+ private:
+ rtc::ThreadChecker thread_checker_;
+ bool attached_;
+};
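The intended pattern on a native worker thread looks roughly like this (the function name is hypothetical):

  void OnWorkerThread() {
    // Attaches only if this thread is detached; the dtor detaches again
    // if and only if the ctor did the attach.
    webrtc::AttachCurrentThreadIfNeeded attach;
    rtc::scoped_ptr<webrtc::JNIEnvironment> env =
        webrtc::JVM::GetInstance()->environment();
    // ... JNI calls via |env| ...
  }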
+
+// This class is created by the NativeRegistration class and is used to wrap
+// the actual Java object handle (jobject) on which we can call methods from
+// C++ into Java. See the example in JVM for more details.
+// TODO(henrika): extend support for more types of function calls.
+class GlobalRef {
+ public:
+ GlobalRef(JNIEnv* jni, jobject object);
+ ~GlobalRef();
+
+ jboolean CallBooleanMethod(jmethodID methodID, ...);
+ void CallVoidMethod(jmethodID methodID, ...);
+
+ private:
+ JNIEnv* const jni_;
+ const jobject j_object_;
+};
+
+// Wraps the jclass object on which we can call GetMethodId() functions to
+// query method IDs.
+class JavaClass {
+ public:
+ JavaClass(JNIEnv* jni, jclass clazz) : jni_(jni), j_class_(clazz) {}
+ ~JavaClass() {}
+
+ jmethodID GetMethodId(const char* name, const char* signature);
+ jmethodID GetStaticMethodId(const char* name, const char* signature);
+ jobject CallStaticObjectMethod(jmethodID methodID, ...);
+
+ protected:
+ JNIEnv* const jni_;
+ jclass const j_class_;
+};
+
+// Adds support of the NewObject factory method to the JavaClass class.
+// See example in JVM for more details on how to use it.
+class NativeRegistration : public JavaClass {
+ public:
+ NativeRegistration(JNIEnv* jni, jclass clazz);
+ ~NativeRegistration();
+
+ rtc::scoped_ptr<GlobalRef> NewObject(
+ const char* name, const char* signature, ...);
+
+ private:
+ JNIEnv* const jni_;
+};
+
+// This class is created by the JVM class and is used to expose methods that
+// need the JNI interface pointer. Its main purpose is to create a
+// NativeRegistration object given the name of a Java class and a list of
+// native methods. See the example in JVM for more details.
+class JNIEnvironment {
+ public:
+ explicit JNIEnvironment(JNIEnv* jni);
+ ~JNIEnvironment();
+
+ // Registers native methods with the Java class specified by |name|.
+ // Note that the class name must be one of the names in the static
+ // |loaded_classes| array defined in jvm_android.cc.
+ // This method must be called on the construction thread.
+ rtc::scoped_ptr<NativeRegistration> RegisterNatives(
+      const char* name, const JNINativeMethod* methods, int num_methods);
+
+ // Converts from Java string to std::string.
+ // This method must be called on the construction thread.
+ std::string JavaToStdString(const jstring& j_string);
+
+ private:
+ rtc::ThreadChecker thread_checker_;
+ JNIEnv* const jni_;
+};
+
+// Main class for working with Java from C++ using JNI in WebRTC.
+//
+// Example usage:
+//
+// // At initialization (e.g. in JNI_OnLoad), call JVM::Initialize.
+// JNIEnv* jni = ::base::android::AttachCurrentThread();
+// JavaVM* jvm = NULL;
+// jni->GetJavaVM(&jvm);
+// jobject context = ::base::android::GetApplicationContext();
+// webrtc::JVM::Initialize(jvm, context);
+//
+// // Header (.h) file of example class called User.
+// rtc::scoped_ptr<JNIEnvironment> env;
+// rtc::scoped_ptr<NativeRegistration> reg;
+// rtc::scoped_ptr<GlobalRef> obj;
+//
+// // Construction (in .cc file) of User class.
+// User::User() {
+// // Calling thread must be attached to the JVM.
+// env = JVM::GetInstance()->environment();
+// reg = env->RegisterNatives("org/webrtc/WebRtcTest", ,);
+// obj = reg->NewObject("<init>", ,);
+// }
+//
+// // Each User method can now use |reg| and |obj| and call Java functions
+// // in WebRtcTest.java, e.g. boolean init() {}.
+// bool User::Foo() {
+// jmethodID id = reg->GetMethodId("init", "()Z");
+// return obj->CallBooleanMethod(id);
+// }
+//
+// // And finally, e.g. in JNI_OnUnLoad, call JVM::Uninitialize.
+// JVM::Uninitialize();
+class JVM {
+ public:
+ // Stores global handles to the Java VM interface and the application context.
+ // Should be called once on a thread that is attached to the JVM.
+ static void Initialize(JavaVM* jvm, jobject context);
+ // Clears handles stored in Initialize(). Must be called on same thread as
+ // Initialize().
+ static void Uninitialize();
+ // Gives access to the global Java VM interface pointer, which then can be
+ // used to create a valid JNIEnvironment object or to get a JavaClass object.
+ static JVM* GetInstance();
+
+ // Creates a JNIEnvironment object.
+ // This method returns a NULL pointer if AttachCurrentThread() has not been
+ // called successfully. Use the AttachCurrentThreadIfNeeded class if needed.
+ rtc::scoped_ptr<JNIEnvironment> environment();
+
+ // Returns a JavaClass object given class |name|.
+ // Note that the class name must be one of the names in the static
+ // |loaded_classes| array defined in jvm_android.cc.
+ // This method must be called on the construction thread.
+ JavaClass GetClass(const char* name);
+
+ // TODO(henrika): can we make these private?
+ JavaVM* jvm() const { return jvm_; }
+ jobject context() const { return context_; }
+
+ protected:
+ JVM(JavaVM* jvm, jobject context);
+ ~JVM();
+
+ private:
+ JNIEnv* jni() const { return GetEnv(jvm_); }
+
+ rtc::ThreadChecker thread_checker_;
+ JavaVM* const jvm_;
+ jobject context_;
+};
+
+} // namespace webrtc
+
+#endif // WEBRTC_MODULES_UTILITY_SOURCE_JVM_H_
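Filling in the elided arguments of the example above, a registration could look as follows, given |env| from JVM::GetInstance()->environment(). The Java-side method name and its native binding are assumptions; the class name must be one of the entries in |loaded_classes| (see jvm_android.cc):

  static void JNICALL OnSetFlag(JNIEnv* env, jobject obj, jlong nativePtr) {
    // Hypothetical native method bound from Java.
  }

  JNINativeMethod methods[] = {
      {"nativeSetFlag", "(J)V", reinterpret_cast<void*>(&OnSetFlag)}};
  rtc::scoped_ptr<webrtc::NativeRegistration> reg = env->RegisterNatives(
      "org/webrtc/voiceengine/WebRtcAudioManager", methods, 1);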
diff --git a/webrtc/modules/utility/source/helpers_android.cc b/webrtc/modules/utility/source/helpers_android.cc
index f429db1..175dd23 100644
--- a/webrtc/modules/utility/source/helpers_android.cc
+++ b/webrtc/modules/utility/source/helpers_android.cc
@@ -31,17 +31,40 @@
return reinterpret_cast<JNIEnv*>(env);
}
+// Return a |jlong| that will correctly convert back to |ptr|. This is needed
+// because the alternative (of silently passing a 32-bit pointer to a vararg
+// function expecting a 64-bit param) picks up garbage in the high 32 bits.
+jlong PointerTojlong(void* ptr) {
+ static_assert(sizeof(intptr_t) <= sizeof(jlong),
+ "Time to rethink the use of jlongs");
+ // Going through intptr_t to be obvious about the definedness of the
+ // conversion from pointer to integral type. intptr_t to jlong is a standard
+ // widening by the static_assert above.
+ jlong ret = reinterpret_cast<intptr_t>(ptr);
+ DCHECK(reinterpret_cast<void*>(ret) == ptr);
+ return ret;
+}
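The usual round trip (sketch; the native callback and type are hypothetical):

  // C++ side: hand a native object to Java as a jlong.
  jlong handle = webrtc::PointerTojlong(native_object);
  // Java later passes the value back to a registered native method, where it
  // is recovered without loss:
  //   void JNICALL OnEvent(JNIEnv*, jobject, jlong nativePtr) {
  //     auto* self = reinterpret_cast<NativeType*>(nativePtr);
  //   }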
+
jmethodID GetMethodID (
- JNIEnv* jni, jclass c, const std::string& name, const char* signature) {
- jmethodID m = jni->GetMethodID(c, name.c_str(), signature);
+ JNIEnv* jni, jclass c, const char* name, const char* signature) {
+ jmethodID m = jni->GetMethodID(c, name, signature);
CHECK_EXCEPTION(jni) << "Error during GetMethodID: " << name << ", "
<< signature;
CHECK(m) << name << ", " << signature;
return m;
}
-jclass FindClass(JNIEnv* jni, const std::string& name) {
- jclass c = jni->FindClass(name.c_str());
+jmethodID GetStaticMethodID(
+ JNIEnv* jni, jclass c, const char* name, const char* signature) {
+ jmethodID m = jni->GetStaticMethodID(c, name, signature);
+ CHECK_EXCEPTION(jni) << "Error during GetStaticMethodID: " << name << ", "
+ << signature;
+ CHECK(m) << name << ", " << signature;
+ return m;
+}
+
+jclass FindClass(JNIEnv* jni, const char* name) {
+ jclass c = jni->FindClass(name);
CHECK_EXCEPTION(jni) << "Error during FindClass: " << name;
CHECK(c) << name;
return c;
diff --git a/webrtc/modules/utility/source/jvm_android.cc b/webrtc/modules/utility/source/jvm_android.cc
new file mode 100644
index 0000000..2655195
--- /dev/null
+++ b/webrtc/modules/utility/source/jvm_android.cc
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <android/log.h>
+#include <string.h>
+
+#include "webrtc/modules/utility/interface/jvm_android.h"
+
+#include "webrtc/base/checks.h"
+
+#define TAG "JVM"
+#define ALOGD(...) __android_log_print(ANDROID_LOG_DEBUG, TAG, __VA_ARGS__)
+#define ALOGE(...) __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
+
+namespace webrtc {
+
+JVM* g_jvm;
+
+// TODO(henrika): add more classes here if needed.
+struct {
+ const char* name;
+ jclass clazz;
+} loaded_classes[] = {
+ {"org/webrtc/voiceengine/WebRtcAudioManager", nullptr},
+ {"org/webrtc/voiceengine/BuildInfo", nullptr},
+};
+
+// Android's FindClass() is trickier than usual because the app-specific
+// ClassLoader is not consulted when there is no app-specific frame on the
+// stack. Consequently, we only look up all classes once in native WebRTC.
+// http://developer.android.com/training/articles/perf-jni.html#faq_FindClass
+void LoadClasses(JNIEnv* jni) {
+ for (auto& c : loaded_classes) {
+ jclass localRef = FindClass(jni, c.name);
+ CHECK_EXCEPTION(jni) << "Error during FindClass: " << c.name;
+ CHECK(localRef) << c.name;
+ jclass globalRef = reinterpret_cast<jclass>(jni->NewGlobalRef(localRef));
+ CHECK_EXCEPTION(jni) << "Error during NewGlobalRef: " << c.name;
+ CHECK(globalRef) << c.name;
+ c.clazz = globalRef;
+ }
+}
+
+void FreeClassReferences(JNIEnv* jni) {
+ for (auto& c : loaded_classes) {
+ jni->DeleteGlobalRef(c.clazz);
+ c.clazz = nullptr;
+ }
+}
+
+jclass LookUpClass(const char* name) {
+  for (auto& c : loaded_classes) {
+    // Compare by value; pointer equality of string literals is not
+    // guaranteed.
+    if (strcmp(c.name, name) == 0)
+      return c.clazz;
+  }
+  CHECK(false) << "Unable to find class in lookup table";
+  return nullptr;
+}
+
+// AttachCurrentThreadIfNeeded implementation.
+AttachCurrentThreadIfNeeded::AttachCurrentThreadIfNeeded()
+ : attached_(false) {
+ ALOGD("AttachCurrentThreadIfNeeded::ctor%s", GetThreadInfo().c_str());
+ JavaVM* jvm = JVM::GetInstance()->jvm();
+ CHECK(jvm);
+ JNIEnv* jni = GetEnv(jvm);
+ if (!jni) {
+ ALOGD("Attaching thread to JVM");
+ JNIEnv* env = nullptr;
+ jint ret = jvm->AttachCurrentThread(&env, nullptr);
+ attached_ = (ret == JNI_OK);
+ }
+}
+
+AttachCurrentThreadIfNeeded::~AttachCurrentThreadIfNeeded() {
+ ALOGD("AttachCurrentThreadIfNeeded::dtor%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (attached_) {
+ ALOGD("Detaching thread from JVM");
+ jint res = JVM::GetInstance()->jvm()->DetachCurrentThread();
+ CHECK(res == JNI_OK) << "DetachCurrentThread failed: " << res;
+ }
+}
+
+// GlobalRef implementation.
+GlobalRef::GlobalRef(JNIEnv* jni, jobject object)
+ : jni_(jni), j_object_(NewGlobalRef(jni, object)) {
+ ALOGD("GlobalRef::ctor%s", GetThreadInfo().c_str());
+}
+
+GlobalRef::~GlobalRef() {
+ ALOGD("GlobalRef::dtor%s", GetThreadInfo().c_str());
+ DeleteGlobalRef(jni_, j_object_);
+}
+
+jboolean GlobalRef::CallBooleanMethod(jmethodID methodID, ...) {
+  va_list args;
+  va_start(args, methodID);
+  // Use the va_list variant; forwarding |args| to the variadic
+  // CallBooleanMethod would be undefined behavior.
+  jboolean res = jni_->CallBooleanMethodV(j_object_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallBooleanMethod";
+  va_end(args);
+  return res;
+}
+
+void GlobalRef::CallVoidMethod(jmethodID methodID, ...) {
+  va_list args;
+  va_start(args, methodID);
+  jni_->CallVoidMethodV(j_object_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallVoidMethod";
+  va_end(args);
+}
+
+// NativeRegistration implementation.
+NativeRegistration::NativeRegistration(JNIEnv* jni, jclass clazz)
+ : JavaClass(jni, clazz), jni_(jni) {
+ ALOGD("NativeRegistration::ctor%s", GetThreadInfo().c_str());
+}
+
+NativeRegistration::~NativeRegistration() {
+ ALOGD("NativeRegistration::dtor%s", GetThreadInfo().c_str());
+ jni_->UnregisterNatives(j_class_);
+ CHECK_EXCEPTION(jni_) << "Error during UnregisterNatives";
+}
+
+rtc::scoped_ptr<GlobalRef> NativeRegistration::NewObject(
+ const char* name, const char* signature, ...) {
+ ALOGD("NativeRegistration::NewObject%s", GetThreadInfo().c_str());
+ va_list args;
+ va_start(args, signature);
+ jobject obj = jni_->NewObjectV(j_class_,
+ GetMethodID(jni_, j_class_, name, signature),
+ args);
+ CHECK_EXCEPTION(jni_) << "Error during NewObjectV";
+ va_end(args);
+ return rtc::scoped_ptr<GlobalRef>(new GlobalRef(jni_, obj));
+}
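A concrete variant of the header-file usage example, called from inside a C++ class method; the constructor signature is an assumption that mirrors the context-plus-native-pointer pattern:

  rtc::scoped_ptr<webrtc::GlobalRef> obj = reg->NewObject(
      "<init>", "(Landroid/content/Context;J)V",
      webrtc::JVM::GetInstance()->context(), webrtc::PointerTojlong(this));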
+
+// JavaClass implementation.
+jmethodID JavaClass::GetMethodId(
+ const char* name, const char* signature) {
+ return GetMethodID(jni_, j_class_, name, signature);
+}
+
+jmethodID JavaClass::GetStaticMethodId(
+ const char* name, const char* signature) {
+ return GetStaticMethodID(jni_, j_class_, name, signature);
+}
+
+jobject JavaClass::CallStaticObjectMethod(jmethodID methodID, ...) {
+  va_list args;
+  va_start(args, methodID);
+  jobject res = jni_->CallStaticObjectMethodV(j_class_, methodID, args);
+  CHECK_EXCEPTION(jni_) << "Error during CallStaticObjectMethod";
+  va_end(args);
+  return res;
+}
+
+// JNIEnvironment implementation.
+JNIEnvironment::JNIEnvironment(JNIEnv* jni) : jni_(jni) {
+ ALOGD("JNIEnvironment::ctor%s", GetThreadInfo().c_str());
+}
+
+JNIEnvironment::~JNIEnvironment() {
+ ALOGD("JNIEnvironment::dtor%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+}
+
+rtc::scoped_ptr<NativeRegistration> JNIEnvironment::RegisterNatives(
+    const char* name, const JNINativeMethod* methods, int num_methods) {
+ ALOGD("JNIEnvironment::RegisterNatives(%s)", name);
+ DCHECK(thread_checker_.CalledOnValidThread());
+ jclass clazz = LookUpClass(name);
+ jni_->RegisterNatives(clazz, methods, num_methods);
+ CHECK_EXCEPTION(jni_) << "Error during RegisterNatives";
+ return rtc::scoped_ptr<NativeRegistration>(
+ new NativeRegistration(jni_, clazz));
+}
+
+std::string JNIEnvironment::JavaToStdString(const jstring& j_string) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ const char* jchars = jni_->GetStringUTFChars(j_string, nullptr);
+ CHECK_EXCEPTION(jni_);
+ const int size = jni_->GetStringUTFLength(j_string);
+ CHECK_EXCEPTION(jni_);
+ std::string ret(jchars, size);
+ jni_->ReleaseStringUTFChars(j_string, jchars);
+ CHECK_EXCEPTION(jni_);
+ return ret;
+}
+
+// static
+void JVM::Initialize(JavaVM* jvm, jobject context) {
+ ALOGD("JVM::Initialize%s", GetThreadInfo().c_str());
+ CHECK(!g_jvm);
+ g_jvm = new JVM(jvm, context);
+}
+
+// static
+void JVM::Uninitialize() {
+ ALOGD("JVM::Uninitialize%s", GetThreadInfo().c_str());
+ DCHECK(g_jvm);
+ delete g_jvm;
+ g_jvm = nullptr;
+}
+
+// static
+JVM* JVM::GetInstance() {
+ DCHECK(g_jvm);
+ return g_jvm;
+}
+
+JVM::JVM(JavaVM* jvm, jobject context)
+ : jvm_(jvm) {
+ ALOGD("JVM::JVM%s", GetThreadInfo().c_str());
+ CHECK(jni()) << "AttachCurrentThread() must be called on this thread.";
+ context_ = NewGlobalRef(jni(), context);
+ LoadClasses(jni());
+}
+
+JVM::~JVM() {
+ ALOGD("JVM::~JVM%s", GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ FreeClassReferences(jni());
+ DeleteGlobalRef(jni(), context_);
+}
+
+rtc::scoped_ptr<JNIEnvironment> JVM::environment() {
+ ALOGD("JVM::environment%s", GetThreadInfo().c_str());
+  // The JNIEnv is used for thread-local storage. For this reason, we cannot
+  // share a JNIEnv between threads. If a piece of code has no other way to
+  // get its JNIEnv, we should share the JavaVM and use GetEnv to discover the
+  // thread's JNIEnv (assuming it has one; if not, use AttachCurrentThread).
+  // See http://developer.android.com/training/articles/perf-jni.html.
+ JNIEnv* jni = GetEnv(jvm_);
+ if (!jni) {
+ ALOGE("AttachCurrentThread() has not been called on this thread.");
+ return rtc::scoped_ptr<JNIEnvironment>();
+ }
+ return rtc::scoped_ptr<JNIEnvironment>(new JNIEnvironment(jni));
+}
+
+JavaClass JVM::GetClass(const char* name) {
+ ALOGD("JVM::GetClass(%s)%s", name, GetThreadInfo().c_str());
+ DCHECK(thread_checker_.CalledOnValidThread());
+ return JavaClass(jni(), LookUpClass(name));
+}
+
+} // namespace webrtc
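A sketch of a static call through JavaClass, with an assumed method name and signature; |env| is a JNIEnvironment for the current thread:

  webrtc::JavaClass build_info =
      webrtc::JVM::GetInstance()->GetClass("org/webrtc/voiceengine/BuildInfo");
  jmethodID id =
      build_info.GetStaticMethodId("getDevice", "()Ljava/lang/String;");
  jobject device = build_info.CallStaticObjectMethod(id);
  std::string name = env->JavaToStdString(static_cast<jstring>(device));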
diff --git a/webrtc/modules/utility/utility.gypi b/webrtc/modules/utility/utility.gypi
index 46014e8..51ee624 100644
--- a/webrtc/modules/utility/utility.gypi
+++ b/webrtc/modules/utility/utility.gypi
@@ -22,6 +22,7 @@
'interface/file_player.h',
'interface/file_recorder.h',
'interface/helpers_android.h',
+ 'interface/jvm_android.h',
'interface/process_thread.h',
'interface/rtp_dump.h',
'source/audio_frame_operations.cc',
@@ -32,6 +33,7 @@
'source/file_recorder_impl.cc',
'source/file_recorder_impl.h',
'source/helpers_android.cc',
+ 'source/jvm_android.cc',
'source/process_thread_impl.cc',
'source/process_thread_impl.h',
'source/rtp_dump_impl.cc',
diff --git a/webrtc/voice_engine/voice_engine_impl.cc b/webrtc/voice_engine/voice_engine_impl.cc
index cea5113..c626917 100644
--- a/webrtc/voice_engine/voice_engine_impl.cc
+++ b/webrtc/voice_engine/voice_engine_impl.cc
@@ -12,10 +12,7 @@
#include "webrtc/modules/audio_device/android/audio_device_template.h"
#include "webrtc/modules/audio_device/android/audio_record_jni.h"
#include "webrtc/modules/audio_device/android/audio_track_jni.h"
-#if !defined(WEBRTC_CHROMIUM_BUILD)
-#include "webrtc/modules/audio_device/android/opensles_input.h"
-#include "webrtc/modules/audio_device/android/opensles_output.h"
-#endif
+#include "webrtc/modules/utility/interface/jvm_android.h"
#endif
#include "webrtc/modules/audio_coding/main/interface/audio_coding_module.h"
@@ -144,19 +141,20 @@
}
#if !defined(WEBRTC_CHROMIUM_BUILD)
+// TODO(henrika): change types to JavaVM* and jobject instead of void*.
int VoiceEngine::SetAndroidObjects(void* javaVM, void* context) {
#ifdef WEBRTC_ANDROID
-#ifdef WEBRTC_ANDROID_OPENSLES
- typedef AudioDeviceTemplate<OpenSlesInput, OpenSlesOutput>
- AudioDeviceInstance;
-#else
- typedef AudioDeviceTemplate<AudioRecordJni, AudioTrackJni>
- AudioDeviceInstance;
-#endif
+ webrtc::JVM::Initialize(reinterpret_cast<JavaVM*>(javaVM),
+ reinterpret_cast<jobject>(context));
+  // The Android ADM implementation supports dynamic selection of the audio
+  // layer in both directions if a default audio layer is selected. Both
+  // Java-based audio backends are initialized here to ensure that the user
+  // can switch backends dynamically as well.
+ typedef AudioDeviceTemplate<AudioRecordJni, AudioTrackJni> AudioDevice;
if (javaVM && context) {
- AudioDeviceInstance::SetAndroidAudioDeviceObjects(javaVM, context);
+ AudioDevice::SetAndroidAudioDeviceObjects(javaVM, context);
} else {
- AudioDeviceInstance::ClearAndroidAudioDeviceObjects();
+ AudioDevice::ClearAndroidAudioDeviceObjects();
}
return 0;
#else
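With this change a typical embedder initializes everything once from JNI_OnLoad; obtaining the application context is app-specific and the helper below is hypothetical:

  extern "C" jint JNI_OnLoad(JavaVM* jvm, void* reserved) {
    JNIEnv* env = nullptr;
    if (jvm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK)
      return -1;
    jobject context = GetApplicationContext(env);  // hypothetical helper
    webrtc::VoiceEngine::SetAndroidObjects(jvm, context);
    return JNI_VERSION_1_6;
  }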
diff --git a/webrtc/webrtc_examples.gyp b/webrtc/webrtc_examples.gyp
index 35fc938..51a8265 100644
--- a/webrtc/webrtc_examples.gyp
+++ b/webrtc/webrtc_examples.gyp
@@ -90,68 +90,6 @@
},
],
},
- {
- 'target_name': 'libopensl-demo-jni',
- 'type': 'loadable_module',
- 'dependencies': [
- '<(webrtc_root)/modules/modules.gyp:audio_device',
- ],
- 'sources': [
- 'examples/android/opensl_loopback/jni/opensl_runner.cc',
- 'examples/android/opensl_loopback/fake_audio_device_buffer.cc',
- ],
- 'link_settings': {
- 'libraries': [
- '-llog',
- '-lOpenSLES',
- ],
- },
- },
- {
- 'target_name': 'OpenSlDemo',
- 'type': 'none',
- 'dependencies': [
- 'libopensl-demo-jni',
- '<(modules_java_gyp_path):*',
- ],
- 'actions': [
- {
- # TODO(henrik): Convert building of the demo to a proper GYP
- # target so this action is not needed once chromium's
- # apk-building machinery can be used. (crbug.com/225101)
- 'action_name': 'build_opensldemo_apk',
- 'variables': {
- 'android_opensl_demo_root': '<(webrtc_root)/examples/android/opensl_loopback',
- 'ant_log': '../../../<(INTERMEDIATE_DIR)/ant.log', # ../../.. to compensate for the cd below.
- },
- 'inputs' : [
- '<(PRODUCT_DIR)/lib.java/audio_device_module_java.jar',
- '<(PRODUCT_DIR)/libopensl-demo-jni.so',
- '<!@(find <(android_opensl_demo_root)/src -name "*.java")',
- '<!@(find <(android_opensl_demo_root)/res -name "*.xml")',
- '<!@(find <(android_opensl_demo_root)/res -name "*.png")',
- '<(android_opensl_demo_root)/AndroidManifest.xml',
- '<(android_opensl_demo_root)/build.xml',
- '<(android_opensl_demo_root)/project.properties',
- ],
- 'outputs': ['<(PRODUCT_DIR)/OpenSlDemo-debug.apk'],
- 'action': [
- 'bash', '-ec',
- 'rm -fr <(_outputs) <(android_opensl_demo_root)/{bin,libs,gen,obj} && '
- 'mkdir -p <(android_opensl_demo_root)/libs/<(android_app_abi) && '
- 'mkdir -p <(INTERMEDIATE_DIR) && ' # Must happen _before_ the cd below
- '<(android_strip) -o <(android_opensl_demo_root)/libs/<(android_app_abi)/libopensl-demo-jni.so <(PRODUCT_DIR)/libopensl-demo-jni.so && '
- 'cp <(PRODUCT_DIR)/lib.java/audio_device_module_java.jar <(android_opensl_demo_root)/libs/ &&'
- 'cd <(android_opensl_demo_root) && '
- '{ ANDROID_SDK_ROOT=<(android_sdk_root) '
- 'ant debug > <(ant_log) 2>&1 || '
- ' { cat <(ant_log) ; exit 1; } } && '
- 'cd - > /dev/null && '
- 'cp <(android_opensl_demo_root)/bin/OpenSlDemo-debug.apk <(_outputs)'
- ],
- },
- ],
- },
],
}],
],