blob: 3f4667dce4a17ff2cf15714b42a6d2b9723cd175 [file] [log] [blame]
/*
* libjingle
* Copyright 2004 Google Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_WEBRTC_VIDEO
#include "talk/media/webrtc/webrtcvideoengine.h"
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <math.h>
#include <set>
#include "talk/base/basictypes.h"
#include "talk/base/buffer.h"
#include "talk/base/byteorder.h"
#include "talk/base/common.h"
#include "talk/base/cpumonitor.h"
#include "talk/base/logging.h"
#include "talk/base/stringutils.h"
#include "talk/base/thread.h"
#include "talk/base/timeutils.h"
#include "talk/media/base/constants.h"
#include "talk/media/base/rtputils.h"
#include "talk/media/base/streamparams.h"
#include "talk/media/base/videoadapter.h"
#include "talk/media/base/videocapturer.h"
#include "talk/media/base/videorenderer.h"
#include "talk/media/devices/filevideocapturer.h"
#include "talk/media/webrtc/webrtcpassthroughrender.h"
#include "talk/media/webrtc/webrtctexturevideoframe.h"
#include "talk/media/webrtc/webrtcvideocapturer.h"
#include "talk/media/webrtc/webrtcvideodecoderfactory.h"
#include "talk/media/webrtc/webrtcvideoencoderfactory.h"
#include "talk/media/webrtc/webrtcvideoframe.h"
#include "talk/media/webrtc/webrtcvie.h"
#include "talk/media/webrtc/webrtcvoe.h"
#include "talk/media/webrtc/webrtcvoiceengine.h"
#if !defined(LIBPEERCONNECTION_LIB)
#ifndef HAVE_WEBRTC_VIDEO
#error Need webrtc video
#endif
#include "talk/media/webrtc/webrtcmediaengine.h"
// C entry point used when libjingle media is built as a separate shared
// library (non-LIBPEERCONNECTION_LIB builds). Creates a WebRtcMediaEngine
// wired up with the given audio device modules and the optional external
// video encoder/decoder factories. Ownership of the returned engine passes
// to the caller; release it with DestroyWebRtcMediaEngine().
WRME_EXPORT
cricket::MediaEngineInterface* CreateWebRtcMediaEngine(
    webrtc::AudioDeviceModule* adm, webrtc::AudioDeviceModule* adm_sc,
    cricket::WebRtcVideoEncoderFactory* encoder_factory,
    cricket::WebRtcVideoDecoderFactory* decoder_factory) {
  return new cricket::WebRtcMediaEngine(adm, adm_sc, encoder_factory,
                                        decoder_factory);
}
// Counterpart of CreateWebRtcMediaEngine(): destroys an engine previously
// returned by it. The static_cast restores the concrete type so the proper
// destructor runs even across the shared-library boundary.
WRME_EXPORT
void DestroyWebRtcMediaEngine(cricket::MediaEngineInterface* media_engine) {
  delete static_cast<cricket::WebRtcMediaEngine*>(media_engine);
}
#endif
namespace cricket {
static const int kDefaultLogSeverity = talk_base::LS_WARNING;

// Video bitrate bounds and starting point. Presumably kbps, matching the
// webrtc::VideoCodec bitrate fields they feed -- TODO confirm at use sites.
static const int kMinVideoBitrate = 50;
static const int kStartVideoBitrate = 300;
static const int kMaxVideoBitrate = 2000;
static const int kDefaultConferenceModeMaxVideoBitrate = 500;

// Maximum transport unit used for video RTP packets.
static const int kVideoMtu = 1200;
static const int kVideoRtpBufferSize = 65536;

// SDP payload names for the built-in codecs (VP8 plus RED/ULPFEC for FEC).
static const char kVp8PayloadName[] = "VP8";
static const char kRedPayloadName[] = "red";
static const char kFecPayloadName[] = "ulpfec";

static const int kDefaultNumberOfTemporalLayers = 1;  // 1:1

static const int kTimestampDeltaInSecondsForWarning = 2;

// External (factory-provided) codecs are assigned payload types starting at
// kExternalVideoPayloadTypeBase; at most kMaxExternalVideoCodecs slots.
static const int kMaxExternalVideoCodecs = 8;
static const int kExternalVideoPayloadTypeBase = 120;
// Maps the slot index of an external video codec to its statically
// allocated RTP payload type (kExternalVideoPayloadTypeBase + index).
static int GetExternalVideoPayloadType(int index) {
  ASSERT(0 <= index && index < kMaxExternalVideoCodecs);
  return index + kExternalVideoPayloadTypeBase;
}
// Logs |text| one line at a time at severity |sev|, splitting on CR/LF.
// NOTE: strtok modifies |text| in place (and is not thread-safe), so the
// caller must pass a writable buffer it owns.
static void LogMultiline(talk_base::LoggingSeverity sev, char* text) {
  const char* delim = "\r\n";
  // TODO(fbarchard): Fix strtok lint warning.
  for (char* tok = strtok(text, delim); tok; tok = strtok(NULL, delim)) {
    LOG_V(sev) << tok;
  }
}
// Severity is an integer because it is assumed to come from the command line.
// Maps a talk_base logging severity onto a webrtc trace filter bitmask.
// The switch cases intentionally fall through: a more verbose severity also
// enables every trace category of the less verbose ones below it.
static int SeverityToFilter(int severity) {
  int filter = webrtc::kTraceNone;
  switch (severity) {
    case talk_base::LS_VERBOSE:
      filter |= webrtc::kTraceAll;
      // Fall through.
    case talk_base::LS_INFO:
      filter |= (webrtc::kTraceStateInfo | webrtc::kTraceInfo);
      // Fall through.
    case talk_base::LS_WARNING:
      filter |= (webrtc::kTraceTerseInfo | webrtc::kTraceWarning);
      // Fall through.
    case talk_base::LS_ERROR:
      filter |= (webrtc::kTraceError | webrtc::kTraceCritical);
  }
  return filter;
}
// How often the CPU monitor samples load for adaptation decisions.
static const int kCpuMonitorPeriodMs = 2000;  // 2 seconds.

// Named constant for readability at call sites taking a "sending" bool.
static const bool kNotSending = false;

// Extension header for RTP timestamp offset, see RFC 5450 for details:
// http://tools.ietf.org/html/rfc5450
static const char kRtpTimestampOffsetHeaderExtension[] =
    "urn:ietf:params:rtp-hdrext:toffset";
static const int kRtpTimeOffsetExtensionId = 2;

// Extension header for absolute send time, see url for details:
// http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time
static const char kRtpAbsoluteSendTimeHeaderExtension[] =
    "http://www.webrtc.org/experiments/rtp-hdrext/abs-send-time";
static const int kRtpAbsoluteSendTimeExtensionId = 3;
static bool IsNackEnabled(const VideoCodec& codec) {
return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamNack,
kParamValueEmpty));
}
// Returns true if Receiver Estimated Max Bitrate is enabled.
static bool IsRembEnabled(const VideoCodec& codec) {
return codec.HasFeedbackParam(FeedbackParam(kRtcpFbParamRemb,
kParamValueEmpty));
}
// Message payload posted to the worker thread to request that a black frame
// with the given RTP timestamp be flushed for the stream identified by |ssrc|.
struct FlushBlackFrameData : public talk_base::MessageData {
  FlushBlackFrameData(uint32 s, int64 t) : ssrc(s), timestamp(t) {
  }
  uint32 ssrc;
  int64 timestamp;
};
// Bridges webrtc::ExternalRenderer callbacks (decoded frames delivered by a
// ViE channel) to a cricket::VideoRenderer. The renderer may be swapped at
// any time, and callbacks can arrive on a different thread than the setters,
// so all state is guarded by |crit_|.
class WebRtcRenderAdapter : public webrtc::ExternalRenderer {
 public:
  explicit WebRtcRenderAdapter(VideoRenderer* renderer)
      : renderer_(renderer), width_(0), height_(0), watermark_enabled_(false) {
  }
  virtual ~WebRtcRenderAdapter() {
  }
  void set_watermark_enabled(bool enable) {
    talk_base::CritScope cs(&crit_);
    watermark_enabled_ = enable;
  }
  // Attaches |renderer| and replays the last known frame size to it, since
  // FrameSizeChange may already have fired while no renderer was attached.
  void SetRenderer(VideoRenderer* renderer) {
    talk_base::CritScope cs(&crit_);
    renderer_ = renderer;
    // FrameSizeChange may have already been called when renderer was not set.
    // If so we should call SetSize here.
    // TODO(ronghuawu): Add unit test for this case. Didn't do it now
    // because the WebRtcRenderAdapter is currently hiding in cc file. No
    // good way to get access to it from the unit test.
    if (width_ > 0 && height_ > 0 && renderer_ != NULL) {
      if (!renderer_->SetSize(width_, height_, 0)) {
        LOG(LS_ERROR)
            << "WebRtcRenderAdapter SetRenderer failed to SetSize to: "
            << width_ << "x" << height_;
      }
    }
  }
  // Implementation of webrtc::ExternalRenderer. Returns 0 on success, -1 on
  // failure (the convention used throughout this interface).
  virtual int FrameSizeChange(unsigned int width, unsigned int height,
                              unsigned int /*number_of_streams*/) {
    talk_base::CritScope cs(&crit_);
    width_ = width;
    height_ = height;
    LOG(LS_INFO) << "WebRtcRenderAdapter frame size changed to: "
                 << width << "x" << height;
    if (renderer_ == NULL) {
      LOG(LS_VERBOSE) << "WebRtcRenderAdapter the renderer has not been set. "
                      << "SetSize will be called later in SetRenderer.";
      return 0;
    }
    return renderer_->SetSize(width_, height_, 0) ? 0 : -1;
  }
  // Dispatches a decoded frame either as a raw I420 buffer or as a native
  // texture handle. Returns 0 on success, -1 on failure.
  virtual int DeliverFrame(unsigned char* buffer, int buffer_size,
                           uint32_t time_stamp, int64_t render_time
                           , void* handle
                          ) {
    talk_base::CritScope cs(&crit_);
    frame_rate_tracker_.Update(1);
    if (renderer_ == NULL) {
      return 0;
    }
    // Convert 90K rtp timestamp to ns timestamp.
    int64 rtp_time_stamp_in_ns = (time_stamp / 90) *
        talk_base::kNumNanosecsPerMillisec;
    // Convert milisecond render time to ns timestamp.
    int64 render_time_stamp_in_ns = render_time *
        talk_base::kNumNanosecsPerMillisec;
    // Send the rtp timestamp to renderer as the VideoFrame timestamp.
    // and the render timestamp as the VideoFrame elapsed_time.
    if (handle == NULL) {
      return DeliverBufferFrame(buffer, buffer_size, render_time_stamp_in_ns,
                                rtp_time_stamp_in_ns);
    } else {
      return DeliverTextureFrame(handle, render_time_stamp_in_ns,
                                 rtp_time_stamp_in_ns);
    }
  }
  virtual bool IsTextureSupported() { return true; }
  // Wraps |buffer| (owned by the caller) in a WebRtcVideoFrame without
  // copying, renders it, then detaches so the frame's destructor does not
  // free the caller's buffer. Returns 0 on success, -1 on failure.
  int DeliverBufferFrame(unsigned char* buffer, int buffer_size,
                         int64 elapsed_time, int64 time_stamp) {
    WebRtcVideoFrame video_frame;
    video_frame.Attach(buffer, buffer_size, width_, height_,
                       1, 1, elapsed_time, time_stamp, 0);
    // Sanity check on decoded frame size.
    if (buffer_size != static_cast<int>(VideoFrame::SizeOf(width_, height_))) {
      LOG(LS_WARNING) << "WebRtcRenderAdapter received a strange frame size: "
                      << buffer_size;
    }
    int ret = renderer_->RenderFrame(&video_frame) ? 0 : -1;
    uint8* buffer_temp;
    size_t buffer_size_temp;
    video_frame.Detach(&buffer_temp, &buffer_size_temp);
    return ret;
  }
  // Renders a native texture frame. Returns 0 on success, -1 on failure.
  int DeliverTextureFrame(void* handle, int64 elapsed_time, int64 time_stamp) {
    WebRtcTextureVideoFrame video_frame(
        static_cast<webrtc::NativeHandle*>(handle), width_, height_,
        elapsed_time, time_stamp);
    // BUG FIX: RenderFrame returns a bool; returning it directly would map
    // success to 1, which the ExternalRenderer convention (see
    // DeliverBufferFrame above) treats as failure. Normalize to 0/-1.
    return renderer_->RenderFrame(&video_frame) ? 0 : -1;
  }
  unsigned int width() {
    talk_base::CritScope cs(&crit_);
    return width_;
  }
  unsigned int height() {
    talk_base::CritScope cs(&crit_);
    return height_;
  }
  // Rendered-frame rate estimate (frames per second).
  int framerate() {
    talk_base::CritScope cs(&crit_);
    return static_cast<int>(frame_rate_tracker_.units_second());
  }
  VideoRenderer* renderer() {
    talk_base::CritScope cs(&crit_);
    return renderer_;
  }

 private:
  talk_base::CriticalSection crit_;
  VideoRenderer* renderer_;  // Not owned; never deleted here.
  unsigned int width_;
  unsigned int height_;
  talk_base::RateTracker frame_rate_tracker_;
  bool watermark_enabled_;
};
// Collects decoder-side statistics for one ViE video channel: incoming
// frame rate and bitrate as reported by ViE, plus the number of key frame
// (FIR) requests issued by the decoder. All counters are guarded by |crit_|
// because ViE callbacks and stats getters may run on different threads.
class WebRtcDecoderObserver : public webrtc::ViEDecoderObserver {
 public:
  explicit WebRtcDecoderObserver(int video_channel)
      : video_channel_(video_channel),
        framerate_(0),
        bitrate_(0),
        firs_requested_(0) {
  }
  // virtual functions from VieDecoderObserver.
  virtual void IncomingCodecChanged(const int videoChannel,
                                    const webrtc::VideoCodec& videoCodec) {}
  virtual void IncomingRate(const int videoChannel,
                            const unsigned int framerate,
                            const unsigned int bitrate) {
    talk_base::CritScope cs(&crit_);
    ASSERT(video_channel_ == videoChannel);
    framerate_ = framerate;
    bitrate_ = bitrate;
  }
  // Invoked when the decoder needs a key frame; we only count the requests.
  virtual void RequestNewKeyFrame(const int videoChannel) {
    talk_base::CritScope cs(&crit_);
    ASSERT(video_channel_ == videoChannel);
    ++firs_requested_;
  }
  // Latest reported incoming frame rate.
  int framerate() const {
    talk_base::CritScope cs(&crit_);
    return framerate_;
  }
  // Latest reported incoming bitrate.
  int bitrate() const {
    talk_base::CritScope cs(&crit_);
    return bitrate_;
  }
  // Total number of key frame requests observed so far.
  int firs_requested() const {
    talk_base::CritScope cs(&crit_);
    return firs_requested_;
  }

 private:
  mutable talk_base::CriticalSection crit_;
  int video_channel_;
  int framerate_;
  int bitrate_;
  int firs_requested_;
};
// Send-side counterpart of WebRtcDecoderObserver: records the outgoing
// frame rate and bitrate that ViE reports for one channel. Every accessor
// takes |crit_| so callbacks and readers may run on different threads.
class WebRtcEncoderObserver : public webrtc::ViEEncoderObserver {
 public:
  explicit WebRtcEncoderObserver(int video_channel)
      : video_channel_(video_channel), framerate_(0), bitrate_(0) {}

  // webrtc::ViEEncoderObserver implementation.
  virtual void OutgoingRate(const int videoChannel,
                            const unsigned int framerate,
                            const unsigned int bitrate) {
    talk_base::CritScope lock(&crit_);
    ASSERT(videoChannel == video_channel_);
    bitrate_ = bitrate;
    framerate_ = framerate;
  }

  // Latest reported encode frame rate.
  int framerate() const {
    talk_base::CritScope lock(&crit_);
    return framerate_;
  }

  // Latest reported send bitrate.
  int bitrate() const {
    talk_base::CritScope lock(&crit_);
    return bitrate_;
  }

 private:
  mutable talk_base::CriticalSection crit_;
  int video_channel_;
  int framerate_;
  int bitrate_;
};
// Tracks the most recent locally captured frame (dimensions and timestamps)
// together with a running frame-rate estimate. Thread-safe via |crit_|;
// elapsed_time_/time_stamp_ start at -1 meaning "no frame seen yet".
class WebRtcLocalStreamInfo {
 public:
  WebRtcLocalStreamInfo()
      : width_(0), height_(0), elapsed_time_(-1), time_stamp_(-1) {}
  size_t width() const {
    talk_base::CritScope cs(&crit_);
    return width_;
  }
  size_t height() const {
    talk_base::CritScope cs(&crit_);
    return height_;
  }
  int64 elapsed_time() const {
    talk_base::CritScope cs(&crit_);
    return elapsed_time_;
  }
  int64 time_stamp() const {
    talk_base::CritScope cs(&crit_);
    return time_stamp_;
  }
  // Frame rate estimate (frames per second) based on UpdateFrame calls.
  int framerate() {
    talk_base::CritScope cs(&crit_);
    return static_cast<int>(rate_tracker_.units_second());
  }
  // Reads width, height and elapsed time atomically under one lock.
  void GetLastFrameInfo(
      size_t* width, size_t* height, int64* elapsed_time) const {
    talk_base::CritScope cs(&crit_);
    *width = width_;
    *height = height_;
    *elapsed_time = elapsed_time_;
  }
  // Records |frame|'s dimensions/timestamps and bumps the rate tracker.
  void UpdateFrame(const VideoFrame* frame) {
    talk_base::CritScope cs(&crit_);
    width_ = frame->GetWidth();
    height_ = frame->GetHeight();
    elapsed_time_ = frame->GetElapsedTime();
    time_stamp_ = frame->GetTimeStamp();
    rate_tracker_.Update(1);
  }

 private:
  mutable talk_base::CriticalSection crit_;
  size_t width_;
  size_t height_;
  int64 elapsed_time_;
  int64 time_stamp_;
  talk_base::RateTracker rate_tracker_;
  DISALLOW_COPY_AND_ASSIGN(WebRtcLocalStreamInfo);
};
// WebRtcVideoChannelRecvInfo is a container class with members such as renderer
// and a decoder observer that is used by receive channels.
// It must exist as long as the receive channel is connected to renderer or a
// decoder observer in this class and methods in the class should only be called
// from the worker thread.
class WebRtcVideoChannelRecvInfo {
 public:
  typedef std::map<int, webrtc::VideoDecoder*> DecoderMap;  // key: payload type
  explicit WebRtcVideoChannelRecvInfo(int channel_id)
      : channel_id_(channel_id),
        render_adapter_(NULL),  // Renderer attached later via SetRenderer.
        decoder_observer_(channel_id) {
  }
  int channel_id() { return channel_id_; }
  void SetRenderer(VideoRenderer* renderer) {
    render_adapter_.SetRenderer(renderer);
  }
  WebRtcRenderAdapter* render_adapter() { return &render_adapter_; }
  WebRtcDecoderObserver* decoder_observer() { return &decoder_observer_; }
  // Records an external decoder for |pl_type|; each payload type may be
  // registered at most once (asserted).
  void RegisterDecoder(int pl_type, webrtc::VideoDecoder* decoder) {
    ASSERT(!IsDecoderRegistered(pl_type));
    registered_decoders_[pl_type] = decoder;
  }
  bool IsDecoderRegistered(int pl_type) {
    return registered_decoders_.count(pl_type) != 0;
  }
  const DecoderMap& registered_decoders() {
    return registered_decoders_;
  }
  // Drops the bookkeeping only; decoder objects are not deleted here.
  void ClearRegisteredDecoders() {
    registered_decoders_.clear();
  }

 private:
  int channel_id_;  // Webrtc video channel number.
  // Renderer for this channel.
  WebRtcRenderAdapter render_adapter_;
  WebRtcDecoderObserver decoder_observer_;
  DecoderMap registered_decoders_;
};
// Forwards CPU overuse/underuse callbacks from webrtc's overuse detector to
// the CoordinatedVideoAdapter as resolution downgrade/upgrade requests.
// Callbacks are ignored until Enable(true) is called; the flag is guarded
// by |crit_| since callbacks and Enable may come from different threads.
class WebRtcOveruseObserver : public webrtc::CpuOveruseObserver {
 public:
  explicit WebRtcOveruseObserver(CoordinatedVideoAdapter* video_adapter)
      : video_adapter_(video_adapter),
        enabled_(false) {
  }
  // TODO(mflodman): Consider sending resolution as part of event, to let
  // adapter know what resolution the request is based on. Helps eliminate stale
  // data, race conditions.
  virtual void OveruseDetected() OVERRIDE {
    talk_base::CritScope cs(&crit_);
    if (!enabled_) {
      return;
    }
    video_adapter_->OnCpuResolutionRequest(CoordinatedVideoAdapter::DOWNGRADE);
  }
  virtual void NormalUsage() OVERRIDE {
    talk_base::CritScope cs(&crit_);
    if (!enabled_) {
      return;
    }
    video_adapter_->OnCpuResolutionRequest(CoordinatedVideoAdapter::UPGRADE);
  }
  void Enable(bool enable) {
    talk_base::CritScope cs(&crit_);
    enabled_ = enable;
  }

 private:
  CoordinatedVideoAdapter* video_adapter_;  // Not owned.
  bool enabled_;
  talk_base::CriticalSection crit_;
};
// Per-send-stream state for a WebRTC video send channel. Bundles the ViE
// channel/capture ids, the attached VideoCapturer, the encoder statistics
// observer, the CoordinatedVideoAdapter that drives CPU/bandwidth
// adaptation, and any externally registered encoders. Methods should only
// be called from the worker thread.
class WebRtcVideoChannelSendInfo : public sigslot::has_slots<> {
 public:
  typedef std::map<int, webrtc::VideoEncoder*> EncoderMap;  // key: payload type
  WebRtcVideoChannelSendInfo(int channel_id, int capture_id,
                             webrtc::ViEExternalCapture* external_capture,
                             talk_base::CpuMonitor* cpu_monitor)
      : channel_id_(channel_id),
        capture_id_(capture_id),
        sending_(false),
        muted_(false),
        video_capturer_(NULL),
        encoder_observer_(channel_id),
        external_capture_(external_capture),
        capturer_updated_(false),
        interval_(0),
        video_adapter_(new CoordinatedVideoAdapter),
        cpu_monitor_(cpu_monitor) {
    overuse_observer_.reset(new WebRtcOveruseObserver(video_adapter_.get()));
    // Re-emit the adapter's "cannot adapt down any further" signal as ours.
    SignalCpuAdaptationUnable.repeat(video_adapter_->SignalCpuAdaptationUnable);
    if (cpu_monitor) {
      cpu_monitor->SignalUpdate.connect(
          video_adapter_.get(), &CoordinatedVideoAdapter::OnCpuLoadUpdated);
    }
  }
  int channel_id() const { return channel_id_; }
  int capture_id() const { return capture_id_; }
  void set_sending(bool sending) { sending_ = sending; }
  bool sending() const { return sending_; }
  // Only records the flag; the actual black-frame substitution happens in
  // ProcessFrame() below.
  void set_muted(bool on) {
    // TODO(asapersson): add support.
    // video_adapter_->SetBlackOutput(on);
    muted_ = on;
  }
  bool muted() { return muted_; }
  WebRtcEncoderObserver* encoder_observer() { return &encoder_observer_; }
  webrtc::ViEExternalCapture* external_capture() { return external_capture_; }
  const VideoFormat& video_format() const {
    return video_format_;
  }
  // Sets the negotiated send format and forwards it to the video adapter as
  // the requested output format. A non-default format also pins |interval_|.
  void set_video_format(const VideoFormat& video_format) {
    video_format_ = video_format;
    if (video_format_ != cricket::VideoFormat()) {
      interval_ = video_format_.interval;
    }
    video_adapter_->OnOutputFormatRequest(video_format_);
  }
  // Fallback frame interval; only honored while no explicit video format is
  // set (set_video_format() takes precedence).
  void set_interval(int64 interval) {
    if (video_format() == cricket::VideoFormat()) {
      interval_ = interval;
    }
  }
  int64 interval() { return interval_; }
  // Seeds the adapter's output format from |codec|, unless an output format
  // has already been chosen.
  void InitializeAdapterOutputFormat(const webrtc::VideoCodec& codec) {
    VideoFormat format(codec.width, codec.height,
                       VideoFormat::FpsToInterval(codec.maxFramerate),
                       FOURCC_I420);
    if (video_adapter_->output_format().IsSize0x0()) {
      video_adapter_->SetOutputFormat(format);
    }
  }
  // Runs |in_frame| through the video adapter; on return |*out_frame| is the
  // adapted frame, or NULL if the adapter dropped it.
  bool AdaptFrame(const VideoFrame* in_frame, const VideoFrame** out_frame) {
    *out_frame = NULL;
    return video_adapter_->AdaptFrame(in_frame, out_frame);
  }
  int CurrentAdaptReason() const {
    return video_adapter_->adapt_reason();
  }
  webrtc::CpuOveruseObserver* overuse_observer() {
    return overuse_observer_.get();
  }
  StreamParams* stream_params() { return stream_params_.get(); }
  void set_stream_params(const StreamParams& sp) {
    stream_params_.reset(new StreamParams(sp));
  }
  void ClearStreamParams() { stream_params_.reset(); }
  bool has_ssrc(uint32 local_ssrc) const {
    return !stream_params_ ? false :
        stream_params_->has_ssrc(local_ssrc);
  }
  WebRtcLocalStreamInfo* local_stream_info() {
    return &local_stream_info_;
  }
  VideoCapturer* video_capturer() {
    return video_capturer_;
  }
  // Attaches (or detaches, with NULL) the capturer feeding this stream and,
  // for non-screencast capturers, primes the adapter's input format.
  void set_video_capturer(VideoCapturer* video_capturer) {
    if (video_capturer == video_capturer_) {
      return;
    }
    capturer_updated_ = true;
    video_capturer_ = video_capturer;
    if (video_capturer && !video_capturer->IsScreencast()) {
      const VideoFormat* capture_format = video_capturer->GetCaptureFormat();
      if (capture_format) {
        // TODO(thorcarpenter): This is broken. Video capturer doesn't have
        // a capture format until the capturer is started. So, if
        // the capturer is started immediately after calling set_video_capturer
        // video adapter may not have the input format set, the interval may
        // be zero, and all frames may be dropped.
        // Consider fixing this by having video_adapter keep a pointer to the
        // video capturer.
        video_adapter_->SetInputFormat(*capture_format);
      }
    }
  }
  // Pushes CPU-adaptation settings from |options| into the video adapter;
  // options that are unset leave the adapter's current values untouched.
  void ApplyCpuOptions(const VideoOptions& options) {
    bool cpu_adapt, cpu_smoothing, adapt_third;
    float low, med, high;
    if (options.adapt_input_to_cpu_usage.Get(&cpu_adapt)) {
      video_adapter_->set_cpu_adaptation(cpu_adapt);
    }
    if (options.adapt_cpu_with_smoothing.Get(&cpu_smoothing)) {
      video_adapter_->set_cpu_smoothing(cpu_smoothing);
    }
    if (options.process_adaptation_threshhold.Get(&med)) {
      video_adapter_->set_process_threshold(med);
    }
    if (options.system_low_adaptation_threshhold.Get(&low)) {
      video_adapter_->set_low_system_threshold(low);
    }
    if (options.system_high_adaptation_threshhold.Get(&high)) {
      video_adapter_->set_high_system_threshold(high);
    }
    if (options.video_adapt_third.Get(&adapt_third)) {
      video_adapter_->set_scale_third(adapt_third);
    }
  }
  // Switches to overuse-observer-driven adaptation when |enable| is true,
  // disconnecting the CPU monitor so only one source drives the adapter.
  // NOTE(review): disabling does not reconnect the CPU monitor's
  // SignalUpdate -- confirm this asymmetry is intended.
  void SetCpuOveruseDetection(bool enable) {
    if (cpu_monitor_ && enable) {
      cpu_monitor_->SignalUpdate.disconnect(video_adapter_.get());
    }
    overuse_observer_->Enable(enable);
    video_adapter_->set_cpu_adaptation(enable);
  }
  // Copies |original_frame| into |*processed_frame| -- or substitutes a
  // black frame with the same size and timestamps when |mute| is set -- and
  // updates the local stream statistics. Caller owns the returned frame.
  void ProcessFrame(const VideoFrame& original_frame, bool mute,
                    VideoFrame** processed_frame) {
    if (!mute) {
      *processed_frame = original_frame.Copy();
    } else {
      WebRtcVideoFrame* black_frame = new WebRtcVideoFrame();
      black_frame->InitToBlack(static_cast<int>(original_frame.GetWidth()),
                               static_cast<int>(original_frame.GetHeight()),
                               1, 1,
                               original_frame.GetElapsedTime(),
                               original_frame.GetTimeStamp());
      *processed_frame = black_frame;
    }
    local_stream_info_.UpdateFrame(*processed_frame);
  }
  // Records an external encoder for |pl_type|; each payload type may be
  // registered at most once (asserted).
  void RegisterEncoder(int pl_type, webrtc::VideoEncoder* encoder) {
    ASSERT(!IsEncoderRegistered(pl_type));
    registered_encoders_[pl_type] = encoder;
  }
  bool IsEncoderRegistered(int pl_type) {
    return registered_encoders_.count(pl_type) != 0;
  }
  const EncoderMap& registered_encoders() {
    return registered_encoders_;
  }
  // Drops the bookkeeping only; encoder objects are not deleted here.
  void ClearRegisteredEncoders() {
    registered_encoders_.clear();
  }

  // Fired (via the adapter) when resolution cannot be downgraded further.
  sigslot::repeater0<> SignalCpuAdaptationUnable;

 private:
  int channel_id_;  // ViE channel id for this send stream.
  int capture_id_;  // ViE external capture id feeding the channel.
  bool sending_;
  bool muted_;
  VideoCapturer* video_capturer_;  // Not owned (no destructor deletes it).
  WebRtcEncoderObserver encoder_observer_;
  webrtc::ViEExternalCapture* external_capture_;  // Not owned.
  EncoderMap registered_encoders_;
  VideoFormat video_format_;
  talk_base::scoped_ptr<StreamParams> stream_params_;
  WebRtcLocalStreamInfo local_stream_info_;
  bool capturer_updated_;
  int64 interval_;
  talk_base::scoped_ptr<CoordinatedVideoAdapter> video_adapter_;
  talk_base::CpuMonitor* cpu_monitor_;  // Not owned.
  talk_base::scoped_ptr<WebRtcOveruseObserver> overuse_observer_;
};
// Built-in codecs in order of preference. Fields appear to be
// {payload name, payload type, preference rank} -- see the VideoCodecPref
// declaration in the header for the authoritative field names.
const WebRtcVideoEngine::VideoCodecPref
    WebRtcVideoEngine::kVideoCodecPrefs[] = {
  {kVp8PayloadName, 100, 0},
  {kRedPayloadName, 116, 1},
  {kFecPayloadName, 117, 2},
};
// The formats are sorted by the descending order of width. We use the order to
// find the next format for CPU and bandwidth adaptation.
// Within each width the 16:10, 16:9 and 4:3 variants are listed.
const VideoFormatPod WebRtcVideoEngine::kVideoFormats[] = {
  {1280, 800, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {1280, 720, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {960, 600, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {960, 540, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {640, 400, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {640, 360, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {640, 480, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {480, 300, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {480, 270, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {480, 360, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {320, 200, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {320, 180, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {320, 240, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {240, 150, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {240, 135, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {240, 180, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {160, 100, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {160, 90, FPS_TO_INTERVAL(30), FOURCC_ANY},
  {160, 120, FPS_TO_INTERVAL(30), FOURCC_ANY},
};

// Default send format used until one is negotiated (WVGA 16:10 at 30 fps).
const VideoFormatPod WebRtcVideoEngine::kDefaultVideoFormat =
    {640, 400, FPS_TO_INTERVAL(30), FOURCC_ANY};
// Copies the dimensions and frame rate of |video_format| into
// |target_codec|. A NULL codec or a default-constructed (empty) format is
// ignored, leaving the codec untouched.
static void UpdateVideoCodec(const cricket::VideoFormat& video_format,
                             webrtc::VideoCodec* target_codec) {
  if (target_codec == NULL || video_format == cricket::VideoFormat()) {
    return;
  }
  target_codec->width = video_format.width;
  target_codec->height = video_format.height;
  target_codec->maxFramerate =
      cricket::VideoFormat::IntervalToFps(video_format.interval);
}
// Default construction: creates its own ViE wrapper, trace wrapper and CPU
// monitor, with no associated voice engine (so no A/V sync).
WebRtcVideoEngine::WebRtcVideoEngine() {
  Construct(new ViEWrapper(), new ViETraceWrapper(), NULL,
            new talk_base::CpuMonitor(NULL));
}
// Construction with an injected ViE wrapper and CPU monitor (a default
// trace wrapper is created); |voice_engine| enables A/V sync when non-NULL.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     ViEWrapper* vie_wrapper,
                                     talk_base::CpuMonitor* cpu_monitor) {
  Construct(vie_wrapper, new ViETraceWrapper(), voice_engine, cpu_monitor);
}
// Fully injected construction: all collaborators (ViE wrapper, trace
// wrapper, voice engine, CPU monitor) are supplied by the caller.
WebRtcVideoEngine::WebRtcVideoEngine(WebRtcVoiceEngine* voice_engine,
                                     ViEWrapper* vie_wrapper,
                                     ViETraceWrapper* tracing,
                                     talk_base::CpuMonitor* cpu_monitor) {
  Construct(vie_wrapper, tracing, voice_engine, cpu_monitor);
}
// Shared constructor body: takes ownership of |vie_wrapper|, |tracing| and
// |cpu_monitor|, installs trace settings, builds the default codec list and
// registers the supported RTP header extensions. Does not touch the
// underlying VideoEngine yet -- that happens in Init()/InitVideoEngine().
void WebRtcVideoEngine::Construct(ViEWrapper* vie_wrapper,
                                  ViETraceWrapper* tracing,
                                  WebRtcVoiceEngine* voice_engine,
                                  talk_base::CpuMonitor* cpu_monitor) {
  LOG(LS_INFO) << "WebRtcVideoEngine::WebRtcVideoEngine";
  worker_thread_ = NULL;
  vie_wrapper_.reset(vie_wrapper);
  vie_wrapper_base_initialized_ = false;
  tracing_.reset(tracing);
  voice_engine_ = voice_engine;
  initialized_ = false;
  SetTraceFilter(SeverityToFilter(kDefaultLogSeverity));
  render_module_.reset(new WebRtcPassthroughRender());
  local_renderer_w_ = local_renderer_h_ = 0;
  local_renderer_ = NULL;
  capture_started_ = false;
  decoder_factory_ = NULL;
  encoder_factory_ = NULL;
  cpu_monitor_.reset(cpu_monitor);
  SetTraceOptions("");
  if (tracing_->SetTraceCallback(this) != 0) {
    LOG_RTCERR1(SetTraceCallback, this);
  }
  // Set default quality levels for our supported codecs. We override them
  // here if we know the cpu performance is low, and they can be updated
  // explicitly by calling SetDefaultCodec. For example by a flute preference
  // setting, or by the server with a jec in response to our reported system
  // info.
  VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
                       kVideoCodecPrefs[0].name,
                       kDefaultVideoFormat.width,
                       kDefaultVideoFormat.height,
                       VideoFormat::IntervalToFps(kDefaultVideoFormat.interval),
                       0);
  if (!SetDefaultCodec(max_codec)) {
    LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
  }
  // Load our RTP Header extensions.
  rtp_header_extensions_.push_back(
      RtpHeaderExtension(kRtpTimestampOffsetHeaderExtension,
                         kRtpTimeOffsetExtensionId));
  rtp_header_extensions_.push_back(
      RtpHeaderExtension(kRtpAbsoluteSendTimeHeaderExtension,
                         kRtpAbsoluteSendTimeExtensionId));
}
// Tears down the engine: terminates the VideoEngine if still initialized,
// unhooks this object from the encoder factory and the trace callback.
WebRtcVideoEngine::~WebRtcVideoEngine() {
  LOG(LS_INFO) << "WebRtcVideoEngine::~WebRtcVideoEngine";
  if (initialized_) {
    Terminate();
  }
  if (encoder_factory_) {
    encoder_factory_->RemoveObserver(this);
  }
  tracing_->SetTraceCallback(NULL);
  // Test to see if the media processor was deregistered properly.
  ASSERT(SignalMediaFrame.is_empty());
}
// Initializes the engine on |worker_thread|. Starts the CPU monitor (a
// failure there is non-fatal: the monitor is dropped and the engine runs
// without CPU-load adaptation) and then brings up the VideoEngine proper,
// rolling back with Terminate() on failure. Returns true on success.
bool WebRtcVideoEngine::Init(talk_base::Thread* worker_thread) {
  LOG(LS_INFO) << "WebRtcVideoEngine::Init";
  worker_thread_ = worker_thread;
  ASSERT(worker_thread_ != NULL);
  cpu_monitor_->set_thread(worker_thread_);
  if (!cpu_monitor_->Start(kCpuMonitorPeriodMs)) {
    LOG(LS_ERROR) << "Failed to start CPU monitor.";
    cpu_monitor_.reset();
  }
  bool result = InitVideoEngine();
  if (result) {
    LOG(LS_INFO) << "VideoEngine Init done";
  } else {
    LOG(LS_ERROR) << "VideoEngine Init failed, releasing";
    Terminate();
  }
  return result;
}
// Brings up the underlying webrtc VideoEngine: initializes the base API
// (once), logs its version, hooks up the voice engine for A/V sync when
// available, and registers our pass-through render module. Returns false on
// any unrecoverable ViE error.
bool WebRtcVideoEngine::InitVideoEngine() {
  LOG(LS_INFO) << "WebRtcVideoEngine::InitVideoEngine";
  // Init WebRTC VideoEngine.
  if (!vie_wrapper_base_initialized_) {
    if (vie_wrapper_->base()->Init() != 0) {
      LOG_RTCERR0(Init);
      return false;
    }
    vie_wrapper_base_initialized_ = true;
  }
  // Log the VideoEngine version info.
  char buffer[1024] = "";
  if (vie_wrapper_->base()->GetVersion(buffer) != 0) {
    LOG_RTCERR0(GetVersion);
    return false;
  }
  LOG(LS_INFO) << "WebRtc VideoEngine Version:";
  LogMultiline(talk_base::LS_INFO, buffer);
  // Hook up to VoiceEngine for sync purposes, if supplied.
  if (!voice_engine_) {
    LOG(LS_WARNING) << "NULL voice engine";
  } else if ((vie_wrapper_->base()->SetVoiceEngine(
      voice_engine_->voe()->engine())) != 0) {
    LOG_RTCERR0(SetVoiceEngine);
    return false;
  }
  // Register our custom render module.
  if (vie_wrapper_->render()->RegisterVideoRenderModule(
      *render_module_.get()) != 0) {
    LOG_RTCERR0(RegisterVideoRenderModule);
    return false;
  }
  initialized_ = true;
  return true;
}
// Reverses InitVideoEngine(): deregisters the render module, detaches the
// voice engine and stops the CPU monitor. ViE errors are logged but not
// propagated since teardown should proceed regardless.
void WebRtcVideoEngine::Terminate() {
  LOG(LS_INFO) << "WebRtcVideoEngine::Terminate";
  initialized_ = false;
  if (vie_wrapper_->render()->DeRegisterVideoRenderModule(
      *render_module_.get()) != 0) {
    LOG_RTCERR0(DeRegisterVideoRenderModule);
  }
  if (vie_wrapper_->base()->SetVoiceEngine(NULL) != 0) {
    LOG_RTCERR0(SetVoiceEngine);
  }
  cpu_monitor_->Stop();
}
// This engine supports both sending and receiving video.
int WebRtcVideoEngine::GetCapabilities() {
  return VIDEO_RECV | VIDEO_SEND;
}
// Engine-level options are currently ignored (per-channel options are
// handled elsewhere); always reports success.
bool WebRtcVideoEngine::SetOptions(const VideoOptions &options) {
  return true;
}
// Applies |config|'s max codec as the new default codec (rebuilding the
// supported-codec list). Returns false if the codec is not supported.
bool WebRtcVideoEngine::SetDefaultEncoderConfig(
    const VideoEncoderConfig& config) {
  return SetDefaultCodec(config.max_codec);
}
// Returns an encoder config whose max codec combines the top preferred
// codec's name/payload type with the current default dimensions and frame
// rate (the first entry of the rebuilt codec list).
VideoEncoderConfig WebRtcVideoEngine::GetDefaultEncoderConfig() const {
  ASSERT(!video_codecs_.empty());
  VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
                       kVideoCodecPrefs[0].name,
                       video_codecs_[0].width,
                       video_codecs_[0].height,
                       video_codecs_[0].framerate,
                       0);
  return VideoEncoderConfig(max_codec);
}
// SetDefaultCodec may be called while the capturer is running. For example, a
// test call is started in a page with QVGA default codec, and then a real call
// is started in another page with VGA default codec. This is the corner case
// and happens only when a session is started. We ignore this case currently.
// On success, also caches the new default format (size/fps of the best
// codec) in |default_codec_format_|.
bool WebRtcVideoEngine::SetDefaultCodec(const VideoCodec& codec) {
  if (!RebuildCodecList(codec)) {
    LOG(LS_WARNING) << "Failed to RebuildCodecList";
    return false;
  }
  ASSERT(!video_codecs_.empty());
  default_codec_format_ = VideoFormat(
      video_codecs_[0].width,
      video_codecs_[0].height,
      VideoFormat::FpsToInterval(video_codecs_[0].framerate),
      FOURCC_ANY);
  return true;
}
// Creates and initializes a new video media channel, optionally paired with
// |voice_channel| for A/V sync. Returns NULL if initialization fails; on
// success the caller owns the returned channel.
WebRtcVideoMediaChannel* WebRtcVideoEngine::CreateChannel(
    VoiceMediaChannel* voice_channel) {
  WebRtcVideoMediaChannel* channel =
      new WebRtcVideoMediaChannel(this, voice_channel);
  if (channel->Init()) {
    return channel;
  }
  delete channel;
  return NULL;
}
// Installs |renderer| for local preview and clears the cached preview
// dimensions so the next frame re-triggers a size notification.
bool WebRtcVideoEngine::SetLocalRenderer(VideoRenderer* renderer) {
  local_renderer_ = renderer;
  local_renderer_w_ = 0;
  local_renderer_h_ = 0;
  return true;
}
// Returns the current list of supported codecs (built by RebuildCodecList).
const std::vector<VideoCodec>& WebRtcVideoEngine::codecs() const {
  return video_codecs_;
}
// Returns the RTP header extensions registered in Construct() (timestamp
// offset and absolute send time).
const std::vector<RtpHeaderExtension>&
WebRtcVideoEngine::rtp_header_extensions() const {
  return rtp_header_extensions_;
}
// Updates the webrtc trace filter and options. Passing min_sev == -1 keeps
// the current trace level and only applies |filter|.
void WebRtcVideoEngine::SetLogging(int min_sev, const char* filter) {
  // if min_sev == -1, we keep the current log level.
  if (min_sev >= 0) {
    SetTraceFilter(SeverityToFilter(min_sev));
  }
  SetTraceOptions(filter);
}
// Returns the last error reported by the underlying ViE wrapper.
int WebRtcVideoEngine::GetLastEngineError() {
  return vie_wrapper_->error();
}
// Checks to see whether we comprehend and could receive a particular codec.
// A 0x0 requested size acts as a wildcard that matches any supported format;
// otherwise the size must exactly match one of kVideoFormats. Both codecs
// from the external encoder factory and the built-in preference list are
// consulted.
bool WebRtcVideoEngine::FindCodec(const VideoCodec& in) {
  for (int i = 0; i < ARRAY_SIZE(kVideoFormats); ++i) {
    const VideoFormat fmt(kVideoFormats[i]);
    if ((in.width == 0 && in.height == 0) ||
        (fmt.width == in.width && fmt.height == in.height)) {
      if (encoder_factory_) {
        const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
            encoder_factory_->codecs();
        for (size_t j = 0; j < codecs.size(); ++j) {
          VideoCodec codec(GetExternalVideoPayloadType(static_cast<int>(j)),
                           codecs[j].name, 0, 0, 0, 0);
          if (codec.Matches(in))
            return true;
        }
      }
      for (size_t j = 0; j < ARRAY_SIZE(kVideoCodecPrefs); ++j) {
        VideoCodec codec(kVideoCodecPrefs[j].payload_type,
                         kVideoCodecPrefs[j].name, 0, 0, 0, 0);
        if (codec.Matches(in)) {
          return true;
        }
      }
    }
  }
  return false;
}
// Given the requested codec, returns true if we can send that codec type and
// updates out with the best quality we could send for that codec. If current is
// not empty, we constrain out so that its aspect ratio matches current's.
// A requested size of 0x0 is a valid "send no frames" request; 0xN / Nx0 are
// rejected as invalid.
bool WebRtcVideoEngine::CanSendCodec(const VideoCodec& requested,
                                     const VideoCodec& current,
                                     VideoCodec* out) {
  if (!out) {
    return false;
  }
  std::vector<VideoCodec>::const_iterator local_max;
  for (local_max = video_codecs_.begin();
       local_max < video_codecs_.end();
       ++local_max) {
    // First match codecs by payload type
    if (!requested.Matches(*local_max)) {
      continue;
    }
    out->id = requested.id;
    out->name = requested.name;
    out->preference = requested.preference;
    // Note: |params| was previously assigned twice; the redundant duplicate
    // assignment has been removed.
    out->params = requested.params;
    out->framerate = talk_base::_min(requested.framerate, local_max->framerate);
    out->width = 0;
    out->height = 0;
    out->feedback_params = requested.feedback_params;
    if (0 == requested.width && 0 == requested.height) {
      // Special case with resolution 0. The channel should not send frames.
      return true;
    } else if (0 == requested.width || 0 == requested.height) {
      // 0xn and nx0 are invalid resolutions.
      return false;
    }
    // Pick the best quality that is within their and our bounds and has the
    // correct aspect ratio.
    for (int j = 0; j < ARRAY_SIZE(kVideoFormats); ++j) {
      const VideoFormat format(kVideoFormats[j]);
      // Skip any format that is larger than the local or remote maximums, or
      // smaller than the current best match
      if (format.width > requested.width || format.height > requested.height ||
          format.width > local_max->width ||
          (format.width < out->width && format.height < out->height)) {
        continue;
      }
      bool better = false;
      // Check any further constraints on this prospective format
      if (!out->width || !out->height) {
        // If we don't have any matches yet, this is the best so far.
        better = true;
      } else if (current.width && current.height) {
        // current is set so format must match its ratio exactly.
        better =
            (format.width * current.height == format.height * current.width);
      } else {
        // Prefer closer aspect ratios i.e
        // format.aspect - requested.aspect < out.aspect - requested.aspect
        // Both sides are cross-multiplied to a common scale so the comparison
        // is done in integers, without division.
        better = abs(format.width * requested.height * out->height -
                     requested.width * format.height * out->height) <
                 abs(out->width * format.height * requested.height -
                     requested.width * format.height * out->height);
      }
      if (better) {
        out->width = format.width;
        out->height = format.height;
      }
    }
    if (out->width > 0) {
      return true;
    }
  }
  return false;
}
// Copies the relevant fields of a webrtc::VideoCodec into a cricket
// VideoCodec, including min/max bitrate and (if set) max quantization.
static void ConvertToCricketVideoCodec(
    const webrtc::VideoCodec& in_codec, VideoCodec* out_codec) {
  out_codec->name = in_codec.plName;
  out_codec->id = in_codec.plType;
  out_codec->width = in_codec.width;
  out_codec->height = in_codec.height;
  out_codec->framerate = in_codec.maxFramerate;
  out_codec->SetParam(kCodecParamMinBitrate, in_codec.minBitrate);
  out_codec->SetParam(kCodecParamMaxBitrate, in_codec.maxBitrate);
  if (in_codec.qpMax != 0) {
    out_codec->SetParam(kCodecParamMaxQuantization, in_codec.qpMax);
  }
}
// Maps a cricket VideoCodec onto a webrtc::VideoCodec. Returns false when the
// codec name is unknown to both the built-in engine and the external encoder
// factory, or when the bitrate/quantization parameters are inconsistent.
bool WebRtcVideoEngine::ConvertFromCricketVideoCodec(
    const VideoCodec& in_codec, webrtc::VideoCodec* out_codec) {
  bool found = false;
  int ncodecs = vie_wrapper_->codec()->NumberOfCodecs();
  // Look for a built-in codec whose name matches case-insensitively.
  // GetCodec() fills |out_codec| with that codec's default settings.
  for (int i = 0; i < ncodecs; ++i) {
    if (vie_wrapper_->codec()->GetCodec(i, *out_codec) == 0 &&
        _stricmp(in_codec.name.c_str(), out_codec->plName) == 0) {
      found = true;
      break;
    }
  }
  // If not found, check if this is supported by external encoder factory.
  if (!found && encoder_factory_) {
    const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
        encoder_factory_->codecs();
    for (size_t i = 0; i < codecs.size(); ++i) {
      if (_stricmp(in_codec.name.c_str(), codecs[i].name.c_str()) == 0) {
        out_codec->codecType = codecs[i].type;
        // External codecs get a synthesized payload type based on their index.
        out_codec->plType = GetExternalVideoPayloadType(static_cast<int>(i));
        talk_base::strcpyn(out_codec->plName, sizeof(out_codec->plName),
                           codecs[i].name.c_str(), codecs[i].name.length());
        found = true;
        break;
      }
    }
  }
  if (!found) {
    LOG(LS_ERROR) << "invalid codec type";
    return false;
  }
  // Override the defaults with any fields explicitly set in |in_codec|;
  // zero means "not specified" for each of these fields.
  if (in_codec.id != 0)
    out_codec->plType = in_codec.id;
  if (in_codec.width != 0)
    out_codec->width = in_codec.width;
  if (in_codec.height != 0)
    out_codec->height = in_codec.height;
  if (in_codec.framerate != 0)
    out_codec->maxFramerate = in_codec.framerate;
  // Convert bitrate parameters.
  int max_bitrate = kMaxVideoBitrate;
  int min_bitrate = kMinVideoBitrate;
  int start_bitrate = kStartVideoBitrate;
  in_codec.GetParam(kCodecParamMinBitrate, &min_bitrate);
  in_codec.GetParam(kCodecParamMaxBitrate, &max_bitrate);
  if (max_bitrate < min_bitrate) {
    return false;
  }
  // Clamp the start bitrate into [min_bitrate, max_bitrate].
  start_bitrate = talk_base::_max(start_bitrate, min_bitrate);
  start_bitrate = talk_base::_min(start_bitrate, max_bitrate);
  out_codec->minBitrate = min_bitrate;
  out_codec->startBitrate = start_bitrate;
  out_codec->maxBitrate = max_bitrate;
  // Convert general codec parameters.
  int max_quantization = 0;
  if (in_codec.GetParam(kCodecParamMaxQuantization, &max_quantization)) {
    if (max_quantization < 0) {
      return false;
    }
    out_codec->qpMax = max_quantization;
  }
  return true;
}
// Adds |channel| to the engine's channel list. Guarded by |channels_crit_|.
void WebRtcVideoEngine::RegisterChannel(WebRtcVideoMediaChannel *channel) {
  talk_base::CritScope cs(&channels_crit_);
  channels_.push_back(channel);
}
// Removes |channel| from the engine's channel list (erase-remove idiom).
// Guarded by |channels_crit_|.
void WebRtcVideoEngine::UnregisterChannel(WebRtcVideoMediaChannel *channel) {
  talk_base::CritScope cs(&channels_crit_);
  channels_.erase(std::remove(channels_.begin(), channels_.end(), channel),
                  channels_.end());
}
// Stores the associated voice engine. Must be called before Init();
// returns false (and leaves the current engine) once initialized.
bool WebRtcVideoEngine::SetVoiceEngine(WebRtcVoiceEngine* voice_engine) {
  if (initialized_) {
    LOG(LS_WARNING) << "SetVoiceEngine can not be called after Init";
    return false;
  }
  voice_engine_ = voice_engine;
  return true;
}
// Creates an external render module for timed rendering. Must be called
// before Init(); returns false once initialized.
bool WebRtcVideoEngine::EnableTimedRender() {
  if (initialized_) {
    LOG(LS_WARNING) << "EnableTimedRender can not be called after Init";
    return false;
  }
  render_module_.reset(webrtc::VideoRender::CreateVideoRender(0, NULL,
      false, webrtc::kRenderExternal));
  return true;
}
// Forwards the trace-level filter to the WebRTC tracing wrapper.
void WebRtcVideoEngine::SetTraceFilter(int filter) {
  tracing_->SetTraceFilter(filter);
}
// See https://sites.google.com/a/google.com/wavelet/
// Home/Magic-Flute--RTC-Engine-/Magic-Flute-Command-Line-Parameters
// for all supported command line setttings.
void WebRtcVideoEngine::SetTraceOptions(const std::string& options) {
// Set WebRTC trace file.
std::vector<std::string> opts;
talk_base::tokenize(options, ' ', '"', '"', &opts);
std::vector<std::string>::iterator tracefile =
std::find(opts.begin(), opts.end(), "tracefile");
if (tracefile != opts.end() && ++tracefile != opts.end()) {
// Write WebRTC debug output (at same loglevel) to file
if (tracing_->SetTraceFile(tracefile->c_str()) == -1) {
LOG_RTCERR1(SetTraceFile, *tracefile);
}
}
}
// Attaches the standard RTCP feedback parameters (FIR, NACK, REMB) to a
// codec description.
static void AddDefaultFeedbackParams(VideoCodec* codec) {
  codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamCcm, kRtcpFbCcmParamFir));
  codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamNack, kParamValueEmpty));
  codec->AddFeedbackParam(FeedbackParam(kRtcpFbParamRemb, kParamValueEmpty));
}
// Rebuilds the codec list to be only those that are less intensive
// than the specified codec. Prefers internal codec over external with
// higher preference field.
bool WebRtcVideoEngine::RebuildCodecList(const VideoCodec& in_codec) {
  // |in_codec| must be a codec we know about, or the list is left untouched.
  if (!FindCodec(in_codec))
    return false;
  video_codecs_.clear();
  bool found = false;
  // Names of the built-in codecs added below; used afterwards to skip
  // external codecs that duplicate an internal one.
  std::set<std::string> internal_codec_names;
  for (size_t i = 0; i < ARRAY_SIZE(kVideoCodecPrefs); ++i) {
    const VideoCodecPref& pref(kVideoCodecPrefs[i]);
    // Skip preference entries until |in_codec|'s entry is reached; entries
    // from there on are added to the list.
    if (!found)
      found = (in_codec.name == pref.name);
    if (found) {
      // Preference decreases with the entry's position in kVideoCodecPrefs.
      VideoCodec codec(pref.payload_type, pref.name,
                       in_codec.width, in_codec.height, in_codec.framerate,
                       static_cast<int>(ARRAY_SIZE(kVideoCodecPrefs) - i));
      if (_stricmp(kVp8PayloadName, codec.name.c_str()) == 0) {
        AddDefaultFeedbackParams(&codec);
      }
      video_codecs_.push_back(codec);
      internal_codec_names.insert(codec.name);
    }
  }
  if (encoder_factory_) {
    const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
        encoder_factory_->codecs();
    for (size_t i = 0; i < codecs.size(); ++i) {
      bool is_internal_codec = internal_codec_names.find(codecs[i].name) !=
          internal_codec_names.end();
      if (!is_internal_codec) {
        if (!found)
          found = (in_codec.name == codecs[i].name);
        VideoCodec codec(
            GetExternalVideoPayloadType(static_cast<int>(i)),
            codecs[i].name,
            codecs[i].max_width,
            codecs[i].max_height,
            codecs[i].max_fps,
            // Use negative preference on external codec to ensure the internal
            // codec is preferred.
            static_cast<int>(0 - i));
        AddDefaultFeedbackParams(&codec);
        video_codecs_.push_back(codec);
      }
    }
  }
  // FindCodec() succeeded above, so |in_codec| must have been encountered.
  ASSERT(found);
  return true;
}
// Ignore spammy trace messages, mostly from the stats API when we haven't
// gotten RTCP info yet from the remote side.
bool WebRtcVideoEngine::ShouldIgnoreTrace(const std::string& trace) {
  // NULL-terminated list of message prefixes to suppress (currently empty).
  static const char* const kTracesToIgnore[] = {
    NULL
  };
  const char* const* entry = kTracesToIgnore;
  while (*entry != NULL) {
    if (trace.find(*entry) == 0) {
      return true;
    }
    ++entry;
  }
  return false;
}
// Returns the number of registered media channels. Guarded by
// |channels_crit_|.
int WebRtcVideoEngine::GetNumOfChannels() {
  talk_base::CritScope cs(&channels_crit_);
  return static_cast<int>(channels_.size());
}
// Trace callback from WebRTC: maps the WebRTC trace level onto our logging
// severity, strips the fixed-width WebRTC prefix, and forwards the message
// unless it is on an ignore list.
void WebRtcVideoEngine::Print(webrtc::TraceLevel level, const char* trace,
                              int length) {
  talk_base::LoggingSeverity sev = talk_base::LS_VERBOSE;
  switch (level) {
    case webrtc::kTraceError:
    case webrtc::kTraceCritical:
      sev = talk_base::LS_ERROR;
      break;
    case webrtc::kTraceWarning:
      sev = talk_base::LS_WARNING;
      break;
    case webrtc::kTraceStateInfo:
    case webrtc::kTraceInfo:
    case webrtc::kTraceTerseInfo:
      sev = talk_base::LS_INFO;
      break;
    default:
      break;  // Everything else stays at LS_VERBOSE.
  }
  // Skip past boilerplate prefix text (the first 71 characters).
  if (length < 72) {
    std::string message(trace, length);
    LOG(LS_ERROR) << "Malformed webrtc log message: ";
    LOG_V(sev) << message;
  } else {
    std::string message(trace + 71, length - 72);
    if (!ShouldIgnoreTrace(message) &&
        (!voice_engine_ || !voice_engine_->ShouldIgnoreTrace(message))) {
      LOG_V(sev) << "webrtc: " << message;
    }
  }
}
// Creates a decoder for |type| via the installed decoder factory, or
// returns NULL when no factory is set.
webrtc::VideoDecoder* WebRtcVideoEngine::CreateExternalDecoder(
    webrtc::VideoCodecType type) {
  return (decoder_factory_ != NULL) ?
      decoder_factory_->CreateVideoDecoder(type) : NULL;
}
// Returns |decoder| to the factory that created it. The factory is expected
// to still be installed; the NULL check only guards release builds.
void WebRtcVideoEngine::DestroyExternalDecoder(webrtc::VideoDecoder* decoder) {
  ASSERT(decoder_factory_ != NULL);
  if (decoder_factory_ != NULL) {
    decoder_factory_->DestroyVideoDecoder(decoder);
  }
}
// Creates an encoder for |type| via the installed encoder factory, or
// returns NULL when no factory is set.
webrtc::VideoEncoder* WebRtcVideoEngine::CreateExternalEncoder(
    webrtc::VideoCodecType type) {
  return (encoder_factory_ != NULL) ?
      encoder_factory_->CreateVideoEncoder(type) : NULL;
}
// Returns |encoder| to the factory that created it. The factory is expected
// to still be installed; the NULL check only guards release builds.
void WebRtcVideoEngine::DestroyExternalEncoder(webrtc::VideoEncoder* encoder) {
  ASSERT(encoder_factory_ != NULL);
  if (encoder_factory_ != NULL) {
    encoder_factory_->DestroyVideoEncoder(encoder);
  }
}
// Returns true iff the installed encoder factory advertises a codec of the
// given type.
bool WebRtcVideoEngine::IsExternalEncoderCodecType(
    webrtc::VideoCodecType type) const {
  if (encoder_factory_ == NULL) {
    return false;
  }
  const std::vector<WebRtcVideoEncoderFactory::VideoCodec>& codecs =
      encoder_factory_->codecs();
  for (size_t i = 0; i < codecs.size(); ++i) {
    if (codecs[i].type == type) {
      return true;
    }
  }
  return false;
}
// Installs (or clears, with NULL) the factory used to create external
// decoders. Not owned by the engine.
void WebRtcVideoEngine::SetExternalDecoderFactory(
    WebRtcVideoDecoderFactory* decoder_factory) {
  decoder_factory_ = decoder_factory;
}
// Installs (or clears, with NULL) the factory used to create external
// encoders, moving the observer registration from the old factory to the
// new one and refreshing the codec list.
void WebRtcVideoEngine::SetExternalEncoderFactory(
    WebRtcVideoEncoderFactory* encoder_factory) {
  if (encoder_factory_ == encoder_factory) {
    return;  // Nothing changed.
  }
  if (encoder_factory_ != NULL) {
    encoder_factory_->RemoveObserver(this);
  }
  encoder_factory_ = encoder_factory;
  if (encoder_factory_ != NULL) {
    encoder_factory_->AddObserver(this);
  }
  // Invoke OnCodecsAvailable() here in case the list of codecs is already
  // available when the encoder factory is installed. If not, the encoder
  // factory will invoke the callback later when the codecs become available.
  OnCodecsAvailable();
}
void WebRtcVideoEngine::OnCodecsAvailable() {
// Rebuild codec list while reapplying the current default codec format.
VideoCodec max_codec(kVideoCodecPrefs[0].payload_type,
kVideoCodecPrefs[0].name,
video_codecs_[0].width,
video_codecs_[0].height,
video_codecs_[0].framerate,
0);
if (!RebuildCodecList(max_codec)) {
LOG(LS_ERROR) << "Failed to initialize list of supported codec types";
}
}
// WebRtcVideoMediaChannel
WebRtcVideoMediaChannel::WebRtcVideoMediaChannel(
    WebRtcVideoEngine* engine,
    VoiceMediaChannel* channel)
    : engine_(engine),
      voice_channel_(channel),
      vie_channel_(-1),  // Set by Init() via CreateChannel().
      nack_enabled_(true),
      remb_enabled_(false),
      render_started_(false),
      first_receive_ssrc_(0),
      send_red_type_(-1),  // -1 until SetSendCodecs() negotiates RED.
      send_fec_type_(-1),  // -1 until SetSendCodecs() negotiates FEC.
      send_min_bitrate_(kMinVideoBitrate),
      send_start_bitrate_(kStartVideoBitrate),
      send_max_bitrate_(kMaxVideoBitrate),
      sending_(false),
      ratio_w_(0),
      ratio_h_(0) {
  // Let the engine track this channel for engine-wide operations.
  engine->RegisterChannel(this);
}
// Creates the default channel (stored under ssrc key 0) as a combined
// send/receive channel; its id is recorded in |vie_channel_|.
bool WebRtcVideoMediaChannel::Init() {
  const uint32 ssrc_key = 0;
  return CreateChannel(ssrc_key, MD_SENDRECV, &vie_channel_);
}
WebRtcVideoMediaChannel::~WebRtcVideoMediaChannel() {
  // Stop sending and rendering before tearing the channels down.
  const bool send = false;
  SetSend(send);
  const bool render = false;
  SetRender(render);
  // Delete every send channel. The default channel is recycled (not deleted)
  // by DeleteSendChannel and is torn down with recv_channels_ below.
  while (!send_channels_.empty()) {
    if (!DeleteSendChannel(send_channels_.begin()->first)) {
      LOG(LS_ERROR) << "Unable to delete channel with ssrc key "
                    << send_channels_.begin()->first;
      ASSERT(false);
      break;  // Avoid looping forever if deletion keeps failing.
    }
  }
  // Remove all receive streams and the default channel.
  while (!recv_channels_.empty()) {
    RemoveRecvStream(recv_channels_.begin()->first);
  }
  // Unregister the channel from the engine.
  engine()->UnregisterChannel(this);
  if (worker_thread()) {
    // Drop any messages still queued for this channel.
    worker_thread()->Clear(this);
  }
}
bool WebRtcVideoMediaChannel::SetRecvCodecs(
const std::vector<VideoCodec>& codecs) {
receive_codecs_.clear();
for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
iter != codecs.end(); ++iter) {
if (engine()->FindCodec(*iter)) {
webrtc::VideoCodec wcodec;
if (engine()->ConvertFromCricketVideoCodec(*iter, &wcodec)) {
receive_codecs_.push_back(wcodec);
}
} else {
LOG(LS_INFO) << "Unknown codec " << iter->name;
return false;
}
}
for (RecvChannelMap::iterator it = recv_channels_.begin();
it != recv_channels_.end(); ++it) {
if (!SetReceiveCodecs(it->second))
return false;
}
return true;
}
// Applies a new list of negotiated send codecs: records RED/FEC payload
// types, converts the sendable codecs, configures NACK/FEC/REMB on all
// channels, and installs the first matched codec as the send codec.
bool WebRtcVideoMediaChannel::SetSendCodecs(
    const std::vector<VideoCodec>& codecs) {
  // Match with local video codec list.
  std::vector<webrtc::VideoCodec> send_codecs;
  VideoCodec checked_codec;
  VideoCodec current;  // defaults to 0x0
  if (sending_) {
    // Constrain codec selection to the aspect ratio currently being sent.
    ConvertToCricketVideoCodec(*send_codec_, &current);
  }
  for (std::vector<VideoCodec>::const_iterator iter = codecs.begin();
       iter != codecs.end(); ++iter) {
    if (_stricmp(iter->name.c_str(), kRedPayloadName) == 0) {
      send_red_type_ = iter->id;
    } else if (_stricmp(iter->name.c_str(), kFecPayloadName) == 0) {
      send_fec_type_ = iter->id;
    } else if (engine()->CanSendCodec(*iter, current, &checked_codec)) {
      webrtc::VideoCodec wcodec;
      if (engine()->ConvertFromCricketVideoCodec(checked_codec, &wcodec)) {
        if (send_codecs.empty()) {
          // NACK/REMB state is taken from the first (preferred) send codec.
          nack_enabled_ = IsNackEnabled(checked_codec);
          remb_enabled_ = IsRembEnabled(checked_codec);
        }
        send_codecs.push_back(wcodec);
      }
    } else {
      LOG(LS_WARNING) << "Unknown codec " << iter->name;
    }
  }
  // Fail if we don't have a match.
  if (send_codecs.empty()) {
    LOG(LS_WARNING) << "No matching codecs available";
    return false;
  }
  // Recv protection.
  for (RecvChannelMap::iterator it = recv_channels_.begin();
       it != recv_channels_.end(); ++it) {
    int channel_id = it->second->channel_id();
    if (!SetNackFec(channel_id, send_red_type_, send_fec_type_,
                    nack_enabled_)) {
      return false;
    }
    // Receive channels never send, so REMB sending stays off here.
    if (engine_->vie()->rtp()->SetRembStatus(channel_id,
                                             kNotSending,
                                             remb_enabled_) != 0) {
      LOG_RTCERR3(SetRembStatus, channel_id, kNotSending, remb_enabled_);
      return false;
    }
  }
  // Send settings.
  for (SendChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    int channel_id = iter->second->channel_id();
    if (!SetNackFec(channel_id, send_red_type_, send_fec_type_,
                    nack_enabled_)) {
      return false;
    }
    if (engine_->vie()->rtp()->SetRembStatus(channel_id,
                                             remb_enabled_,
                                             remb_enabled_) != 0) {
      LOG_RTCERR3(SetRembStatus, channel_id, remb_enabled_, remb_enabled_);
      return false;
    }
  }
  // Select the first matched codec.
  webrtc::VideoCodec& codec(send_codecs[0]);
  if (!SetSendCodec(
          codec, codec.minBitrate, codec.startBitrate, codec.maxBitrate)) {
    return false;
  }
  // Let each send channel's frame adapter know the new output format.
  for (SendChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    WebRtcVideoChannelSendInfo* send_channel = iter->second;
    send_channel->InitializeAdapterOutputFormat(codec);
  }
  LogSendCodecChange("SetSendCodecs()");
  return true;
}
// Reports the currently configured send codec; false if none is set.
bool WebRtcVideoMediaChannel::GetSendCodec(VideoCodec* send_codec) {
  if (send_codec_) {
    ConvertToCricketVideoCodec(*send_codec_, send_codec);
    return true;
  }
  return false;
}
// Records the desired send format on the send channel owning |ssrc|.
// Requires a send codec to already be configured.
bool WebRtcVideoMediaChannel::SetSendStreamFormat(uint32 ssrc,
                                                  const VideoFormat& format) {
  if (!send_codec_) {
    LOG(LS_ERROR) << "The send codec has not been set yet.";
    return false;
  }
  WebRtcVideoChannelSendInfo* channel_info = GetSendChannel(ssrc);
  if (channel_info == NULL) {
    LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";
    return false;
  }
  channel_info->set_video_format(format);
  return true;
}
// Starts or stops rendering on every receive channel. The new state is only
// recorded when every channel succeeded.
bool WebRtcVideoMediaChannel::SetRender(bool render) {
  if (render == render_started_) {
    return true;  // no action required
  }
  bool ret = true;
  RecvChannelMap::iterator it;
  for (it = recv_channels_.begin(); it != recv_channels_.end(); ++it) {
    const int channel_id = it->second->channel_id();
    if (render) {
      if (engine()->vie()->render()->StartRender(channel_id) != 0) {
        LOG_RTCERR1(StartRender, channel_id);
        ret = false;
      }
    } else {
      if (engine()->vie()->render()->StopRender(channel_id) != 0) {
        LOG_RTCERR1(StopRender, channel_id);
        ret = false;
      }
    }
  }
  if (ret) {
    render_started_ = render;
  }
  return ret;
}
// Toggles sending on all send channels. Starting requires at least one ready
// send stream and a previously configured send codec.
bool WebRtcVideoMediaChannel::SetSend(bool send) {
  if (send && !HasReadySendChannels()) {
    LOG(LS_ERROR) << "No stream added";
    return false;
  }
  if (send == sending()) {
    return true;  // No action required.
  }
  if (!send) {
    // We've been asked to stop sending.
    if (!StopSend()) {
      return false;
    }
  } else {
    // We've been asked to start sending; SetSendCodecs must have been called
    // already.
    if (!send_codec_) {
      return false;
    }
    if (!StartSend()) {
      return false;
    }
  }
  sending_ = send;
  return true;
}
// Registers a new local send stream: picks (or creates) a send channel,
// assigns the local SSRC and RTCP CName, propagates the SSRC to receive
// channels when the default channel is used, and re-applies the send codec.
bool WebRtcVideoMediaChannel::AddSendStream(const StreamParams& sp) {
  LOG(LS_INFO) << "AddSendStream " << sp.ToString();
  if (!IsOneSsrcStream(sp)) {
    LOG(LS_ERROR) << "AddSendStream: bad local stream parameters";
    return false;
  }
  uint32 ssrc_key;
  if (!CreateSendChannelKey(sp.first_ssrc(), &ssrc_key)) {
    LOG(LS_ERROR) << "Trying to register duplicate ssrc: " << sp.first_ssrc();
    return false;
  }
  // If the default channel is already used for sending create a new channel
  // otherwise use the default channel for sending.
  int channel_id = -1;
  if (send_channels_[0]->stream_params() == NULL) {
    channel_id = vie_channel_;
  } else {
    if (!CreateChannel(ssrc_key, MD_SEND, &channel_id)) {
      LOG(LS_ERROR) << "AddSendStream: unable to create channel";
      return false;
    }
  }
  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
  // Set the send (local) SSRC.
  // If there are multiple send SSRCs, we can only set the first one here, and
  // the rest of the SSRC(s) need to be set after SetSendCodec has been called
  // (with a codec requires multiple SSRC(s)).
  if (engine()->vie()->rtp()->SetLocalSSRC(channel_id,
                                           sp.first_ssrc()) != 0) {
    LOG_RTCERR2(SetLocalSSRC, channel_id, sp.first_ssrc());
    return false;
  }
  // Set RTCP CName.
  if (engine()->vie()->rtp()->SetRTCPCName(channel_id,
                                           sp.cname.c_str()) != 0) {
    LOG_RTCERR2(SetRTCPCName, channel_id, sp.cname.c_str());
    return false;
  }
  // At this point the channel's local SSRC has been updated. If the channel is
  // the default channel make sure that all the receive channels are updated as
  // well. Receive channels have to have the same SSRC as the default channel in
  // order to send receiver reports with this SSRC.
  if (IsDefaultChannel(channel_id)) {
    for (RecvChannelMap::const_iterator it = recv_channels_.begin();
         it != recv_channels_.end(); ++it) {
      WebRtcVideoChannelRecvInfo* info = it->second;
      // NOTE(review): this inner |channel_id| shadows the outer one above;
      // the loop body only uses the receive channel's id, so behavior is
      // correct, but the shadowing is easy to misread.
      int channel_id = info->channel_id();
      if (engine()->vie()->rtp()->SetLocalSSRC(channel_id,
                                               sp.first_ssrc()) != 0) {
        LOG_RTCERR1(SetLocalSSRC, it->first);
        return false;
      }
    }
  }
  send_channel->set_stream_params(sp);
  // Reset send codec after stream parameters changed.
  if (send_codec_) {
    if (!SetSendCodec(send_channel, *send_codec_, send_min_bitrate_,
                      send_start_bitrate_, send_max_bitrate_)) {
      return false;
    }
    // NOTE(review): the label below says "SetSendStreamFormat()" although we
    // are in AddSendStream() — looks like a copy/paste slip; confirm before
    // relying on it when reading logs.
    LogSendCodecChange("SetSendStreamFormat()");
  }
  if (sending_) {
    // Sending is already active, so start the new stream immediately.
    return StartSend(send_channel);
  }
  return true;
}
// Removes the send stream identified by |ssrc|: stops sending, deregisters
// and destroys its external encoders, and deletes (or, for the default
// channel, recycles) the send channel.
bool WebRtcVideoMediaChannel::RemoveSendStream(uint32 ssrc) {
  uint32 ssrc_key;
  if (!GetSendChannelKey(ssrc, &ssrc_key)) {
    LOG(LS_WARNING) << "Try to remove stream with ssrc " << ssrc
                    << " which doesn't exist.";
    return false;
  }
  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
  int channel_id = send_channel->channel_id();
  if (IsDefaultChannel(channel_id) && (send_channel->stream_params() == NULL)) {
    // Default channel will still exist. However, if stream_params() is NULL
    // there is no stream to remove.
    return false;
  }
  if (sending_) {
    StopSend(send_channel);
  }
  // Deregister and destroy any external encoders registered on this channel.
  const WebRtcVideoChannelSendInfo::EncoderMap& encoder_map =
      send_channel->registered_encoders();
  for (WebRtcVideoChannelSendInfo::EncoderMap::const_iterator it =
      encoder_map.begin(); it != encoder_map.end(); ++it) {
    if (engine()->vie()->ext_codec()->DeRegisterExternalSendCodec(
        channel_id, it->first) != 0) {
      LOG_RTCERR1(DeregisterEncoderObserver, channel_id);
    }
    engine()->DestroyExternalEncoder(it->second);
  }
  send_channel->ClearRegisteredEncoders();
  // The receive channels depend on the default channel, recycle it instead.
  if (IsDefaultChannel(channel_id)) {
    SetCapturer(GetDefaultChannelSsrc(), NULL);
    send_channel->ClearStreamParams();
  } else {
    return DeleteSendChannel(ssrc_key);
  }
  return true;
}
// Registers a new remote stream. In a 1:1 call the default channel is reused
// for the first receive stream; otherwise a dedicated receive channel is
// created and (in conference mode) inherits the default renderer.
bool WebRtcVideoMediaChannel::AddRecvStream(const StreamParams& sp) {
  // TODO(zhurunz) Remove this once BWE works properly across different send
  // and receive channels.
  // Reuse default channel for recv stream in 1:1 call.
  if (!InConferenceMode() && first_receive_ssrc_ == 0) {
    LOG(LS_INFO) << "Recv stream " << sp.first_ssrc()
                 << " reuse default channel #"
                 << vie_channel_;
    first_receive_ssrc_ = sp.first_ssrc();
    if (render_started_) {
      if (engine()->vie()->render()->StartRender(vie_channel_) !=0) {
        LOG_RTCERR1(StartRender, vie_channel_);
      }
    }
    return true;
  }
  // Reject duplicate SSRCs (including one already bound to the default
  // channel).
  if (recv_channels_.find(sp.first_ssrc()) != recv_channels_.end() ||
      first_receive_ssrc_ == sp.first_ssrc()) {
    LOG(LS_ERROR) << "Stream already exists";
    return false;
  }
  // TODO(perkj): Implement recv media from multiple SSRCs per stream.
  if (sp.ssrcs.size() != 1) {
    LOG(LS_ERROR) << "WebRtcVideoMediaChannel supports one receiving SSRC per"
                  << " stream";
    return false;
  }
  // Create a new channel for receiving video data.
  // In order to get the bandwidth estimation work fine for
  // receive only channels, we connect all receiving channels
  // to our master send channel.
  int channel_id = -1;
  if (!CreateChannel(sp.first_ssrc(), MD_RECV, &channel_id)) {
    return false;
  }
  // Get the default renderer.
  VideoRenderer* default_renderer = NULL;
  if (InConferenceMode()) {
    // The recv_channels_ size start out being 1, so if it is two here this
    // is the first receive channel created (vie_channel_ is not used for
    // receiving in a conference call). This means that the renderer stored
    // inside vie_channel_ should be used for the just created channel.
    if (recv_channels_.size() == 2 &&
        recv_channels_.find(0) != recv_channels_.end()) {
      GetRenderer(0, &default_renderer);
    }
  }
  // The first recv stream reuses the default renderer (if a default renderer
  // has been set).
  if (default_renderer) {
    SetRenderer(sp.first_ssrc(), default_renderer);
  }
  LOG(LS_INFO) << "New video stream " << sp.first_ssrc()
               << " registered to VideoEngine channel #"
               << channel_id << " and connected to channel #" << vie_channel_;
  return true;
}
// Removes the receive stream identified by |ssrc|: detaches renderer,
// transport and external decoders, then deletes the underlying VideoEngine
// channel. Handles the 1:1-call case where the default channel is reused.
bool WebRtcVideoMediaChannel::RemoveRecvStream(uint32 ssrc) {
  RecvChannelMap::iterator it = recv_channels_.find(ssrc);
  if (it == recv_channels_.end()) {
    // TODO(perkj): Remove this once BWE works properly across different send
    // and receive channels.
    // The default channel is reused for recv stream in 1:1 call.
    if (first_receive_ssrc_ == ssrc) {
      first_receive_ssrc_ = 0;
      // Need to stop the renderer and remove it since the render window can be
      // deleted after this.
      if (render_started_) {
        if (engine()->vie()->render()->StopRender(vie_channel_) != 0) {
          // Bug fix: |it| is end() in this branch, so dereferencing it for the
          // log argument was undefined behavior. Log the default channel id.
          LOG_RTCERR1(StopRender, vie_channel_);
        }
      }
      recv_channels_[0]->SetRenderer(NULL);
      return true;
    }
    return false;
  }
  WebRtcVideoChannelRecvInfo* info = it->second;
  int channel_id = info->channel_id();
  if (engine()->vie()->render()->RemoveRenderer(channel_id) != 0) {
    LOG_RTCERR1(RemoveRenderer, channel_id);
  }
  if (engine()->vie()->network()->DeregisterSendTransport(channel_id) != 0) {
    LOG_RTCERR1(DeRegisterSendTransport, channel_id);
  }
  if (engine()->vie()->codec()->DeregisterDecoderObserver(
      channel_id) != 0) {
    LOG_RTCERR1(DeregisterDecoderObserver, channel_id);
  }
  // Deregister and destroy any external decoders registered on this channel.
  // (The iterator is named |decoder_it| to avoid shadowing |it| above.)
  const WebRtcVideoChannelRecvInfo::DecoderMap& decoder_map =
      info->registered_decoders();
  for (WebRtcVideoChannelRecvInfo::DecoderMap::const_iterator decoder_it =
      decoder_map.begin(); decoder_it != decoder_map.end(); ++decoder_it) {
    if (engine()->vie()->ext_codec()->DeRegisterExternalReceiveCodec(
        channel_id, decoder_it->first) != 0) {
      LOG_RTCERR1(DeregisterDecoderObserver, channel_id);
    }
    engine()->DestroyExternalDecoder(decoder_it->second);
  }
  info->ClearRegisteredDecoders();
  LOG(LS_INFO) << "Removing video stream " << ssrc
               << " with VideoEngine channel #"
               << channel_id;
  if (engine()->vie()->base()->DeleteChannel(channel_id) == -1) {
    LOG_RTCERR1(DeleteChannel, channel_id);
    // Leak the WebRtcVideoChannelRecvInfo owned by |it| but remove the channel
    // from recv_channels_.
    recv_channels_.erase(it);
    return false;
  }
  // Delete the WebRtcVideoChannelRecvInfo pointed to by it->second.
  delete info;
  recv_channels_.erase(it);
  return true;
}
// Starts sending on every send channel; false if any channel failed.
bool WebRtcVideoMediaChannel::StartSend() {
  bool all_started = true;
  SendChannelMap::iterator iter;
  for (iter = send_channels_.begin(); iter != send_channels_.end(); ++iter) {
    if (!StartSend(iter->second)) {
      all_started = false;
    }
  }
  return all_started;
}
// Starts sending on a single channel and records its sending state.
bool WebRtcVideoMediaChannel::StartSend(
    WebRtcVideoChannelSendInfo* send_channel) {
  const int id = send_channel->channel_id();
  if (engine()->vie()->base()->StartSend(id) != 0) {
    LOG_RTCERR1(StartSend, id);
    return false;
  }
  send_channel->set_sending(true);
  return true;
}
// Stops sending on every send channel; false if any channel failed.
bool WebRtcVideoMediaChannel::StopSend() {
  bool all_stopped = true;
  SendChannelMap::iterator iter;
  for (iter = send_channels_.begin(); iter != send_channels_.end(); ++iter) {
    if (!StopSend(iter->second)) {
      all_stopped = false;
    }
  }
  return all_stopped;
}
// Stops sending on a single channel and records its sending state.
bool WebRtcVideoMediaChannel::StopSend(
    WebRtcVideoChannelSendInfo* send_channel) {
  const int id = send_channel->channel_id();
  if (engine()->vie()->base()->StopSend(id) != 0) {
    LOG_RTCERR1(StopSend, id);
    return false;
  }
  send_channel->set_sending(false);
  return true;
}
// Requests a key frame on every send channel; false if any request failed.
bool WebRtcVideoMediaChannel::SendIntraFrame() {
  bool all_ok = true;
  SendChannelMap::iterator iter;
  for (iter = send_channels_.begin(); iter != send_channels_.end(); ++iter) {
    const int id = iter->second->channel_id();
    if (engine()->vie()->codec()->SendKeyFrame(id) != 0) {
      LOG_RTCERR1(SendKeyFrame, id);
      all_ok = false;
    }
  }
  return all_ok;
}
// True for a "simple" stream: exactly one SSRC and no SSRC groups.
bool WebRtcVideoMediaChannel::IsOneSsrcStream(const StreamParams& sp) {
  return sp.ssrcs.size() == 1 && sp.ssrc_groups.empty();
}
// A channel is ready to send when there is a non-default send channel, or
// the default channel has stream parameters registered.
bool WebRtcVideoMediaChannel::HasReadySendChannels() {
  if (send_channels_.empty()) {
    return false;
  }
  return send_channels_.size() > 1 ||
         send_channels_[0]->stream_params() != NULL;
}
// Resolves |local_ssrc| to the |send_channels_| map key that owns it.
// Returns false if no ready send channel uses this SSRC. Note that the
// default channel is keyed by 0, not by its SSRC.
bool WebRtcVideoMediaChannel::GetSendChannelKey(uint32 local_ssrc,
                                                uint32* key) {
  *key = 0;
  // If a send channel is not ready to send it will not have local_ssrc
  // registered to it.
  if (!HasReadySendChannels()) {
    return false;
  }
  // The default channel is stored with key 0. The key therefore does not match
  // the SSRC associated with the default channel. Check if the SSRC provided
  // corresponds to the default channel's SSRC.
  if (local_ssrc == GetDefaultChannelSsrc()) {
    return true;
  }
  if (send_channels_.find(local_ssrc) == send_channels_.end()) {
    // No direct key match; scan every send channel for a secondary SSRC.
    for (SendChannelMap::iterator iter = send_channels_.begin();
         iter != send_channels_.end(); ++iter) {
      WebRtcVideoChannelSendInfo* send_channel = iter->second;
      if (send_channel->has_ssrc(local_ssrc)) {
        *key = iter->first;
        return true;
      }
    }
    return false;
  }
  // The key was found in the above std::map::find call. This means that the
  // ssrc is the key.
  *key = local_ssrc;
  return true;
}
// Finds the send channel currently attached to |video_capturer|, or NULL.
WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
    VideoCapturer* video_capturer) {
  SendChannelMap::iterator iter;
  for (iter = send_channels_.begin(); iter != send_channels_.end(); ++iter) {
    if (iter->second->video_capturer() == video_capturer) {
      return iter->second;
    }
  }
  return NULL;
}
// Finds the send channel owning |local_ssrc|, or NULL if the SSRC is
// unknown.
WebRtcVideoChannelSendInfo* WebRtcVideoMediaChannel::GetSendChannel(
    uint32 local_ssrc) {
  uint32 key;
  if (!GetSendChannelKey(local_ssrc, &key)) {
    return NULL;
  }
  return send_channels_[key];
}
// Chooses the |send_channels_| key for a new stream with |local_ssrc|:
// key 0 when the default channel is free, otherwise the SSRC itself.
// Returns false if the SSRC is already in use.
bool WebRtcVideoMediaChannel::CreateSendChannelKey(uint32 local_ssrc,
                                                   uint32* key) {
  if (GetSendChannelKey(local_ssrc, key)) {
    // If there is a key corresponding to |local_ssrc|, the SSRC is already in
    // use. SSRCs need to be unique in a session and at this point a duplicate
    // SSRC has been detected.
    return false;
  }
  if (send_channels_[0]->stream_params() == NULL) {
    // key should be 0 here as the default channel should be re-used whenever it
    // is not used.
    *key = 0;
    return true;
  }
  // SSRC is currently not in use and the default channel is already in use. Use
  // the SSRC as key since it is supposed to be unique in a session.
  *key = local_ssrc;
  return true;
}
// Returns the SSRC registered on the default send channel, or 0 when no
// send stream is registered there.
uint32 WebRtcVideoMediaChannel::GetDefaultChannelSsrc() {
  const StreamParams* params = send_channels_[0]->stream_params();
  return (params == NULL) ? 0 : params->first_ssrc();
}
// Tears down the send channel stored under |ssrc_key|: detaches its
// capturer, deregisters its observers and capture device, and deletes the
// underlying VideoEngine channel (except for the shared default channel).
bool WebRtcVideoMediaChannel::DeleteSendChannel(uint32 ssrc_key) {
  if (send_channels_.find(ssrc_key) == send_channels_.end()) {
    return false;
  }
  WebRtcVideoChannelSendInfo* send_channel = send_channels_[ssrc_key];
  VideoCapturer* capturer = send_channel->video_capturer();
  if (capturer != NULL) {
    // Stop receiving frames from the capturer before dropping it.
    capturer->SignalVideoFrame.disconnect(this);
    send_channel->set_video_capturer(NULL);
  }
  int channel_id = send_channel->channel_id();
  int capture_id = send_channel->capture_id();
  if (engine()->vie()->codec()->DeregisterEncoderObserver(
      channel_id) != 0) {
    LOG_RTCERR1(DeregisterEncoderObserver, channel_id);
  }
  // Destroy the external capture interface.
  if (engine()->vie()->capture()->DisconnectCaptureDevice(
      channel_id) != 0) {
    LOG_RTCERR1(DisconnectCaptureDevice, channel_id);
  }
  if (engine()->vie()->capture()->ReleaseCaptureDevice(
      capture_id) != 0) {
    LOG_RTCERR1(ReleaseCaptureDevice, capture_id);
  }
  // The default channel is stored in both |send_channels_| and
  // |recv_channels_|. To make sure it is only deleted once from vie let the
  // delete call happen when tearing down |recv_channels_| and not here.
  if (!IsDefaultChannel(channel_id)) {
    engine_->vie()->base()->DeleteChannel(channel_id);
  }
  delete send_channel;
  send_channels_.erase(ssrc_key);
  return true;
}
// Detaches the video capturer from |ssrc|'s send channel and, when a send
// codec is configured, queues a black frame at the last capture timestamp.
bool WebRtcVideoMediaChannel::RemoveCapturer(uint32 ssrc) {
  WebRtcVideoChannelSendInfo* channel_info = GetSendChannel(ssrc);
  if (channel_info == NULL) {
    return false;
  }
  VideoCapturer* video_capturer = channel_info->video_capturer();
  if (video_capturer == NULL) {
    return false;
  }
  video_capturer->SignalVideoFrame.disconnect(this);
  channel_info->set_video_capturer(NULL);
  const int64 timestamp = channel_info->local_stream_info()->time_stamp();
  if (send_codec_) {
    QueueBlackFrame(ssrc, timestamp, send_codec_->maxFramerate);
  }
  return true;
}
// Attaches |renderer| to the receive channel for |ssrc|. Falls back to the
// default channel when the SSRC is the one reused in a 1:1 call.
bool WebRtcVideoMediaChannel::SetRenderer(uint32 ssrc,
                                          VideoRenderer* renderer) {
  RecvChannelMap::iterator it = recv_channels_.find(ssrc);
  if (it != recv_channels_.end()) {
    it->second->SetRenderer(renderer);
    return true;
  }
  // TODO(perkj): Remove this once BWE works properly across different send
  // and receive channels.
  // The default channel is reused for recv stream in 1:1 call.
  if (first_receive_ssrc_ == ssrc &&
      recv_channels_.find(0) != recv_channels_.end()) {
    LOG(LS_INFO) << "SetRenderer " << ssrc
                 << " reuse default channel #"
                 << vie_channel_;
    recv_channels_[0]->SetRenderer(renderer);
    return true;
  }
  return false;
}
// Collects sender, receiver, and bandwidth-estimation statistics from ViE
// and fills |info|. Per-sender fetch failures are logged and skipped; a
// receiver RTP-statistics failure aborts with false.
bool WebRtcVideoMediaChannel::GetStats(VideoMediaInfo* info) {
  // Get sender statistics and build VideoSenderInfo.
  // Channel-wide totals accumulated across all send channels below.
  unsigned int total_bitrate_sent = 0;
  unsigned int video_bitrate_sent = 0;
  unsigned int fec_bitrate_sent = 0;
  unsigned int nack_bitrate_sent = 0;
  unsigned int estimated_send_bandwidth = 0;
  unsigned int target_enc_bitrate = 0;
  if (send_codec_) {
    for (SendChannelMap::const_iterator iter = send_channels_.begin();
         iter != send_channels_.end(); ++iter) {
      WebRtcVideoChannelSendInfo* send_channel = iter->second;
      const int channel_id = send_channel->channel_id();
      VideoSenderInfo sinfo;
      const StreamParams* send_params = send_channel->stream_params();
      if (send_params == NULL) {
        // This should only happen if the default vie channel is not in use.
        // This can happen if no streams have ever been added or the stream
        // corresponding to the default channel has been removed. Note that
        // there may be non-default vie channels in use when this happen so
        // asserting send_channels_.size() == 1 is not correct and neither is
        // breaking out of the loop.
        ASSERT(channel_id == vie_channel_);
        continue;
      }
      unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
      if (engine_->vie()->rtp()->GetRTPStatistics(channel_id, bytes_sent,
                                                  packets_sent, bytes_recv,
                                                  packets_recv) != 0) {
        LOG_RTCERR1(GetRTPStatistics, vie_channel_);
        continue;
      }
      WebRtcLocalStreamInfo* channel_stream_info =
          send_channel->local_stream_info();
      sinfo.ssrcs = send_params->ssrcs;
      sinfo.codec_name = send_codec_->plName;
      sinfo.bytes_sent = bytes_sent;
      sinfo.packets_sent = packets_sent;
      // Stats not tracked (or not yet known) for senders report -1; the
      // RTCP-derived fields below are overwritten if RTCP data exists.
      sinfo.packets_cached = -1;
      sinfo.packets_lost = -1;
      sinfo.fraction_lost = -1;
      sinfo.firs_rcvd = -1;
      sinfo.nacks_rcvd = -1;
      sinfo.rtt_ms = -1;
      sinfo.frame_width = static_cast<int>(channel_stream_info->width());
      sinfo.frame_height = static_cast<int>(channel_stream_info->height());
      sinfo.framerate_input = channel_stream_info->framerate();
      sinfo.framerate_sent = send_channel->encoder_observer()->framerate();
      sinfo.nominal_bitrate = send_channel->encoder_observer()->bitrate();
      sinfo.preferred_bitrate = send_max_bitrate_;
      sinfo.adapt_reason = send_channel->CurrentAdaptReason();
      // Get received RTCP statistics for the sender, if available.
      // It's not a fatal error if we can't, since RTCP may not have arrived
      // yet.
      uint16 r_fraction_lost;
      unsigned int r_cumulative_lost;
      unsigned int r_extended_max;
      unsigned int r_jitter;
      int r_rtt_ms;
      if (engine_->vie()->rtp()->GetSentRTCPStatistics(
              channel_id,
              r_fraction_lost,
              r_cumulative_lost,
              r_extended_max,
              r_jitter, r_rtt_ms) == 0) {
        // Convert Q8 to float.
        sinfo.packets_lost = r_cumulative_lost;
        sinfo.fraction_lost = static_cast<float>(r_fraction_lost) / (1 << 8);
        sinfo.rtt_ms = r_rtt_ms;
      }
      info->senders.push_back(sinfo);

      // Fold this channel's bandwidth usage into the channel-wide totals;
      // failures are logged but do not abort the stats collection.
      unsigned int channel_total_bitrate_sent = 0;
      unsigned int channel_video_bitrate_sent = 0;
      unsigned int channel_fec_bitrate_sent = 0;
      unsigned int channel_nack_bitrate_sent = 0;
      if (engine_->vie()->rtp()->GetBandwidthUsage(
          channel_id, channel_total_bitrate_sent, channel_video_bitrate_sent,
          channel_fec_bitrate_sent, channel_nack_bitrate_sent) == 0) {
        total_bitrate_sent += channel_total_bitrate_sent;
        video_bitrate_sent += channel_video_bitrate_sent;
        fec_bitrate_sent += channel_fec_bitrate_sent;
        nack_bitrate_sent += channel_nack_bitrate_sent;
      } else {
        LOG_RTCERR1(GetBandwidthUsage, channel_id);
      }
      unsigned int estimated_stream_send_bandwidth = 0;
      if (engine_->vie()->rtp()->GetEstimatedSendBandwidth(
          channel_id, &estimated_stream_send_bandwidth) == 0) {
        estimated_send_bandwidth += estimated_stream_send_bandwidth;
      } else {
        LOG_RTCERR1(GetEstimatedSendBandwidth, channel_id);
      }
      unsigned int target_enc_stream_bitrate = 0;
      if (engine_->vie()->codec()->GetCodecTargetBitrate(
          channel_id, &target_enc_stream_bitrate) == 0) {
        target_enc_bitrate += target_enc_stream_bitrate;
      } else {
        LOG_RTCERR1(GetCodecTargetBitrate, channel_id);
      }
    }
  } else {
    LOG(LS_WARNING) << "GetStats: sender information not ready.";
  }

  // Get the SSRC and stats for each receiver, based on our own calculations.
  unsigned int estimated_recv_bandwidth = 0;
  for (RecvChannelMap::const_iterator it = recv_channels_.begin();
       it != recv_channels_.end(); ++it) {
    // Don't report receive statistics from the default channel if we have
    // specified receive channels.
    if (it->first == 0 && recv_channels_.size() > 1)
      continue;
    WebRtcVideoChannelRecvInfo* channel = it->second;
    unsigned int ssrc;
    // Get receiver statistics and build VideoReceiverInfo, if we have data.
    // Skip the default channel (ssrc == 0).
    if (engine_->vie()->rtp()->GetRemoteSSRC(
            channel->channel_id(), ssrc) != 0 ||
        ssrc == 0)
      continue;
    unsigned int bytes_sent, packets_sent, bytes_recv, packets_recv;
    if (engine_->vie()->rtp()->GetRTPStatistics(
            channel->channel_id(), bytes_sent, packets_sent, bytes_recv,
            packets_recv) != 0) {
      LOG_RTCERR1(GetRTPStatistics, channel->channel_id());
      return false;
    }
    VideoReceiverInfo rinfo;
    rinfo.ssrcs.push_back(ssrc);
    rinfo.bytes_rcvd = bytes_recv;
    rinfo.packets_rcvd = packets_recv;
    // RTCP-derived fields default to -1 and are overwritten below when a
    // sent RTCP report is available.
    rinfo.packets_lost = -1;
    rinfo.packets_concealed = -1;
    rinfo.fraction_lost = -1;  // from SentRTCP
    rinfo.firs_sent = channel->decoder_observer()->firs_requested();
    rinfo.nacks_sent = -1;
    rinfo.frame_width = channel->render_adapter()->width();
    rinfo.frame_height = channel->render_adapter()->height();
    rinfo.framerate_rcvd = channel->decoder_observer()->framerate();
    int fps = channel->render_adapter()->framerate();
    rinfo.framerate_decoded = fps;
    rinfo.framerate_output = fps;
    // Get sent RTCP statistics.
    uint16 s_fraction_lost;
    unsigned int s_cumulative_lost;
    unsigned int s_extended_max;
    unsigned int s_jitter;
    int s_rtt_ms;
    if (engine_->vie()->rtp()->GetReceivedRTCPStatistics(channel->channel_id(),
            s_fraction_lost, s_cumulative_lost, s_extended_max,
            s_jitter, s_rtt_ms) == 0) {
      // Convert Q8 to float.
      rinfo.packets_lost = s_cumulative_lost;
      rinfo.fraction_lost = static_cast<float>(s_fraction_lost) / (1 << 8);
    }
    info->receivers.push_back(rinfo);

    unsigned int estimated_recv_stream_bandwidth = 0;
    if (engine_->vie()->rtp()->GetEstimatedReceiveBandwidth(
        channel->channel_id(), &estimated_recv_stream_bandwidth) == 0) {
      estimated_recv_bandwidth += estimated_recv_stream_bandwidth;
    } else {
      LOG_RTCERR1(GetEstimatedReceiveBandwidth, channel->channel_id());
    }
  }

  // Build BandwidthEstimationInfo.
  // TODO(zhurunz): Add real unittest for this.
  BandwidthEstimationInfo bwe;
  // Calculations done above per send/receive stream.
  bwe.actual_enc_bitrate = video_bitrate_sent;
  bwe.transmit_bitrate = total_bitrate_sent;
  bwe.retransmit_bitrate = nack_bitrate_sent;
  bwe.available_send_bandwidth = estimated_send_bandwidth;
  bwe.available_recv_bandwidth = estimated_recv_bandwidth;
  bwe.target_enc_bitrate = target_enc_bitrate;
  info->bw_estimations.push_back(bwe);

  return true;
}
// Attaches |capturer| as the frame source for |ssrc|'s send channel,
// replacing any previous capturer. A NULL |capturer| detaches instead.
bool WebRtcVideoMediaChannel::SetCapturer(uint32 ssrc,
                                          VideoCapturer* capturer) {
  ASSERT(ssrc != 0);
  if (capturer == NULL) {
    return RemoveCapturer(ssrc);
  }
  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
  if (send_channel == NULL) {
    return false;
  }
  // Swap out any previously attached capturer before hooking up the new one.
  VideoCapturer* previous = send_channel->video_capturer();
  if (previous != NULL) {
    previous->SignalVideoFrame.disconnect(this);
  }
  send_channel->set_video_capturer(capturer);
  capturer->SignalVideoFrame.connect(
      this, &WebRtcVideoMediaChannel::AdaptAndSendFrame);
  // Propagate the current aspect ratio to camera capturers only; screencasts
  // keep their own dimensions.
  if (!capturer->IsScreencast() && ratio_w_ != 0 && ratio_h_ != 0) {
    capturer->UpdateAspectRatio(ratio_w_, ratio_h_);
  }
  // Queue a black frame to cover the gap until the new capturer delivers.
  const int64 frame_timestamp =
      send_channel->local_stream_info()->time_stamp();
  if (send_codec_) {
    QueueBlackFrame(ssrc, frame_timestamp, send_codec_->maxFramerate);
  }
  return true;
}
// Requests a key frame from the remote encoder. Always returns false:
// ViE exposes no application-level API for this and issues key-frame
// requests internally when the decoder reports errors.
bool WebRtcVideoMediaChannel::RequestIntraFrame() {
  // There is no API exposed to application to request a key frame
  // ViE does this internally when there are errors from decoder
  return false;
}
// Dispatches an incoming RTP packet to the decoder instance owning its
// SSRC; packets that match no multiplexed stream go to the default channel.
void WebRtcVideoMediaChannel::OnPacketReceived(talk_base::Buffer* packet) {
  uint32 ssrc = 0;
  // Drop packets whose SSRC cannot be parsed.
  if (!GetRtpSsrc(packet->data(), packet->length(), &ssrc))
    return;
  int target_channel = GetRecvChannelNum(ssrc);
  if (target_channel == -1) {
    target_channel = video_channel();
  }
  engine()->vie()->network()->ReceivedRTPPacket(
      target_channel, packet->data(), static_cast<int>(packet->length()));
}
// Dispatches an incoming RTCP packet. Send channels need every RTCP packet
// with feedback information (even sender reports can carry attached report
// blocks), and receive channels need sender reports to produce correct
// receiver reports.
void WebRtcVideoMediaChannel::OnRtcpReceived(talk_base::Buffer* packet) {
  uint32 ssrc = 0;
  if (!GetRtcpSsrc(packet->data(), packet->length(), &ssrc)) {
    LOG(LS_WARNING) << "Failed to parse SSRC from received RTCP packet";
    return;
  }
  int type = 0;
  if (!GetRtcpType(packet->data(), packet->length(), &type)) {
    LOG(LS_WARNING) << "Failed to parse type from received RTCP packet";
    return;
  }
  // Sender reports go to the (non-default) receive channel listening on
  // this SSRC, if any.
  if (type == kRtcpTypeSR) {
    const int recv_channel = GetRecvChannelNum(ssrc);
    if (recv_channel != -1 && !IsDefaultChannel(recv_channel)) {
      engine_->vie()->network()->ReceivedRTCPPacket(
          recv_channel, packet->data(),
          static_cast<int>(packet->length()));
    }
  }
  // An SR may contain RRs, and any RR entry may correspond to any one of
  // the send channels, so every RTCP packet is forwarded to all send
  // channels. ViE filters out irrelevant RRs internally.
  for (SendChannelMap::iterator iter = send_channels_.begin();
       iter != send_channels_.end(); ++iter) {
    engine_->vie()->network()->ReceivedRTCPPacket(
        iter->second->channel_id(), packet->data(),
        static_cast<int>(packet->length()));
  }
}
// Called when the transport's writability changes; forwards the new state
// to SetNetworkTransmissionState.
void WebRtcVideoMediaChannel::OnReadyToSend(bool ready) {
  SetNetworkTransmissionState(ready);
}
// Sets the muted state of the send stream identified by |ssrc|.
// Returns false when no send channel exists for |ssrc|.
bool WebRtcVideoMediaChannel::MuteStream(uint32 ssrc, bool muted) {
  WebRtcVideoChannelSendInfo* send_channel = GetSendChannel(ssrc);
  if (send_channel == NULL) {
    LOG(LS_ERROR) << "The specified ssrc " << ssrc << " is not in use.";
    return false;
  }
  send_channel->set_muted(muted);
  return true;
}
bool WebRtcVideoMediaChannel::SetRecvRtpHeaderExtensions(
const std::vector<RtpHeaderExtension>& extensions) {
if (receive_extensions_ == extensions) {
return true;
}
receive_extensions_ = extensions;
const RtpHeaderExtension* offset_extension =
FindHeaderExtension(extensions, kRtpTimestampOffsetHeaderExtension);
const RtpHeaderExtension* send_time_extension =
FindHeaderExtension(extensions, kRtpAbsoluteSendTimeHeaderExtension);
// Loop through all receive channels and enable/disable the extensions.
for (RecvChannelMap::iterator channel_it = recv_channels_.begin();
channel_it != recv_channels_.end(); ++channel_it) {
int channel_id = channel_it->second->channel_id();
if (!SetHeaderExtension(
&webrtc::ViERTP_RTCP::SetReceiveTimestampOffsetStatus, channel_id,
offset_extension)) {
return false;
}
if (!SetHeaderExtension(
&webrtc::ViERTP_RTCP::SetReceiveAbsoluteSendTimeStatus, channel_id,
send_time_extension)) {
return false;
}
}
return true;
}
// Applies the negotiated send-side RTP header extensions to every send
// channel. Returns false if any per-channel ViE call fails.
bool WebRtcVideoMediaChannel::SetSendRtpHeaderExtensions(
    const std::vector<RtpHeaderExtension>& extensions) {
  // Match SetRecvRtpHeaderExtensions: skip the per-channel ViE calls when
  // the extension set has not changed (e.g. on renegotiation).
  if (send_extensions_ == extensions) {
    return true;
  }
  send_extensions_ = extensions;
  const RtpHeaderExtension* offset_extension =
      FindHeaderExtension(extensions, kRtpTimestampOffsetHeaderExtension);
  const RtpHeaderExtension* send_time_extension =
      FindHeaderExtension(extensions, kRtpAbsoluteSendTimeHeaderExtension);
  // Loop through all send channels and enable/disable the extensions.
  for (SendChannelMap::iterator channel_it = send_channels_.begin();
       channel_it != send_channels_.end(); ++channel_it) {
    int channel_id = channel_it->second->channel_id();
    if (!SetHeaderExtension(
        &webrtc::ViERTP_RTCP::SetSendTimestampOffsetStatus, channel_id,
        offset_extension)) {
      return false;
    }
    if (!SetHeaderExtension(
        &webrtc::ViERTP_RTCP::SetSendAbsoluteSendTimeStatus, channel_id,
        send_time_extension)) {
      return false;
    }
  }
  return true;
}
// Configures the send bitrate. With |autobw| set, |bps| caps the maximum
// bitrate (default cap when bps <= 0); otherwise |bps| (or the default
// start bitrate) is used as a fixed target for min/start/max alike.
// Ignored in conference mode and before a send codec is configured.
bool WebRtcVideoMediaChannel::SetSendBandwidth(bool autobw, int bps) {
  // Fixed: log tag previously read "WebRtcVideoMediaChanne" (missing 'l').
  LOG(LS_INFO) << "WebRtcVideoMediaChannel::SetSendBandwidth";

  if (InConferenceMode()) {
    LOG(LS_INFO) << "Conference mode ignores SetSendBandwidth";
    return true;
  }
  if (!send_codec_) {
    LOG(LS_INFO) << "The send codec has not been set up yet";
    return true;
  }

  int min_bitrate;
  int start_bitrate;
  int max_bitrate;
  if (autobw) {
    // Use the default values for min bitrate.
    min_bitrate = kMinVideoBitrate;
    // Use the default value or the bps for the max
    max_bitrate = (bps <= 0) ? send_max_bitrate_ : (bps / 1000);
    // Maximum start bitrate can be kStartVideoBitrate.
    start_bitrate = talk_base::_min(kStartVideoBitrate, max_bitrate);
  } else {
    // Use the default start or the bps as the target bitrate.
    int target_bitrate = (bps <= 0) ? kStartVideoBitrate : (bps / 1000);
    min_bitrate = target_bitrate;
    start_bitrate = target_bitrate;
    max_bitrate = target_bitrate;
  }

  if (!SetSendCodec(*send_codec_, min_bitrate, start_bitrate, max_bitrate)) {
    return false;
  }
  LogSendCodecChange("SetSendBandwidth()");

  return true;
}
bool WebRtcVideoMediaChannel::SetOptions(const VideoOptions &options) {
// Always accept options that are unchanged.
if (options_ == options) {
return true;
}
// Trigger SetSendCodec to set correct noise reduction state if the option has
// changed.
bool denoiser_changed = options.video_noise_reduction.IsSet() &&
(options_.video_noise_reduction != options.video_noise_reduction);
bool leaky_bucket_changed = options.video_leaky_bucket.IsSet() &&
(options_.video_leaky_bucket != options.video_leaky_bucket);