mirror of https://github.com/NekoX-Dev/NekoX.git
Update to 7.1.1 (2094)
This commit is contained in:
parent 2eeca37bb4
commit 4992f231b3
@@ -284,7 +284,7 @@ android {
}
}

defaultConfig.versionCode = 2092
defaultConfig.versionCode = 2094

applicationVariants.all { variant ->
variant.outputs.all { output ->

@@ -319,7 +319,7 @@ android {
defaultConfig {
minSdkVersion 16
targetSdkVersion 28
versionName "7.1.0"
versionName "7.1.1"

vectorDrawables.generatedDensities = ['mdpi', 'hdpi', 'xhdpi', 'xxhdpi']

@@ -8,6 +8,8 @@
#include <rtc_base/ssl_adapter.h>
#include <modules/utility/include/jvm_android.h>
#include <sdk/android/native_api/base/init.h>
#include <voip/webrtc/media/base/media_constants.h>
#include <tgnet/FileLog.h>

#include "pc/video_track.h"
#include "legacy/InstanceImplLegacy.h"

@@ -266,10 +268,13 @@ JNIEXPORT jlong JNICALL Java_org_telegram_messenger_voip_NativeInstance_makeNati
.enableAEC = configObject.getBooleanField("enableAec") == JNI_TRUE,
.enableNS = configObject.getBooleanField("enableNs") == JNI_TRUE,
.enableAGC = configObject.getBooleanField("enableAgc") == JNI_TRUE,
.enableStunMarking = configObject.getBooleanField("enableSm") == JNI_TRUE,
.enableVolumeControl = true,
.logPath = tgvoip::jni::JavaStringToStdString(env, configObject.getStringField("logPath")),
.maxApiLayer = configObject.getIntField("maxApiLayer"),
.preferredAspectRatio = aspectRatio
.enableHighBitrateVideo = true,
.statsLogPath = tgvoip::jni::JavaStringToStdString(env, configObject.getStringField("statsLogPath")),
.preferredVideoCodecs = {cricket::kVp9CodecName}
},
.encryptionKey = EncryptionKey(
std::move(encryptionKeyValue),

@@ -346,6 +351,7 @@ JNIEXPORT jlong JNICALL Java_org_telegram_messenger_voip_NativeInstance_makeNati
holder->_platformContext = platformContext;
holder->nativeInstance->setIncomingVideoOutput(webrtc::JavaToNativeVideoSink(env, remoteSink));
holder->nativeInstance->setNetworkType(parseNetworkType(networkType));
holder->nativeInstance->setRequestedVideoAspect(aspectRatio);
return reinterpret_cast<jlong>(holder);
}

@@ -410,9 +416,9 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_stopNativ
});
}

JNIEXPORT long JNICALL Java_org_telegram_messenger_voip_NativeInstance_createVideoCapturer(JNIEnv *env, jclass clazz, jobject localSink) {
JNIEXPORT jlong JNICALL Java_org_telegram_messenger_voip_NativeInstance_createVideoCapturer(JNIEnv *env, jclass clazz, jobject localSink, jboolean front) {
initWebRTC(env);
std::unique_ptr<VideoCaptureInterface> capture = tgcalls::VideoCaptureInterface::Create(std::make_shared<AndroidContext>(env));
std::unique_ptr<VideoCaptureInterface> capture = tgcalls::VideoCaptureInterface::Create(front ? "front" : "back", std::make_shared<AndroidContext>(env));
capture->setOutput(webrtc::JavaToNativeVideoSink(env, localSink));
capture->setState(VideoState::Active);
return reinterpret_cast<intptr_t>(capture.release());

@@ -423,9 +429,9 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_destroyVi
delete capturer;
}

JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCameraCapturer(JNIEnv *env, jclass clazz, jlong videoCapturer) {
JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCameraCapturer(JNIEnv *env, jclass clazz, jlong videoCapturer, jboolean front) {
VideoCaptureInterface *capturer = reinterpret_cast<VideoCaptureInterface *>(videoCapturer);
capturer->switchCamera();
capturer->switchToDevice(front ? "front" : "back");
}

JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setVideoStateCapturer(JNIEnv *env, jclass clazz, jlong videoCapturer, jint videoState) {

@@ -433,12 +439,12 @@ JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setVideoS
capturer->setState(static_cast<VideoState>(videoState));
}

JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCamera(JNIEnv *env, jobject obj) {
JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_switchCamera(JNIEnv *env, jobject obj, jboolean front) {
InstanceHolder *instance = getInstanceHolder(env, obj);
if (instance->_videoCapture == nullptr) {
return;
}
instance->_videoCapture->switchCamera();
instance->_videoCapture->switchToDevice(front ? "front" : "back");
}

JNIEXPORT void Java_org_telegram_messenger_voip_NativeInstance_setVideoState(JNIEnv *env, jobject obj, jint state) {

@@ -449,12 +455,12 @@ JNIEXPORT void Java_org_telegram_messenger_voip_NativeInstance_setVideoState(JNI
instance->_videoCapture->setState(static_cast<VideoState>(state));
}

JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setupOutgoingVideo(JNIEnv *env, jobject obj, jobject localSink) {
JNIEXPORT void JNICALL Java_org_telegram_messenger_voip_NativeInstance_setupOutgoingVideo(JNIEnv *env, jobject obj, jobject localSink, jboolean front) {
InstanceHolder *instance = getInstanceHolder(env, obj);
if (instance->_videoCapture) {
return;
}
instance->_videoCapture = tgcalls::VideoCaptureInterface::Create(instance->_platformContext);
instance->_videoCapture = tgcalls::VideoCaptureInterface::Create(front ? "front" : "back", instance->_platformContext);
instance->_videoCapture->setOutput(webrtc::JavaToNativeVideoSink(env, localSink));
instance->_videoCapture->setState(VideoState::Active);
instance->nativeInstance->setVideoCapture(instance->_videoCapture);
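
Note on the hunks above: the stateless switchCamera() toggle is replaced by an explicit switchToDevice("front" / "back") call, with the choice passed as a jboolean through every JNI entry point, so the Java side stays the single source of truth for which camera is active. A minimal stand-alone sketch of that calling convention (VideoCaptureStub and deviceId are illustrative stand-ins, not tgcalls API):

    #include <iostream>
    #include <string>

    struct VideoCaptureStub { // stand-in for tgcalls::VideoCaptureInterface
        void switchToDevice(const std::string &device) {
            std::cout << "capturing from the " << device << " camera\n";
        }
    };

    // Mirrors the front ? "front" : "back" mapping used throughout the diff.
    static const char *deviceId(bool front) {
        return front ? "front" : "back";
    }

    int main() {
        VideoCaptureStub capturer;
        capturer.switchToDevice(deviceId(true));  // select the front camera
        capturer.switchToDevice(deviceId(false)); // switch to the back camera
    }
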
@@ -7,6 +7,8 @@
#include <memory>
#include <map>

#include "Stats.h"

namespace rtc {
template <typename VideoFrameT>
class VideoSinkInterface;

@@ -21,6 +23,12 @@ namespace tgcalls {
class VideoCaptureInterface;
class PlatformContext;

#ifndef _WIN32
using FilePath = std::string;
#else
using FilePath = std::wstring;
#endif

struct Proxy {
std::string host;
uint16_t port = 0;

@@ -91,18 +99,16 @@ struct Config {
double receiveTimeout = 0.;
DataSaving dataSaving = DataSaving::Never;
bool enableP2P = false;
bool allowTCP = false;
bool enableStunMarking = false;
bool enableAEC = false;
bool enableNS = false;
bool enableAGC = false;
bool enableCallUpgrade = false;
bool enableVolumeControl = false;
#ifndef _WIN32
std::string logPath;
#else
std::wstring logPath;
#endif
FilePath logPath;
FilePath statsLogPath;
int maxApiLayer = 0;
float preferredAspectRatio;
bool enableHighBitrateVideo = false;
std::vector<std::string> preferredVideoCodecs;
ProtocolVersion protocolVersion = ProtocolVersion::V0;
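
Note on the Config hunk above: the platform #ifdef around logPath moves into a single FilePath alias (std::string on POSIX, std::wstring on Windows), so the new statsLogPath field picks up the right string type for free. A minimal sketch of why the alias composes with file I/O; it assumes MSVC's non-standard wide-path overload of std::ofstream::open, which is what lets the same call compile on both platforms:

    #include <fstream>
    #include <string>

    #ifndef _WIN32
    using FilePath = std::string;
    #else
    using FilePath = std::wstring; // MSVC's ofstream accepts wide paths as an extension
    #endif

    void writeLog(const FilePath &path, const std::string &text) {
        std::ofstream file;
        file.open(path); // narrow overload on POSIX, MSVC wide overload on Windows
        file << text;
    }
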
@@ -148,9 +154,17 @@ struct FinalState {
PersistentState persistentState;
std::string debugLog;
TrafficStats trafficStats;
CallStats callStats;
bool isRatingSuggested = false;
};

struct MediaDevicesConfig {
std::string audioInputId;
std::string audioOutputId;
float inputVolume = 1.f;
float outputVolume = 1.f;
};

class Instance {
protected:
Instance() = default;

@@ -163,6 +177,7 @@ public:
virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
virtual void setEchoCancellationStrength(int strength) = 0;

virtual bool supportsVideo() = 0;
virtual void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

virtual void setAudioInputDevice(std::string id) = 0;

@@ -170,7 +185,7 @@ public:
virtual void setInputVolume(float level) = 0;
virtual void setOutputVolume(float level) = 0;
virtual void setAudioOutputDuckingEnabled(bool enabled) = 0;

virtual void setIsLowBatteryLevel(bool isLowBatteryLevel) = 0;

virtual std::string getLastError() = 0;

@@ -181,6 +196,7 @@ public:

virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
virtual void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) = 0;
virtual void setRequestedVideoAspect(float aspect) = 0;

virtual void stop(std::function<void(FinalState)> completion) = 0;

@@ -197,6 +213,7 @@ struct Descriptor {
std::vector<RtcServer> rtcServers;
NetworkType initialNetworkType = NetworkType();
EncryptionKey encryptionKey;
MediaDevicesConfig mediaDevicesConfig;
std::shared_ptr<VideoCaptureInterface> videoCapture;
std::function<void(State)> stateUpdated;
std::function<void(int)> signalBarsUpdated;

@@ -204,7 +221,7 @@ struct Descriptor {
std::function<void(AudioState, VideoState)> remoteMediaStateUpdated;
std::function<void(float)> remotePrefferedAspectRatioUpdated;
std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted;
std::shared_ptr<PlatformContext> platformContext;
};

class Meta {
@@ -58,6 +58,12 @@ void InstanceImpl::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoC
});
}

void InstanceImpl::setRequestedVideoAspect(float aspect) {
_manager->perform(RTC_FROM_HERE, [aspect](Manager *manager) {
manager->setRequestedVideoAspect(aspect);
});
}

void InstanceImpl::setNetworkType(NetworkType networkType) {
bool isLowCostNetwork = false;
switch (networkType) {

@@ -93,19 +99,27 @@ void InstanceImpl::setEchoCancellationStrength(int strength) {
}

void InstanceImpl::setAudioInputDevice(std::string id) {
// TODO: not implemented
_manager->perform(RTC_FROM_HERE, [id](Manager *manager) {
manager->setAudioInputDevice(id);
});
}

void InstanceImpl::setAudioOutputDevice(std::string id) {
// TODO: not implemented
_manager->perform(RTC_FROM_HERE, [id](Manager *manager) {
manager->setAudioOutputDevice(id);
});
}

void InstanceImpl::setInputVolume(float level) {
// TODO: not implemented
_manager->perform(RTC_FROM_HERE, [level](Manager *manager) {
manager->setInputVolume(level);
});
}

void InstanceImpl::setOutputVolume(float level) {
// TODO: not implemented
_manager->perform(RTC_FROM_HERE, [level](Manager *manager) {
manager->setOutputVolume(level);
});
}

void InstanceImpl::setAudioOutputDuckingEnabled(bool enabled) {

@@ -142,40 +156,18 @@ void InstanceImpl::stop(std::function<void(FinalState)> completion) {
std::string debugLog = _logSink->result();

_manager->perform(RTC_FROM_HERE, [completion, debugLog = std::move(debugLog)](Manager *manager) {
manager->getNetworkStats([completion, debugLog = std::move(debugLog)](TrafficStats stats) {
manager->getNetworkStats([completion, debugLog = std::move(debugLog)](TrafficStats stats, CallStats callStats) {
FinalState finalState;
finalState.debugLog = debugLog;
finalState.isRatingSuggested = false;
finalState.trafficStats = stats;
finalState.callStats = callStats;

completion(finalState);
});
});
}

/*void InstanceImpl::controllerStateCallback(Controller::State state) {
if (onStateUpdated_) {
const auto mappedState = [&] {
switch (state) {
case Controller::State::WaitInit:
return State::WaitInit;
case Controller::State::WaitInitAck:
return State::WaitInitAck;
case Controller::State::Established:
return State::Estabilished;
case Controller::State::Failed:
return State::Failed;
case Controller::State::Reconnecting:
return State::Reconnecting;
default:
return State::Estabilished;
}
}();

onStateUpdated_(mappedState);
}
}*/

int InstanceImpl::GetConnectionMaxLayer() {
return 92; // TODO: retrieve from LayerBase
}
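
Note on the stop() hunk above: the shutdown path no longer finishes with traffic stats alone; getNetworkStats now delivers a CallStats alongside TrafficStats, and both land in the FinalState handed to the completion callback. A stripped-down sketch of that chaining, with the cross-thread perform()/PostTask hops of the real code collapsed into plain calls so the data flow is visible (all types here are simplified stand-ins):

    #include <functional>
    #include <string>

    struct TrafficStats {};
    struct CallStats { std::string outgoingCodec; };
    struct FinalState {
        std::string debugLog;
        TrafficStats trafficStats;
        CallStats callStats; // new in this commit
        bool isRatingSuggested = false;
    };

    // Stage 1 would run on the network thread, stage 2 on the media thread.
    void getNetworkStats(std::function<void(TrafficStats, CallStats)> completion) {
        TrafficStats network;
        CallStats call;
        call.outgoingCodec = "VP9"; // illustrative value
        completion(network, call);
    }

    void stop(std::string debugLog, std::function<void(FinalState)> completion) {
        getNetworkStats([debugLog, completion](TrafficStats stats, CallStats callStats) {
            FinalState finalState;
            finalState.debugLog = debugLog;
            finalState.isRatingSuggested = false;
            finalState.trafficStats = stats;
            finalState.callStats = callStats;
            completion(finalState);
        });
    }
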
@@ -21,8 +21,12 @@ public:

void receiveSignalingData(const std::vector<uint8_t> &data) override;
void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
void setRequestedVideoAspect(float aspect) override;
void setNetworkType(NetworkType networkType) override;
void setMuteMicrophone(bool muteMicrophone) override;
bool supportsVideo() override {
return true;
}
void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
void setAudioOutputGainControlEnabled(bool enabled) override;
void setEchoCancellationStrength(int strength) override;

@@ -38,7 +42,6 @@ public:
TrafficStats getTrafficStats() override;
PersistentState getPersistentState() override;
void stop(std::function<void(FinalState)> completion) override;
//void controllerStateCallback(Controller::State state);

private:
std::unique_ptr<ThreadLocalObject<Manager>> _manager;
@@ -2,6 +2,8 @@

#include "rtc_base/byte_buffer.h"

#include <fstream>

namespace tgcalls {
namespace {

@@ -24,8 +26,74 @@ rtc::Thread *makeMediaThread() {
return value.get();
}

void dumpStatsLog(const FilePath &path, const CallStats &stats) {
if (path.empty()) {
return;
}
std::ofstream file;
file.open(path);

file << "{";
file << "\"v\":\"" << 1 << "\"";
file << ",";

file << "\"codec\":\"" << stats.outgoingCodec << "\"";
file << ",";

file << "\"bitrate\":[";
bool addComma = false;
for (auto &it : stats.bitrateRecords) {
if (addComma) {
file << ",";
}
file << "{";
file << "\"t\":\"" << it.timestamp << "\"";
file << ",";
file << "\"b\":\"" << it.bitrate << "\"";
file << "}";
addComma = true;
}
file << "]";
file << ",";

file << "\"network\":[";
addComma = false;
for (auto &it : stats.networkRecords) {
if (addComma) {
file << ",";
}
file << "{";
file << "\"t\":\"" << it.timestamp << "\"";
file << ",";
file << "\"e\":\"" << (int)(it.endpointType) << "\"";
file << ",";
file << "\"w\":\"" << (it.isLowCost ? 1 : 0) << "\"";
file << "}";
addComma = true;
}
file << "]";

file << "}";

file.close();
}

} // namespace

bool Manager::ResolvedNetworkStatus::operator==(const ResolvedNetworkStatus &rhs) {
if (rhs.isLowCost != isLowCost) {
return false;
}
if (rhs.isLowDataRequested != isLowDataRequested) {
return false;
}
return true;
}

bool Manager::ResolvedNetworkStatus::operator!=(const ResolvedNetworkStatus &rhs) {
return !(*this == rhs);
}

rtc::Thread *Manager::getMediaThread() {
static rtc::Thread *value = makeMediaThread();
return value;
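
Note on dumpStatsLog above: the stats file is a single hand-serialized JSON object in which every value, including numbers, is written as a string. With illustrative values and one record per array, the output looks like:

    {"v":"1","codec":"VP9","bitrate":[{"t":"1600000000","b":"800"}],"network":[{"t":"1600000000","e":"0","w":"1"}]}

Here t is a seconds timestamp (the bitrate records store rtc::TimeMillis() / 1000), b the send bitrate in kbps, e the numeric endpoint type, and w is 1 when the network was low-cost; v is a format version. The writer does no escaping, which is safe only as long as outgoingCodec never contains quotes.
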
@@ -39,8 +107,12 @@ _signaling(
_encryptionKey,
[=](int delayMs, int cause) { sendSignalingAsync(delayMs, cause); }),
_enableP2P(descriptor.config.enableP2P),
_enableTCP(descriptor.config.allowTCP),
_enableStunMarking(descriptor.config.enableStunMarking),
_protocolVersion(descriptor.config.protocolVersion),
_statsLogPath(descriptor.config.statsLogPath),
_rtcServers(std::move(descriptor.rtcServers)),
_mediaDevicesConfig(std::move(descriptor.mediaDevicesConfig)),
_videoCapture(std::move(descriptor.videoCapture)),
_stateUpdated(std::move(descriptor.stateUpdated)),
_remoteMediaStateUpdated(std::move(descriptor.remoteMediaStateUpdated)),

@@ -48,13 +120,14 @@ _remoteBatteryLevelIsLowUpdated(std::move(descriptor.remoteBatteryLevelIsLowUpda
_remotePrefferedAspectRatioUpdated(std::move(descriptor.remotePrefferedAspectRatioUpdated)),
_signalingDataEmitted(std::move(descriptor.signalingDataEmitted)),
_signalBarsUpdated(std::move(descriptor.signalBarsUpdated)),
_localPreferredVideoAspectRatio(descriptor.config.preferredAspectRatio),
_enableHighBitrateVideo(descriptor.config.enableHighBitrateVideo),
_dataSaving(descriptor.config.dataSaving),
_platformContext(descriptor.platformContext) {

assert(_thread->IsCurrent());
assert(_stateUpdated != nullptr);
assert(_signalingDataEmitted != nullptr);

_preferredCodecs = descriptor.config.preferredVideoCodecs;

_sendSignalingMessage = [=](const Message &message) {

@@ -104,11 +177,13 @@ void Manager::start() {
strong->_sendSignalingMessage(std::move(message));
});
};
_networkManager.reset(new ThreadLocalObject<NetworkManager>(getNetworkThread(), [weak, thread, sendSignalingMessage, encryptionKey = _encryptionKey, enableP2P = _enableP2P, rtcServers = _rtcServers] {
_networkManager.reset(new ThreadLocalObject<NetworkManager>(getNetworkThread(), [weak, thread, sendSignalingMessage, encryptionKey = _encryptionKey, enableP2P = _enableP2P, enableTCP = _enableTCP, enableStunMarking = _enableStunMarking, rtcServers = _rtcServers] {
return new NetworkManager(
getNetworkThread(),
encryptionKey,
enableP2P,
enableTCP,
enableStunMarking,
rtcServers,
[=](const NetworkManager::State &state) {
thread->PostTask(RTC_FROM_HERE, [=] {

@@ -137,7 +212,7 @@ void Manager::start() {
strong->_mediaManager->perform(RTC_FROM_HERE, [=](MediaManager *mediaManager) {
mediaManager->setIsConnected(state.isReadyToSendData);
});

if (isFirstConnection) {
strong->sendInitialSignalingMessages();
}

@@ -167,10 +242,12 @@ void Manager::start() {
});
}));
bool isOutgoing = _encryptionKey.isOutgoing;
_mediaManager.reset(new ThreadLocalObject<MediaManager>(getMediaThread(), [weak, isOutgoing, thread, sendSignalingMessage, videoCapture = _videoCapture, localPreferredVideoAspectRatio = _localPreferredVideoAspectRatio, enableHighBitrateVideo = _enableHighBitrateVideo, signalBarsUpdated = _signalBarsUpdated, preferredCodecs = _preferredCodecs, platformContext = _platformContext]() {
_mediaManager.reset(new ThreadLocalObject<MediaManager>(getMediaThread(), [weak, isOutgoing, protocolVersion = _protocolVersion, thread, sendSignalingMessage, videoCapture = _videoCapture, mediaDevicesConfig = _mediaDevicesConfig, enableHighBitrateVideo = _enableHighBitrateVideo, signalBarsUpdated = _signalBarsUpdated, preferredCodecs = _preferredCodecs, platformContext = _platformContext]() {
return new MediaManager(
getMediaThread(),
isOutgoing,
protocolVersion,
mediaDevicesConfig,
videoCapture,
sendSignalingMessage,
[=](Message &&message) {

@@ -183,7 +260,6 @@ void Manager::start() {
});
},
signalBarsUpdated,
localPreferredVideoAspectRatio,
enableHighBitrateVideo,
preferredCodecs,
platformContext);

@@ -228,10 +304,10 @@ void Manager::receiveMessage(DecryptedMessage &&message) {
if (_remoteBatteryLevelIsLowUpdated) {
_remoteBatteryLevelIsLowUpdated(remoteBatteryLevelIsLow->batteryLow);
}
} else if (const auto remoteNetworkType = absl::get_if<RemoteNetworkTypeMessage>(data)) {
bool wasCurrentNetworkLowCost = calculateIsCurrentNetworkLowCost();
_remoteNetworkIsLowCost = remoteNetworkType->isLowCost;
updateIsCurrentNetworkLowCost(wasCurrentNetworkLowCost);
} else if (const auto remoteNetworkStatus = absl::get_if<RemoteNetworkStatusMessage>(data)) {
_remoteNetworkIsLowCost = remoteNetworkStatus->isLowCost;
_remoteIsLowDataRequested = remoteNetworkStatus->isLowDataRequested;
updateCurrentResolvedNetworkStatus();
} else {
if (const auto videoParameters = absl::get_if<VideoParametersMessage>(data)) {
float value = ((float)videoParameters->aspectRatio) / 1000.0;

@@ -257,6 +333,12 @@ void Manager::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCaptur
});
}

void Manager::setRequestedVideoAspect(float aspect) {
_mediaManager->perform(RTC_FROM_HERE, [aspect](MediaManager *mediaManager) {
mediaManager->setRequestedVideoAspect(aspect);
});
}

void Manager::setMuteOutgoingAudio(bool mute) {
_mediaManager->perform(RTC_FROM_HERE, [mute](MediaManager *mediaManager) {
mediaManager->setMuteOutgoingAudio(mute);
@@ -278,49 +360,114 @@ void Manager::setIsLocalNetworkLowCost(bool isLocalNetworkLowCost) {
_networkManager->perform(RTC_FROM_HERE, [isLocalNetworkLowCost](NetworkManager *networkManager) {
networkManager->setIsLocalNetworkLowCost(isLocalNetworkLowCost);
});

bool wasCurrentNetworkLowCost = calculateIsCurrentNetworkLowCost();

_localNetworkIsLowCost = isLocalNetworkLowCost;
updateIsCurrentNetworkLowCost(wasCurrentNetworkLowCost);

updateCurrentResolvedNetworkStatus();
}
}

void Manager::getNetworkStats(std::function<void (TrafficStats, CallStats)> completion) {
_networkManager->perform(RTC_FROM_HERE, [thread = _thread, weak = std::weak_ptr<Manager>(shared_from_this()), completion = std::move(completion), statsLogPath = _statsLogPath](NetworkManager *networkManager) {
auto networkStats = networkManager->getNetworkStats();

CallStats callStats;
networkManager->fillCallStats(callStats);

thread->PostTask(RTC_FROM_HERE, [weak, networkStats, completion = std::move(completion), callStats = std::move(callStats), statsLogPath = statsLogPath] {
const auto strong = weak.lock();
if (!strong) {
return;
}

strong->_mediaManager->perform(RTC_FROM_HERE, [networkStats, completion = std::move(completion), callStatsValue = std::move(callStats), statsLogPath = statsLogPath](MediaManager *mediaManager) {
CallStats callStats = std::move(callStatsValue);
mediaManager->fillCallStats(callStats);
dumpStatsLog(statsLogPath, callStats);
completion(networkStats, callStats);
});
});
});
}

void Manager::updateCurrentResolvedNetworkStatus() {
bool localIsLowDataRequested = false;
switch (_dataSaving) {
case DataSaving::Never:
localIsLowDataRequested = false;
break;
case DataSaving::Mobile:
localIsLowDataRequested = !_localNetworkIsLowCost;
break;
case DataSaving::Always:
localIsLowDataRequested = true;
default:
break;
}

ResolvedNetworkStatus localStatus;
localStatus.isLowCost = _localNetworkIsLowCost;
localStatus.isLowDataRequested = localIsLowDataRequested;

if (!_currentResolvedLocalNetworkStatus.has_value() || *_currentResolvedLocalNetworkStatus != localStatus) {
_currentResolvedLocalNetworkStatus = localStatus;

switch (_protocolVersion) {
case ProtocolVersion::V1:
if (_didConnectOnce) {
_sendTransportMessage({ RemoteNetworkTypeMessage{ isLocalNetworkLowCost } });
_sendTransportMessage({ RemoteNetworkStatusMessage{ localStatus.isLowCost, localStatus.isLowDataRequested } });
}
break;
default:
break;
}
}

ResolvedNetworkStatus status;
status.isLowCost = _localNetworkIsLowCost && _remoteNetworkIsLowCost;
status.isLowDataRequested = localIsLowDataRequested || _remoteIsLowDataRequested;

if (!_currentResolvedNetworkStatus.has_value() || *_currentResolvedNetworkStatus != status) {
_currentResolvedNetworkStatus = status;
_mediaManager->perform(RTC_FROM_HERE, [status](MediaManager *mediaManager) {
mediaManager->setNetworkParameters(status.isLowCost, status.isLowDataRequested);
});
}
}

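Note on updateCurrentResolvedNetworkStatus above: the two peers' reports are merged asymmetrically; the call counts as low-cost only when both ends are on cheap networks, while low-data mode is honored if either end requests it. Restated as a pure function (names follow the diff; the helper itself is illustrative):

    struct ResolvedNetworkStatus {
        bool isLowCost = false;
        bool isLowDataRequested = false;
    };

    ResolvedNetworkStatus resolve(bool localLowCost, bool remoteLowCost,
                                  bool localLowData, bool remoteLowData) {
        return {
            localLowCost && remoteLowCost, // cheap only if cheap on both ends
            localLowData || remoteLowData, // frugal if either end asks for it
        };
    }

Also visible in the switch: with DataSaving::Mobile, localIsLowDataRequested is derived as !_localNetworkIsLowCost, i.e. data saving kicks in exactly when the local link is metered.
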
void Manager::sendInitialSignalingMessages() {
if (_currentResolvedLocalNetworkStatus.has_value()) {
switch (_protocolVersion) {
case ProtocolVersion::V1:
_sendTransportMessage({ RemoteNetworkStatusMessage{ _currentResolvedLocalNetworkStatus->isLowCost, _currentResolvedLocalNetworkStatus->isLowDataRequested } });
break;
default:
break;
}
}
}

void Manager::getNetworkStats(std::function<void (TrafficStats)> completion) {
_networkManager->perform(RTC_FROM_HERE, [completion = std::move(completion)](NetworkManager *networkManager) {
completion(networkManager->getNetworkStats());
});
void Manager::setAudioInputDevice(std::string id) {
_mediaManager->perform(RTC_FROM_HERE, [id](MediaManager *mediaManager) {
mediaManager->setAudioInputDevice(id);
});
}

bool Manager::calculateIsCurrentNetworkLowCost() const {
return _localNetworkIsLowCost && _remoteNetworkIsLowCost;
}
void Manager::updateIsCurrentNetworkLowCost(bool wasLowCost) {
bool isLowCost = calculateIsCurrentNetworkLowCost();
if (isLowCost != wasLowCost) {
_mediaManager->perform(RTC_FROM_HERE, [isLowCost](MediaManager *mediaManager) {
mediaManager->setIsCurrentNetworkLowCost(isLowCost);
});
}
void Manager::setAudioOutputDevice(std::string id) {
_mediaManager->perform(RTC_FROM_HERE, [id](MediaManager *mediaManager) {
mediaManager->setAudioOutputDevice(id);
});
}

void Manager::sendInitialSignalingMessages() {
switch (_protocolVersion) {
case ProtocolVersion::V1:
_sendTransportMessage({ RemoteNetworkTypeMessage{ _localNetworkIsLowCost } });
break;
default:
break;
}
void Manager::setInputVolume(float level) {
_mediaManager->perform(RTC_FROM_HERE, [level](MediaManager *mediaManager) {
mediaManager->setInputVolume(level);
});
}

void Manager::setOutputVolume(float level) {
_mediaManager->perform(RTC_FROM_HERE, [level](MediaManager *mediaManager) {
mediaManager->setOutputVolume(level);
});
}

} // namespace tgcalls
@@ -6,10 +6,20 @@
#include "NetworkManager.h"
#include "MediaManager.h"
#include "Instance.h"
#include "Stats.h"

namespace tgcalls {

class Manager final : public std::enable_shared_from_this<Manager> {
private:
struct ResolvedNetworkStatus {
bool isLowCost = false;
bool isLowDataRequested = false;

bool operator==(const ResolvedNetworkStatus &rhs);
bool operator!=(const ResolvedNetworkStatus &rhs);
};

public:
static rtc::Thread *getMediaThread();

@@ -19,25 +29,35 @@ public:
void start();
void receiveSignalingData(const std::vector<uint8_t> &data);
void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture);
void setRequestedVideoAspect(float aspect);
void setMuteOutgoingAudio(bool mute);
void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
void setIsLowBatteryLevel(bool isLowBatteryLevel);
void setIsLocalNetworkLowCost(bool isLocalNetworkLowCost);
void getNetworkStats(std::function<void(TrafficStats)> completion);

void getNetworkStats(std::function<void(TrafficStats, CallStats)> completion);

void setAudioInputDevice(std::string id);
void setAudioOutputDevice(std::string id);
void setInputVolume(float level);
void setOutputVolume(float level);

private:
void sendSignalingAsync(int delayMs, int cause);
void receiveMessage(DecryptedMessage &&message);
bool calculateIsCurrentNetworkLowCost() const;
void updateIsCurrentNetworkLowCost(bool wasLowCost);
void updateCurrentResolvedNetworkStatus();
void sendInitialSignalingMessages();

rtc::Thread *_thread;
EncryptionKey _encryptionKey;
EncryptedConnection _signaling;
bool _enableP2P = false;
bool _enableTCP = false;
bool _enableStunMarking = false;
ProtocolVersion _protocolVersion = ProtocolVersion::V0;
FilePath _statsLogPath;
std::vector<RtcServer> _rtcServers;
MediaDevicesConfig _mediaDevicesConfig;
std::shared_ptr<VideoCaptureInterface> _videoCapture;
std::function<void(State)> _stateUpdated;
std::function<void(AudioState, VideoState)> _remoteMediaStateUpdated;

@@ -51,11 +71,14 @@ private:
std::unique_ptr<ThreadLocalObject<MediaManager>> _mediaManager;
State _state = State::Reconnecting;
bool _didConnectOnce = false;
float _localPreferredVideoAspectRatio = 0.0f;
bool _enableHighBitrateVideo = false;
DataSaving _dataSaving = DataSaving::Never;
std::vector<std::string> _preferredCodecs;
bool _localNetworkIsLowCost = false;
bool _remoteNetworkIsLowCost = false;
bool _remoteIsLowDataRequested = false;
absl::optional<ResolvedNetworkStatus> _currentResolvedLocalNetworkStatus;
absl::optional<ResolvedNetworkStatus> _currentResolvedNetworkStatus;

std::shared_ptr<PlatformContext> _platformContext;
@@ -45,6 +45,43 @@ VideoCaptureInterfaceObject *GetVideoCaptureAssumingSameThread(VideoCaptureInter

} // namespace

class VideoSinkInterfaceProxyImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
VideoSinkInterfaceProxyImpl(bool rewriteRotation) :
_rewriteRotation(rewriteRotation) {
}

virtual ~VideoSinkInterfaceProxyImpl() {
}

virtual void OnFrame(const webrtc::VideoFrame& frame) override {
if (_impl) {
if (_rewriteRotation) {
webrtc::VideoFrame updatedFrame = frame;
//updatedFrame.set_rotation(webrtc::VideoRotation::kVideoRotation_90);
_impl->OnFrame(updatedFrame);
} else {
_impl->OnFrame(frame);
}
}
}

virtual void OnDiscardedFrame() override {
if (_impl) {
_impl->OnDiscardedFrame();
}
}

void setSink(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> impl) {
_impl = impl;
}

private:
bool _rewriteRotation = false;
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _impl;

};

rtc::Thread *MediaManager::getWorkerThread() {
static rtc::Thread *value = makeWorkerThread();
return value;
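
Note on VideoSinkInterfaceProxyImpl above: the video channel is pointed at this proxy once, and setIncomingVideoOutput later just swaps the wrapped sink, so the output view can be attached or replaced while streams are already running; frames that arrive with no sink installed are silently dropped. The rotation rewrite is only armed for ProtocolVersion::V0 (see the constructor hunk below). A minimal stand-alone version of the same pattern (generic names, not the tgcalls types):

    #include <memory>
    #include <utility>

    template <typename Frame>
    struct Sink {
        virtual ~Sink() = default;
        virtual void OnFrame(const Frame &frame) = 0;
    };

    // Stable forwarding target whose inner sink can be swapped at any time.
    template <typename Frame>
    class SinkProxy final : public Sink<Frame> {
    public:
        void OnFrame(const Frame &frame) override {
            if (_impl) {
                _impl->OnFrame(frame); // drop frames while no sink is installed
            }
        }
        void setSink(std::shared_ptr<Sink<Frame>> impl) {
            _impl = std::move(impl);
        }
    private:
        std::shared_ptr<Sink<Frame>> _impl;
    };
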
@@ -53,25 +90,39 @@ rtc::Thread *MediaManager::getWorkerThread() {
MediaManager::MediaManager(
rtc::Thread *thread,
bool isOutgoing,
ProtocolVersion protocolVersion,
const MediaDevicesConfig &devicesConfig,
std::shared_ptr<VideoCaptureInterface> videoCapture,
std::function<void(Message &&)> sendSignalingMessage,
std::function<void(Message &&)> sendTransportMessage,
std::function<void(int)> signalBarsUpdated,
float localPreferredVideoAspectRatio,
bool enableHighBitrateVideo,
std::vector<std::string> preferredCodecs,
std::shared_ptr<PlatformContext> platformContext) :
_thread(thread),
_eventLog(std::make_unique<webrtc::RtcEventLogNull>()),
_taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()),
_sendSignalingMessage(std::move(sendSignalingMessage)),
_sendTransportMessage(std::move(sendTransportMessage)),
_signalBarsUpdated(std::move(signalBarsUpdated)),
_protocolVersion(protocolVersion),
_outgoingVideoState(videoCapture ? VideoState::Active : VideoState::Inactive),
_videoCapture(std::move(videoCapture)),
_localPreferredVideoAspectRatio(localPreferredVideoAspectRatio),
_enableHighBitrateVideo(enableHighBitrateVideo),
_platformContext(platformContext) {
bool rewriteFrameRotation = false;
switch (_protocolVersion) {
case ProtocolVersion::V0:
rewriteFrameRotation = true;
break;
case ProtocolVersion::V1:
rewriteFrameRotation = false;
break;
default:
break;
}
_incomingVideoSinkProxy.reset(new VideoSinkInterfaceProxyImpl(rewriteFrameRotation));

_ssrcAudio.incoming = isOutgoing ? ssrcAudioIncoming : ssrcAudioOutgoing;
_ssrcAudio.outgoing = (!isOutgoing) ? ssrcAudioIncoming : ssrcAudioOutgoing;
_ssrcAudio.fecIncoming = isOutgoing ? ssrcAudioFecIncoming : ssrcAudioFecOutgoing;
@@ -101,18 +152,31 @@ _platformContext(platformContext) {
mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();

mediaDeps.video_encoder_factory = PlatformInterface::SharedInstance()->makeVideoEncoderFactory(_platformContext);
mediaDeps.video_decoder_factory = PlatformInterface::SharedInstance()->makeVideoDecoderFactory(_platformContext);

_myVideoFormats = ComposeSupportedFormats(
mediaDeps.video_encoder_factory->GetSupportedFormats(),
mediaDeps.video_decoder_factory->GetSupportedFormats(),
preferredCodecs,
_platformContext);

mediaDeps.audio_processing = webrtc::AudioProcessingBuilder().Create();

/*_audioDeviceModule = createAudioDeviceModule();
if (!_audioDeviceModule) {
return;
}
mediaDeps.adm = _audioDeviceModule;*/

_mediaEngine = cricket::CreateMediaEngine(std::move(mediaDeps));
_mediaEngine->Init();

/*setAudioInputDevice(devicesConfig.audioInputId);
setAudioOutputDevice(devicesConfig.audioOutputId);
setInputVolume(devicesConfig.inputVolume);
setOutputVolume(devicesConfig.outputVolume);*/

webrtc::Call::Config callConfig(_eventLog.get());
callConfig.task_queue_factory = _taskQueueFactory.get();
callConfig.trials = &_fieldTrials;

@@ -124,8 +188,8 @@ _platformContext(platformContext) {
audioOptions.noise_suppression = true;
audioOptions.audio_jitter_buffer_fast_accelerate = true;

std::vector<std::string> streamIds;
streamIds.push_back("1");

_audioChannel.reset(_mediaEngine->voice().CreateMediaChannel(_call.get(), cricket::MediaConfig(), audioOptions, webrtc::CryptoOptions::NoGcm()));
_videoChannel.reset(_mediaEngine->video().CreateMediaChannel(_call.get(), cricket::MediaConfig(), cricket::VideoOptions(), webrtc::CryptoOptions::NoGcm(), _videoBitrateAllocatorFactory.get()));

@@ -172,16 +236,33 @@ _platformContext(platformContext) {
audioRecvParameters.rtcp.remote_estimate = true;

_audioChannel->SetRecvParameters(audioRecvParameters);
cricket::StreamParams audioRecvStreamParams = cricket::StreamParams::CreateLegacy(_ssrcAudio.incoming);
audioRecvStreamParams.set_stream_ids(streamIds);
_audioChannel->AddRecvStream(audioRecvStreamParams);
_audioChannel->SetPlayout(true);

_videoChannel->SetInterface(_videoNetworkInterface.get());

adjustBitratePreferences(true);
}

rtc::scoped_refptr<webrtc::AudioDeviceModule> MediaManager::createAudioDeviceModule() {
const auto check = [&](webrtc::AudioDeviceModule::AudioLayer layer) {
auto result = webrtc::AudioDeviceModule::Create(
layer,
_taskQueueFactory.get());
return (result && (result->Init() == 0)) ? result : nullptr;
};
if (auto result = check(webrtc::AudioDeviceModule::kPlatformDefaultAudio)) {
return result;
#ifdef WEBRTC_LINUX
} else if (auto result = check(webrtc::AudioDeviceModule::kLinuxAlsaAudio)) {
return result;
#endif // WEBRTC_LINUX
}
return nullptr;
}

void MediaManager::start() {
_sendSignalingMessage({ _myVideoFormats });

@@ -194,7 +275,7 @@ void MediaManager::start() {

MediaManager::~MediaManager() {
assert(_thread->IsCurrent());

RTC_LOG(LS_INFO) << "MediaManager::~MediaManager()";

_call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown);

@@ -212,18 +293,18 @@ MediaManager::~MediaManager() {
_audioChannel->SetInterface(nullptr);

setSendVideo(nullptr);

if (computeIsReceivingVideo()) {
_videoChannel->RemoveRecvStream(_ssrcVideo.incoming);
if (_enableFlexfec) {
_videoChannel->RemoveRecvStream(_ssrcVideo.fecIncoming);
}
}

if (_didConfigureVideo) {
_videoChannel->OnReadyToSend(false);
_videoChannel->SetSend(false);

if (_enableFlexfec) {
_videoChannel->RemoveSendStream(_ssrcVideo.outgoing);
_videoChannel->RemoveSendStream(_ssrcVideo.fecOutgoing);

@@ -231,7 +312,7 @@ MediaManager::~MediaManager() {
_videoChannel->RemoveSendStream(_ssrcVideo.outgoing);
}
}

_videoChannel->SetInterface(nullptr);
}

@@ -239,6 +320,11 @@ void MediaManager::setIsConnected(bool isConnected) {
if (_isConnected == isConnected) {
return;
}
bool isFirstConnection = false;
if (!_isConnected && isConnected) {
_didConnectOnce = true;
isFirstConnection = true;
}
_isConnected = isConnected;

if (_isConnected) {

@@ -257,8 +343,10 @@ void MediaManager::setIsConnected(bool isConnected) {
_videoChannel->OnReadyToSend(_isConnected);
_videoChannel->SetSend(_isConnected);
}
sendVideoParametersMessage();
sendOutgoingMediaStateMessage();
if (isFirstConnection) {
sendVideoParametersMessage();
sendOutgoingMediaStateMessage();
}
}

void MediaManager::sendVideoParametersMessage() {

@@ -292,7 +380,7 @@ void MediaManager::collectStats() {
break;
}
float sendBitrateKbps = ((float)stats.send_bandwidth_bps / 1000.0f);

RTC_LOG(LS_INFO) << "MediaManager sendBitrateKbps=" << (stats.send_bandwidth_bps / 1000);

float signalBarsNorm = 4.0f;

@@ -303,6 +391,8 @@ void MediaManager::collectStats() {
_signalBarsUpdated((int)(adjustedQuality * signalBarsNorm));
}

_bitrateRecords.push_back(CallStatsBitrateRecord { (int32_t)(rtc::TimeMillis() / 1000), stats.send_bandwidth_bps / 1000 });

beginStatsTimer(2000);
}

@@ -376,6 +466,15 @@ void MediaManager::setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapt
checkIsReceivingVideoChanged(wasReceiving);
}

void MediaManager::setRequestedVideoAspect(float aspect) {
if (_localPreferredVideoAspectRatio != aspect) {
_localPreferredVideoAspectRatio = aspect;
if (_didConnectOnce) {
sendVideoParametersMessage();
}
}
}

void MediaManager::configureSendingVideoIfNeeded() {
if (_didConfigureVideo) {
return;

@@ -384,7 +483,7 @@ void MediaManager::configureSendingVideoIfNeeded() {
return;
}
_didConfigureVideo = true;

auto codec = *_videoCodecOut;

codec.SetParam(cricket::kCodecParamMinBitrate, 64);

@@ -404,6 +503,15 @@ void MediaManager::configureSendingVideoIfNeeded() {
}

videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 2);
switch (_protocolVersion) {
case ProtocolVersion::V1:
videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kVideoRotationUri, 3);
videoSendParameters.extensions.emplace_back(
webrtc::RtpExtension::kTimestampOffsetUri, 4);
break;
default:
break;
}
videoSendParameters.rtcp.remote_estimate = true;
_videoChannel->SetSendParameters(videoSendParameters);

@@ -417,7 +525,7 @@ void MediaManager::configureSendingVideoIfNeeded() {
} else {
_videoChannel->AddSendStream(cricket::StreamParams::CreateLegacy(_ssrcVideo.outgoing));
}

adjustBitratePreferences(true);
}

@@ -427,12 +535,12 @@ void MediaManager::checkIsSendingVideoChanged(bool wasSending) {
return;
} else if (sending) {
configureSendingVideoIfNeeded();

if (_enableFlexfec) {
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, GetVideoCaptureAssumingSameThread(_videoCapture.get())->_videoSource);
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, GetVideoCaptureAssumingSameThread(_videoCapture.get())->source());
_videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
} else {
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, GetVideoCaptureAssumingSameThread(_videoCapture.get())->_videoSource);
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, GetVideoCaptureAssumingSameThread(_videoCapture.get())->source());
}

_videoChannel->OnReadyToSend(_isConnected);

@@ -441,7 +549,7 @@ void MediaManager::checkIsSendingVideoChanged(bool wasSending) {
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, nullptr);
_videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
}

adjustBitratePreferences(true);
}

@@ -449,6 +557,14 @@ int MediaManager::getMaxVideoBitrate() const {
return (_enableHighBitrateVideo && _isLowCostNetwork) ? 2000000 : 800000;
}

int MediaManager::getMaxAudioBitrate() const {
if (_isDataSavingActive) {
return 16000;
} else {
return 32000;
}
}

void MediaManager::adjustBitratePreferences(bool resetStartBitrate) {
if (computeIsSendingVideo()) {
webrtc::BitrateConstraints preferences;

@@ -457,7 +573,7 @@ void MediaManager::adjustBitratePreferences(bool resetStartBitrate) {
preferences.start_bitrate_bps = 400000;
}
preferences.max_bitrate_bps = getMaxVideoBitrate();

_call->GetTransportControllerSend()->SetSdpBitrateParameters(preferences);
} else {
webrtc::BitrateConstraints preferences;

@@ -474,9 +590,9 @@ void MediaManager::adjustBitratePreferences(bool resetStartBitrate) {
if (resetStartBitrate) {
preferences.start_bitrate_bps = 16000;
}
preferences.max_bitrate_bps = 32000;
preferences.max_bitrate_bps = getMaxAudioBitrate();
}

_call->GetTransportControllerSend()->SetSdpBitrateParameters(preferences);
}
}
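
Note on the bitrate hunks above: the resolved network status now feeds two hard caps; video may climb to 2 Mbps only when high-bitrate video is enabled and the link is low-cost (800 kbps otherwise), and audio halves from 32 kbps to 16 kbps while data saving is active. The same rules as free functions (values in bits per second, copied from the getters above):

    // Mirrors MediaManager::getMaxVideoBitrate().
    int maxVideoBitrate(bool enableHighBitrateVideo, bool isLowCostNetwork) {
        return (enableHighBitrateVideo && isLowCostNetwork) ? 2000000 : 800000;
    }

    // Mirrors MediaManager::getMaxAudioBitrate().
    int maxAudioBitrate(bool isDataSavingActive) {
        return isDataSavingActive ? 16000 : 32000;
    }
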
@@ -506,7 +622,16 @@ void MediaManager::checkIsReceivingVideoChanged(bool wasReceiving) {
}

videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 2);
//recv_parameters.rtcp.reduced_size = true;
switch (_protocolVersion) {
case ProtocolVersion::V1:
videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kVideoRotationUri, 3);
videoRecvParameters.extensions.emplace_back(
webrtc::RtpExtension::kTimestampOffsetUri, 4);
break;
default:
break;
}
videoRecvParameters.rtcp.reduced_size = true;
videoRecvParameters.rtcp.remote_estimate = true;

cricket::StreamParams videoRecvStreamParams;

@@ -514,16 +639,14 @@ void MediaManager::checkIsReceivingVideoChanged(bool wasReceiving) {
videoRecvStreamParams.ssrcs = {_ssrcVideo.incoming};
videoRecvStreamParams.ssrc_groups.push_back(videoRecvSsrcGroup);
videoRecvStreamParams.cname = "cname";
std::vector<std::string> streamIds;
streamIds.push_back("1");
videoRecvStreamParams.set_stream_ids(streamIds);

_videoChannel->SetRecvParameters(videoRecvParameters);
_videoChannel->AddRecvStream(videoRecvStreamParams);
_readyToReceiveVideo = true;
if (_currentIncomingVideoSink) {
_videoChannel->SetSink(_ssrcVideo.incoming, _currentIncomingVideoSink.get());
}
_videoChannel->SetSink(_ssrcVideo.incoming, _incomingVideoSinkProxy.get());
}
}

@@ -549,8 +672,7 @@ void MediaManager::setOutgoingVideoState(VideoState state) {
}

void MediaManager::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
_currentIncomingVideoSink = sink;
_videoChannel->SetSink(_ssrcVideo.incoming, _currentIncomingVideoSink.get());
_incomingVideoSinkProxy->setSink(sink);
}

static bool IsRtcp(const uint8_t* packet, size_t length) {

@@ -595,14 +717,148 @@ void MediaManager::remoteVideoStateUpdated(VideoState videoState) {
}
}

void MediaManager::setIsCurrentNetworkLowCost(bool isCurrentNetworkLowCost) {
if (_isLowCostNetwork != isCurrentNetworkLowCost) {
_isLowCostNetwork = isCurrentNetworkLowCost;
RTC_LOG(LS_INFO) << "MediaManager isLowCostNetwork updated: " << isCurrentNetworkLowCost ? 1 : 0;
void MediaManager::setNetworkParameters(bool isLowCost, bool isDataSavingActive) {
if (_isLowCostNetwork != isLowCost || _isDataSavingActive != isDataSavingActive) {
_isLowCostNetwork = isLowCost;
_isDataSavingActive = isDataSavingActive;
RTC_LOG(LS_INFO) << "MediaManager isLowCostNetwork: " << (isLowCost ? 1 : 0) << ", isDataSavingActive: " << (isDataSavingActive ? 1 : 0);
adjustBitratePreferences(false);
}
}

void MediaManager::fillCallStats(CallStats &callStats) {
if (_videoCodecOut.has_value()) {
callStats.outgoingCodec = _videoCodecOut->name;
}
callStats.bitrateRecords = std::move(_bitrateRecords);
}

void MediaManager::setAudioInputDevice(std::string id) {
const auto recording = _audioDeviceModule->Recording();
if (recording) {
_audioDeviceModule->StopRecording();
}
const auto finish = [&] {
if (recording) {
_audioDeviceModule->InitRecording();
_audioDeviceModule->StartRecording();
}
};
if (id == "default" || id.empty()) {
if (const auto result = _audioDeviceModule->SetRecordingDevice(webrtc::AudioDeviceModule::kDefaultCommunicationDevice)) {
RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): SetRecordingDevice(kDefaultCommunicationDevice) failed: " << result << ".";
} else {
RTC_LOG(LS_INFO) << "setAudioInputDevice(" << id << "): SetRecordingDevice(kDefaultCommunicationDevice) success.";
}
return finish();
}
const auto count = _audioDeviceModule
? _audioDeviceModule->RecordingDevices()
: int16_t(-666);
if (count <= 0) {
RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): Could not get recording devices count: " << count << ".";
return finish();
}
for (auto i = 0; i != count; ++i) {
char name[webrtc::kAdmMaxDeviceNameSize + 1] = { 0 };
char guid[webrtc::kAdmMaxGuidSize + 1] = { 0 };
_audioDeviceModule->RecordingDeviceName(i, name, guid);
if (id == guid) {
const auto result = _audioDeviceModule->SetRecordingDevice(i);
if (result != 0) {
RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << ") name '" << std::string(name) << "' failed: " << result << ".";
} else {
RTC_LOG(LS_INFO) << "setAudioInputDevice(" << id << ") name '" << std::string(name) << "' success.";
}
return finish();
}
}
RTC_LOG(LS_ERROR) << "setAudioInputDevice(" << id << "): Could not find recording device.";
return finish();
}

void MediaManager::setAudioOutputDevice(std::string id) {
const auto playing = _audioDeviceModule->Playing();
if (playing) {
_audioDeviceModule->StopPlayout();
}
const auto finish = [&] {
if (playing) {
_audioDeviceModule->InitPlayout();
_audioDeviceModule->StartPlayout();
}
};
if (id == "default" || id.empty()) {
if (const auto result = _audioDeviceModule->SetPlayoutDevice(webrtc::AudioDeviceModule::kDefaultCommunicationDevice)) {
RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): SetPlayoutDevice(kDefaultCommunicationDevice) failed: " << result << ".";
} else {
RTC_LOG(LS_INFO) << "setAudioOutputDevice(" << id << "): SetPlayoutDevice(kDefaultCommunicationDevice) success.";
}
return finish();
}
const auto count = _audioDeviceModule
? _audioDeviceModule->PlayoutDevices()
: int16_t(-666);
if (count <= 0) {
RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): Could not get playout devices count: " << count << ".";
return finish();
}
for (auto i = 0; i != count; ++i) {
char name[webrtc::kAdmMaxDeviceNameSize + 1] = { 0 };
char guid[webrtc::kAdmMaxGuidSize + 1] = { 0 };
_audioDeviceModule->PlayoutDeviceName(i, name, guid);
if (id == guid) {
const auto result = _audioDeviceModule->SetPlayoutDevice(i);
if (result != 0) {
RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << ") name '" << std::string(name) << "' failed: " << result << ".";
} else {
RTC_LOG(LS_INFO) << "setAudioOutputDevice(" << id << ") name '" << std::string(name) << "' success.";
}
return finish();
}
}
RTC_LOG(LS_ERROR) << "setAudioOutputDevice(" << id << "): Could not find playout device.";
return finish();
}

void MediaManager::setInputVolume(float level) {
// This is not what we want, it changes OS volume on macOS.
// auto min = uint32_t();
// auto max = uint32_t();
// if (const auto result = _audioDeviceModule->MinMicrophoneVolume(&min)) {
// RTC_LOG(LS_ERROR) << "setInputVolume(" << level << "): MinMicrophoneVolume failed: " << result << ".";
// return;
// } else if (const auto result = _audioDeviceModule->MaxMicrophoneVolume(&max)) {
// RTC_LOG(LS_ERROR) << "setInputVolume(" << level << "): MaxMicrophoneVolume failed: " << result << ".";
// return;
// }
// const auto volume = min + uint32_t(std::round((max - min) * std::min(std::max(level, 0.f), 1.f)));
// if (const auto result = _audioDeviceModule->SetMicrophoneVolume(volume)) {
// RTC_LOG(LS_ERROR) << "setInputVolume(" << level << "): SetMicrophoneVolume(" << volume << ") failed: " << result << ".";
// } else {
// RTC_LOG(LS_INFO) << "setInputVolume(" << level << ") volume " << volume << " success.";
// }
}

void MediaManager::setOutputVolume(float level) {
// This is not what we want, it changes OS volume on macOS.
// auto min = uint32_t();
// auto max = uint32_t();
// if (const auto result = _audioDeviceModule->MinSpeakerVolume(&min)) {
// RTC_LOG(LS_ERROR) << "setOutputVolume(" << level << "): MinSpeakerVolume failed: " << result << ".";
// return;
// } else if (const auto result = _audioDeviceModule->MaxSpeakerVolume(&max)) {
// RTC_LOG(LS_ERROR) << "setOutputVolume(" << level << "): MaxSpeakerVolume failed: " << result << ".";
// return;
// }
// const auto volume = min + uint32_t(std::round((max - min) * std::min(std::max(level, 0.f), 1.f)));
// if (const auto result = _audioDeviceModule->SetSpeakerVolume(volume)) {
// RTC_LOG(LS_ERROR) << "setOutputVolume(" << level << "): SetSpeakerVolume(" << volume << ") failed: " << result << ".";
// } else {
// RTC_LOG(LS_INFO) << "setOutputVolume(" << level << ") volume " << volume << " success.";
// }
}

MediaManager::NetworkInterfaceImpl::NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo) :
_mediaManager(mediaManager),
_isVideo(isVideo) {
@@ -10,6 +10,7 @@
#include "Instance.h"
#include "Message.h"
#include "VideoCaptureInterface.h"
#include "Stats.h"

#include <functional>
#include <memory>

@@ -20,6 +21,7 @@ class RtcEventLogNull;
class TaskQueueFactory;
class VideoBitrateAllocatorFactory;
class VideoTrackSourceInterface;
class AudioDeviceModule;
} // namespace webrtc

namespace cricket {

@@ -30,6 +32,8 @@ class VideoMediaChannel;

namespace tgcalls {

class VideoSinkInterfaceProxyImpl;

class MediaManager : public sigslot::has_slots<>, public std::enable_shared_from_this<MediaManager> {
public:
static rtc::Thread *getWorkerThread();

@@ -37,11 +41,12 @@ public:
MediaManager(
rtc::Thread *thread,
bool isOutgoing,
ProtocolVersion protocolVersion,
const MediaDevicesConfig &devicesConfig,
std::shared_ptr<VideoCaptureInterface> videoCapture,
std::function<void(Message &&)> sendSignalingMessage,
std::function<void(Message &&)> sendTransportMessage,
std::function<void(int)> signalBarsUpdated,
float localPreferredVideoAspectRatio,
bool enableHighBitrateVideo,
std::vector<std::string> preferredCodecs,
std::shared_ptr<PlatformContext> platformContext);

@@ -51,11 +56,18 @@ public:
void setIsConnected(bool isConnected);
void notifyPacketSent(const rtc::SentPacket &sentPacket);
void setSendVideo(std::shared_ptr<VideoCaptureInterface> videoCapture);
void setRequestedVideoAspect(float aspect);
void setMuteOutgoingAudio(bool mute);
void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
void receiveMessage(DecryptedMessage &&message);
void remoteVideoStateUpdated(VideoState videoState);
void setIsCurrentNetworkLowCost(bool isCurrentNetworkLowCost);
void setNetworkParameters(bool isLowCost, bool isDataSavingActive);
void fillCallStats(CallStats &callStats);

void setAudioInputDevice(std::string id);
void setAudioOutputDevice(std::string id);
void setInputVolume(float level);
void setOutputVolume(float level);

private:
struct SSRC {

@@ -90,6 +102,7 @@ private:
bool videoCodecsNegotiated() const;

int getMaxVideoBitrate() const;
int getMaxAudioBitrate() const;
void adjustBitratePreferences(bool resetStartBitrate);
bool computeIsReceivingVideo() const;
void checkIsReceivingVideoChanged(bool wasReceiving);

@@ -98,7 +111,9 @@ private:
void setOutgoingAudioState(AudioState state);
void sendVideoParametersMessage();
void sendOutgoingMediaStateMessage();

rtc::scoped_refptr<webrtc::AudioDeviceModule> createAudioDeviceModule();

void beginStatsTimer(int timeoutMs);
void collectStats();

@@ -113,8 +128,11 @@ private:
SSRC _ssrcAudio;
SSRC _ssrcVideo;
bool _enableFlexfec = true;

ProtocolVersion _protocolVersion;

bool _isConnected = false;
bool _didConnectOnce = false;
bool _readyToReceiveVideo = false;
bool _didConfigureVideo = false;
AudioState _outgoingAudioState = AudioState::Active;

@@ -128,19 +146,23 @@ private:
std::unique_ptr<webrtc::Call> _call;
webrtc::FieldTrialBasedConfig _fieldTrials;
webrtc::LocalAudioSinkAdapter _audioSource;
rtc::scoped_refptr<webrtc::AudioDeviceModule> _audioDeviceModule;
std::unique_ptr<cricket::VoiceMediaChannel> _audioChannel;
std::unique_ptr<cricket::VideoMediaChannel> _videoChannel;
std::unique_ptr<webrtc::VideoBitrateAllocatorFactory> _videoBitrateAllocatorFactory;
std::shared_ptr<VideoCaptureInterface> _videoCapture;
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentIncomingVideoSink;
std::shared_ptr<VideoSinkInterfaceProxyImpl> _incomingVideoSinkProxy;

float _localPreferredVideoAspectRatio = 0.0f;
float _preferredAspectRatio = 0.0f;
bool _enableHighBitrateVideo = false;
bool _isLowCostNetwork = false;
bool _isDataSavingActive = false;

std::unique_ptr<MediaManager::NetworkInterfaceImpl> _audioNetworkInterface;
std::unique_ptr<MediaManager::NetworkInterfaceImpl> _videoNetworkInterface;

std::vector<CallStatsBitrateRecord> _bitrateRecords;

std::shared_ptr<PlatformContext> _platformContext;
};
@@ -264,17 +264,21 @@ bool Deserialize(RemoteBatteryLevelIsLowMessage &to, rtc::ByteBufferReader &read
return true;
}

void Serialize(rtc::ByteBufferWriter &to, const RemoteNetworkTypeMessage &from, bool singleMessagePacket) {
void Serialize(rtc::ByteBufferWriter &to, const RemoteNetworkStatusMessage &from, bool singleMessagePacket) {
to.WriteUInt8(from.isLowCost ? 1 : 0);
to.WriteUInt8(from.isLowDataRequested ? 1 : 0);
}

bool Deserialize(RemoteNetworkTypeMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
bool Deserialize(RemoteNetworkStatusMessage &to, rtc::ByteBufferReader &reader, bool singleMessagePacket) {
uint8_t value = 0;
if (!reader.ReadUInt8(&value)) {
RTC_LOG(LS_ERROR) << "Could not read isLowCost.";
return false;
}
to.isLowCost = (value != 0);
if (reader.ReadUInt8(&value)) {
to.isLowDataRequested = (value != 0);
}
return true;
}
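The renamed status message keeps the old single-byte payload as a prefix: isLowCost must be present, while the isLowDataRequested byte is read only if available, so peers still sending the one-byte form deserialize cleanly. A round-trip sketch against the same rtc byte-buffer API (an illustration under assumptions, not part of the commit; the exact ByteBufferReader construction may vary by WebRTC revision):

    rtc::ByteBufferWriter writer;
    RemoteNetworkStatusMessage out;
    out.isLowCost = true;
    out.isLowDataRequested = false;
    Serialize(writer, out, true); // wire bytes: 0x01 0x00

    rtc::ByteBufferReader reader(writer.Data(), writer.Length());
    RemoteNetworkStatusMessage in;
    bool ok = Deserialize(in, reader, true); // still true if the second byte is absent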

@@ -97,11 +97,12 @@ struct RemoteBatteryLevelIsLowMessage {
bool batteryLow = false;
};

struct RemoteNetworkTypeMessage {
struct RemoteNetworkStatusMessage {
static constexpr uint8_t kId = 10;
static constexpr bool kRequiresAck = true;

bool isLowCost = false;
bool isLowDataRequested = false;
};

// To add a new message you should:
@@ -120,7 +121,7 @@ struct Message {
UnstructuredDataMessage,
VideoParametersMessage,
RemoteBatteryLevelIsLowMessage,
RemoteNetworkTypeMessage> data;
RemoteNetworkStatusMessage> data;
};

rtc::CopyOnWriteBuffer SerializeMessageWithSeq(

@@ -23,10 +23,30 @@ extern "C" {

namespace tgcalls {

class TurnCustomizerImpl : public webrtc::TurnCustomizer {
public:
TurnCustomizerImpl() {
}

virtual ~TurnCustomizerImpl() {
}

void MaybeModifyOutgoingStunMessage(cricket::PortInterface* port,
cricket::StunMessage* message) override {
message->AddAttribute(std::make_unique<cricket::StunByteStringAttribute>(cricket::STUN_ATTR_SOFTWARE, "Telegram "));
}

bool AllowChannelData(cricket::PortInterface* port, const void *data, size_t size, bool payload) override {
return true;
}
};

NetworkManager::NetworkManager(
rtc::Thread *thread,
EncryptionKey encryptionKey,
bool enableP2P,
bool enableTCP,
bool enableStunMarking,
std::vector<RtcServer> const &rtcServers,
std::function<void(const NetworkManager::State &)> stateUpdated,
std::function<void(DecryptedMessage &&)> transportMessageReceived,
@@ -34,6 +54,8 @@ NetworkManager::NetworkManager(
std::function<void(int delayMs, int cause)> sendTransportServiceAsync) :
_thread(thread),
_enableP2P(enableP2P),
_enableTCP(enableTCP),
_enableStunMarking(enableStunMarking),
_rtcServers(rtcServers),
_transport(
EncryptedConnection::Type::Transport,
@@ -63,9 +85,17 @@ void NetworkManager::start() {
_socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread));

_networkManager = std::make_unique<rtc::BasicNetworkManager>();
_portAllocator.reset(new cricket::BasicPortAllocator(_networkManager.get(), _socketFactory.get(), nullptr, nullptr));

if (_enableStunMarking) {
_turnCustomizer.reset(new TurnCustomizerImpl());
}

_portAllocator.reset(new cricket::BasicPortAllocator(_networkManager.get(), _socketFactory.get(), _turnCustomizer.get(), nullptr));

uint32_t flags = cricket::PORTALLOCATOR_DISABLE_TCP;
uint32_t flags = 0;
if (!_enableTCP) {
flags |= cricket::PORTALLOCATOR_DISABLE_TCP;
}
if (!_enableP2P) {
flags |= cricket::PORTALLOCATOR_DISABLE_UDP;
flags |= cricket::PORTALLOCATOR_DISABLE_STUN;
@@ -90,7 +120,7 @@ void NetworkManager::start() {
}
}

_portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE);
_portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE, _turnCustomizer.get());

_asyncResolverFactory = std::make_unique<webrtc::BasicAsyncResolverFactory>();
_transportChannel.reset(new cricket::P2PTransportChannel("transport", 0, _portAllocator.get(), _asyncResolverFactory.get(), nullptr));
@@ -167,6 +197,8 @@ void NetworkManager::sendTransportService(int cause) {

void NetworkManager::setIsLocalNetworkLowCost(bool isLocalNetworkLowCost) {
_isLocalNetworkLowCost = isLocalNetworkLowCost;

logCurrentNetworkState();
}

TrafficStats NetworkManager::getNetworkStats() {
@@ -178,6 +210,22 @@ TrafficStats NetworkManager::getNetworkStats() {
return stats;
}

void NetworkManager::fillCallStats(CallStats &callStats) {
callStats.networkRecords = std::move(_networkRecords);
}

void NetworkManager::logCurrentNetworkState() {
if (!_currentEndpointType.has_value()) {
return;
}

CallStatsNetworkRecord record;
record.timestamp = (int32_t)(rtc::TimeMillis() / 1000);
record.endpointType = *_currentEndpointType;
record.isLowCost = _isLocalNetworkLowCost;
_networkRecords.push_back(std::move(record));
}

void NetworkManager::checkConnectionTimeout() {
const auto weak = std::weak_ptr<NetworkManager>(shared_from_this());
_thread->PostDelayedTask(RTC_FROM_HERE, [weak]() {
@@ -258,6 +306,17 @@ void NetworkManager::transportRouteChanged(absl::optional<rtc::NetworkRoute> rou
bool remoteIsWifi = route->remote.adapter_type() == rtc::AdapterType::ADAPTER_TYPE_WIFI;

RTC_LOG(LS_INFO) << "NetworkManager is wifi: local=" << localIsWifi << ", remote=" << remoteIsWifi;

CallStatsConnectionEndpointType endpointType;
if (route->local.uses_turn()) {
endpointType = CallStatsConnectionEndpointType::ConnectionEndpointTURN;
} else {
endpointType = CallStatsConnectionEndpointType::ConnectionEndpointP2P;
}
if (!_currentEndpointType.has_value() || _currentEndpointType != endpointType) {
_currentEndpointType = endpointType;
logCurrentNetworkState();
}
}
}

@@ -6,6 +6,7 @@
#include "EncryptedConnection.h"
#include "Instance.h"
#include "Message.h"
#include "Stats.h"

#include "rtc_base/copy_on_write_buffer.h"
#include "api/candidate.h"
@@ -28,6 +29,7 @@ class IceTransportInternal;

namespace webrtc {
class BasicAsyncResolverFactory;
class TurnCustomizer;
} // namespace webrtc

namespace tgcalls {
@@ -50,6 +52,8 @@ public:
rtc::Thread *thread,
EncryptionKey encryptionKey,
bool enableP2P,
bool enableTCP,
bool enableStunMarking,
std::vector<RtcServer> const &rtcServers,
std::function<void(const State &)> stateUpdated,
std::function<void(DecryptedMessage &&)> transportMessageReceived,
@@ -63,6 +67,8 @@ public:
void sendTransportService(int cause);
void setIsLocalNetworkLowCost(bool isLocalNetworkLowCost);
TrafficStats getNetworkStats();
void fillCallStats(CallStats &callStats);
void logCurrentNetworkState();

private:
void checkConnectionTimeout();
@@ -76,6 +82,8 @@ private:

rtc::Thread *_thread = nullptr;
bool _enableP2P = false;
bool _enableTCP = false;
bool _enableStunMarking = false;
std::vector<RtcServer> _rtcServers;
EncryptedConnection _transport;
bool _isOutgoing = false;
@@ -85,6 +93,7 @@ private:

std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
std::unique_ptr<webrtc::TurnCustomizer> _turnCustomizer;
std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
std::unique_ptr<webrtc::BasicAsyncResolverFactory> _asyncResolverFactory;
std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;
@@ -96,6 +105,9 @@ private:
int64_t _lastNetworkActivityMs = 0;
InterfaceTrafficStats _trafficStatsWifi;
InterfaceTrafficStats _trafficStatsCellular;

absl::optional<CallStatsConnectionEndpointType> _currentEndpointType;
std::vector<CallStatsNetworkRecord> _networkRecords;
};

} // namespace tgcalls

@@ -0,0 +1,30 @@
#ifndef TGCALLS_STATS_H
#define TGCALLS_STATS_H

namespace tgcalls {

enum class CallStatsConnectionEndpointType {
ConnectionEndpointP2P = 0,
ConnectionEndpointTURN = 1
};

struct CallStatsNetworkRecord {
int32_t timestamp = 0;
CallStatsConnectionEndpointType endpointType = CallStatsConnectionEndpointType::ConnectionEndpointP2P;
bool isLowCost = false;
};

struct CallStatsBitrateRecord {
int32_t timestamp = 0;
int32_t bitrate = 0;
};

struct CallStats {
std::string outgoingCodec;
std::vector<CallStatsNetworkRecord> networkRecords;
std::vector<CallStatsBitrateRecord> bitrateRecords;
};

} // namespace tgcalls

#endif
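The new header uses std::string, std::vector and int32_t without including them directly, so it relies on including files to provide those transitively. Once fillCallStats has populated a CallStats, a consumer can walk the records; a hypothetical sketch, not part of the commit:

    #include <cstdio>

    // Print each network transition recorded during the call.
    void dumpCallStats(const tgcalls::CallStats &stats) {
        for (const auto &record : stats.networkRecords) {
            std::printf("t=%d endpoint=%s lowCost=%d\n",
                        record.timestamp,
                        record.endpointType == tgcalls::CallStatsConnectionEndpointType::ConnectionEndpointTURN
                            ? "TURN" : "P2P",
                        record.isLowCost ? 1 : 0);
        }
    }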

@@ -4,8 +4,8 @@

namespace tgcalls {

std::unique_ptr<VideoCaptureInterface> VideoCaptureInterface::Create(std::shared_ptr<PlatformContext> platformContext) {
return std::make_unique<VideoCaptureInterfaceImpl>(platformContext);
std::unique_ptr<VideoCaptureInterface> VideoCaptureInterface::Create(std::string deviceId, std::shared_ptr<PlatformContext> platformContext) {
return std::make_unique<VideoCaptureInterfaceImpl>(deviceId, platformContext);
}

VideoCaptureInterface::~VideoCaptureInterface() = default;
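Call sites now select the camera by device id instead of a boolean; the "front"/"back" ids match the Android mapping later in this diff. A minimal usage sketch (context and sink are assumed to exist elsewhere):

    // Create a front-camera capture and hand frames to a sink.
    auto capture = tgcalls::VideoCaptureInterface::Create("front", context);
    capture->setOutput(sink);
    capture->setState(tgcalls::VideoState::Active);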

@@ -1,6 +1,7 @@
#ifndef TGCALLS_VIDEO_CAPTURE_INTERFACE_H
#define TGCALLS_VIDEO_CAPTURE_INTERFACE_H

#include <string>
#include <memory>

namespace rtc {
@@ -27,11 +28,13 @@ protected:
VideoCaptureInterface() = default;

public:
static std::unique_ptr<VideoCaptureInterface> Create(std::shared_ptr<PlatformContext> platformContext = nullptr);
static std::unique_ptr<VideoCaptureInterface> Create(
std::string deviceId = std::string(),
std::shared_ptr<PlatformContext> platformContext = nullptr);

virtual ~VideoCaptureInterface();

virtual void switchCamera() = 0;
virtual void switchToDevice(std::string deviceId) = 0;
virtual void setState(VideoState state) = 0;
virtual void setPreferredAspectRatio(float aspectRatio) = 0;
virtual void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

@@ -1,3 +1,4 @@
#include <tgnet/FileLog.h>
#include "VideoCaptureInterfaceImpl.h"

#include "VideoCapturerInterface.h"
@@ -7,39 +8,44 @@

namespace tgcalls {

VideoCaptureInterfaceObject::VideoCaptureInterfaceObject(std::shared_ptr<PlatformContext> platformContext) {
_videoSource = PlatformInterface::SharedInstance()->makeVideoSource(Manager::getMediaThread(), MediaManager::getWorkerThread());
VideoCaptureInterfaceObject::VideoCaptureInterfaceObject(std::string deviceId, std::shared_ptr<PlatformContext> platformContext)
: _videoSource(PlatformInterface::SharedInstance()->makeVideoSource(Manager::getMediaThread(), MediaManager::getWorkerThread())) {
_platformContext = platformContext;
//this should outlive the capturer
if (_videoSource) {
_videoCapturer = PlatformInterface::SharedInstance()->makeVideoCapturer(_videoSource, _useFrontCamera, [this](VideoState state) {
if (this->_stateUpdated) {
this->_stateUpdated(state);
}
}, _platformContext, _videoCapturerResolution);
}

switchToDevice(deviceId);
}

VideoCaptureInterfaceObject::~VideoCaptureInterfaceObject() {
if (_videoCapturer && _currentUncroppedSink != nullptr) {
//_videoSource->RemoveSink(_currentSink.get());
_videoCapturer->setUncroppedOutput(nullptr);
}
}

void VideoCaptureInterfaceObject::switchCamera() {
_useFrontCamera = !_useFrontCamera;
webrtc::VideoTrackSourceInterface *VideoCaptureInterfaceObject::source() {
return _videoSource;
}

void VideoCaptureInterfaceObject::switchToDevice(std::string deviceId) {
if (_videoCapturer && _currentUncroppedSink) {
_videoCapturer->setUncroppedOutput(nullptr);
}
if (_videoSource) {
_videoCapturer = PlatformInterface::SharedInstance()->makeVideoCapturer(_videoSource, _useFrontCamera, [this](VideoState state) {
//this should outlive the capturer
_videoCapturer = PlatformInterface::SharedInstance()->makeVideoCapturer(_videoSource, deviceId, [this](VideoState state) {
if (this->_stateUpdated) {
this->_stateUpdated(state);
}
}, _platformContext, _videoCapturerResolution);
}, [this](PlatformCaptureInfo info) {
if (this->_shouldBeAdaptedToReceiverAspectRate != info.shouldBeAdaptedToReceiverAspectRate) {
this->_shouldBeAdaptedToReceiverAspectRate = info.shouldBeAdaptedToReceiverAspectRate;
this->updateAspectRateAdaptation();
}
}, _platformContext, _videoCapturerResolution);
}
if (_videoCapturer) {
// if (_preferredAspectRatio > 0) {
// _videoCapturer->setPreferredCaptureAspectRatio(_preferredAspectRatio);
// }
if (_currentUncroppedSink) {
_videoCapturer->setUncroppedOutput(_currentUncroppedSink);
}
@@ -57,21 +63,32 @@ void VideoCaptureInterfaceObject::setState(VideoState state) {
}

void VideoCaptureInterfaceObject::setPreferredAspectRatio(float aspectRatio) {
if (_videoCapturer) {
if (aspectRatio > 0.01 && _videoCapturerResolution.first != 0 && _videoCapturerResolution.second != 0) {
float originalWidth = (float)_videoCapturerResolution.first;
float originalHeight = (float)_videoCapturerResolution.second;
_preferredAspectRatio = aspectRatio;
updateAspectRateAdaptation();
}

float width = (originalWidth > aspectRatio * originalHeight)
? int(std::round(aspectRatio * originalHeight))
: originalWidth;
float height = (originalWidth > aspectRatio * originalHeight)
? originalHeight
: int(std::round(originalHeight / aspectRatio));

PlatformInterface::SharedInstance()->adaptVideoSource(_videoSource, (int)width, (int)height, 30);
void VideoCaptureInterfaceObject::updateAspectRateAdaptation() {
if (_videoCapturer) {
if (_videoCapturerResolution.first != 0 && _videoCapturerResolution.second != 0) {
if (_preferredAspectRatio > 0.01 && _shouldBeAdaptedToReceiverAspectRate) {
float originalWidth = (float)_videoCapturerResolution.first;
float originalHeight = (float)_videoCapturerResolution.second;

float aspectRatio = _preferredAspectRatio;

float width = (originalWidth > aspectRatio * originalHeight)
? int(std::round(aspectRatio * originalHeight))
: originalWidth;
float height = (originalWidth > aspectRatio * originalHeight)
? originalHeight
: int(std::round(originalHeight / aspectRatio));

PlatformInterface::SharedInstance()->adaptVideoSource(_videoSource, (int)width, (int)height, 30);
} else {
PlatformInterface::SharedInstance()->adaptVideoSource(_videoSource, _videoCapturerResolution.first, _videoCapturerResolution.second, 30);
}
}
}
}
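The rule above crops whichever dimension overshoots the preferred ratio. A standalone sketch with one worked value (hypothetical helper, not part of the commit; assumes <cmath> and <utility>):

    // Returns the adapted (width, height) for a w x h capture and ratio r,
    // mirroring the branch above: shrink the dimension that overshoots.
    static std::pair<int, int> adaptToRatio(float w, float h, float r) {
        if (w > r * h) {
            return {int(std::round(r * h)), int(h)}; // too wide: crop width
        }
        return {int(w), int(std::round(h / r))}; // otherwise the height is divided by r
    }

    // adaptToRatio(1280, 720, 1.0f) yields {720, 720}: a square crop of a 16:9 capture.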

void VideoCaptureInterfaceObject::setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
@@ -85,18 +102,18 @@ void VideoCaptureInterfaceObject::setStateUpdated(std::function<void(VideoState)
_stateUpdated = stateUpdated;
}

VideoCaptureInterfaceImpl::VideoCaptureInterfaceImpl(std::shared_ptr<PlatformContext> platformContext) :
_platformContext(platformContext),
_impl(Manager::getMediaThread(), [platformContext]() {
return new VideoCaptureInterfaceObject(platformContext);
VideoCaptureInterfaceImpl::VideoCaptureInterfaceImpl(std::string deviceId, std::shared_ptr<PlatformContext> platformContext) :
_platformContext(platformContext),
_impl(Manager::getMediaThread(), [deviceId, platformContext]() {
return new VideoCaptureInterfaceObject(deviceId, platformContext);
}) {
}

VideoCaptureInterfaceImpl::~VideoCaptureInterfaceImpl() = default;

void VideoCaptureInterfaceImpl::switchCamera() {
_impl.perform(RTC_FROM_HERE, [](VideoCaptureInterfaceObject *impl) {
impl->switchCamera();
void VideoCaptureInterfaceImpl::switchToDevice(std::string deviceId) {
_impl.perform(RTC_FROM_HERE, [deviceId](VideoCaptureInterfaceObject *impl) {
impl->switchToDevice(deviceId);
});
}

@@ -126,4 +143,4 @@ ThreadLocalObject<VideoCaptureInterfaceObject> *VideoCaptureInterfaceImpl::objec
return &_impl;
}

}// namespace tgcalls
} // namespace tgcalls

@@ -13,34 +13,36 @@ class VideoCapturerInterface;

class VideoCaptureInterfaceObject {
public:
VideoCaptureInterfaceObject(std::shared_ptr<PlatformContext> platformContext);
VideoCaptureInterfaceObject(std::string deviceId, std::shared_ptr<PlatformContext> platformContext);
~VideoCaptureInterfaceObject();

void switchCamera();
void switchToDevice(std::string deviceId);
void setState(VideoState state);
void setPreferredAspectRatio(float aspectRatio);
void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
void setStateUpdated(std::function<void(VideoState)> stateUpdated);

public:
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _videoSource;
webrtc::VideoTrackSourceInterface *source();

private:
void updateAspectRateAdaptation();

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _videoSource;
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentUncroppedSink;
std::shared_ptr<PlatformContext> _platformContext;
std::pair<int, int> _videoCapturerResolution;
std::unique_ptr<VideoCapturerInterface> _videoCapturer;
std::function<void(VideoState)> _stateUpdated;
bool _useFrontCamera = true;
VideoState _state = VideoState::Active;
float _preferredAspectRatio = 0.0f;
bool _shouldBeAdaptedToReceiverAspectRate = true;
};

class VideoCaptureInterfaceImpl : public VideoCaptureInterface {
public:
VideoCaptureInterfaceImpl(std::shared_ptr<PlatformContext> platformContext);
VideoCaptureInterfaceImpl(std::string deviceId, std::shared_ptr<PlatformContext> platformContext);
virtual ~VideoCaptureInterfaceImpl();

void switchCamera() override;
void switchToDevice(std::string deviceId) override;
void setState(VideoState state) override;
void setPreferredAspectRatio(float aspectRatio) override;
void setOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;

@@ -23,6 +23,7 @@ public:
virtual void setState(VideoState state) = 0;
virtual void setPreferredCaptureAspectRatio(float aspectRatio) = 0;
virtual void setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

};

} // namespace tgcalls

@@ -151,6 +151,11 @@ onSignalBarsUpdated_(std::move(descriptor.signalBarsUpdated)) {
controller_->Start();

controller_->Connect();

controller_->SetCurrentAudioInput(descriptor.mediaDevicesConfig.audioInputId);
controller_->SetCurrentAudioOutput(descriptor.mediaDevicesConfig.audioOutputId);
controller_->SetInputVolume(descriptor.mediaDevicesConfig.inputVolume);
controller_->SetOutputVolume(descriptor.mediaDevicesConfig.outputVolume);
}

InstanceImplLegacy::~InstanceImplLegacy() {
@@ -204,6 +209,9 @@ void InstanceImplLegacy::receiveSignalingData(const std::vector<uint8_t> &data)
void InstanceImplLegacy::setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) {
}

void InstanceImplLegacy::setRequestedVideoAspect(float aspect) {
}

void InstanceImplLegacy::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
}

@@ -20,6 +20,10 @@ public:
void setNetworkType(NetworkType networkType) override;
void setMuteMicrophone(bool muteMicrophone) override;
void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
void setRequestedVideoAspect(float aspect) override;
bool supportsVideo() override {
return false;
}
void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
void setAudioOutputGainControlEnabled(bool enabled) override;
void setEchoCancellationStrength(int strength) override;

@@ -5,6 +5,7 @@
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/media_stream_interface.h"
#include <string>

namespace tgcalls {

@@ -13,6 +14,10 @@ enum class VideoState;
class VideoCapturerInterface;
class PlatformContext;

struct PlatformCaptureInfo {
bool shouldBeAdaptedToReceiverAspectRate = false;
};

class PlatformInterface {
public:
static PlatformInterface *SharedInstance();
@@ -20,15 +25,13 @@ public:

virtual void configurePlatformAudio() {
}
virtual float getDisplayAspectRatio() {
return 0.0f;
}

virtual std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext) = 0;
virtual std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) = 0;
virtual bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) = 0;
virtual rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) = 0;
virtual void adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) = 0;
virtual std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) = 0;
virtual std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) = 0;

};

@@ -25,10 +25,6 @@ void AndroidInterface::configurePlatformAudio() {

}

float AndroidInterface::getDisplayAspectRatio() {
return 0;
}

std::unique_ptr<webrtc::VideoEncoderFactory> AndroidInterface::makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext) {
JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();

@@ -87,8 +83,8 @@ bool AndroidInterface::supportsEncoding(const std::string &codecName, std::share
return codecName == cricket::kVp8CodecName;
}

std::unique_ptr<VideoCapturerInterface> AndroidInterface::makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) {
return std::make_unique<VideoCapturerInterfaceImpl>(_source, useFrontCamera, stateUpdated, platformContext);
std::unique_ptr<VideoCapturerInterface> AndroidInterface::makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) {
return std::make_unique<VideoCapturerInterfaceImpl>(_source, deviceId, stateUpdated, platformContext);
}

@@ -10,13 +10,12 @@ namespace tgcalls {
class AndroidInterface : public PlatformInterface {
public:
void configurePlatformAudio() override;
float getDisplayAspectRatio() override;
std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory(std::shared_ptr<PlatformContext> platformContext) override;
std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory(std::shared_ptr<PlatformContext> platformContext) override;
bool supportsEncoding(const std::string &codecName, std::shared_ptr<PlatformContext> platformContext) override;
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) override;
void adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) override;
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) override;
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::function<void(PlatformCaptureInfo)> captureInfoUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) override;

private:
rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> _source;

@@ -10,11 +10,11 @@

namespace tgcalls {

VideoCameraCapturer::VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) : _source(source), _stateUpdated(stateUpdated), _platformContext(platformContext) {
VideoCameraCapturer::VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) : _source(source), _stateUpdated(stateUpdated), _platformContext(platformContext) {
AndroidContext *context = (AndroidContext *) platformContext.get();
JNIEnv *env = webrtc::AttachCurrentThreadIfNeeded();
jmethodID methodId = env->GetMethodID(context->getJavaCapturerClass(), "init", "(JZ)V");
env->CallVoidMethod(context->getJavaCapturer(), methodId, (jlong) (intptr_t) this, (jboolean) useFrontCamera);
env->CallVoidMethod(context->getJavaCapturer(), methodId, (jlong) (intptr_t) this, (jboolean) (deviceId != "back"));
}
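Note that the Java bridge still takes a boolean, so the string id collapses to a comparison; only an explicit "back" selects the rear camera, and the empty default behaves as front:

    // How the device id degrades to the Java-side flag.
    bool useFrontCamera = (deviceId != "back");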

void VideoCameraCapturer::setState(VideoState state) {

@@ -20,7 +20,7 @@ class VideoCameraCapturer;
class VideoCameraCapturer {

public:
VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext);
VideoCameraCapturer(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext);

void setState(VideoState state);
void setPreferredCaptureAspectRatio(float aspectRatio);

@@ -4,8 +4,8 @@

namespace tgcalls {

VideoCapturerInterfaceImpl::VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) {
_capturer = std::unique_ptr<VideoCameraCapturer>(new VideoCameraCapturer(source, useFrontCamera, stateUpdated, platformContext));
VideoCapturerInterfaceImpl::VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) {
_capturer = std::unique_ptr<VideoCameraCapturer>(new VideoCameraCapturer(source, deviceId, stateUpdated, platformContext));
}

void VideoCapturerInterfaceImpl::setState(VideoState state) {

@@ -9,7 +9,7 @@ namespace tgcalls {

class VideoCapturerInterfaceImpl final : public VideoCapturerInterface {
public:
VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext);
VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::JavaVideoTrackSourceInterface> source, std::string deviceId, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext);

void setState(VideoState state) override;
void setPreferredCaptureAspectRatio(float aspectRatio) override;

@@ -1,23 +0,0 @@
#ifndef TGCALLS_DARWIN_INTERFACE_H
#define TGCALLS_DARWIN_INTERFACE_H

#include "platform/PlatformInterface.h"

namespace tgcalls {

class DarwinInterface : public PlatformInterface {
public:
void configurePlatformAudio() override;
float getDisplayAspectRatio() override;
std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory() override;
std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory() override;
bool supportsEncoding(const std::string &codecName) override;
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) override;
virtual void adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) override;
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) override;

};

} // namespace tgcalls

#endif

@@ -1,85 +0,0 @@
#include "DarwinInterface.h"

#include "VideoCapturerInterfaceImpl.h"
#include "sdk/objc/native/src/objc_video_track_source.h"

#include "media/base/media_constants.h"
#include "TGRTCDefaultVideoEncoderFactory.h"
#include "TGRTCDefaultVideoDecoderFactory.h"
#include "sdk/objc/native/api/video_encoder_factory.h"
#include "sdk/objc/native/api/video_decoder_factory.h"
#include "api/video_track_source_proxy.h"

#ifdef WEBRTC_IOS
#include "sdk/objc/components/audio/RTCAudioSession.h"
#endif // WEBRTC_IOS

#import <AVFoundation/AVFoundation.h>

namespace tgcalls {

static webrtc::ObjCVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
webrtc::VideoTrackSourceProxy *proxy_source =
static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
return static_cast<webrtc::ObjCVideoTrackSource *>(proxy_source->internal());
}

void DarwinInterface::configurePlatformAudio() {
#ifdef WEBRTC_IOS
[RTCAudioSession sharedInstance].useManualAudio = true;
[[RTCAudioSession sharedInstance] audioSessionDidActivate:[AVAudioSession sharedInstance]];
[RTCAudioSession sharedInstance].isAudioEnabled = true;
#endif
}

float DarwinInterface::getDisplayAspectRatio() {
return 0.0f;
}

std::unique_ptr<webrtc::VideoEncoderFactory> DarwinInterface::makeVideoEncoderFactory() {
return webrtc::ObjCToNativeVideoEncoderFactory([[TGRTCDefaultVideoEncoderFactory alloc] init]);
}

std::unique_ptr<webrtc::VideoDecoderFactory> DarwinInterface::makeVideoDecoderFactory() {
return webrtc::ObjCToNativeVideoDecoderFactory([[TGRTCDefaultVideoDecoderFactory alloc] init]);
}

bool DarwinInterface::supportsEncoding(const std::string &codecName) {
if (codecName == cricket::kH265CodecName) {
#ifdef WEBRTC_IOS
if (@available(iOS 11.0, *)) {
return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
}
#elif defined WEBRTC_MAC // WEBRTC_IOS
if (@available(macOS 10.14, *)) {
return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
}
#endif // WEBRTC_IOS || WEBRTC_MAC
} else if (codecName == cricket::kH264CodecName) {
return true;
} else if (codecName == cricket::kVp8CodecName) {
return true;
} else if (codecName == cricket::kVp9CodecName) {
return true;
}
return false;
}

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> DarwinInterface::makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) {
rtc::scoped_refptr<webrtc::ObjCVideoTrackSource> objCVideoTrackSource(new rtc::RefCountedObject<webrtc::ObjCVideoTrackSource>());
return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, objCVideoTrackSource);
}

void DarwinInterface::adaptVideoSource(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> videoSource, int width, int height, int fps) {
getObjCVideoSource(videoSource)->OnOutputFormatRequest(width, height, fps);
}

std::unique_ptr<VideoCapturerInterface> DarwinInterface::makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext, std::pair<int, int> &outResolution) {
return std::make_unique<VideoCapturerInterfaceImpl>(source, useFrontCamera, stateUpdated, outResolution);
}

std::unique_ptr<PlatformInterface> CreatePlatformInterface() {
return std::make_unique<DarwinInterface>();
}

} // namespace tgcalls

@@ -1,64 +0,0 @@
/*
 * Copyright 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>
#ifdef WEBRTC_IOS
#import <UIKit/UIKit.h>
#else
#import <AppKit/AppKit.h>
#endif

#import "RTCMacros.h"
#import "RTCVideoRenderer.h"
#import "RTCVideoViewShading.h"

#import "api/media_stream_interface.h"

#include <memory>

NS_ASSUME_NONNULL_BEGIN

@class GLVideoView;

/**
 * GLVideoView is an RTCVideoRenderer which renders video frames in its
 * bounds using OpenGLES 2.0 or OpenGLES 3.0.
 */
RTC_OBJC_EXPORT
@interface GLVideoView :
#ifdef WEBRTC_IOS
UIView
#else
NSView
#endif
<RTCVideoRenderer>

@property(nonatomic, weak) id<RTCVideoViewDelegate> delegate;

- (instancetype)initWithFrame:(CGRect)frame
shader:(id<RTCVideoViewShading>)shader NS_DESIGNATED_INITIALIZER;

- (instancetype)initWithCoder:(NSCoder *)aDecoder
shader:(id<RTCVideoViewShading>)shader NS_DESIGNATED_INITIALIZER;

/** @abstract Wrapped RTCVideoRotation, or nil.
 */
@property(nonatomic, nullable) NSValue *rotationOverride;

@property (nonatomic, readwrite) int internalOrientation;

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;
- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived;
- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated;
- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated;

@end

NS_ASSUME_NONNULL_END

@@ -1,421 +0,0 @@
/*
 * Copyright 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "GLVideoView.h"

#import <GLKit/GLKit.h>

#import "RTCDefaultShader.h"
#import "RTCDisplayLinkTimer.h"
#import "RTCI420TextureCache.h"
#import "RTCNV12TextureCache.h"
#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"
#import "rtc_base/time_utils.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"

namespace {

static RTCVideoFrame *customToObjCVideoFrame(const webrtc::VideoFrame &frame, RTCVideoRotation &rotation) {
rotation = RTCVideoRotation(frame.rotation());
RTCVideoFrame *videoFrame =
[[RTCVideoFrame alloc] initWithBuffer:webrtc::ToObjCVideoFrameBuffer(frame.video_frame_buffer())
rotation:RTCVideoRotation_90
timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
videoFrame.timeStamp = frame.timestamp();

return videoFrame;
}

class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation)) {
_frameReceived = [frameReceived copy];
}

void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
RTCVideoRotation rotation = RTCVideoRotation_90;
RTCVideoFrame* videoFrame = customToObjCVideoFrame(nativeVideoFrame, rotation);

CGSize currentSize = CGSizeMake(videoFrame.height, videoFrame.width);

if (_frameReceived) {
_frameReceived(currentSize, videoFrame, rotation);
}
}

private:
void (^_frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation);
};

}

static CGSize scaleToFillSize(CGSize size, CGSize maxSize) {
if (size.width < 1.0f) {
size.width = 1.0f;
}
if (size.height < 1.0f) {
size.height = 1.0f;
}
if (size.width < maxSize.width) {
size.height = floor(maxSize.width * size.height / MAX(1.0f, size.width));
size.width = maxSize.width;
}
if (size.height < maxSize.height) {
size.width = floor(maxSize.height * size.width / MAX(1.0f, size.height));
size.height = maxSize.height;
}
return size;
}
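As a worked example of the fill rule above: a 640x360 frame in a 320x480 view fails the width test (640 >= 320) but passes the height test, so it is scaled to floor(480 * 640 / 360) x 480 = 853x480; the layout code below then centers the overflowing axis via the CGRectMake offsets.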

// GLVideoView wraps a GLKView which is setup with
// enableSetNeedsDisplay = NO for the purpose of gaining control of
// exactly when to call -[GLKView display]. This need for extra
// control is required to avoid triggering method calls on GLKView
// that results in attempting to bind the underlying render buffer
// when the drawable size would be empty which would result in the
// error GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT. -[GLKView display] is
// the method that will trigger the binding of the render
// buffer. Because the standard behaviour of -[UIView setNeedsDisplay]
// is disabled for the reasons above, the GLVideoView maintains
// its own |isDirty| flag.

@interface GLVideoView () <GLKViewDelegate>
// |videoFrame| is set when we receive a frame from a worker thread and is read
// from the display link callback so atomicity is required.
@property(atomic, strong) RTCVideoFrame *videoFrame;
@property(nonatomic, readonly) GLKView *glkView;
@end

@implementation GLVideoView {
RTCDisplayLinkTimer *_timer;
EAGLContext *_glContext;
// This flag should only be set and read on the main thread (e.g. by
// setNeedsDisplay)
BOOL _isDirty;
id<RTCVideoViewShading> _shader;
RTCNV12TextureCache *_nv12TextureCache;
RTCI420TextureCache *_i420TextureCache;
// As timestamps should be unique between frames, will store last
// drawn frame timestamp instead of the whole frame to reduce memory usage.
int64_t _lastDrawnFrameTimeStampNs;

CGSize _currentSize;

void (^_onFirstFrameReceived)();
bool _firstFrameReceivedReported;

void (^_onOrientationUpdated)(int);

std::shared_ptr<VideoRendererAdapterImpl> _sink;
}

@synthesize delegate = _delegate;
@synthesize videoFrame = _videoFrame;
@synthesize glkView = _glkView;
@synthesize rotationOverride = _rotationOverride;

- (instancetype)initWithFrame:(CGRect)frame {
return [self initWithFrame:frame shader:[[RTCDefaultShader alloc] init]];
}

- (instancetype)initWithCoder:(NSCoder *)aDecoder {
return [self initWithCoder:aDecoder shader:[[RTCDefaultShader alloc] init]];
}

- (instancetype)initWithFrame:(CGRect)frame shader:(id<RTCVideoViewShading>)shader {
if (self = [super initWithFrame:frame]) {
_shader = shader;
if (![self configure]) {
return nil;
}
}
return self;
}

- (instancetype)initWithCoder:(NSCoder *)aDecoder shader:(id<RTCVideoViewShading>)shader {
if (self = [super initWithCoder:aDecoder]) {
_shader = shader;
if (![self configure]) {
return nil;
}
}
return self;
}

- (BOOL)configure {
EAGLContext *glContext =
[[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES3];
if (!glContext) {
glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
}
if (!glContext) {
RTCLogError(@"Failed to create EAGLContext");
return NO;
}
_glContext = glContext;

// GLKView manages a framebuffer for us.
_glkView = [[GLKView alloc] initWithFrame:CGRectZero
context:_glContext];
_glkView.drawableColorFormat = GLKViewDrawableColorFormatRGBA8888;
_glkView.drawableDepthFormat = GLKViewDrawableDepthFormatNone;
_glkView.drawableStencilFormat = GLKViewDrawableStencilFormatNone;
_glkView.drawableMultisample = GLKViewDrawableMultisampleNone;
_glkView.delegate = self;
_glkView.layer.masksToBounds = YES;
_glkView.enableSetNeedsDisplay = NO;
[self addSubview:_glkView];

// Listen to application state in order to clean up OpenGL before app goes
// away.
NSNotificationCenter *notificationCenter =
[NSNotificationCenter defaultCenter];
[notificationCenter addObserver:self
selector:@selector(willResignActive)
name:UIApplicationWillResignActiveNotification
object:nil];
[notificationCenter addObserver:self
selector:@selector(didBecomeActive)
name:UIApplicationDidBecomeActiveNotification
object:nil];

// Frames are received on a separate thread, so we poll for current frame
// using a refresh rate proportional to screen refresh frequency. This
// occurs on the main thread.
__weak GLVideoView *weakSelf = self;
_timer = [[RTCDisplayLinkTimer alloc] initWithTimerHandler:^{
GLVideoView *strongSelf = weakSelf;
[strongSelf displayLinkTimerDidFire];
}];
if ([[UIApplication sharedApplication] applicationState] == UIApplicationStateActive) {
[self setupGL];
}

_sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame, RTCVideoRotation rotation) {
dispatch_async(dispatch_get_main_queue(), ^{
__strong GLVideoView *strongSelf = weakSelf;
if (strongSelf == nil) {
return;
}
if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
strongSelf->_currentSize = size;
[strongSelf setSize:size];
}

int mappedValue = 0;
switch (rotation) {
case RTCVideoRotation_90:
mappedValue = 0;
break;
case RTCVideoRotation_180:
mappedValue = 1;
break;
case RTCVideoRotation_270:
mappedValue = 2;
break;
default:
mappedValue = 0;
break;
}
[strongSelf setInternalOrientation:mappedValue];

[strongSelf renderFrame:videoFrame];
});
}));

return YES;
}

- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
[super setMultipleTouchEnabled:multipleTouchEnabled];
_glkView.multipleTouchEnabled = multipleTouchEnabled;
}

- (void)dealloc {
[[NSNotificationCenter defaultCenter] removeObserver:self];
UIApplicationState appState =
[UIApplication sharedApplication].applicationState;
if (appState == UIApplicationStateActive) {
[self teardownGL];
}
[_timer invalidate];
[self ensureGLContext];
_shader = nil;
if (_glContext && [EAGLContext currentContext] == _glContext) {
[EAGLContext setCurrentContext:nil];
}
}

#pragma mark - UIView

- (void)setNeedsDisplay {
[super setNeedsDisplay];
_isDirty = YES;
}

- (void)setNeedsDisplayInRect:(CGRect)rect {
[super setNeedsDisplayInRect:rect];
_isDirty = YES;
}

- (void)layoutSubviews {
[super layoutSubviews];

if (self.bounds.size.width > 0.0f) {
CGSize contentSize = scaleToFillSize(_currentSize, self.bounds.size);
_glkView.frame = CGRectMake(floor((self.bounds.size.width - contentSize.width) / 2.0), floor((self.bounds.size.height - contentSize.height) / 2.0), contentSize.width, contentSize.height);
}
}

#pragma mark - GLKViewDelegate

// This method is called when the GLKView's content is dirty and needs to be
// redrawn. This occurs on main thread.
- (void)glkView:(GLKView *)view drawInRect:(CGRect)rect {
// The renderer will draw the frame to the framebuffer corresponding to the
// one used by |view|.
RTCVideoFrame *frame = self.videoFrame;
if (!frame || frame.timeStampNs == _lastDrawnFrameTimeStampNs) {
return;
}
RTCVideoRotation rotation = frame.rotation;
if(_rotationOverride != nil) {
[_rotationOverride getValue: &rotation];
}
[self ensureGLContext];
glClear(GL_COLOR_BUFFER_BIT);
if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
if (!_nv12TextureCache) {
_nv12TextureCache = [[RTCNV12TextureCache alloc] initWithContext:_glContext];
}
if (_nv12TextureCache) {
[_nv12TextureCache uploadFrameToTextures:frame];
[_shader applyShadingForFrameWithWidth:frame.width
height:frame.height
rotation:rotation
yPlane:_nv12TextureCache.yTexture
uvPlane:_nv12TextureCache.uvTexture];
[_nv12TextureCache releaseTextures];

_lastDrawnFrameTimeStampNs = self.videoFrame.timeStampNs;
}
} else {
if (!_i420TextureCache) {
_i420TextureCache = [[RTCI420TextureCache alloc] initWithContext:_glContext];
}
[_i420TextureCache uploadFrameToTextures:frame];
[_shader applyShadingForFrameWithWidth:frame.width
height:frame.height
rotation:rotation
yPlane:_i420TextureCache.yTexture
uPlane:_i420TextureCache.uTexture
vPlane:_i420TextureCache.vTexture];

_lastDrawnFrameTimeStampNs = self.videoFrame.timeStampNs;
}
}

#pragma mark - RTCVideoRenderer

- (void)setSize:(CGSize)size {
[self.delegate videoView:self didChangeVideoSize:size];

if (self.bounds.size.width > 0.0f) {
CGSize contentSize = scaleToFillSize(size, self.bounds.size);
_glkView.frame = CGRectMake(floor((self.bounds.size.width - contentSize.width) / 2.0), floor((self.bounds.size.height - contentSize.height) / 2.0), contentSize.width, contentSize.height);
}
}

- (void)renderFrame:(RTCVideoFrame *)frame {
self.videoFrame = frame;
}

#pragma mark - Private

- (void)displayLinkTimerDidFire {
// Don't render unless video frame have changed or the view content
// has explicitly been marked dirty.
if (!_isDirty && _lastDrawnFrameTimeStampNs == self.videoFrame.timeStampNs) {
return;
}

// Always reset isDirty at this point, even if -[GLKView display]
// won't be called in the case the drawable size is empty.
_isDirty = NO;

// Only call -[GLKView display] if the drawable size is
// non-empty. Calling display will make the GLKView setup its
// render buffer if necessary, but that will fail with error
// GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT if size is empty.
if (self.bounds.size.width > 0 && self.bounds.size.height > 0) {
[_glkView display];
}
}

- (void)setupGL {
[self ensureGLContext];
glDisable(GL_DITHER);
_timer.isPaused = NO;
}

- (void)teardownGL {
self.videoFrame = nil;
_timer.isPaused = YES;
[_glkView deleteDrawable];
[self ensureGLContext];
_nv12TextureCache = nil;
_i420TextureCache = nil;
}

- (void)didBecomeActive {
[self setupGL];
}

- (void)willResignActive {
[self teardownGL];
}

- (void)ensureGLContext {
NSAssert(_glContext, @"context shouldn't be nil");
if ([EAGLContext currentContext] != _glContext) {
[EAGLContext setCurrentContext:_glContext];
}
}

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
assert([NSThread isMainThread]);

return _sink;
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived {
_onFirstFrameReceived = [onFirstFrameReceived copy];
_firstFrameReceivedReported = false;
}

- (void)setInternalOrientation:(int)internalOrientation {
_internalOrientation = internalOrientation;
if (_onOrientationUpdated) {
_onOrientationUpdated(internalOrientation);
}
}

- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated {
_onOrientationUpdated = [onOrientationUpdated copy];
}

- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated {
}

@end

@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Copyright 2015 The WebRTC project authors. All Rights Reserved.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license
|
||||
* that can be found in the LICENSE file in the root of the source
|
||||
* tree. An additional intellectual property rights grant can be found
|
||||
* in the file PATENTS. All contributing project authors may
|
||||
* be found in the AUTHORS file in the root of the source tree.
|
||||
*/
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
|
||||
#if !TARGET_OS_IPHONE
|
||||
|
||||
#import <AppKit/NSOpenGLView.h>
|
||||
#import "api/media_stream_interface.h"
|
||||
#import "RTCVideoRenderer.h"
|
||||
#import "RTCVideoViewShading.h"
|
||||
|
||||
NS_ASSUME_NONNULL_BEGIN
|
||||
|
||||
@class GLVideoView;
|
||||
|
||||
@protocol GLVideoViewDelegate<RTCVideoViewDelegate> @end
|
||||
|
||||
@interface GLVideoView : NSView <RTCVideoRenderer>
|
||||
|
||||
@property(nonatomic, weak) id<GLVideoViewDelegate> delegate;
|
||||
|
||||
- (instancetype)initWithFrame:(NSRect)frameRect
|
||||
pixelFormat:(NSOpenGLPixelFormat *)format
|
||||
shader:(id<RTCVideoViewShading>)shader
|
||||
NS_DESIGNATED_INITIALIZER;
|
||||
|
||||
|
||||
@property(nonatomic, nullable) NSValue *rotationOverride;
|
||||
|
||||
@property (nonatomic, readwrite) int internalOrientation;
|
||||
|
||||
- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;
|
||||
- (void)setOnFirstFrameReceived:(void (^ _Nullable)(float))onFirstFrameReceived;
|
||||
- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated;
|
||||
- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated;
|
||||
- (void)setVideoContentMode:(CALayerContentsGravity)mode;
|
||||
- (void)setIsForceMirrored:(BOOL)forceMirrored;
|
||||
@end
|
||||
|
||||
NS_ASSUME_NONNULL_END
|
||||
|
||||
#endif
|

@@ -1,491 +0,0 @@

/*
 * Copyright 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "GLVideoViewMac.h"

#import "TGRTCCVPixelBuffer.h"

#import <GLKit/GLKit.h>

#import "RTCDefaultShader.h"
#import "RTCDisplayLinkTimer.h"
#import "RTCI420TextureCache.h"
#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"
#import "rtc_base/time_utils.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"

namespace {

static RTCVideoFrame *customToObjCVideoFrame(const webrtc::VideoFrame &frame, RTCVideoRotation &rotation) {
  rotation = RTCVideoRotation(frame.rotation());
  RTCVideoFrame *videoFrame =
      [[RTCVideoFrame alloc] initWithBuffer:webrtc::ToObjCVideoFrameBuffer(frame.video_frame_buffer())
                                   rotation:rotation
                                timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
  videoFrame.timeStamp = frame.timestamp();

  return videoFrame;
}

class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
  VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation)) {
    _frameReceived = [frameReceived copy];
  }

  void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
    RTCVideoRotation rotation = RTCVideoRotation_0;
    RTCVideoFrame* videoFrame = customToObjCVideoFrame(nativeVideoFrame, rotation);

    CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);

    if (_frameReceived) {
      _frameReceived(currentSize, videoFrame, rotation);
    }
  }

 private:
  void (^_frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation);
};
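
// VideoRendererAdapterImpl bridges WebRTC's C++ VideoSinkInterface into an
// Objective-C block. OnFrame arrives on a WebRTC worker thread; the block that
// GLVideoView installs below hops to the main queue before touching view state.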

}

static CGSize scaleToFillSize(CGSize size, CGSize maxSize) {
  if (size.width < 1.0f) {
    size.width = 1.0f;
  }
  if (size.height < 1.0f) {
    size.height = 1.0f;
  }
  if (size.width < maxSize.width) {
    size.height = floor(maxSize.width * size.height / MAX(1.0f, size.width));
    size.width = maxSize.width;
  }
  if (size.height < maxSize.height) {
    size.width = floor(maxSize.height * size.width / MAX(1.0f, size.height));
    size.height = maxSize.height;
  }
  return size;
}

static CGSize aspectFilled(CGSize from, CGSize to) {
  CGFloat scale = MAX(from.width / MAX(1.0, to.width), from.height / MAX(1.0, to.height));
  return NSMakeSize(ceil(to.width * scale), ceil(to.height * scale));
}

static CGSize aspectFitted(CGSize from, CGSize to) {
  // Uses MIN so |to| scaled to fit lies entirely inside |from|; the Swift
  // reference below uses min here, while aspectFilled above uses MAX.
  CGFloat scale = MIN(from.width / MAX(1.0, to.width), from.height / MAX(1.0, to.height));
  return NSMakeSize(ceil(to.width * scale), ceil(to.height * scale));
}
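
// Worked example of the two helpers above: placing a 1280x720 video in a
// 500x500 view gives aspectFitted a scale of min(500/1280, 500/720) = 0.390625
// -> 500x282 (letterboxed), while aspectFilled uses max(...) = 0.6944...
// -> 889x500 (cropped to cover the view).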

/*

func aspectFilled(_ size: CGSize) -> CGSize {
    let scale = max(size.width / max(1.0, self.width), size.height / max(1.0, self.height))
    return CGSize(width: ceil(self.width * scale), height: ceil(self.height * scale))
}
func fittedToWidthOrSmaller(_ width: CGFloat) -> CGSize {
    let scale = min(1.0, width / max(1.0, self.width))
    return CGSize(width: floor(self.width * scale), height: floor(self.height * scale))
}

func aspectFitted(_ size: CGSize) -> CGSize {
    let scale = min(size.width / max(1.0, self.width), size.height / max(1.0, self.height))
    return CGSize(width: ceil(self.width * scale), height: ceil(self.height * scale))
}

*/

#if !TARGET_OS_IPHONE

@interface OpenGLVideoView : NSOpenGLView
@property(atomic, strong) RTCVideoFrame *videoFrame;
@property(atomic, strong) RTCI420TextureCache *i420TextureCache;

- (void)drawFrame;
- (instancetype)initWithFrame:(NSRect)frame
                  pixelFormat:(NSOpenGLPixelFormat *)format
                       shader:(id<RTCVideoViewShading>)shader;
@end

static CVReturn OnDisplayLinkFired(CVDisplayLinkRef displayLink,
                                   const CVTimeStamp *now,
                                   const CVTimeStamp *outputTime,
                                   CVOptionFlags flagsIn,
                                   CVOptionFlags *flagsOut,
                                   void *displayLinkContext) {
  OpenGLVideoView *view = (__bridge OpenGLVideoView *)displayLinkContext;
  [view drawFrame];
  return kCVReturnSuccess;
}
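
// CVDisplayLink fires this callback on its own high-priority thread, never the
// main thread, which is why -drawFrame locks the CGL context before rendering.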

@implementation OpenGLVideoView {
  CVDisplayLinkRef _displayLink;
  RTCVideoFrame *_lastDrawnFrame;
  id<RTCVideoViewShading> _shader;

  int64_t _lastDrawnFrameTimeStampNs;
  void (^_onFirstFrameReceived)(float);
  bool _firstFrameReceivedReported;
}

@synthesize videoFrame = _videoFrame;
@synthesize i420TextureCache = _i420TextureCache;

- (instancetype)initWithFrame:(NSRect)frame
                  pixelFormat:(NSOpenGLPixelFormat *)format
                       shader:(id<RTCVideoViewShading>)shader {
  if (self = [super initWithFrame:frame pixelFormat:format]) {
    self->_shader = shader;
  }
  return self;
}

- (void)reshape {
  [super reshape];
  NSRect frame = [self frame];
  [self ensureGLContext];
  CGLLockContext([[self openGLContext] CGLContextObj]);
  glViewport(0, 0, frame.size.width, frame.size.height);
  CGLUnlockContext([[self openGLContext] CGLContextObj]);
}

- (void)lockFocus {
  NSOpenGLContext *context = [self openGLContext];
  [super lockFocus];
  if ([context view] != self) {
    [context setView:self];
  }
  [context makeCurrentContext];
}

- (void)prepareOpenGL {
  [super prepareOpenGL];
  [self ensureGLContext];
  glDisable(GL_DITHER);
  [self setupDisplayLink];
}

- (void)clearGLContext {
  [self ensureGLContext];
  self.i420TextureCache = nil;
  [super clearGLContext];
}

- (void)drawRect:(NSRect)rect {
  [self drawFrame];
}

- (void)drawFrame {
  RTCVideoFrame *frame = self.videoFrame;
  if (!frame || frame == _lastDrawnFrame) {
    return;
  }
  // This method may be called from the CVDisplayLink callback which isn't on
  // the main thread, so we have to lock the GL context before drawing.
  NSOpenGLContext *context = [self openGLContext];
  CGLLockContext([context CGLContextObj]);

  [self ensureGLContext];
  glClear(GL_COLOR_BUFFER_BIT);

  // Rendering native CVPixelBuffer is not supported on OS X.
  // TODO(magjed): Add support for NV12 texture cache on OS X.
  frame = [frame newI420VideoFrame];
  if (!self.i420TextureCache) {
    self.i420TextureCache = [[RTCI420TextureCache alloc] initWithContext:context];
  }
  RTCVideoRotation rotation = frame.rotation;

  RTCI420TextureCache *i420TextureCache = self.i420TextureCache;
  if (i420TextureCache) {
    [i420TextureCache uploadFrameToTextures:frame];
    [_shader applyShadingForFrameWithWidth:frame.width
                                    height:frame.height
                                  rotation:rotation
                                    yPlane:i420TextureCache.yTexture
                                    uPlane:i420TextureCache.uTexture
                                    vPlane:i420TextureCache.vTexture];
    [context flushBuffer];
    _lastDrawnFrame = frame;
  }
  CGLUnlockContext([context CGLContextObj]);

  if (!_firstFrameReceivedReported && _onFirstFrameReceived) {
    _firstFrameReceivedReported = true;
    float aspectRatio = (float)frame.width / (float)frame.height;
    dispatch_async(dispatch_get_main_queue(), ^{
      self->_onFirstFrameReceived(aspectRatio);
    });
  }
}

- (void)setupDisplayLink {
  if (_displayLink) {
    return;
  }
  // Synchronize buffer swaps with vertical refresh rate.
  GLint swapInt = 1;
  [[self openGLContext] setValues:&swapInt forParameter:NSOpenGLCPSwapInterval];

  // Create display link.
  CVDisplayLinkCreateWithActiveCGDisplays(&_displayLink);
  CVDisplayLinkSetOutputCallback(_displayLink,
                                 &OnDisplayLinkFired,
                                 (__bridge void *)self);
  // Set the display link for the current renderer.
  CGLContextObj cglContext = [[self openGLContext] CGLContextObj];
  CGLPixelFormatObj cglPixelFormat = [[self pixelFormat] CGLPixelFormatObj];
  CVDisplayLinkSetCurrentCGDisplayFromOpenGLContext(_displayLink, cglContext, cglPixelFormat);
  CVDisplayLinkStart(_displayLink);
}

- (void)setFrameOrigin:(NSPoint)newOrigin {
  [super setFrameOrigin:newOrigin];
}

- (void)teardownDisplayLink {
  if (!_displayLink) {
    return;
  }
  CVDisplayLinkRelease(_displayLink);
  _displayLink = NULL;
}

- (void)ensureGLContext {
  NSOpenGLContext *context = [self openGLContext];
  NSAssert(context, @"context shouldn't be nil");
  if ([NSOpenGLContext currentContext] != context) {
    [context makeCurrentContext];
  }
}

- (void)dealloc {
  [self teardownDisplayLink];
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)(float))onFirstFrameReceived {
  _onFirstFrameReceived = [onFirstFrameReceived copy];
  _firstFrameReceivedReported = false;
}

@end

@interface GLVideoView ()
@property(nonatomic, strong) OpenGLVideoView *glView;
@end

@implementation GLVideoView {
  CGSize _currentSize;

  std::shared_ptr<VideoRendererAdapterImpl> _sink;

  void (^_onOrientationUpdated)(int);
  void (^_onIsMirroredUpdated)(bool);

  bool _didSetShouldBeMirrored;
  bool _shouldBeMirrored;
  bool _forceMirrored;
}

@synthesize delegate = _delegate;

- (instancetype)initWithFrame:(NSRect)frameRect {
  NSOpenGLPixelFormatAttribute attributes[] = {
    NSOpenGLPFADoubleBuffer,
    NSOpenGLPFADepthSize, 24,
    NSOpenGLPFAOpenGLProfile,
    NSOpenGLProfileVersion3_2Core,
    0
  };
  NSOpenGLPixelFormat *pixelFormat =
      [[NSOpenGLPixelFormat alloc] initWithAttributes:attributes];
  return [self initWithFrame:frameRect pixelFormat:pixelFormat];
}

- (instancetype)initWithFrame:(NSRect)frame pixelFormat:(NSOpenGLPixelFormat *)format {
  return [self initWithFrame:frame pixelFormat:format shader:[[RTCDefaultShader alloc] init]];
}

- (instancetype)initWithFrame:(NSRect)frame
                  pixelFormat:(NSOpenGLPixelFormat *)format
                       shader:(id<RTCVideoViewShading>)shader {
  if (self = [super initWithFrame:frame]) {
    _glView = [[OpenGLVideoView alloc] initWithFrame:frame pixelFormat:format shader:shader];
    _glView.wantsLayer = YES;
    self.layerContentsRedrawPolicy = NSViewLayerContentsRedrawDuringViewResize;
    _glView.layerContentsRedrawPolicy = NSViewLayerContentsRedrawDuringViewResize;

    [self addSubview:_glView];

    __weak GLVideoView *weakSelf = self;

    self.wantsLayer = YES;

    _sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame, RTCVideoRotation rotation) {
      dispatch_async(dispatch_get_main_queue(), ^{
        __strong GLVideoView *strongSelf = weakSelf;
        if (strongSelf == nil) {
          return;
        }
        if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
          strongSelf->_currentSize = size;
          [strongSelf setSize:size];
        }

        int mappedValue = 0;
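        // Map RTCVideoRotation onto the integer internalOrientation convention
        // implemented by the switch below (0/90 -> 0, 180 -> 1, 270 -> 2);
        // consumers of internalOrientation are assumed to expect this encoding.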
        switch (rotation) {
          case RTCVideoRotation_90:
            mappedValue = 0;
            break;
          case RTCVideoRotation_180:
            mappedValue = 1;
            break;
          case RTCVideoRotation_270:
            mappedValue = 2;
            break;
          default:
            mappedValue = 0;
            break;
        }
        [strongSelf setInternalOrientation:mappedValue];

        [strongSelf renderFrame:videoFrame];
      });
    }));
  }
  return self;
}

- (CALayerContentsGravity)videoContentMode {
  return self.glView.layer.contentsGravity;
}

- (void)setVideoContentMode:(CALayerContentsGravity)mode {
  self.glView.layer.contentsGravity = mode;
  [self setNeedsLayout:YES];
}

- (void)layout {
  [super layout];

  if (self.bounds.size.width > 0.0f && _currentSize.width > 0) {
    NSSize size = _currentSize;
    NSSize frameSize = self.frame.size;
    // Aspect-fill gravity covers the view; any other gravity letterboxes the
    // video inside it.
    if (self.glView.layer.contentsGravity == kCAGravityResizeAspectFill) {
      size = aspectFilled(frameSize, _currentSize);
    } else {
      size = aspectFitted(frameSize, _currentSize);
    }
    _glView.frame = CGRectMake(floor((self.bounds.size.width - size.width) / 2.0), floor((self.bounds.size.height - size.height) / 2.0), size.width, size.height);
  }

  if (_shouldBeMirrored || _forceMirrored) {
    self.glView.layer.anchorPoint = NSMakePoint(1, 0);
    self.glView.layer.affineTransform = CGAffineTransformMakeScale(-1, 1);
  } else {
    self.glView.layer.anchorPoint = NSMakePoint(0, 0);
    self.glView.layer.affineTransform = CGAffineTransformIdentity;
  }
}

- (void)setSize:(CGSize)size {
  [self.delegate videoView:self didChangeVideoSize:size];
  [self setNeedsLayout:YES];
}

- (void)renderFrame:(RTCVideoFrame *)videoFrame {
  self.glView.videoFrame = videoFrame;

  if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
    RTCCVPixelBuffer *buffer = (RTCCVPixelBuffer *)videoFrame.buffer;
    if ([buffer isKindOfClass:[TGRTCCVPixelBuffer class]]) {
      bool shouldBeMirrored = ((TGRTCCVPixelBuffer *)buffer).shouldBeMirrored;
      // Capture whether the mirror state changed before updating it, so the
      // notification below can still fire; comparing after the assignment
      // would always be false.
      bool mirroredChanged = (shouldBeMirrored != _shouldBeMirrored);
      if (mirroredChanged) {
        _shouldBeMirrored = shouldBeMirrored;
        if (shouldBeMirrored || _forceMirrored) {
          self.glView.layer.anchorPoint = NSMakePoint(1, 0);
          self.glView.layer.affineTransform = CGAffineTransformMakeScale(-1, 1);
        } else {
          self.glView.layer.anchorPoint = NSMakePoint(0, 0);
          self.glView.layer.affineTransform = CGAffineTransformIdentity;
        }
      }

      if (mirroredChanged) {
        if (_didSetShouldBeMirrored) {
          if (_onIsMirroredUpdated) {
            _onIsMirroredUpdated(_shouldBeMirrored);
          }
        } else {
          _didSetShouldBeMirrored = true;
        }
      }
    }
  }
}

#pragma mark - Private

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
  assert([NSThread isMainThread]);

  return _sink;
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)(float))onFirstFrameReceived {
  [self.glView setOnFirstFrameReceived:onFirstFrameReceived];
}

- (void)setInternalOrientation:(int)internalOrientation {
  _internalOrientation = internalOrientation;
  if (_onOrientationUpdated) {
    _onOrientationUpdated(internalOrientation);
  }
}

- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated {
  _onOrientationUpdated = [onOrientationUpdated copy];
}

- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated {
  // Retain the callback so renderFrame: above can report mirror-state changes.
  _onIsMirroredUpdated = [onIsMirroredUpdated copy];
}

- (void)setIsForceMirrored:(BOOL)forceMirrored {
  _forceMirrored = forceMirrored;
  [self setNeedsLayout:YES];
}

@end

#endif // !TARGET_OS_IPHONE

@@ -1,12 +0,0 @@

#ifndef TGRTCCVPIXELBUFFER_H
#define TGRTCCVPIXELBUFFER_H

#import "components/video_frame_buffer/RTCCVPixelBuffer.h"

@interface TGRTCCVPixelBuffer : RTCCVPixelBuffer

@property (nonatomic) bool shouldBeMirrored;

@end

#endif

@@ -1,5 +0,0 @@

#import "TGRTCCVPixelBuffer.h"

@implementation TGRTCCVPixelBuffer

@end

@@ -1,25 +0,0 @@

/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoDecoderFactory.h"

NS_ASSUME_NONNULL_BEGIN

/** This decoder factory includes support for all codecs bundled with WebRTC. If using custom
 *  codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
 */
RTC_OBJC_EXPORT
@interface TGRTCDefaultVideoDecoderFactory : NSObject <RTCVideoDecoderFactory>
@end

NS_ASSUME_NONNULL_END

@@ -1,113 +0,0 @@

/*
 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "TGRTCDefaultVideoDecoderFactory.h"

#import "RTCH264ProfileLevelId.h"
#import "TGRTCVideoDecoderH264.h"
#import "api/video_codec/RTCVideoCodecConstants.h"
#import "api/video_codec/RTCVideoDecoderVP8.h"
#import "base/RTCVideoCodecInfo.h"
#if defined(RTC_ENABLE_VP9)
#import "api/video_codec/RTCVideoDecoderVP9.h"
#endif
#if !defined(DISABLE_H265)
#import "RTCH265ProfileLevelId.h"
#import "TGRTCVideoDecoderH265.h"
#endif

@implementation TGRTCDefaultVideoDecoderFactory

- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
    @"level-asymmetry-allowed" : @"1",
    @"packetization-mode" : @"1",
  };
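  // Standard H.264 SDP fmtp parameters: "level-asymmetry-allowed=1" permits
  // the two peers to negotiate different H.264 levels per direction, and
  // "packetization-mode=1" allows non-interleaved packetization (single NAL
  // units plus FU-A fragmentation).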
  RTCVideoCodecInfo *constrainedHighInfo =
      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
                                   parameters:constrainedHighParams];

  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
    @"level-asymmetry-allowed" : @"1",
    @"packetization-mode" : @"1",
  };
  RTCVideoCodecInfo *constrainedBaselineInfo =
      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
                                   parameters:constrainedBaselineParams];

  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];

#if defined(RTC_ENABLE_VP9)
  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
#endif

#if !defined(DISABLE_H265)
  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
#endif

  NSMutableArray<RTCVideoCodecInfo *> *result = [[NSMutableArray alloc] initWithArray:@[
    constrainedHighInfo,
    constrainedBaselineInfo,
    vp8Info,
#if defined(RTC_ENABLE_VP9)
    vp9Info,
#endif
  ]];

#if !defined(DISABLE_H265)
#ifdef WEBRTC_IOS
  if (@available(iOS 11.0, *)) {
    [result addObject:h265Info];
  }
#else // WEBRTC_IOS
  if (@available(macOS 10.13, *)) {
    [result addObject:h265Info];
  }
#endif // WEBRTC_IOS
#endif

  return result;
}

- (id<RTCVideoDecoder>)createDecoder:(RTCVideoCodecInfo *)info {
  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
    return [[TGRTCVideoDecoderH264 alloc] init];
  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
    return [RTCVideoDecoderVP8 vp8Decoder];
  }

#if defined(RTC_ENABLE_VP9)
  if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
    return [RTCVideoDecoderVP9 vp9Decoder];
  }
#endif

#if !defined(DISABLE_H265)
#ifdef WEBRTC_IOS
  if (@available(iOS 11.0, *)) {
    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
      return [[TGRTCVideoDecoderH265 alloc] init];
    }
  }
#else // WEBRTC_IOS
  if (@available(macOS 10.13, *)) {
    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
      return [[TGRTCVideoDecoderH265 alloc] init];
    }
  }
#endif // WEBRTC_IOS
#endif // !DISABLE_H265

  return nil;
}

@end

@@ -1,30 +0,0 @@

/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoEncoderFactory.h"

NS_ASSUME_NONNULL_BEGIN

/** This encoder factory includes support for all codecs bundled with WebRTC. If using custom
 *  codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
 */
RTC_OBJC_EXPORT
@interface TGRTCDefaultVideoEncoderFactory : NSObject <RTCVideoEncoderFactory>

@property(nonatomic, retain) RTCVideoCodecInfo *preferredCodec;

+ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs;

@end

NS_ASSUME_NONNULL_END

@@ -1,132 +0,0 @@

/*
 * Copyright 2017 The WebRTC Project Authors. All rights reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import "TGRTCDefaultVideoEncoderFactory.h"

#import "RTCH264ProfileLevelId.h"
#import "TGRTCVideoEncoderH264.h"
#import "api/video_codec/RTCVideoCodecConstants.h"
#import "api/video_codec/RTCVideoEncoderVP8.h"
#import "base/RTCVideoCodecInfo.h"
#if defined(RTC_ENABLE_VP9)
#import "api/video_codec/RTCVideoEncoderVP9.h"
#endif
#if !defined(DISABLE_H265)
#import "RTCH265ProfileLevelId.h"
#import "TGRTCVideoEncoderH265.h"
#endif

@implementation TGRTCDefaultVideoEncoderFactory

@synthesize preferredCodec;

+ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
    @"level-asymmetry-allowed" : @"1",
    @"packetization-mode" : @"1",
  };
  RTCVideoCodecInfo *constrainedHighInfo =
      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
                                   parameters:constrainedHighParams];

  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
    @"level-asymmetry-allowed" : @"1",
    @"packetization-mode" : @"1",
  };
  RTCVideoCodecInfo *constrainedBaselineInfo =
      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
                                   parameters:constrainedBaselineParams];

  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];

#if defined(RTC_ENABLE_VP9)
  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
#endif

#if !defined(DISABLE_H265)
  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
#endif

  NSMutableArray *result = [[NSMutableArray alloc] initWithArray:@[
    constrainedHighInfo,
    constrainedBaselineInfo,
    vp8Info,
#if defined(RTC_ENABLE_VP9)
    vp9Info,
#endif
  ]];

#if !defined(DISABLE_H265)
#ifdef WEBRTC_IOS
  if (@available(iOS 11.0, *)) {
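    // The presence of the HEVC export preset is used as a proxy for hardware
    // H.265 encode support; AVAssetExportSession (AVFoundation) is assumed to
    // be visible to this translation unit.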
    if ([[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality]) {
      [result addObject:h265Info];
    }
  }
#else // WEBRTC_IOS
  if (@available(macOS 10.13, *)) {
    if ([[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality]) {
      [result addObject:h265Info];
    }
  }
#endif // WEBRTC_IOS
#endif

  return result;
}

- (id<RTCVideoEncoder>)createEncoder:(RTCVideoCodecInfo *)info {
  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
    return [[TGRTCVideoEncoderH264 alloc] initWithCodecInfo:info];
  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
    return [RTCVideoEncoderVP8 vp8Encoder];
  }
#if defined(RTC_ENABLE_VP9)
  if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
    return [RTCVideoEncoderVP9 vp9Encoder];
  }
#endif

#if !defined(DISABLE_H265)
#ifdef WEBRTC_IOS
  if (@available(iOS 11.0, *)) {
    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
      return [[TGRTCVideoEncoderH265 alloc] initWithCodecInfo:info];
    }
  }
#else // WEBRTC_IOS
  if (@available(macOS 10.13, *)) {
    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
      return [[TGRTCVideoEncoderH265 alloc] initWithCodecInfo:info];
    }
  }
#endif // WEBRTC_IOS
#endif // !DISABLE_H265

  return nil;
}

- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
  NSMutableArray<RTCVideoCodecInfo *> *codecs = [[[self class] supportedCodecs] mutableCopy];

  NSMutableArray<RTCVideoCodecInfo *> *orderedCodecs = [NSMutableArray array];
  NSUInteger index = [codecs indexOfObject:self.preferredCodec];
  if (index != NSNotFound) {
    [orderedCodecs addObject:[codecs objectAtIndex:index]];
    [codecs removeObjectAtIndex:index];
  }
  [orderedCodecs addObjectsFromArray:codecs];

  return [orderedCodecs copy];
}
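
// Usage sketch (hypothetical): to prefer VP8 in SDP negotiation a caller could
// set
//   factory.preferredCodec = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
// after which -supportedCodecs moves the matching entry to the front.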

@end
@@ -1,18 +0,0 @@

/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoDecoder.h"

RTC_OBJC_EXPORT
@interface TGRTCVideoDecoderH264 : NSObject <RTCVideoDecoder>
@end

@@ -1,279 +0,0 @@

/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#import "TGRTCVideoDecoderH264.h"

#import <VideoToolbox/VideoToolbox.h>

#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "helpers.h"
#import "helpers/scoped_cftyperef.h"

#if defined(WEBRTC_IOS)
#import "helpers/UIDevice+RTCDevice.h"
#endif

#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "sdk/objc/components/video_codec/nalu_rewriter.h"

// Struct that we pass to the decoder per frame to decode. We receive it again
// in the decoder callback.
struct RTCFrameDecodeParams {
  RTCFrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts) : callback(cb), timestamp(ts) {}
  RTCVideoDecoderCallback callback;
  int64_t timestamp;
};
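
// Ownership note: a heap-allocated RTCFrameDecodeParams is handed to
// VideoToolbox via frameDecodeParams.release() in -decode:..., and
// decompressionOutputCallback below reclaims it into a unique_ptr, so each
// frame's params are freed exactly once.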

@interface TGRTCVideoDecoderH264 ()
- (void)setError:(OSStatus)error;
@end

// This is the callback function that VideoToolbox calls when decode is
// complete.
static void decompressionOutputCallback(void *decoderRef,
                                        void *params,
                                        OSStatus status,
                                        VTDecodeInfoFlags infoFlags,
                                        CVImageBufferRef imageBuffer,
                                        CMTime timestamp,
                                        CMTime duration) {
  std::unique_ptr<RTCFrameDecodeParams> decodeParams(
      reinterpret_cast<RTCFrameDecodeParams *>(params));
  if (status != noErr) {
    TGRTCVideoDecoderH264 *decoder = (__bridge TGRTCVideoDecoderH264 *)decoderRef;
    [decoder setError:status];
    RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
    return;
  }
  // TODO(tkchin): Handle CVO properly.
  RTCCVPixelBuffer *frameBuffer = [[RTCCVPixelBuffer alloc] initWithPixelBuffer:imageBuffer];
  RTCVideoFrame *decodedFrame =
      [[RTCVideoFrame alloc] initWithBuffer:frameBuffer
                                   rotation:RTCVideoRotation_0
                                timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
  decodedFrame.timeStamp = (int32_t)decodeParams->timestamp;
  decodeParams->callback(decodedFrame);
}

// Decoder.
@implementation TGRTCVideoDecoderH264 {
  CMVideoFormatDescriptionRef _videoFormat;
  CMMemoryPoolRef _memoryPool;
  VTDecompressionSessionRef _decompressionSession;
  RTCVideoDecoderCallback _callback;
  OSStatus _error;
}

- (instancetype)init {
  self = [super init];
  if (self) {
    _memoryPool = CMMemoryPoolCreate(nil);
  }
  return self;
}

- (void)dealloc {
  CMMemoryPoolInvalidate(_memoryPool);
  CFRelease(_memoryPool);
  [self destroyDecompressionSession];
  [self setVideoFormat:nullptr];
}

- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores {
  return WEBRTC_VIDEO_CODEC_OK;
}

- (NSInteger)decode:(RTCEncodedImage *)inputImage
        missingFrames:(BOOL)missingFrames
    codecSpecificInfo:(nullable id<RTCCodecSpecificInfo>)info
         renderTimeMs:(int64_t)renderTimeMs {
  RTC_DCHECK(inputImage.buffer);

  if (_error != noErr) {
    RTC_LOG(LS_WARNING) << "Last frame decode failed.";
    _error = noErr;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  rtc::ScopedCFTypeRef<CMVideoFormatDescriptionRef> inputFormat =
      rtc::ScopedCF(webrtc::CreateVideoFormatDescription((uint8_t *)inputImage.buffer.bytes,
                                                         inputImage.buffer.length));
  if (inputFormat) {
    // Check if the video format has changed, and reinitialize decoder if
    // needed.
    if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) {
      [self setVideoFormat:inputFormat.get()];
      int resetDecompressionSessionError = [self resetDecompressionSession];
      if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) {
        return resetDecompressionSessionError;
      }
    }
  }
  if (!_videoFormat) {
    // We received a frame but we don't have format information so we can't
    // decode it.
    // This can happen after backgrounding. We need to wait for the next
    // sps/pps before we can resume so we request a keyframe by returning an
    // error.
    RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  CMSampleBufferRef sampleBuffer = nullptr;
  if (!webrtc::H264AnnexBBufferToCMSampleBuffer((uint8_t *)inputImage.buffer.bytes,
                                                inputImage.buffer.length,
                                                _videoFormat,
                                                &sampleBuffer,
                                                _memoryPool)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  RTC_DCHECK(sampleBuffer);
  VTDecodeFrameFlags decodeFlags = kVTDecodeFrame_EnableAsynchronousDecompression;
  std::unique_ptr<RTCFrameDecodeParams> frameDecodeParams;
  frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
  OSStatus status = VTDecompressionSessionDecodeFrame(
      _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
#if defined(WEBRTC_IOS)
  // Re-initialize the decoder if we have an invalid session while the app is
  // active, or the decoder malfunctions, and retry the decode request.
  if ((status == kVTInvalidSessionErr || status == kVTVideoDecoderMalfunctionErr) &&
      [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
    RTC_LOG(LS_INFO) << "Failed to decode frame with code: " << status
                     << " retrying decode after decompression session reset";
    frameDecodeParams.reset(new RTCFrameDecodeParams(_callback, inputImage.timeStamp));
    status = VTDecompressionSessionDecodeFrame(
        _decompressionSession, sampleBuffer, decodeFlags, frameDecodeParams.release(), nullptr);
  }
#endif
  CFRelease(sampleBuffer);
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)setCallback:(RTCVideoDecoderCallback)callback {
  _callback = callback;
}

- (void)setError:(OSStatus)error {
  _error = error;
}

- (NSInteger)releaseDecoder {
  // Need to invalidate the session so that callbacks no longer occur and it
  // is safe to null out the callback.
  [self destroyDecompressionSession];
  [self setVideoFormat:nullptr];
  _callback = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}

#pragma mark - Private

- (int)resetDecompressionSession {
  [self destroyDecompressionSession];

  // Need to wait for the first SPS to initialize decoder.
  if (!_videoFormat) {
    return WEBRTC_VIDEO_CODEC_OK;
  }

  // Set keys for OpenGL and IOSurface compatibility, which makes the decoder
  // create pixel buffers with GPU backed memory. The intent here is to pass
  // the pixel buffers directly so we avoid a texture upload later during
  // rendering. This currently is moot because we are converting back to an
  // I420 frame after decode, but eventually we will be able to plumb
  // CVPixelBuffers directly to the renderer.
  // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
  // we can pass CVPixelBuffers as native handles in decoder output.
  static size_t const attributesSize = 3;
  CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
  CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat};
  CFDictionaryRef attributes = CreateCFTypeDictionary(keys, values, attributesSize);
  if (ioSurfaceValue) {
    CFRelease(ioSurfaceValue);
    ioSurfaceValue = nullptr;
  }
  if (pixelFormat) {
    CFRelease(pixelFormat);
    pixelFormat = nullptr;
  }
  VTDecompressionOutputCallbackRecord record = {
      decompressionOutputCallback, (__bridge void *)self,
  };
  OSStatus status = VTDecompressionSessionCreate(
      nullptr, _videoFormat, nullptr, attributes, &record, &_decompressionSession);
  CFRelease(attributes);
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to create decompression session: " << status;
    [self destroyDecompressionSession];
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  [self configureDecompressionSession];

  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)configureDecompressionSession {
  RTC_DCHECK(_decompressionSession);
#if defined(WEBRTC_IOS)
  VTSessionSetProperty(_decompressionSession, kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
#endif
}

- (void)destroyDecompressionSession {
  if (_decompressionSession) {
#if defined(WEBRTC_IOS)
    if ([UIDevice isIOS11OrLater]) {
      VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession);
    }
#endif
    VTDecompressionSessionInvalidate(_decompressionSession);
    CFRelease(_decompressionSession);
    _decompressionSession = nullptr;
  }
}

- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
  if (_videoFormat == videoFormat) {
    return;
  }
  if (_videoFormat) {
    CFRelease(_videoFormat);
  }
  _videoFormat = videoFormat;
  if (_videoFormat) {
    CFRetain(_videoFormat);
  }
}

- (NSString *)implementationName {
  return @"VideoToolbox";
}

@end

@@ -1,19 +0,0 @@

/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoDecoder.h"

RTC_OBJC_EXPORT
API_AVAILABLE(ios(11.0))
@interface TGRTCVideoDecoderH265 : NSObject <RTCVideoDecoder>
@end

@@ -1,347 +0,0 @@

/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#import "TGRTCVideoDecoderH265.h"

#import <VideoToolbox/VideoToolbox.h>

#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "helpers.h"
#import "helpers/scoped_cftyperef.h"

#if defined(WEBRTC_IOS)
#import "helpers/UIDevice+RTCDevice.h"
#endif

#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "sdk/objc/components/video_codec/nalu_rewriter.h"

#include "Manager.h"

typedef void (^TGRTCVideoDecoderRequestKeyframeCallback)();

// Struct that we pass to the decoder per frame to decode. We receive it again
// in the decoder callback.
struct RTCH265FrameDecodeParams {
  RTCH265FrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts, TGRTCVideoDecoderRequestKeyframeCallback requestFrame)
      : callback(cb), timestamp(ts), requestFrame(requestFrame) {}
  RTCVideoDecoderCallback callback;
  int64_t timestamp;
  TGRTCVideoDecoderRequestKeyframeCallback requestFrame;
};

// This is the callback function that VideoToolbox calls when decode is
// complete.
static void tg_h265DecompressionOutputCallback(void* decoder,
                                               void* params,
                                               OSStatus status,
                                               VTDecodeInfoFlags infoFlags,
                                               CVImageBufferRef imageBuffer,
                                               CMTime timestamp,
                                               CMTime duration) {
  std::unique_ptr<RTCH265FrameDecodeParams> decodeParams(
      reinterpret_cast<RTCH265FrameDecodeParams*>(params));
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
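    // -12909 is kVTVideoDecoderBadDataErr: the bitstream is undecodable (for
    // example after packet loss), so ask the sender for a fresh keyframe.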
    if (status == -12909) {
      decodeParams->requestFrame();
    }
    return;
  }
  // TODO(tkchin): Handle CVO properly.
  RTCCVPixelBuffer* frameBuffer =
      [[RTCCVPixelBuffer alloc] initWithPixelBuffer:imageBuffer];
  RTCVideoFrame* decodedFrame = [[RTCVideoFrame alloc]
      initWithBuffer:frameBuffer
            rotation:RTCVideoRotation_0
         timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
  decodedFrame.timeStamp = (int32_t)decodeParams->timestamp;
  decodeParams->callback(decodedFrame);
}

@interface TGRTCVideoDecoderH265RequestKeyframeHolder : NSObject

@property (nonatomic, strong) NSLock *lock;
@property (nonatomic) bool shouldRequestKeyframe;

@end

@implementation TGRTCVideoDecoderH265RequestKeyframeHolder

- (instancetype)init {
  self = [super init];
  if (self != nil) {
    _lock = [[NSLock alloc] init];
  }
  return self;
}

@end
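
// The holder above is shared between the decoder and the VideoToolbox callback
// thread: the callback flips the lock-protected flag asynchronously, and
// -decode:... polls it after each frame, surfacing the request as a
// WEBRTC_VIDEO_CODEC_ERROR so that WebRTC asks the sender for a keyframe.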

// Decoder.
@implementation TGRTCVideoDecoderH265 {
  CMVideoFormatDescriptionRef _videoFormat;
  VTDecompressionSessionRef _decompressionSession;
  RTCVideoDecoderCallback _callback;
  TGRTCVideoDecoderH265RequestKeyframeHolder *_requestKeyframeHolder;
  TGRTCVideoDecoderRequestKeyframeCallback _requestFrame;
  OSStatus _error;
}

- (instancetype)init {
  if (self = [super init]) {
    _requestKeyframeHolder = [[TGRTCVideoDecoderH265RequestKeyframeHolder alloc] init];
    TGRTCVideoDecoderH265RequestKeyframeHolder *requestKeyframeHolder = _requestKeyframeHolder;
    _requestFrame = ^{
      [requestKeyframeHolder.lock lock];
      requestKeyframeHolder.shouldRequestKeyframe = true;
      [requestKeyframeHolder.lock unlock];
    };
    NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
#ifdef WEBRTC_IOS
    [center addObserver:self
               selector:@selector(handleApplicationDidBecomeActive:)
                   name:UIApplicationWillEnterForegroundNotification
                 object:[UIApplication sharedApplication]];
#endif
  }

  return self;
}

- (void)dealloc {
  [self destroyDecompressionSession];
  [self setVideoFormat:nullptr];
  [[NSNotificationCenter defaultCenter] removeObserver:self];
}

- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores {
  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
  __weak TGRTCVideoDecoderH265 *weakSelf = self;
  tgcalls::Manager::getMediaThread()->PostTask(RTC_FROM_HERE, [weakSelf]() {
    __strong TGRTCVideoDecoderH265 *strongSelf = weakSelf;
    if (strongSelf == nil) {
      return;
    }
    // Drop the cached format on the media thread so the next frame
    // reinitializes the session; go through the setter so the retained
    // CMVideoFormatDescriptionRef is released rather than leaked.
    [strongSelf setVideoFormat:nullptr];
  });
}

- (NSInteger)decode:(RTCEncodedImage*)inputImage
        missingFrames:(BOOL)missingFrames
    codecSpecificInfo:(__nullable id<RTCCodecSpecificInfo>)info
         renderTimeMs:(int64_t)renderTimeMs {
  RTC_DCHECK(inputImage.buffer);

  if (_error != noErr) {
    RTC_LOG(LS_WARNING) << "Last frame decode failed.";
    _error = noErr;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  rtc::ScopedCFTypeRef<CMVideoFormatDescriptionRef> inputFormat =
      rtc::ScopedCF(webrtc::CreateH265VideoFormatDescription(
          (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length));
  if (inputFormat) {
    CMVideoDimensions dimensions =
        CMVideoFormatDescriptionGetDimensions(inputFormat.get());
    RTC_LOG(LS_INFO) << "Resolution: " << dimensions.width << " x "
                     << dimensions.height;
    // Check if the video format has changed, and reinitialize decoder if
    // needed.
    if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) {
      [self setVideoFormat:inputFormat.get()];
      int resetDecompressionSessionError = [self resetDecompressionSession];
      if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) {
        return resetDecompressionSessionError;
      }
    }
  }
  if (!_videoFormat) {
    // We received a frame but we don't have format information so we can't
    // decode it.
    // This can happen after backgrounding. We need to wait for the next
    // sps/pps before we can resume so we request a keyframe by returning an
    // error.
    RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  CMSampleBufferRef sampleBuffer = nullptr;
  if (!webrtc::H265AnnexBBufferToCMSampleBuffer(
          (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length,
          _videoFormat, &sampleBuffer)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  RTC_DCHECK(sampleBuffer);
  VTDecodeFrameFlags decodeFlags =
      kVTDecodeFrame_EnableAsynchronousDecompression;
  std::unique_ptr<RTCH265FrameDecodeParams> frameDecodeParams;
  frameDecodeParams.reset(
      new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame));
  OSStatus status = VTDecompressionSessionDecodeFrame(
      _decompressionSession, sampleBuffer, decodeFlags,
      frameDecodeParams.release(), nullptr);
#if defined(WEBRTC_IOS)
  // Re-initialize the decoder if we have an invalid session while the app is
  // active and retry the decode request.
  if (status == kVTInvalidSessionErr &&
      [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
    frameDecodeParams.reset(
        new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame));
    status = VTDecompressionSessionDecodeFrame(
        _decompressionSession, sampleBuffer, decodeFlags,
        frameDecodeParams.release(), nullptr);
  }
#endif
  CFRelease(sampleBuffer);
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  bool requestKeyframe = false;

  [_requestKeyframeHolder.lock lock];
  if (_requestKeyframeHolder.shouldRequestKeyframe) {
    _requestKeyframeHolder.shouldRequestKeyframe = false;
    requestKeyframe = true;
  }
  [_requestKeyframeHolder.lock unlock];

  if (requestKeyframe) {
    RTC_LOG(LS_ERROR) << "Decoder asynchronously asked to request keyframe";
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)setCallback:(RTCVideoDecoderCallback)callback {
  _callback = callback;
}

- (NSInteger)releaseDecoder {
  // Need to invalidate the session so that callbacks no longer occur and it
  // is safe to null out the callback.
  [self destroyDecompressionSession];
  [self setVideoFormat:nullptr];
  _callback = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}

#pragma mark - Private

- (int)resetDecompressionSession {
  [self destroyDecompressionSession];

  // Need to wait for the first SPS to initialize decoder.
  if (!_videoFormat) {
    return WEBRTC_VIDEO_CODEC_OK;
  }

  // Set keys for OpenGL and IOSurface compatibility, which makes the decoder
  // create pixel buffers with GPU backed memory. The intent here is to pass
  // the pixel buffers directly so we avoid a texture upload later during
  // rendering. This currently is moot because we are converting back to an
  // I420 frame after decode, but eventually we will be able to plumb
  // CVPixelBuffers directly to the renderer.
  // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that
  // we can pass CVPixelBuffers as native handles in decoder output.
  static size_t const attributesSize = 3;
  CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixelFormat =
      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
  CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue,
                                      pixelFormat};
  CFDictionaryRef attributes =
      CreateCFTypeDictionary(keys, values, attributesSize);
  if (ioSurfaceValue) {
    CFRelease(ioSurfaceValue);
    ioSurfaceValue = nullptr;
  }
  if (pixelFormat) {
    CFRelease(pixelFormat);
    pixelFormat = nullptr;
  }
  VTDecompressionOutputCallbackRecord record = {
      tg_h265DecompressionOutputCallback,
      nullptr,
  };
  OSStatus status =
      VTDecompressionSessionCreate(nullptr, _videoFormat, nullptr, attributes,
                                   &record, &_decompressionSession);
  CFRelease(attributes);
  if (status != noErr) {
    [self destroyDecompressionSession];
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  [self configureDecompressionSession];

  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)configureDecompressionSession {
  RTC_DCHECK(_decompressionSession);
#if defined(WEBRTC_IOS)
  // VTSessionSetProperty(_decompressionSession,
  //                      kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
#endif
}

- (void)destroyDecompressionSession {
  if (_decompressionSession) {
#if defined(WEBRTC_IOS)
    if ([UIDevice isIOS11OrLater]) {
      VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession);
    }
#endif
    VTDecompressionSessionInvalidate(_decompressionSession);
    CFRelease(_decompressionSession);
    _decompressionSession = nullptr;
  }
}

- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
  if (_videoFormat == videoFormat) {
    return;
  }
  if (_videoFormat) {
    CFRelease(_videoFormat);
  }
  _videoFormat = videoFormat;
  if (_videoFormat) {
    CFRetain(_videoFormat);
  }
}

- (NSString*)implementationName {
  return @"VideoToolbox";
}

@end

@@ -1,22 +0,0 @@

/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoCodecInfo.h"
#import "RTCVideoEncoder.h"

RTC_OBJC_EXPORT
@interface TGRTCVideoEncoderH264 : NSObject <RTCVideoEncoder>

- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo;

@end
|
|
@ -1,833 +0,0 @@
|
|||
/*
 * Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#import "TGRTCVideoEncoderH264.h"

#import <VideoToolbox/VideoToolbox.h>
#include <vector>

#if defined(WEBRTC_IOS)
#import "helpers/UIDevice+RTCDevice.h"
#endif
#import "RTCCodecSpecificInfoH264.h"
#import "RTCH264ProfileLevelId.h"
#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h"
#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
#import "base/RTCCodecSpecificInfo.h"
#import "base/RTCI420Buffer.h"
#import "base/RTCVideoEncoder.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "helpers.h"

#include "common_video/h264/h264_bitstream_parser.h"
#include "common_video/h264/profile_level_id.h"
#include "common_video/include/bitrate_adjuster.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/buffer.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "sdk/objc/components/video_codec/nalu_rewriter.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"

@interface TGRTCVideoEncoderH264 ()

- (void)frameWasEncoded:(OSStatus)status
                  flags:(VTEncodeInfoFlags)infoFlags
           sampleBuffer:(CMSampleBufferRef)sampleBuffer
      codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
                  width:(int32_t)width
                 height:(int32_t)height
           renderTimeMs:(int64_t)renderTimeMs
              timestamp:(uint32_t)timestamp
               rotation:(RTCVideoRotation)rotation;

@end

namespace {  // anonymous namespace

// The ratio between kVTCompressionPropertyKey_DataRateLimits and
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
// than the average bit rate to avoid undershooting the target.
const float kLimitToAverageBitRateFactor = 1.5f;
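// Illustrative note (not in the original source): with an average bit rate of
// 1 Mbps, the data rate limit works out to 1.5 Mbps, i.e. 187500 bytes over a
// one-second window (see -setEncoderBitrateBps:frameRate: below).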
// These thresholds deviate from the default h264 QP thresholds, as they
// have been found to work better on devices that support VideoToolbox
const int kLowH264QpThreshold = 28;
const int kHighH264QpThreshold = 39;

const OSType kNV12PixelFormat = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;

// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback.
struct RTCFrameEncodeParams {
  RTCFrameEncodeParams(TGRTCVideoEncoderH264 *e,
                       RTCCodecSpecificInfoH264 *csi,
                       int32_t w,
                       int32_t h,
                       int64_t rtms,
                       uint32_t ts,
                       RTCVideoRotation r)
      : encoder(e), width(w), height(h), render_time_ms(rtms), timestamp(ts), rotation(r) {
    if (csi) {
      codecSpecificInfo = csi;
    } else {
      codecSpecificInfo = [[RTCCodecSpecificInfoH264 alloc] init];
    }
  }

  TGRTCVideoEncoderH264 *encoder;
  RTCCodecSpecificInfoH264 *codecSpecificInfo;
  int32_t width;
  int32_t height;
  int64_t render_time_ms;
  uint32_t timestamp;
  RTCVideoRotation rotation;
};

// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
// encoder. This performs the copy and format conversion.
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
static bool CopyVideoFrameToNV12PixelBuffer(id<RTCI420Buffer> frameBuffer, CVPixelBufferRef pixelBuffer) {
  RTC_DCHECK(pixelBuffer);
  RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer), kNV12PixelFormat);
  RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0), frameBuffer.height);
  RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0), frameBuffer.width);

  CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
    return false;
  }
  uint8_t *dstY = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
  int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
  uint8_t *dstUV = reinterpret_cast<uint8_t *>(CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
  int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
  // Convert I420 to NV12.
  int ret = libyuv::I420ToNV12(frameBuffer.dataY,
                               frameBuffer.strideY,
                               frameBuffer.dataU,
                               frameBuffer.strideU,
                               frameBuffer.dataV,
                               frameBuffer.strideV,
                               dstY,
                               dstStrideY,
                               dstUV,
                               dstStrideUV,
                               frameBuffer.width,
                               frameBuffer.height);
  CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
  if (ret) {
    RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
    return false;
  }
  return true;
}

static CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
  if (!pixel_buffer_pool) {
    RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
    return nullptr;
  }
  CVPixelBufferRef pixel_buffer;
  CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, &pixel_buffer);
  if (ret != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
    // We probably want to drop frames here, since failure probably means
    // that the pool is empty.
    return nullptr;
  }
  return pixel_buffer;
}

// This is the callback function that VideoToolbox calls when encode is
// complete. From inspection this happens on its own queue.
static void compressionOutputCallback(void *encoder,
                                      void *params,
                                      OSStatus status,
                                      VTEncodeInfoFlags infoFlags,
                                      CMSampleBufferRef sampleBuffer) {
  if (!params) {
    // If there are pending callbacks when the encoder is destroyed, this can happen.
    return;
  }
  std::unique_ptr<RTCFrameEncodeParams> encodeParams(
      reinterpret_cast<RTCFrameEncodeParams *>(params));
  [encodeParams->encoder frameWasEncoded:status
                                   flags:infoFlags
                            sampleBuffer:sampleBuffer
                       codecSpecificInfo:encodeParams->codecSpecificInfo
                                   width:encodeParams->width
                                  height:encodeParams->height
                            renderTimeMs:encodeParams->render_time_ms
                               timestamp:encodeParams->timestamp
                                rotation:encodeParams->rotation];
}

// Extract VideoToolbox profile out of the webrtc::SdpVideoFormat. If there is
// no specific VideoToolbox profile for the specified level, AutoLevel will be
// returned. The user must initialize the encoder with a resolution and
// framerate conforming to the selected H264 level regardless.
static CFStringRef ExtractProfile(const webrtc::H264::ProfileLevelId &profile_level_id) {
  switch (profile_level_id.profile) {
    case webrtc::H264::kProfileConstrainedBaseline:
    case webrtc::H264::kProfileBaseline:
      switch (profile_level_id.level) {
        case webrtc::H264::kLevel3:
          return kVTProfileLevel_H264_Baseline_3_0;
        case webrtc::H264::kLevel3_1:
          return kVTProfileLevel_H264_Baseline_3_1;
        case webrtc::H264::kLevel3_2:
          return kVTProfileLevel_H264_Baseline_3_2;
        case webrtc::H264::kLevel4:
          return kVTProfileLevel_H264_Baseline_4_0;
        case webrtc::H264::kLevel4_1:
          return kVTProfileLevel_H264_Baseline_4_1;
        case webrtc::H264::kLevel4_2:
          return kVTProfileLevel_H264_Baseline_4_2;
        case webrtc::H264::kLevel5:
          return kVTProfileLevel_H264_Baseline_5_0;
        case webrtc::H264::kLevel5_1:
          return kVTProfileLevel_H264_Baseline_5_1;
        case webrtc::H264::kLevel5_2:
          return kVTProfileLevel_H264_Baseline_5_2;
        case webrtc::H264::kLevel1:
        case webrtc::H264::kLevel1_b:
        case webrtc::H264::kLevel1_1:
        case webrtc::H264::kLevel1_2:
        case webrtc::H264::kLevel1_3:
        case webrtc::H264::kLevel2:
        case webrtc::H264::kLevel2_1:
        case webrtc::H264::kLevel2_2:
          return kVTProfileLevel_H264_Baseline_AutoLevel;
      }

    case webrtc::H264::kProfileMain:
      switch (profile_level_id.level) {
        case webrtc::H264::kLevel3:
          return kVTProfileLevel_H264_Main_3_0;
        case webrtc::H264::kLevel3_1:
          return kVTProfileLevel_H264_Main_3_1;
        case webrtc::H264::kLevel3_2:
          return kVTProfileLevel_H264_Main_3_2;
        case webrtc::H264::kLevel4:
          return kVTProfileLevel_H264_Main_4_0;
        case webrtc::H264::kLevel4_1:
          return kVTProfileLevel_H264_Main_4_1;
        case webrtc::H264::kLevel4_2:
          return kVTProfileLevel_H264_Main_4_2;
        case webrtc::H264::kLevel5:
          return kVTProfileLevel_H264_Main_5_0;
        case webrtc::H264::kLevel5_1:
          return kVTProfileLevel_H264_Main_5_1;
        case webrtc::H264::kLevel5_2:
          return kVTProfileLevel_H264_Main_5_2;
        case webrtc::H264::kLevel1:
        case webrtc::H264::kLevel1_b:
        case webrtc::H264::kLevel1_1:
        case webrtc::H264::kLevel1_2:
        case webrtc::H264::kLevel1_3:
        case webrtc::H264::kLevel2:
        case webrtc::H264::kLevel2_1:
        case webrtc::H264::kLevel2_2:
          return kVTProfileLevel_H264_Main_AutoLevel;
      }

    case webrtc::H264::kProfileConstrainedHigh:
    case webrtc::H264::kProfileHigh:
      switch (profile_level_id.level) {
        case webrtc::H264::kLevel3:
          return kVTProfileLevel_H264_High_3_0;
        case webrtc::H264::kLevel3_1:
          return kVTProfileLevel_H264_High_3_1;
        case webrtc::H264::kLevel3_2:
          return kVTProfileLevel_H264_High_3_2;
        case webrtc::H264::kLevel4:
          return kVTProfileLevel_H264_High_4_0;
        case webrtc::H264::kLevel4_1:
          return kVTProfileLevel_H264_High_4_1;
        case webrtc::H264::kLevel4_2:
          return kVTProfileLevel_H264_High_4_2;
        case webrtc::H264::kLevel5:
          return kVTProfileLevel_H264_High_5_0;
        case webrtc::H264::kLevel5_1:
          return kVTProfileLevel_H264_High_5_1;
        case webrtc::H264::kLevel5_2:
          return kVTProfileLevel_H264_High_5_2;
        case webrtc::H264::kLevel1:
        case webrtc::H264::kLevel1_b:
        case webrtc::H264::kLevel1_1:
        case webrtc::H264::kLevel1_2:
        case webrtc::H264::kLevel1_3:
        case webrtc::H264::kLevel2:
        case webrtc::H264::kLevel2_1:
        case webrtc::H264::kLevel2_2:
          return kVTProfileLevel_H264_High_AutoLevel;
      }
  }
}

// The function returns the max allowed sample rate (pixels per second) that
// can be processed by given encoder with |profile_level_id|.
// See https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-H.264-201610-S!!PDF-E&type=items
// for details.
static NSUInteger GetMaxSampleRate(const webrtc::H264::ProfileLevelId &profile_level_id) {
  switch (profile_level_id.level) {
    case webrtc::H264::kLevel3:
      return 10368000;
    case webrtc::H264::kLevel3_1:
      return 27648000;
    case webrtc::H264::kLevel3_2:
      return 55296000;
    case webrtc::H264::kLevel4:
    case webrtc::H264::kLevel4_1:
      return 62914560;
    case webrtc::H264::kLevel4_2:
      return 133693440;
    case webrtc::H264::kLevel5:
      return 150994944;
    case webrtc::H264::kLevel5_1:
      return 251658240;
    case webrtc::H264::kLevel5_2:
      return 530841600;
    case webrtc::H264::kLevel1:
    case webrtc::H264::kLevel1_b:
    case webrtc::H264::kLevel1_1:
    case webrtc::H264::kLevel1_2:
    case webrtc::H264::kLevel1_3:
    case webrtc::H264::kLevel2:
    case webrtc::H264::kLevel2_1:
    case webrtc::H264::kLevel2_2:
      // Zero means auto rate setting.
      return 0;
  }
}
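
// Illustrative note (not in the original source): for level 3.1 the cap is
// 27648000 pixels/s, so a 1280x720 stream is limited to roughly
// 27648000 / (1280 * 720) = 30 fps by the computation in
// -startEncodeWithSettings:numberOfCores: below.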
}  // namespace

@implementation TGRTCVideoEncoderH264 {
  RTCVideoCodecInfo *_codecInfo;
  std::unique_ptr<webrtc::BitrateAdjuster> _bitrateAdjuster;
  uint32_t _targetBitrateBps;
  uint32_t _encoderBitrateBps;
  uint32_t _encoderFrameRate;
  uint32_t _maxAllowedFrameRate;
  RTCH264PacketizationMode _packetizationMode;
  absl::optional<webrtc::H264::ProfileLevelId> _profile_level_id;
  RTCVideoEncoderCallback _callback;
  int32_t _width;
  int32_t _height;
  VTCompressionSessionRef _compressionSession;
  CVPixelBufferPoolRef _pixelBufferPool;
  RTCVideoCodecMode _mode;

  webrtc::H264BitstreamParser _h264BitstreamParser;
  std::vector<uint8_t> _frameScaleBuffer;
}

// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo {
  if (self = [super init]) {
    _codecInfo = codecInfo;
    _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95));
    _packetizationMode = RTCH264PacketizationModeNonInterleaved;
    _profile_level_id =
        webrtc::H264::ParseSdpProfileLevelId([codecInfo nativeSdpVideoFormat].parameters);
    RTC_DCHECK(_profile_level_id);
    RTC_LOG(LS_INFO) << "Using profile " << CFStringToString(ExtractProfile(*_profile_level_id));
    RTC_CHECK([codecInfo.name isEqualToString:kRTCVideoCodecH264Name]);
  }
  return self;
}

- (void)dealloc {
  [self destroyCompressionSession];
}

- (NSInteger)startEncodeWithSettings:(RTCVideoEncoderSettings *)settings
                       numberOfCores:(int)numberOfCores {
  RTC_DCHECK(settings);
  RTC_DCHECK([settings.name isEqualToString:kRTCVideoCodecH264Name]);

  _width = settings.width;
  _height = settings.height;
  _mode = settings.mode;

  uint32_t aligned_width = (((_width + 15) >> 4) << 4);
  uint32_t aligned_height = (((_height + 15) >> 4) << 4);
  _maxAllowedFrameRate = static_cast<uint32_t>(GetMaxSampleRate(*_profile_level_id) /
                                               (aligned_width * aligned_height));
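  // Illustrative note (not in the original source): dimensions are rounded up
  // to multiples of 16, the H.264 macroblock size, so 1280x720 stays as-is
  // while 640x360 is treated as 640x368 for the frame rate cap above.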

  // We can only set average bitrate on the HW encoder.
  _targetBitrateBps = settings.startBitrate * 1000;  // startBitrate is in kbps.
  _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
  _encoderFrameRate = MIN(settings.maxFramerate, _maxAllowedFrameRate);
  if (settings.maxFramerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
    RTC_LOG(LS_WARNING) << "Initial encoder frame rate setting " << settings.maxFramerate
                        << " is larger than the "
                        << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
  }

  // TODO(tkchin): Try setting payload size via
  // kVTCompressionPropertyKey_MaxH264SliceBytes.

  return [self resetCompressionSessionWithPixelFormat:kNV12PixelFormat];
}

- (NSInteger)encode:(RTCVideoFrame *)frame
    codecSpecificInfo:(nullable id<RTCCodecSpecificInfo>)codecSpecificInfo
           frameTypes:(NSArray<NSNumber *> *)frameTypes {
  RTC_DCHECK_EQ(frame.width, _width);
  RTC_DCHECK_EQ(frame.height, _height);
  if (!_callback || !_compressionSession) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  BOOL isKeyframeRequired = NO;

  // Get a pixel buffer from the pool and copy frame data over.
  if ([self resetCompressionSessionIfNeededWithFrame:frame]) {
    isKeyframeRequired = YES;
  }

  CVPixelBufferRef pixelBuffer = nullptr;
  if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
    // Native frame buffer
    RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
    if (![rtcPixelBuffer requiresCropping]) {
      // This pixel buffer might have a higher resolution than what the
      // compression session is configured to. The compression session can
      // handle that and will output encoded frames in the configured
      // resolution regardless of the input pixel buffer resolution.
      pixelBuffer = rtcPixelBuffer.pixelBuffer;
      CVBufferRetain(pixelBuffer);
    } else {
      // Cropping required, we need to crop and scale to a new pixel buffer.
      pixelBuffer = CreatePixelBuffer(_pixelBufferPool);
      if (!pixelBuffer) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      int dstWidth = CVPixelBufferGetWidth(pixelBuffer);
      int dstHeight = CVPixelBufferGetHeight(pixelBuffer);
      if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
        int size =
            [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth height:dstHeight];
        _frameScaleBuffer.resize(size);
      } else {
        _frameScaleBuffer.clear();
      }
      _frameScaleBuffer.shrink_to_fit();
      if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer withTempBuffer:_frameScaleBuffer.data()]) {
        CVBufferRelease(pixelBuffer);
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
    }
  }

  if (!pixelBuffer) {
    // We did not have a native frame buffer
    pixelBuffer = CreatePixelBuffer(_pixelBufferPool);
    if (!pixelBuffer) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    RTC_DCHECK(pixelBuffer);
    if (!CopyVideoFrameToNV12PixelBuffer([frame.buffer toI420], pixelBuffer)) {
      RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
      CVBufferRelease(pixelBuffer);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // Check if we need a keyframe.
  if (!isKeyframeRequired && frameTypes) {
    for (NSNumber *frameType in frameTypes) {
      if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
        isKeyframeRequired = YES;
        break;
      }
    }
  }

  CMTime presentationTimeStamp = CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
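  // Note (added for clarity): frame.timeStampNs is in nanoseconds; dividing by
  // rtc::kNumNanosecsPerMillisec yields milliseconds, interpreted here against
  // a 1000 Hz CMTime timescale.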
  CFDictionaryRef frameProperties = nullptr;
  if (isKeyframeRequired) {
    CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    CFTypeRef values[] = {kCFBooleanTrue};
    frameProperties = CreateCFTypeDictionary(keys, values, 1);
  }

  std::unique_ptr<RTCFrameEncodeParams> encodeParams;
  encodeParams.reset(new RTCFrameEncodeParams(self,
                                              codecSpecificInfo,
                                              _width,
                                              _height,
                                              frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
                                              frame.timeStamp,
                                              frame.rotation));
  encodeParams->codecSpecificInfo.packetizationMode = _packetizationMode;

  // Update the bitrate if needed.
  [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:_encoderFrameRate];

  OSStatus status = VTCompressionSessionEncodeFrame(_compressionSession,
                                                    pixelBuffer,
                                                    presentationTimeStamp,
                                                    kCMTimeInvalid,
                                                    frameProperties,
                                                    encodeParams.release(),
                                                    nullptr);
  if (frameProperties) {
    CFRelease(frameProperties);
  }
  if (pixelBuffer) {
    CVBufferRelease(pixelBuffer);
  }

  if (status == kVTInvalidSessionErr) {
    // This error occurs when entering foreground after backgrounding the app.
    RTC_LOG(LS_ERROR) << "Invalid compression session, resetting.";
    [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];

    return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
  } else if (status == kVTVideoEncoderMalfunctionErr) {
    // Sometimes the encoder malfunctions and needs to be restarted.
    RTC_LOG(LS_ERROR)
        << "Encountered video encoder malfunction error. Resetting compression session.";
    [self resetCompressionSessionWithPixelFormat:[self pixelFormatOfFrame:frame]];

    return WEBRTC_VIDEO_CODEC_NO_OUTPUT;
  } else if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)setCallback:(RTCVideoEncoderCallback)callback {
  _callback = callback;
}

- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
  _targetBitrateBps = 1000 * bitrateKbit;
  _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
  if (framerate > _maxAllowedFrameRate && _maxAllowedFrameRate > 0) {
    RTC_LOG(LS_WARNING) << "Encoder frame rate setting " << framerate << " is larger than the "
                        << "maximal allowed frame rate " << _maxAllowedFrameRate << ".";
  }
  framerate = MIN(framerate, _maxAllowedFrameRate);
  [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps() frameRate:framerate];
  return WEBRTC_VIDEO_CODEC_OK;
}

#pragma mark - Private

- (NSInteger)releaseEncoder {
  // Need to destroy so that the session is invalidated and won't use the
  // callback anymore. Do not remove callback until the session is invalidated
  // since async encoder callbacks can occur until invalidation.
  [self destroyCompressionSession];
  _callback = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}

- (OSType)pixelFormatOfFrame:(RTCVideoFrame *)frame {
  // Use NV12 for non-native frames.
  if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
    RTCCVPixelBuffer *rtcPixelBuffer = (RTCCVPixelBuffer *)frame.buffer;
    return CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
  }

  return kNV12PixelFormat;
}

- (BOOL)resetCompressionSessionIfNeededWithFrame:(RTCVideoFrame *)frame {
  BOOL resetCompressionSession = NO;

  // If we're capturing native frames in another pixel format than the compression session is
  // configured with, make sure the compression session is reset using the correct pixel format.
  OSType framePixelFormat = [self pixelFormatOfFrame:frame];

  if (_compressionSession) {
    // The pool attribute `kCVPixelBufferPixelFormatTypeKey` can contain either an array of pixel
    // formats or a single pixel format.
    NSDictionary *poolAttributes =
        (__bridge NSDictionary *)CVPixelBufferPoolGetPixelBufferAttributes(_pixelBufferPool);
    id pixelFormats =
        [poolAttributes objectForKey:(__bridge NSString *)kCVPixelBufferPixelFormatTypeKey];
    NSArray<NSNumber *> *compressionSessionPixelFormats = nil;
    if ([pixelFormats isKindOfClass:[NSArray class]]) {
      compressionSessionPixelFormats = (NSArray *)pixelFormats;
    } else if ([pixelFormats isKindOfClass:[NSNumber class]]) {
      compressionSessionPixelFormats = @[ (NSNumber *)pixelFormats ];
    }

    if (![compressionSessionPixelFormats
            containsObject:[NSNumber numberWithLong:framePixelFormat]]) {
      resetCompressionSession = YES;
      RTC_LOG(LS_INFO) << "Resetting compression session due to non-matching pixel format.";
    }
  } else {
    resetCompressionSession = YES;
  }

  if (resetCompressionSession) {
    [self resetCompressionSessionWithPixelFormat:framePixelFormat];
  }
  return resetCompressionSession;
}

- (int)resetCompressionSessionWithPixelFormat:(OSType)framePixelFormat {
  [self destroyCompressionSession];

  // Set source image buffer attributes. These attributes will be present on
  // buffers retrieved from the encoder's pixel buffer pool.
  const size_t attributesSize = 3;
  CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
  int64_t pixelFormatType = framePixelFormat;
  CFNumberRef pixelFormat = CFNumberCreate(nullptr, kCFNumberLongType, &pixelFormatType);
  CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, pixelFormat};
  CFDictionaryRef sourceAttributes = CreateCFTypeDictionary(keys, values, attributesSize);
  if (ioSurfaceValue) {
    CFRelease(ioSurfaceValue);
    ioSurfaceValue = nullptr;
  }
  if (pixelFormat) {
    CFRelease(pixelFormat);
    pixelFormat = nullptr;
  }
  CFMutableDictionaryRef encoder_specs = nullptr;
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
  // Currently hw accl is supported above 360p on mac, below 360p
  // the compression session will be created with hw accl disabled.
  encoder_specs = CFDictionaryCreateMutable(
      nullptr, 1, &kCFTypeDictionaryKeyCallBacks, &kCFTypeDictionaryValueCallBacks);
  CFDictionarySetValue(encoder_specs,
                       kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
                       kCFBooleanTrue);
#endif
  OSStatus status =
      VTCompressionSessionCreate(nullptr,  // use default allocator
                                 _width,
                                 _height,
                                 kCMVideoCodecType_H264,
                                 encoder_specs,  // use hardware accelerated encoder if available
                                 sourceAttributes,
                                 nullptr,  // use default compressed data allocator
                                 compressionOutputCallback,
                                 nullptr,
                                 &_compressionSession);
  if (sourceAttributes) {
    CFRelease(sourceAttributes);
    sourceAttributes = nullptr;
  }
  if (encoder_specs) {
    CFRelease(encoder_specs);
    encoder_specs = nullptr;
  }
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
  CFBooleanRef hwaccl_enabled = nullptr;
  status = VTSessionCopyProperty(_compressionSession,
                                 kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder,
                                 nullptr,
                                 &hwaccl_enabled);
  if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
    RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
  } else {
    RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
  }
#endif
  [self configureCompressionSession];

  // The pixel buffer pool is dependent on the compression session so if the session is reset, the
  // pool should be reset as well.
  _pixelBufferPool = VTCompressionSessionGetPixelBufferPool(_compressionSession);

  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)configureCompressionSession {
  RTC_DCHECK(_compressionSession);
  SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, false);
  SetVTSessionProperty(_compressionSession,
                       kVTCompressionPropertyKey_ProfileLevel,
                       ExtractProfile(*_profile_level_id));
  SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AllowFrameReordering, false);
  [self setEncoderBitrateBps:_targetBitrateBps frameRate:_encoderFrameRate];
  // TODO(tkchin): Look at entropy mode and colorspace matrices.
  // TODO(tkchin): Investigate to see if there's any way to make this work.
  // May need it to interop with Android. Currently this call just fails.
  // On inspecting encoder output on iOS8, this value is set to 6.
  // internal::SetVTSessionProperty(compression_session_,
  //     kVTCompressionPropertyKey_MaxFrameDelayCount,
  //     1);

  // Set a relatively large value for keyframe emission (7200 frames or 4 minutes).
  SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
  SetVTSessionProperty(
      _compressionSession, kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, 240);
}

- (void)destroyCompressionSession {
  if (_compressionSession) {
    VTCompressionSessionInvalidate(_compressionSession);
    CFRelease(_compressionSession);
    _compressionSession = nullptr;
    _pixelBufferPool = nullptr;
  }
}

- (NSString *)implementationName {
  return @"VideoToolbox";
}

- (void)setBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
  if (_encoderBitrateBps != bitrateBps || _encoderFrameRate != frameRate) {
    [self setEncoderBitrateBps:bitrateBps frameRate:frameRate];
  }
}

- (void)setEncoderBitrateBps:(uint32_t)bitrateBps frameRate:(uint32_t)frameRate {
  if (_compressionSession) {
    SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_AverageBitRate, bitrateBps);

    // With zero |_maxAllowedFrameRate|, we fall back to automatic frame rate detection.
    if (_maxAllowedFrameRate > 0) {
      SetVTSessionProperty(
          _compressionSession, kVTCompressionPropertyKey_ExpectedFrameRate, frameRate);
    }

    // TODO(tkchin): Add a helper method to set array value.
    int64_t dataLimitBytesPerSecondValue =
        static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
    CFNumberRef bytesPerSecond =
        CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &dataLimitBytesPerSecondValue);
    int64_t oneSecondValue = 1;
    CFNumberRef oneSecond =
        CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
    const void *nums[2] = {bytesPerSecond, oneSecond};
    CFArrayRef dataRateLimits = CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
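    // Note (added for clarity): kVTCompressionPropertyKey_DataRateLimits takes
    // (byte count, seconds) pairs, so this caps output at
    // dataLimitBytesPerSecondValue bytes per one-second window.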
    OSStatus status = VTSessionSetProperty(
        _compressionSession, kVTCompressionPropertyKey_DataRateLimits, dataRateLimits);
    if (bytesPerSecond) {
      CFRelease(bytesPerSecond);
    }
    if (oneSecond) {
      CFRelease(oneSecond);
    }
    if (dataRateLimits) {
      CFRelease(dataRateLimits);
    }
    if (status != noErr) {
      RTC_LOG(LS_ERROR) << "Failed to set data rate limit with code: " << status;
    }

    _encoderBitrateBps = bitrateBps;
    _encoderFrameRate = frameRate;
  }
}

- (void)frameWasEncoded:(OSStatus)status
                  flags:(VTEncodeInfoFlags)infoFlags
           sampleBuffer:(CMSampleBufferRef)sampleBuffer
      codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
                  width:(int32_t)width
                 height:(int32_t)height
           renderTimeMs:(int64_t)renderTimeMs
              timestamp:(uint32_t)timestamp
               rotation:(RTCVideoRotation)rotation {
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "H264 encode failed with code: " << status;
    return;
  }
  if (infoFlags & kVTEncodeInfo_FrameDropped) {
    RTC_LOG(LS_INFO) << "H264 encode dropped frame.";
    return;
  }

  BOOL isKeyframe = NO;
  CFArrayRef attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
  if (attachments != nullptr && CFArrayGetCount(attachments)) {
    CFDictionaryRef attachment =
        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
    isKeyframe = !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
  }

  if (isKeyframe) {
    RTC_LOG(LS_INFO) << "Generated keyframe";
  }

  __block std::unique_ptr<rtc::Buffer> buffer = std::make_unique<rtc::Buffer>();
  RTCRtpFragmentationHeader *header;
  {
    std::unique_ptr<webrtc::RTPFragmentationHeader> header_cpp;
    bool result =
        H264CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, buffer.get(), &header_cpp);
    header = [[RTCRtpFragmentationHeader alloc] initWithNativeFragmentationHeader:header_cpp.get()];
    if (!result) {
      return;
    }
  }

  RTCEncodedImage *frame = [[RTCEncodedImage alloc] init];
  // This assumes ownership of `buffer` and is responsible for freeing it when done.
  frame.buffer = [[NSData alloc] initWithBytesNoCopy:buffer->data()
                                              length:buffer->size()
                                         deallocator:^(void *bytes, NSUInteger size) {
                                           buffer.reset();
                                         }];
  frame.encodedWidth = width;
  frame.encodedHeight = height;
  frame.completeFrame = YES;
  frame.frameType = isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
  frame.captureTimeMs = renderTimeMs;
  frame.timeStamp = timestamp;
  frame.rotation = rotation;
  frame.contentType = (_mode == RTCVideoCodecModeScreensharing) ? RTCVideoContentTypeScreenshare :
      RTCVideoContentTypeUnspecified;
  frame.flags = webrtc::VideoSendTiming::kInvalid;

  int qp;
  _h264BitstreamParser.ParseBitstream(buffer->data(), buffer->size());
  _h264BitstreamParser.GetLastSliceQp(&qp);
  frame.qp = @(qp);

  BOOL res = _callback(frame, codecSpecificInfo, header);
  if (!res) {
    RTC_LOG(LS_ERROR) << "Encode callback failed";
    return;
  }
  _bitrateAdjuster->Update(frame.buffer.length);
}

- (nullable RTCVideoEncoderQpThresholds *)scalingSettings {
  return [[RTCVideoEncoderQpThresholds alloc] initWithThresholdsLow:kLowH264QpThreshold
                                                               high:kHighH264QpThreshold];
}

@end
@@ -1,23 +0,0 @@
/*
 * Copyright 2017 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#import <Foundation/Foundation.h>

#import "RTCMacros.h"
#import "RTCVideoCodecInfo.h"
#import "RTCVideoEncoder.h"

RTC_OBJC_EXPORT
API_AVAILABLE(ios(11.0))
@interface TGRTCVideoEncoderH265 : NSObject <RTCVideoEncoder>

- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo;

@end
@@ -1,614 +0,0 @@
/*
 * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 *
 */

#import "TGRTCVideoEncoderH265.h"

#import <VideoToolbox/VideoToolbox.h>
#include <vector>

#import "RTCCodecSpecificInfoH265.h"
#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h"
#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
#import "base/RTCI420Buffer.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "helpers.h"

#if defined(WEBRTC_IOS)
#import "helpers/UIDevice+RTCDevice.h"
#endif

#include "common_video/h264/profile_level_id.h"
#include "common_video/h265/h265_bitstream_parser.h"
#include "common_video/include/bitrate_adjuster.h"
#include "libyuv/convert_from.h"
#include "modules/include/module_common_types.h"
#include "modules/video_coding/include/video_error_codes.h"
#include "rtc_base/buffer.h"
#include "rtc_base/logging.h"
#include "rtc_base/time_utils.h"
#include "sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
#include "system_wrappers/include/clock.h"

@interface TGRTCVideoEncoderH265 ()

- (void)frameWasEncoded:(OSStatus)status
                  flags:(VTEncodeInfoFlags)infoFlags
           sampleBuffer:(CMSampleBufferRef)sampleBuffer
                  width:(int32_t)width
                 height:(int32_t)height
           renderTimeMs:(int64_t)renderTimeMs
              timestamp:(uint32_t)timestamp
               rotation:(RTCVideoRotation)rotation;

@end

namespace {  // anonymous namespace

// The ratio between kVTCompressionPropertyKey_DataRateLimits and
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
// than the average bit rate to avoid undershooting the target.
const float kLimitToAverageBitRateFactor = 1.5f;
// These thresholds deviate from the default h265 QP thresholds, as they
// have been found to work better on devices that support VideoToolbox
const int kLowh265QpThreshold = 28;
const int kHighh265QpThreshold = 39;

// Struct that we pass to the encoder per frame to encode. We receive it again
// in the encoder callback.
struct API_AVAILABLE(ios(11.0)) RTCFrameEncodeParams {
  RTCFrameEncodeParams(TGRTCVideoEncoderH265* e,
                       int32_t w,
                       int32_t h,
                       int64_t rtms,
                       uint32_t ts,
                       RTCVideoRotation r)
      : encoder(e),
        width(w),
        height(h),
        render_time_ms(rtms),
        timestamp(ts),
        rotation(r) {}

  TGRTCVideoEncoderH265* encoder;
  int32_t width;
  int32_t height;
  int64_t render_time_ms;
  uint32_t timestamp;
  RTCVideoRotation rotation;
};

// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
// encoder. This performs the copy and format conversion.
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
bool CopyVideoFrameToPixelBuffer(id<RTCI420Buffer> frameBuffer,
                                 CVPixelBufferRef pixelBuffer) {
  RTC_DCHECK(pixelBuffer);
  RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer),
                kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
  RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0),
                frameBuffer.height);
  RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0),
                frameBuffer.width);

  CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
    return false;
  }

  uint8_t* dstY = reinterpret_cast<uint8_t*>(
      CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
  int dstStrideY = (int)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
  uint8_t* dstUV = reinterpret_cast<uint8_t*>(
      CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
  int dstStrideUV = (int)CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
  // Convert I420 to NV12.
  int ret = libyuv::I420ToNV12(
      frameBuffer.dataY, frameBuffer.strideY, frameBuffer.dataU,
      frameBuffer.strideU, frameBuffer.dataV, frameBuffer.strideV, dstY,
      dstStrideY, dstUV, dstStrideUV, frameBuffer.width, frameBuffer.height);
  CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
  if (ret) {
    RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
    return false;
  }
  return true;
}

CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
  if (!pixel_buffer_pool) {
    RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
    return nullptr;
  }
  CVPixelBufferRef pixel_buffer;
  CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool,
                                                    &pixel_buffer);
  if (ret != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
    // We probably want to drop frames here, since failure probably means
    // that the pool is empty.
    return nullptr;
  }
  return pixel_buffer;
}

// This is the callback function that VideoToolbox calls when encode is
// complete. From inspection this happens on its own queue.
void compressionOutputCallback(void* encoder,
                               void* params,
                               OSStatus status,
                               VTEncodeInfoFlags infoFlags,
                               CMSampleBufferRef sampleBuffer)
    API_AVAILABLE(ios(11.0)) {
  RTC_CHECK(params);
  std::unique_ptr<RTCFrameEncodeParams> encodeParams(
      reinterpret_cast<RTCFrameEncodeParams*>(params));
  RTC_CHECK(encodeParams->encoder);
  [encodeParams->encoder frameWasEncoded:status
                                   flags:infoFlags
                            sampleBuffer:sampleBuffer
                                   width:encodeParams->width
                                  height:encodeParams->height
                            renderTimeMs:encodeParams->render_time_ms
                               timestamp:encodeParams->timestamp
                                rotation:encodeParams->rotation];
}
}  // namespace

@implementation TGRTCVideoEncoderH265 {
  RTCVideoCodecInfo* _codecInfo;
  std::unique_ptr<webrtc::BitrateAdjuster> _bitrateAdjuster;
  uint32_t _targetBitrateBps;
  uint32_t _encoderBitrateBps;
  CFStringRef _profile;
  RTCVideoEncoderCallback _callback;
  int32_t _width;
  int32_t _height;
  VTCompressionSessionRef _compressionSession;
  RTCVideoCodecMode _mode;
  int framesLeft;

  webrtc::H265BitstreamParser _h265BitstreamParser;
  std::vector<uint8_t> _nv12ScaleBuffer;
}

// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
||||
- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo*)codecInfo {
|
||||
if (self = [super init]) {
|
||||
_codecInfo = codecInfo;
|
||||
_bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95));
|
||||
RTC_CHECK([codecInfo.name isEqualToString:@"H265"]);
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
- (void)dealloc {
|
||||
[self destroyCompressionSession];
|
||||
}
|
||||
|
||||
- (NSInteger)startEncodeWithSettings:(RTCVideoEncoderSettings*)settings
|
||||
numberOfCores:(int)numberOfCores {
|
||||
RTC_DCHECK(settings);
|
||||
RTC_DCHECK([settings.name isEqualToString:@"H265"]);
|
||||
|
||||
_width = settings.width;
|
||||
_height = settings.height;
|
||||
_mode = settings.mode;
|
||||
|
||||
// We can only set average bitrate on the HW encoder.
|
||||
_targetBitrateBps = settings.startBitrate;
|
||||
_bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
|
||||
|
||||
// TODO(tkchin): Try setting payload size via
|
||||
// kVTCompressionPropertyKey_Maxh265SliceBytes.
|
||||
|
||||
return [self resetCompressionSession];
|
||||
}
|
||||
|
||||
- (NSInteger)encode:(RTCVideoFrame*)frame
|
||||
codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
|
||||
frameTypes:(NSArray<NSNumber*>*)frameTypes {
|
||||
RTC_DCHECK_EQ(frame.width, _width);
|
||||
RTC_DCHECK_EQ(frame.height, _height);
|
||||
if (!_callback || !_compressionSession) {
|
||||
return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
|
||||
}
|
||||
BOOL isKeyframeRequired = NO;
|
||||
|
||||
// Get a pixel buffer from the pool and copy frame data over.
|
||||
CVPixelBufferPoolRef pixelBufferPool =
|
||||
VTCompressionSessionGetPixelBufferPool(_compressionSession);
|
||||
|
||||
#if defined(WEBRTC_IOS)
|
||||
if (!pixelBufferPool) {
|
||||
// Kind of a hack. On backgrounding, the compression session seems to get
|
||||
// invalidated, which causes this pool call to fail when the application
|
||||
// is foregrounded and frames are being sent for encoding again.
|
||||
// Resetting the session when this happens fixes the issue.
|
||||
// In addition we request a keyframe so video can recover quickly.
|
||||
[self resetCompressionSession];
|
||||
pixelBufferPool =
|
||||
VTCompressionSessionGetPixelBufferPool(_compressionSession);
|
||||
isKeyframeRequired = YES;
|
||||
RTC_LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
|
||||
}
|
||||
#endif
|
||||
|
||||
CVPixelBufferRef pixelBuffer = nullptr;
|
||||
if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
|
||||
// Native frame buffer
|
||||
RTCCVPixelBuffer* rtcPixelBuffer = (RTCCVPixelBuffer*)frame.buffer;
|
||||
if (![rtcPixelBuffer requiresCropping]) {
|
||||
// This pixel buffer might have a higher resolution than what the
|
||||
// compression session is configured to. The compression session can
|
||||
// handle that and will output encoded frames in the configured
|
||||
// resolution regardless of the input pixel buffer resolution.
|
||||
pixelBuffer = rtcPixelBuffer.pixelBuffer;
|
||||
CVBufferRetain(pixelBuffer);
|
||||
} else {
|
||||
// Cropping required, we need to crop and scale to a new pixel buffer.
|
||||
pixelBuffer = CreatePixelBuffer(pixelBufferPool);
|
||||
if (!pixelBuffer) {
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
int dstWidth = (int)CVPixelBufferGetWidth(pixelBuffer);
|
||||
int dstHeight = (int)CVPixelBufferGetHeight(pixelBuffer);
|
||||
if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
|
||||
int size =
|
||||
[rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth
|
||||
height:dstHeight];
|
||||
_nv12ScaleBuffer.resize(size);
|
||||
} else {
|
||||
_nv12ScaleBuffer.clear();
|
||||
}
|
||||
_nv12ScaleBuffer.shrink_to_fit();
|
||||
if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer
|
||||
withTempBuffer:_nv12ScaleBuffer.data()]) {
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!pixelBuffer) {
|
||||
// We did not have a native frame buffer
|
||||
pixelBuffer = CreatePixelBuffer(pixelBufferPool);
|
||||
if (!pixelBuffer) {
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
RTC_DCHECK(pixelBuffer);
|
||||
if (!CopyVideoFrameToPixelBuffer([frame.buffer toI420], pixelBuffer)) {
|
||||
RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
|
||||
CVBufferRelease(pixelBuffer);
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we need a keyframe.
|
||||
if (!isKeyframeRequired && frameTypes) {
|
||||
for (NSNumber* frameType in frameTypes) {
|
||||
if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
|
||||
isKeyframeRequired = YES;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CMTime presentationTimeStamp =
|
||||
CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
|
||||
CFDictionaryRef frameProperties = nullptr;
|
||||
if (isKeyframeRequired) {
|
||||
CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
|
||||
CFTypeRef values[] = {kCFBooleanTrue};
|
||||
frameProperties = CreateCFTypeDictionary(keys, values, 1);
|
||||
}
|
||||
|
||||
std::unique_ptr<RTCFrameEncodeParams> encodeParams;
|
||||
encodeParams.reset(new RTCFrameEncodeParams(
|
||||
self, _width, _height, frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
|
||||
frame.timeStamp, frame.rotation));
|
||||
|
||||
// Update the bitrate if needed.
|
||||
[self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];
|
||||
|
||||
OSStatus status = VTCompressionSessionEncodeFrame(
|
||||
_compressionSession, pixelBuffer, presentationTimeStamp, kCMTimeInvalid,
|
||||
frameProperties, encodeParams.release(), nullptr);
|
||||
if (frameProperties) {
|
||||
CFRelease(frameProperties);
|
||||
}
|
||||
if (pixelBuffer) {
|
||||
CVBufferRelease(pixelBuffer);
|
||||
}
|
||||
if (status != noErr) {
|
||||
RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
- (void)setCallback:(RTCVideoEncoderCallback)callback {
|
||||
_callback = callback;
|
||||
}
|
||||
|
||||
- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
|
||||
_targetBitrateBps = 1000 * bitrateKbit;
|
||||
_bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
|
||||
[self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
#pragma mark - Private
|
||||
|
||||
- (NSInteger)releaseEncoder {
|
||||
// Need to destroy so that the session is invalidated and won't use the
|
||||
// callback anymore. Do not remove callback until the session is invalidated
|
||||
// since async encoder callbacks can occur until invalidation.
|
||||
[self destroyCompressionSession];
|
||||
_callback = nullptr;
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
- (int)resetCompressionSession {
|
||||
[self destroyCompressionSession];
|
||||
|
||||
// Set source image buffer attributes. These attributes will be present on
|
||||
// buffers retrieved from the encoder's pixel buffer pool.
|
||||
const size_t attributesSize = 3;
|
||||
CFTypeRef keys[attributesSize] = {
|
||||
#if defined(WEBRTC_IOS)
|
||||
kCVPixelBufferOpenGLESCompatibilityKey,
|
||||
#elif defined(WEBRTC_MAC)
|
||||
kCVPixelBufferOpenGLCompatibilityKey,
|
||||
#endif
|
||||
kCVPixelBufferIOSurfacePropertiesKey,
|
||||
kCVPixelBufferPixelFormatTypeKey
|
||||
};
|
||||
CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
|
||||
int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
|
||||
CFNumberRef pixelFormat =
|
||||
CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
|
||||
CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue,
|
||||
pixelFormat};
|
||||
CFDictionaryRef sourceAttributes =
|
||||
CreateCFTypeDictionary(keys, values, attributesSize);
|
||||
if (ioSurfaceValue) {
|
||||
CFRelease(ioSurfaceValue);
|
||||
ioSurfaceValue = nullptr;
|
||||
}
|
||||
if (pixelFormat) {
|
||||
CFRelease(pixelFormat);
|
||||
pixelFormat = nullptr;
|
||||
}
|
||||
CFMutableDictionaryRef encoder_specs = nullptr;
|
||||
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
|
||||
// Currently hw accl is supported above 360p on mac, below 360p
|
||||
// the compression session will be created with hw accl disabled.
|
||||
encoder_specs =
|
||||
CFDictionaryCreateMutable(nullptr, 1, &kCFTypeDictionaryKeyCallBacks,
|
||||
&kCFTypeDictionaryValueCallBacks);
|
||||
CFDictionarySetValue(
|
||||
encoder_specs,
|
||||
kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
|
||||
kCFBooleanTrue);
|
||||
#endif
|
||||
OSStatus status = VTCompressionSessionCreate(
|
||||
nullptr, // use default allocator
|
||||
_width, _height, kCMVideoCodecType_HEVC,
|
||||
encoder_specs, // use hardware accelerated encoder if available
|
||||
sourceAttributes,
|
||||
nullptr, // use default compressed data allocator
|
||||
compressionOutputCallback, nullptr, &_compressionSession);
|
||||
if (sourceAttributes) {
|
||||
CFRelease(sourceAttributes);
|
||||
sourceAttributes = nullptr;
|
||||
}
|
||||
if (encoder_specs) {
|
||||
CFRelease(encoder_specs);
|
||||
encoder_specs = nullptr;
|
||||
}
|
||||
if (status != noErr) {
|
||||
RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
|
||||
return WEBRTC_VIDEO_CODEC_ERROR;
|
||||
}
|
||||
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
|
||||
CFBooleanRef hwaccl_enabled = nullptr;
|
||||
status = VTSessionCopyProperty(
|
||||
_compressionSession,
|
||||
kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder, nullptr,
|
||||
&hwaccl_enabled);
|
||||
if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
|
||||
RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
|
||||
} else {
|
||||
RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
|
||||
}
|
||||
#endif
|
||||
[self configureCompressionSession];
|
||||
return WEBRTC_VIDEO_CODEC_OK;
|
||||
}
|
||||
|
||||
- (void)configureCompressionSession {
|
||||
RTC_DCHECK(_compressionSession);
|
||||
SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime,
|
||||
false);
|
||||
// SetVTSessionProperty(_compressionSession,
|
||||
// kVTCompressionPropertyKey_ProfileLevel, _profile);
|
||||
SetVTSessionProperty(_compressionSession,
|
||||
kVTCompressionPropertyKey_AllowFrameReordering, false);
|
||||
[self setEncoderBitrateBps:_targetBitrateBps];
|
||||
// TODO(tkchin): Look at entropy mode and colorspace matrices.
|
||||
// TODO(tkchin): Investigate to see if there's any way to make this work.
|
||||
// May need it to interop with Android. Currently this call just fails.
|
||||
// On inspecting encoder output on iOS8, this value is set to 6.
|
||||
// internal::SetVTSessionProperty(compression_session_,
|
||||
// kVTCompressionPropertyKey_MaxFrameDelayCount,
|
||||
// 1);
|
||||
|
||||
// Set a relatively large value for keyframe emission (7200 frames or 4
|
||||
// minutes).
|
||||
SetVTSessionProperty(_compressionSession,
|
||||
kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
|
||||
SetVTSessionProperty(_compressionSession,
|
||||
kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration,
|
||||
240);
|
||||
OSStatus status =
|
||||
VTCompressionSessionPrepareToEncodeFrames(_compressionSession);
|
||||
if (status != noErr) {
|
||||
RTC_LOG(LS_ERROR) << "Compression session failed to prepare encode frames.";
|
||||
}
|
||||
}
|
||||
|
||||
- (void)destroyCompressionSession {
|
||||
if (_compressionSession) {
|
||||
VTCompressionSessionInvalidate(_compressionSession);
|
||||
CFRelease(_compressionSession);
|
||||
_compressionSession = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
- (NSString*)implementationName {
|
||||
return @"VideoToolbox";
|
||||
}
|
||||
|
||||
- (void)setBitrateBps:(uint32_t)bitrateBps {
|
||||
if (_encoderBitrateBps != bitrateBps) {
|
||||
[self setEncoderBitrateBps:bitrateBps];
|
||||
}
|
||||
}
|
||||
|
||||
- (void)setEncoderBitrateBps:(uint32_t)bitrateBps {
|
||||
if (_compressionSession) {
|
||||
SetVTSessionProperty(_compressionSession,
|
||||
kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
|
||||
|
||||
// TODO(tkchin): Add a helper method to set array value.
|
||||
int64_t dataLimitBytesPerSecondValue =
|
||||
static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
|
||||
CFNumberRef bytesPerSecond =
|
||||
CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type,
|
||||
&dataLimitBytesPerSecondValue);
|
||||
int64_t oneSecondValue = 1;
|
||||
CFNumberRef oneSecond = CFNumberCreate(
|
||||
kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
|
||||
const void* nums[2] = {bytesPerSecond, oneSecond};
|
||||
CFArrayRef dataRateLimits =
|
||||
CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
|
||||
OSStatus status = VTSessionSetProperty(
|
||||
_compressionSession, kVTCompressionPropertyKey_DataRateLimits,
|
||||
dataRateLimits);
|
||||
if (bytesPerSecond) {
|
||||
CFRelease(bytesPerSecond);
|
||||
}
|
||||
if (oneSecond) {
|
||||
CFRelease(oneSecond);
|
||||
}
|
||||
if (dataRateLimits) {
|
||||
CFRelease(dataRateLimits);
|
||||
}
|
||||
if (status != noErr) {
|
||||
RTC_LOG(LS_ERROR) << "Failed to set data rate limit";
|
||||
}
|
||||
|
||||
_encoderBitrateBps = bitrateBps;
|
||||
}
|
||||
}

- (void)frameWasEncoded:(OSStatus)status
                  flags:(VTEncodeInfoFlags)infoFlags
           sampleBuffer:(CMSampleBufferRef)sampleBuffer
                  width:(int32_t)width
                 height:(int32_t)height
           renderTimeMs:(int64_t)renderTimeMs
              timestamp:(uint32_t)timestamp
               rotation:(RTCVideoRotation)rotation {
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "h265 encode failed.";
    return;
  }
  if (infoFlags & kVTEncodeInfo_FrameDropped) {
    RTC_LOG(LS_INFO) << "h265 encoder dropped a frame.";
    return;
  }

  BOOL isKeyframe = NO;
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
  if (attachments != nullptr && CFArrayGetCount(attachments)) {
    CFDictionaryRef attachment =
        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
    isKeyframe =
        !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
  }

  if (isKeyframe) {
    RTC_LOG(LS_INFO) << "Generated keyframe";
  }

  // Convert the sample buffer into a buffer suitable for RTP packetization.
  // TODO(tkchin): Allocate buffers through a pool.
  std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
  RTCRtpFragmentationHeader* header;
  {
    std::unique_ptr<webrtc::RTPFragmentationHeader> header_cpp;
    bool result = H265CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe,
                                                   buffer.get(), &header_cpp);
    header = [[RTCRtpFragmentationHeader alloc]
        initWithNativeFragmentationHeader:header_cpp.get()];
    if (!result) {
      RTC_LOG(LS_ERROR) << "Failed to convert sample buffer.";
      return;
    }
  }

  RTCEncodedImage* frame = [[RTCEncodedImage alloc] init];
  frame.buffer = [NSData dataWithBytesNoCopy:buffer->data()
                                      length:buffer->size()
                                freeWhenDone:NO];
  frame.encodedWidth = width;
  frame.encodedHeight = height;
  frame.completeFrame = YES;
  frame.frameType =
      isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
  frame.captureTimeMs = renderTimeMs;
  frame.timeStamp = timestamp;
  frame.rotation = rotation;
  frame.contentType = (_mode == RTCVideoCodecModeScreensharing)
                          ? RTCVideoContentTypeScreenshare
                          : RTCVideoContentTypeUnspecified;
  frame.flags = webrtc::VideoSendTiming::kInvalid;

  int qp;
  _h265BitstreamParser.ParseBitstream(buffer->data(), buffer->size());
  _h265BitstreamParser.GetLastSliceQp(&qp);
  frame.qp = @(qp);

  BOOL res = _callback(frame, [[RTCCodecSpecificInfoH265 alloc] init], header);
  if (!res) {
    RTC_LOG(LS_ERROR) << "Encode callback failed.";
    return;
  }
  _bitrateAdjuster->Update(frame.buffer.length);
}
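// VideoToolbox does not expose the slice QP of an encoded frame, which is
// presumably why the QP is re-parsed from the emitted Annex-B bitstream
// above; likewise, keyframes are detected by the absence of the
// kCMSampleAttachmentKey_NotSync attachment rather than by an explicit flag.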

- (RTCVideoEncoderQpThresholds*)scalingSettings {
  return [[RTCVideoEncoderQpThresholds alloc]
      initWithThresholdsLow:kLowh265QpThreshold
                       high:kHighh265QpThreshold];
}

@end

@@ -1,27 +0,0 @@
#ifndef TGCALLS_VIDEO_CAMERA_CAPTURER_H
#define TGCALLS_VIDEO_CAMERA_CAPTURER_H
#ifdef WEBRTC_IOS
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

#include <memory>
#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"
#include "Instance.h"

@interface VideoCameraCapturer : NSObject

+ (NSArray<AVCaptureDevice *> *)captureDevices;
+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device;

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source useFrontCamera:(bool)useFrontCamera isActiveUpdated:(void (^)(bool))isActiveUpdated;

- (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps;
- (void)stopCapture;
- (void)setIsEnabled:(bool)isEnabled;
- (void)setPreferredCaptureAspectRatio:(float)aspectRatio;
- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink;

@end
#endif // WEBRTC_IOS
#endif

@@ -1,748 +0,0 @@
#include "VideoCameraCapturer.h"
|
||||
|
||||
#import <AVFoundation/AVFoundation.h>
|
||||
|
||||
#include "rtc_base/logging.h"
|
||||
#import "base/RTCLogging.h"
|
||||
#import "base/RTCVideoFrameBuffer.h"
|
||||
#import "TGRTCCVPixelBuffer.h"
|
||||
#import "sdk/objc/native/src/objc_video_track_source.h"
|
||||
#import "sdk/objc/native/src/objc_frame_buffer.h"
|
||||
#import "api/video_track_source_proxy.h"
|
||||
|
||||
#import "helpers/UIDevice+RTCDevice.h"
|
||||
|
||||
#import "helpers/AVCaptureSession+DevicePosition.h"
|
||||
#import "helpers/RTCDispatcher+Private.h"
|
||||
#import "base/RTCVideoFrame.h"
|
||||
|
||||
#include "common_video/libyuv/include/webrtc_libyuv.h"
|
||||
#include "rtc_base/checks.h"
|
||||
#include "rtc_base/logging.h"
|
||||
#include "third_party/libyuv/include/libyuv.h"
|
||||
|
||||
static const int64_t kNanosecondsPerSecond = 1000000000;
|
||||
|
||||
static webrtc::ObjCVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
|
||||
webrtc::VideoTrackSourceProxy *proxy_source =
|
||||
static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
|
||||
return static_cast<webrtc::ObjCVideoTrackSource *>(proxy_source->internal());
|
||||
}
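// The native source handed in here is a VideoTrackSourceProxy that marshals
// calls onto WebRTC's signaling/worker threads; internal() unwraps it to the
// underlying ObjCVideoTrackSource so captured frames can be delivered to it
// directly from the capture queue.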

//TODO: investigate the green edge after scaling, likely related to padding
/*@interface RTCCVPixelBuffer (CustomCropping)

@end

@implementation RTCCVPixelBuffer (CustomCropping)

- (BOOL)custom_cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
               withTempBuffer:(nullable uint8_t*)tmpBuffer {
  const OSType srcPixelFormat = CVPixelBufferGetPixelFormatType(self.pixelBuffer);
  const OSType dstPixelFormat = CVPixelBufferGetPixelFormatType(outputPixelBuffer);

  switch (srcPixelFormat) {
    case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: {
      size_t dstWidth = CVPixelBufferGetWidth(outputPixelBuffer);
      size_t dstHeight = CVPixelBufferGetHeight(outputPixelBuffer);
      if (dstWidth > 0 && dstHeight > 0) {
        RTC_DCHECK(dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ||
                   dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
        if ([self requiresScalingToWidth:(int)dstWidth height:(int)dstHeight]) {
          RTC_DCHECK(tmpBuffer);
        }
        [self custom_cropAndScaleNV12To:outputPixelBuffer withTempBuffer:tmpBuffer];
      }
      break;
    }
    case kCVPixelFormatType_32BGRA:
    case kCVPixelFormatType_32ARGB: {
      RTC_DCHECK(srcPixelFormat == dstPixelFormat);
      [self custom_cropAndScaleARGBTo:outputPixelBuffer];
      break;
    }
    default: { RTC_NOTREACHED() << "Unsupported pixel format."; }
  }

  return YES;
}

- (void)custom_cropAndScaleNV12To:(CVPixelBufferRef)outputPixelBuffer withTempBuffer:(uint8_t*)tmpBuffer {
  // Prepare output pointers.
  CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
  }
  const int dstWidth = (int)CVPixelBufferGetWidth(outputPixelBuffer);
  const int dstHeight = (int)CVPixelBufferGetHeight(outputPixelBuffer);
  uint8_t* dstY =
      reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 0));
  const int dstYStride = (int)CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 0);
  uint8_t* dstUV =
      reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 1));
  const int dstUVStride = (int)CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 1);

  // Prepare source pointers.
  CVPixelBufferLockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  const uint8_t* srcY = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(self.pixelBuffer, 0));
  const int srcYStride = (int)CVPixelBufferGetBytesPerRowOfPlane(self.pixelBuffer, 0);
  const uint8_t* srcUV = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(self.pixelBuffer, 1));
  const int srcUVStride = (int)CVPixelBufferGetBytesPerRowOfPlane(self.pixelBuffer, 1);

  // Crop just by modifying pointers.
  srcY += srcYStride * self.cropY + self.cropX;
  srcUV += srcUVStride * (self.cropY / 2) + self.cropX;

  webrtc::NV12Scale(tmpBuffer,
                    srcY,
                    srcYStride,
                    srcUV,
                    srcUVStride,
                    self.cropWidth,
                    self.cropHeight,
                    dstY,
                    dstYStride,
                    dstUV,
                    dstUVStride,
                    dstWidth,
                    dstHeight);

  CVPixelBufferUnlockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
}

- (void)custom_cropAndScaleARGBTo:(CVPixelBufferRef)outputPixelBuffer {
  // Prepare output pointers.
  CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
  }
  const int dstWidth = (int)CVPixelBufferGetWidth(outputPixelBuffer);
  const int dstHeight = (int)CVPixelBufferGetHeight(outputPixelBuffer);

  uint8_t* dst = reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddress(outputPixelBuffer));
  const int dstStride = (int)CVPixelBufferGetBytesPerRow(outputPixelBuffer);

  // Prepare source pointers.
  CVPixelBufferLockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  const uint8_t* src = static_cast<uint8_t*>(CVPixelBufferGetBaseAddress(self.pixelBuffer));
  const int srcStride = (int)CVPixelBufferGetBytesPerRow(self.pixelBuffer);

  // Crop just by modifying pointers. Need to ensure that src pointer points to a byte corresponding
  // to the start of a new pixel (byte with B for BGRA) so that libyuv scales correctly.
  const int bytesPerPixel = 4;
  src += srcStride * self.cropY + (self.cropX * bytesPerPixel);

  // kCVPixelFormatType_32BGRA corresponds to libyuv::FOURCC_ARGB
  libyuv::ARGBScale(src,
                    srcStride,
                    self.cropWidth,
                    self.cropHeight,
                    dst,
                    dstStride,
                    dstWidth,
                    dstHeight,
                    libyuv::kFilterBox);

  CVPixelBufferUnlockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
}

@end*/

@interface VideoCameraCapturer () <AVCaptureVideoDataOutputSampleBufferDelegate> {
  rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;

  // Live on main thread.
  bool _isFrontCamera;

  dispatch_queue_t _frameQueue;

  // Live on RTCDispatcherTypeCaptureSession.
  AVCaptureDevice *_currentDevice;
  BOOL _hasRetriedOnFatalError;
  BOOL _isRunning;

  // Live on RTCDispatcherTypeCaptureSession and main thread.
  std::atomic<bool> _willBeRunning;

  AVCaptureVideoDataOutput *_videoDataOutput;
  AVCaptureSession *_captureSession;
  FourCharCode _preferredOutputPixelFormat;
  FourCharCode _outputPixelFormat;
  RTCVideoRotation _rotation;
  UIDeviceOrientation _orientation;
  bool _rotationLock;

  // Live on main thread.
  void (^_isActiveUpdated)(bool);
  bool _isActiveValue;
  bool _inForegroundValue;

  // Live on frameQueue and main thread.
  std::atomic<bool> _isPaused;

  // Live on frameQueue.
  float _aspectRatio;
  std::vector<uint8_t> _croppingBuffer;
  std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _uncroppedSink;

  // Live on frameQueue and RTCDispatcherTypeCaptureSession.
  std::atomic<int> _warmupFrameCount;
}

@end
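// Threading model, as documented by the "Live on ..." annotations above: the
// capturer's state is partitioned between the main thread (activity
// callbacks), RTCDispatcher's capture-session queue (AVCaptureSession
// mutation), and the frame queue (per-frame cropping state); the few fields
// shared across queues are std::atomic.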

@implementation VideoCameraCapturer

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source useFrontCamera:(bool)useFrontCamera isActiveUpdated:(void (^)(bool))isActiveUpdated {
  self = [super init];
  if (self != nil) {
    _source = source;
    _isFrontCamera = useFrontCamera;
    _isActiveValue = true;
    _inForegroundValue = true;
    _isPaused = false;
    _isActiveUpdated = [isActiveUpdated copy];

    _warmupFrameCount = 100;

#if TARGET_OS_IPHONE
    _rotationLock = true;
#endif

    if (![self setupCaptureSession:[[AVCaptureSession alloc] init]]) {
      return nil;
    }

    NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
    _orientation = UIDeviceOrientationPortrait;
    _rotation = RTCVideoRotation_90;
    [center addObserver:self
               selector:@selector(deviceOrientationDidChange:)
                   name:UIDeviceOrientationDidChangeNotification
                 object:nil];
    [center addObserver:self
               selector:@selector(handleCaptureSessionInterruption:)
                   name:AVCaptureSessionWasInterruptedNotification
                 object:_captureSession];
    [center addObserver:self
               selector:@selector(handleCaptureSessionInterruptionEnded:)
                   name:AVCaptureSessionInterruptionEndedNotification
                 object:_captureSession];
    [center addObserver:self
               selector:@selector(handleApplicationDidBecomeActive:)
                   name:UIApplicationDidBecomeActiveNotification
                 object:[UIApplication sharedApplication]];
    [center addObserver:self
               selector:@selector(handleApplicationWillEnterForeground:)
                   name:UIApplicationWillEnterForegroundNotification
                 object:[UIApplication sharedApplication]];
    [center addObserver:self
               selector:@selector(handleCaptureSessionRuntimeError:)
                   name:AVCaptureSessionRuntimeErrorNotification
                 object:_captureSession];
    [center addObserver:self
               selector:@selector(handleCaptureSessionDidStartRunning:)
                   name:AVCaptureSessionDidStartRunningNotification
                 object:_captureSession];
    [center addObserver:self
               selector:@selector(handleCaptureSessionDidStopRunning:)
                   name:AVCaptureSessionDidStopRunningNotification
                 object:_captureSession];
  }
  return self;
}

- (void)dealloc {
  NSAssert(!_willBeRunning, @"Session was still running in RTCCameraVideoCapturer dealloc. Forgot to call stopCapture?");
  [[NSNotificationCenter defaultCenter] removeObserver:self];
}

+ (NSArray<AVCaptureDevice *> *)captureDevices {
  if (@available(iOS 10.0, *)) {
    AVCaptureDeviceDiscoverySession *session = [AVCaptureDeviceDiscoverySession discoverySessionWithDeviceTypes:@[AVCaptureDeviceTypeBuiltInWideAngleCamera] mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionUnspecified];
    return session.devices;
  } else {
    NSMutableArray<AVCaptureDevice *> *result = [[NSMutableArray alloc] init];
    for (AVCaptureDevice *device in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
      if (device.position == AVCaptureDevicePositionFront || device.position == AVCaptureDevicePositionBack) {
        [result addObject:device];
      }
    }
    return result;
  }
}

+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device {
  // Support opening the device in any format. We make sure it's converted to a format we
  // can handle, if needed, in the method `-setupVideoDataOutput`.
  return device.formats;
}

- (FourCharCode)preferredOutputPixelFormat {
  return _preferredOutputPixelFormat;
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps {
  [self startCaptureWithDevice:device format:format fps:fps completionHandler:nil];
}

- (void)stopCapture {
  _isActiveUpdated = nil;
  [self stopCaptureWithCompletionHandler:nil];
}

- (void)setIsEnabled:(bool)isEnabled {
  _isPaused = !isEnabled;
  [self updateIsActiveValue];
}

- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink {
  dispatch_async(self.frameQueue, ^{
    _uncroppedSink = sink;
  });
}

- (void)setPreferredCaptureAspectRatio:(float)aspectRatio {
  dispatch_async(self.frameQueue, ^{
    _aspectRatio = aspectRatio;
  });
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps
             completionHandler:(nullable void (^)(NSError *))completionHandler {
  _willBeRunning = true;
  [RTCDispatcher
      dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                    block:^{
                      RTCLogInfo("startCaptureWithDevice %@ @ %ld fps", format, (long)fps);

                      dispatch_async(dispatch_get_main_queue(), ^{
                        [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
                      });

                      _currentDevice = device;

                      NSError *error = nil;
                      if (![_currentDevice lockForConfiguration:&error]) {
                        RTCLogError(@"Failed to lock device %@. Error: %@",
                                    _currentDevice,
                                    error.userInfo);
                        if (completionHandler) {
                          completionHandler(error);
                        }
                        _willBeRunning = false;
                        return;
                      }
                      [self reconfigureCaptureSessionInput];
                      [self updateOrientation];
                      [self updateDeviceCaptureFormat:format fps:fps];
                      [self updateVideoDataOutputPixelFormat:format];
                      [_captureSession startRunning];
                      [_currentDevice unlockForConfiguration];
                      _isRunning = YES;
                      if (completionHandler) {
                        completionHandler(nil);
                      }
                    }];
}

- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler {
  _willBeRunning = false;
  [RTCDispatcher
      dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                    block:^{
                      RTCLogInfo("Stop");
                      _currentDevice = nil;
                      for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
                        [_captureSession removeInput:oldInput];
                      }
                      [_captureSession stopRunning];

                      dispatch_async(dispatch_get_main_queue(), ^{
                        [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
                      });
                      _isRunning = NO;
                      if (completionHandler) {
                        completionHandler();
                      }
                    }];
}

#pragma mark iOS notifications

#if TARGET_OS_IPHONE
- (void)deviceOrientationDidChange:(NSNotification *)notification {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 [self updateOrientation];
                               }];
}
#endif

#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
  NSParameterAssert(captureOutput == _videoDataOutput);

  int minWarmupFrameCount = 12;
  _warmupFrameCount++;
  if (_warmupFrameCount < minWarmupFrameCount) {
    return;
  }

  if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
      !CMSampleBufferDataIsReady(sampleBuffer)) {
    return;
  }

  CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
  if (pixelBuffer == nil) {
    return;
  }

  // Default to portrait orientation on iPhone.
  BOOL usingFrontCamera = NO;
  // Check the image's EXIF for the camera the image came from as the image could have been
  // delayed as we set alwaysDiscardsLateVideoFrames to NO.
  AVCaptureDevicePosition cameraPosition =
      [AVCaptureSession devicePositionForSampleBuffer:sampleBuffer];
  if (cameraPosition != AVCaptureDevicePositionUnspecified) {
    usingFrontCamera = AVCaptureDevicePositionFront == cameraPosition;
  } else {
    AVCaptureDeviceInput *deviceInput =
        (AVCaptureDeviceInput *)((AVCaptureInputPort *)connection.inputPorts.firstObject).input;
    usingFrontCamera = AVCaptureDevicePositionFront == deviceInput.device.position;
  }
  if (!_rotationLock) {
    switch (_orientation) {
      case UIDeviceOrientationPortrait:
        _rotation = RTCVideoRotation_90;
        break;
      case UIDeviceOrientationPortraitUpsideDown:
        _rotation = RTCVideoRotation_270;
        break;
      case UIDeviceOrientationLandscapeLeft:
        _rotation = usingFrontCamera ? RTCVideoRotation_180 : RTCVideoRotation_0;
        break;
      case UIDeviceOrientationLandscapeRight:
        _rotation = usingFrontCamera ? RTCVideoRotation_0 : RTCVideoRotation_180;
        break;
      case UIDeviceOrientationFaceUp:
      case UIDeviceOrientationFaceDown:
      case UIDeviceOrientationUnknown:
        // Ignore.
        break;
    }
  }

  TGRTCCVPixelBuffer *rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer];
  rtcPixelBuffer.shouldBeMirrored = usingFrontCamera;

  TGRTCCVPixelBuffer *uncroppedRtcPixelBuffer = rtcPixelBuffer;

  if (_aspectRatio > FLT_EPSILON) {
    float aspect = 1.0f / _aspectRatio;

    int width = rtcPixelBuffer.width;
    int height = rtcPixelBuffer.height;

    float aspectWidth = width;
    float aspectHeight = ((float)(width)) / aspect;
    int cropX = (int)((width - aspectWidth) / 2.0f);
    int cropY = (int)((height - aspectHeight) / 2.0f);

    width = (int)aspectWidth;
    width &= ~1;
    height = (int)aspectHeight;
    height &= ~1;

    height = MIN(rtcPixelBuffer.height, height + 16);

    if (width < rtcPixelBuffer.width || height < rtcPixelBuffer.height) {
      rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer adaptedWidth:width adaptedHeight:height cropWidth:width cropHeight:height cropX:cropX cropY:cropY];
      rtcPixelBuffer.shouldBeMirrored = usingFrontCamera;

      CVPixelBufferRef outputPixelBufferRef = NULL;
      OSType pixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
      CVPixelBufferCreate(NULL, width, height, pixelFormat, NULL, &outputPixelBufferRef);
      if (outputPixelBufferRef) {
        int bufferSize = [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:width height:height];
        if (_croppingBuffer.size() < bufferSize) {
          _croppingBuffer.resize(bufferSize);
        }
        if ([rtcPixelBuffer cropAndScaleTo:outputPixelBufferRef withTempBuffer:_croppingBuffer.data()]) {
          rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:outputPixelBufferRef];
          rtcPixelBuffer.shouldBeMirrored = usingFrontCamera;
        }
        CVPixelBufferRelease(outputPixelBufferRef);
      }
    }
  }
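  // Worked example of the crop above (numbers are illustrative only): for a
  // 1280x720 capture with _aspectRatio = 0.5, aspectHeight becomes
  // 1280 * 0.5 = 640, cropY = (720 - 640) / 2 = 40, and height is then
  // clamped to MIN(720, 640 + 16) = 656, so the frame is center-cropped to
  // 1280x656 before delivery to the track source, while the uncropped buffer
  // is kept separately for _uncroppedSink.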

  int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
      kNanosecondsPerSecond;
  RTCVideoFrame *videoFrame = [[RTCVideoFrame alloc] initWithBuffer:rtcPixelBuffer rotation:_rotation timeStampNs:timeStampNs];

  if (!_isPaused) {
    getObjCVideoSource(_source)->OnCapturedFrame(videoFrame);

    if (_uncroppedSink && uncroppedRtcPixelBuffer) {
      int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
          kNanosecondsPerSecond;
      RTCVideoFrame *frame = [[RTCVideoFrame alloc] initWithBuffer:uncroppedRtcPixelBuffer rotation:_rotation timeStampNs:timeStampNs];

      const int64_t timestamp_us = frame.timeStampNs / rtc::kNumNanosecsPerMicrosec;

      rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer;
      buffer = new rtc::RefCountedObject<webrtc::ObjCFrameBuffer>(frame.buffer);

      webrtc::VideoRotation rotation = static_cast<webrtc::VideoRotation>(frame.rotation);

      _uncroppedSink->OnFrame(webrtc::VideoFrame::Builder()
                                  .set_video_frame_buffer(buffer)
                                  .set_rotation(rotation)
                                  .set_timestamp_us(timestamp_us)
                                  .build());
    }
  }
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
         fromConnection:(AVCaptureConnection *)connection {
  NSString *droppedReason =
      (__bridge NSString *)CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_DroppedFrameReason, nil);
  RTCLogError(@"Dropped sample buffer. Reason: %@", droppedReason);
}

#pragma mark - AVCaptureSession notifications

- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
  NSString *reasonString = nil;
  NSNumber *reason = notification.userInfo[AVCaptureSessionInterruptionReasonKey];
  if (reason) {
    switch (reason.intValue) {
      case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableInBackground:
        reasonString = @"VideoDeviceNotAvailableInBackground";
        break;
      case AVCaptureSessionInterruptionReasonAudioDeviceInUseByAnotherClient:
        reasonString = @"AudioDeviceInUseByAnotherClient";
        break;
      case AVCaptureSessionInterruptionReasonVideoDeviceInUseByAnotherClient:
        reasonString = @"VideoDeviceInUseByAnotherClient";
        break;
      case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableWithMultipleForegroundApps:
        reasonString = @"VideoDeviceNotAvailableWithMultipleForegroundApps";
        break;
    }
  }
  RTCLog(@"Capture session interrupted: %@", reasonString);
}

- (void)handleCaptureSessionInterruptionEnded:(NSNotification *)notification {
  RTCLog(@"Capture session interruption ended.");
}

- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
  NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
  RTCLogError(@"Capture session runtime error: %@", error);

  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 if (error.code == AVErrorMediaServicesWereReset) {
                                   [self handleNonFatalError];
                                 } else {
                                   [self handleFatalError];
                                 }
                               }];
}

- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
  RTCLog(@"Capture session started.");

  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 // If we successfully restarted after an unknown error,
                                 // allow future retries on fatal errors.
                                 _hasRetriedOnFatalError = NO;
                               }];

  _inForegroundValue = true;
  [self updateIsActiveValue];
}

- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
  RTCLog(@"Capture session stopped.");
  _inForegroundValue = false;
  [self updateIsActiveValue];
}

- (void)updateIsActiveValue {
  bool isActive = _inForegroundValue && !_isPaused;
  if (isActive != _isActiveValue) {
    _isActiveValue = isActive;
    if (_isActiveUpdated) {
      _isActiveUpdated(_isActiveValue);
    }
  }
}

- (void)handleFatalError {
  [RTCDispatcher
      dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                    block:^{
                      if (!_hasRetriedOnFatalError) {
                        RTCLogWarning(@"Attempting to recover from fatal capture error.");
                        [self handleNonFatalError];
                        _hasRetriedOnFatalError = YES;
                      } else {
                        RTCLogError(@"Previous fatal error recovery failed.");
                      }
                    }];
}

- (void)handleNonFatalError {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 RTCLog(@"Restarting capture session after error.");
                                 if (_isRunning) {
                                   [_captureSession startRunning];
                                 }
                               }];
}

#pragma mark - UIApplication notifications

- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 if (_isRunning && !_captureSession.isRunning) {
                                   RTCLog(@"Restarting capture session on active.");
                                   _warmupFrameCount = 0;
                                   [_captureSession startRunning];
                                 }
                               }];
}

- (void)handleApplicationWillEnterForeground:(NSNotification *)notification {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 RTCLog(@"Resetting warmup due to backgrounding.");
                                 _warmupFrameCount = 0;
                               }];
}

#pragma mark - Private

- (dispatch_queue_t)frameQueue {
  if (!_frameQueue) {
    _frameQueue =
        dispatch_queue_create("org.webrtc.cameravideocapturer.video", DISPATCH_QUEUE_SERIAL);
    dispatch_set_target_queue(_frameQueue,
                              dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
  }
  return _frameQueue;
}

- (BOOL)setupCaptureSession:(AVCaptureSession *)captureSession {
  NSAssert(_captureSession == nil, @"Setup capture session called twice.");
  _captureSession = captureSession;
  _captureSession.sessionPreset = AVCaptureSessionPresetInputPriority;
  _captureSession.usesApplicationAudioSession = true;
  [self setupVideoDataOutput];
  // Add the output.
  if (![_captureSession canAddOutput:_videoDataOutput]) {
    RTCLogError(@"Video data output unsupported.");
    return NO;
  }
  [_captureSession addOutput:_videoDataOutput];

  return YES;
}

- (void)setupVideoDataOutput {
  NSAssert(_videoDataOutput == nil, @"Setup video data output called twice.");
  AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];

  // `videoDataOutput.availableVideoCVPixelFormatTypes` returns the pixel formats supported by the
  // device with the most efficient output format first. Find the first format that we support.
  NSSet<NSNumber *> *supportedPixelFormats = [TGRTCCVPixelBuffer supportedPixelFormats];
  NSMutableOrderedSet *availablePixelFormats =
      [NSMutableOrderedSet orderedSetWithArray:videoDataOutput.availableVideoCVPixelFormatTypes];
  [availablePixelFormats intersectSet:supportedPixelFormats];
  NSNumber *pixelFormat = availablePixelFormats.firstObject;
  NSAssert(pixelFormat, @"Output device has no supported formats.");

  _preferredOutputPixelFormat = [pixelFormat unsignedIntValue];
  _outputPixelFormat = _preferredOutputPixelFormat;
  videoDataOutput.videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : pixelFormat};
  videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
  [videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
  _videoDataOutput = videoDataOutput;
}
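// On most iPhone hardware the format that survives the intersection above is
// NV12 (kCVPixelFormatType_420YpCbCr8BiPlanarFullRange, FourCC '420f'), so
// that is what the pipeline typically negotiates; this is an observation
// about common devices, not something the code guarantees.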

- (void)updateVideoDataOutputPixelFormat:(AVCaptureDeviceFormat *)format {
  FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(format.formatDescription);
  if (![[TGRTCCVPixelBuffer supportedPixelFormats] containsObject:@(mediaSubType)]) {
    mediaSubType = _preferredOutputPixelFormat;
  }

  if (mediaSubType != _outputPixelFormat) {
    _outputPixelFormat = mediaSubType;
    _videoDataOutput.videoSettings =
        @{ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(mediaSubType) };
  }
}

#pragma mark - Private, called inside capture queue

- (void)updateDeviceCaptureFormat:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps {
  NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
           @"updateDeviceCaptureFormat must be called on the capture queue.");
  @try {
    _currentDevice.activeFormat = format;
    _currentDevice.activeVideoMinFrameDuration = CMTimeMake(1, (int32_t)fps);
  } @catch (NSException *exception) {
    RTCLogError(@"Failed to set active format!\n User info:%@", exception.userInfo);
    return;
  }
}

- (void)reconfigureCaptureSessionInput {
  NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
           @"reconfigureCaptureSessionInput must be called on the capture queue.");
  NSError *error = nil;
  AVCaptureDeviceInput *input =
      [AVCaptureDeviceInput deviceInputWithDevice:_currentDevice error:&error];
  if (!input) {
    RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
    return;
  }
  [_captureSession beginConfiguration];
  for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
    [_captureSession removeInput:oldInput];
  }
  if ([_captureSession canAddInput:input]) {
    [_captureSession addInput:input];
  } else {
    RTCLogError(@"Cannot add camera as an input to the session.");
  }
  [_captureSession commitConfiguration];
}

- (void)updateOrientation {
  NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
           @"updateOrientation must be called on the capture queue.");
  _orientation = [UIDevice currentDevice].orientation;
}

@end

@@ -1,27 +0,0 @@
#ifndef TGCALLS_VIDEO_CAMERA_CAPTURER_MAC_H
#define TGCALLS_VIDEO_CAMERA_CAPTURER_MAC_H
#ifndef WEBRTC_IOS
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

#include <memory>
#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"

@interface VideoCameraCapturer : NSObject

+ (NSArray<AVCaptureDevice *> *)captureDevices;
+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device;

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source isActiveUpdated:(void (^)(bool))isActiveUpdated;

- (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps;
- (void)stopCapture;
- (void)setIsEnabled:(bool)isEnabled;
- (void)setPreferredCaptureAspectRatio:(float)aspectRatio;
- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink;
- (BOOL)deviceIsCaptureCompitable:(AVCaptureDevice *)device;

@end
#endif // !WEBRTC_IOS
#endif

@@ -1,665 +0,0 @@
#include "VideoCameraCapturerMac.h"

#import <AVFoundation/AVFoundation.h>
#import "TGRTCCVPixelBuffer.h"
#include "rtc_base/logging.h"
#import "base/RTCLogging.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "sdk/objc/native/src/objc_video_track_source.h"
#import "sdk/objc/native/src/objc_frame_buffer.h"
#import "api/video_track_source_proxy.h"

#import "helpers/AVCaptureSession+DevicePosition.h"
#import "helpers/RTCDispatcher+Private.h"
#import "base/RTCVideoFrame.h"

#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"
#include "third_party/libyuv/include/libyuv.h"

static const int64_t kNanosecondsPerSecond = 1000000000;

static webrtc::ObjCVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
  webrtc::VideoTrackSourceProxy *proxy_source =
      static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
  return static_cast<webrtc::ObjCVideoTrackSource *>(proxy_source->internal());
}

@interface RTCCVPixelBuffer (CustomCropping)

@end

@implementation RTCCVPixelBuffer (CustomCropping)

- (BOOL)custom_cropAndScaleTo:(CVPixelBufferRef)outputPixelBuffer
               withTempBuffer:(nullable uint8_t*)tmpBuffer {
  const OSType srcPixelFormat = CVPixelBufferGetPixelFormatType(self.pixelBuffer);
  const OSType dstPixelFormat = CVPixelBufferGetPixelFormatType(outputPixelBuffer);

  switch (srcPixelFormat) {
    case kCVPixelFormatType_420YpCbCr8BiPlanarFullRange:
    case kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange: {
      size_t dstWidth = CVPixelBufferGetWidth(outputPixelBuffer);
      size_t dstHeight = CVPixelBufferGetHeight(outputPixelBuffer);
      if (dstWidth > 0 && dstHeight > 0) {
        RTC_DCHECK(dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ||
                   dstPixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange);
        if ([self requiresScalingToWidth:(int)dstWidth height:(int)dstHeight]) {
          RTC_DCHECK(tmpBuffer);
        }
        [self custom_cropAndScaleNV12To:outputPixelBuffer withTempBuffer:tmpBuffer];
      }
      break;
    }
    case kCVPixelFormatType_32BGRA:
    case kCVPixelFormatType_32ARGB: {
      RTC_DCHECK(srcPixelFormat == dstPixelFormat);
      [self custom_cropAndScaleARGBTo:outputPixelBuffer];
      break;
    }
    default: { RTC_NOTREACHED() << "Unsupported pixel format."; }
  }

  return YES;
}

- (void)custom_cropAndScaleNV12To:(CVPixelBufferRef)outputPixelBuffer withTempBuffer:(uint8_t*)tmpBuffer {
  // Prepare output pointers.
  CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
  }
  const int dstWidth = (int)CVPixelBufferGetWidth(outputPixelBuffer);
  const int dstHeight = (int)CVPixelBufferGetHeight(outputPixelBuffer);
  uint8_t* dstY =
      reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 0));
  const int dstYStride = (int)CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 0);
  uint8_t* dstUV =
      reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(outputPixelBuffer, 1));
  const int dstUVStride = (int)CVPixelBufferGetBytesPerRowOfPlane(outputPixelBuffer, 1);

  // Prepare source pointers.
  CVPixelBufferLockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  const uint8_t* srcY = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(self.pixelBuffer, 0));
  const int srcYStride = (int)CVPixelBufferGetBytesPerRowOfPlane(self.pixelBuffer, 0);
  const uint8_t* srcUV = static_cast<uint8_t*>(CVPixelBufferGetBaseAddressOfPlane(self.pixelBuffer, 1));
  const int srcUVStride = (int)CVPixelBufferGetBytesPerRowOfPlane(self.pixelBuffer, 1);

  // Crop just by modifying pointers.
  srcY += srcYStride * self.cropY + self.cropX;
  srcUV += srcUVStride * (self.cropY / 2) + self.cropX;

  webrtc::NV12Scale(tmpBuffer,
                    srcY,
                    srcYStride,
                    srcUV,
                    srcUVStride,
                    self.cropWidth,
                    self.cropHeight,
                    dstY,
                    dstYStride,
                    dstUV,
                    dstUVStride,
                    dstWidth,
                    dstHeight);

  CVPixelBufferUnlockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
}
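// Note on the pointer-only crop above: the UV plane of NV12 has half the
// rows of the Y plane, hence the cropY / 2 row offset; cropX can be added
// unscaled because U and V are interleaved (two bytes covering two pixels of
// width), so the horizontal byte offset equals cropX as long as cropX is
// even.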

- (void)custom_cropAndScaleARGBTo:(CVPixelBufferRef)outputPixelBuffer {
  // Prepare output pointers.
  CVReturn cvRet = CVPixelBufferLockBaseAddress(outputPixelBuffer, 0);
  if (cvRet != kCVReturnSuccess) {
    RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
  }
  const int dstWidth = (int)CVPixelBufferGetWidth(outputPixelBuffer);
  const int dstHeight = (int)CVPixelBufferGetHeight(outputPixelBuffer);

  uint8_t* dst = reinterpret_cast<uint8_t*>(CVPixelBufferGetBaseAddress(outputPixelBuffer));
  const int dstStride = (int)CVPixelBufferGetBytesPerRow(outputPixelBuffer);

  // Prepare source pointers.
  CVPixelBufferLockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  const uint8_t* src = static_cast<uint8_t*>(CVPixelBufferGetBaseAddress(self.pixelBuffer));
  const int srcStride = (int)CVPixelBufferGetBytesPerRow(self.pixelBuffer);

  // Crop just by modifying pointers. Need to ensure that src pointer points to a byte corresponding
  // to the start of a new pixel (byte with B for BGRA) so that libyuv scales correctly.
  const int bytesPerPixel = 4;
  src += srcStride * self.cropY + (self.cropX * bytesPerPixel);

  // kCVPixelFormatType_32BGRA corresponds to libyuv::FOURCC_ARGB
  libyuv::ARGBScale(src,
                    srcStride,
                    self.cropWidth,
                    self.cropHeight,
                    dst,
                    dstStride,
                    dstWidth,
                    dstHeight,
                    libyuv::kFilterBox);

  CVPixelBufferUnlockBaseAddress(self.pixelBuffer, kCVPixelBufferLock_ReadOnly);
  CVPixelBufferUnlockBaseAddress(outputPixelBuffer, 0);
}

@end

@interface VideoCameraCapturer () <AVCaptureVideoDataOutputSampleBufferDelegate> {
  rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;

  dispatch_queue_t _frameQueue;
  AVCaptureDevice *_currentDevice;

  // Live on RTCDispatcherTypeCaptureSession.
  BOOL _hasRetriedOnFatalError;
  BOOL _isRunning;

  // Live on RTCDispatcherTypeCaptureSession and main thread.
  std::atomic<bool> _willBeRunning;

  AVCaptureVideoDataOutput *_videoDataOutput;
  AVCaptureSession *_captureSession;

  AVCaptureConnection *_videoConnection;
  AVCaptureDevice *_videoDevice;
  AVCaptureDeviceInput *_videoInputDevice;
  FourCharCode _preferredOutputPixelFormat;
  FourCharCode _outputPixelFormat;
  RTCVideoRotation _rotation;

  // Live on main thread.
  void (^_isActiveUpdated)(bool);
  bool _isActiveValue;
  bool _inForegroundValue;

  // Live on frameQueue and main thread.
  std::atomic<bool> _isPaused;
  std::atomic<int> _skippedFrame;

  // Live on frameQueue.
  float _aspectRatio;
  std::vector<uint8_t> _croppingBuffer;
  std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _uncroppedSink;

  int _warmupFrameCount;
}

@end

@implementation VideoCameraCapturer

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source isActiveUpdated:(void (^)(bool))isActiveUpdated {
  self = [super init];
  if (self != nil) {
    _source = source;
    _isActiveUpdated = [isActiveUpdated copy];
    _isActiveValue = true;
    _inForegroundValue = true;
    _isPaused = false;
    _skippedFrame = 0;
    _rotation = RTCVideoRotation_0;

    _warmupFrameCount = 100;

    if (![self setupCaptureSession:[[AVCaptureSession alloc] init]]) {
      return nil;
    }
  }
  return self;
}

- (void)dealloc {
  NSAssert(!_willBeRunning, @"Session was still running in RTCCameraVideoCapturer dealloc. Forgot to call stopCapture?");
  [[NSNotificationCenter defaultCenter] removeObserver:self];
}

+ (NSArray<AVCaptureDevice *> *)captureDevices {
  AVCaptureDevice * defaultDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
  NSMutableArray<AVCaptureDevice *> * devices = [[AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo] mutableCopy];

  [devices insertObject:defaultDevice atIndex:0];

  return devices;
}

- (BOOL)deviceIsCaptureCompitable:(AVCaptureDevice *)device {
  if (![device isConnected] || [device isSuspended]) {
    return NO;
  }
  AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:nil];

  return [_captureSession canAddInput:input];
}

+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device {
  // Support opening the device in any format. We make sure it's converted to a format we
  // can handle, if needed, in the method `-setupVideoDataOutput`.
  return device.formats;
}

- (FourCharCode)preferredOutputPixelFormat {
  return _preferredOutputPixelFormat;
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps {
  [self startCaptureWithDevice:device format:format fps:fps completionHandler:nil];
}

- (void)stopCapture {
  _isActiveUpdated = nil;
  [self stopCaptureWithCompletionHandler:nil];
}

- (void)setIsEnabled:(bool)isEnabled {
  BOOL updated = _isPaused != !isEnabled;
  _isPaused = !isEnabled;
  _skippedFrame = 0;
  if (updated) {
    if (_isPaused) {
      [RTCDispatcher
          dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                        block:^{
                          [self->_captureSession stopRunning];
                          self->_isRunning = NO;
                        }];
    } else {
      [RTCDispatcher
          dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                        block:^{
                          [self->_captureSession startRunning];
                          self->_isRunning = YES;
                        }];
    }
  }

  [self updateIsActiveValue];
}

- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink {
  dispatch_async(self.frameQueue, ^{
    _uncroppedSink = sink;
  });
}

- (void)setPreferredCaptureAspectRatio:(float)aspectRatio {
  dispatch_async(self.frameQueue, ^{
    _aspectRatio = MAX(0.7, aspectRatio);
  });
}

- (void)updateIsActiveValue {
  bool isActive = _inForegroundValue && !_isPaused;
  if (isActive != _isActiveValue) {
    _isActiveValue = isActive;
    if (_isActiveUpdated) {
      _isActiveUpdated(_isActiveValue);
    }
  }
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps
             completionHandler:(nullable void (^)(NSError *))completionHandler {
  _willBeRunning = true;
  [RTCDispatcher
      dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                    block:^{
                      RTCLogInfo("startCaptureWithDevice %@ @ %ld fps", format, (long)fps);

                      self->_currentDevice = device;

                      NSError *error = nil;
                      if (![self->_currentDevice lockForConfiguration:&error]) {
                        RTCLogError(@"Failed to lock device %@. Error: %@",
                                    self->_currentDevice,
                                    error.userInfo);
                        if (completionHandler) {
                          completionHandler(error);
                        }
                        self->_willBeRunning = false;
                        return;
                      }
                      [self reconfigureCaptureSessionInput];
                      [self updateDeviceCaptureFormat:format fps:fps];
                      [self updateVideoDataOutputPixelFormat:format];
                      [self->_captureSession startRunning];
                      [self->_currentDevice unlockForConfiguration];
                      self->_isRunning = YES;
                      if (completionHandler) {
                        completionHandler(nil);
                      }
                    }];
}

- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler {
  _willBeRunning = false;
  [RTCDispatcher
      dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                    block:^{
                      RTCLogInfo("Stop");
                      self->_currentDevice = nil;
                      for (AVCaptureDeviceInput *oldInput in [self->_captureSession.inputs copy]) {
                        [self->_captureSession removeInput:oldInput];
                      }
                      [self->_captureSession stopRunning];

                      self->_isRunning = NO;
                      if (completionHandler) {
                        completionHandler();
                      }
                    }];
}

#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
  NSParameterAssert(captureOutput == _videoDataOutput);

  int minWarmupFrameCount = 12;
  _warmupFrameCount++;
  if (_warmupFrameCount < minWarmupFrameCount) {
    return;
  }

  if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
      !CMSampleBufferDataIsReady(sampleBuffer)) {
    return;
  }

  CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
  if (pixelBuffer == nil) {
    return;
  }

  TGRTCCVPixelBuffer *rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer];
  rtcPixelBuffer.shouldBeMirrored = YES;
  if (_aspectRatio > 0.001) {
    const auto originalWidth = rtcPixelBuffer.width;
    const auto originalHeight = rtcPixelBuffer.height;
    auto width = (originalWidth > _aspectRatio * originalHeight)
        ? int(std::round(_aspectRatio * originalHeight))
        : originalWidth;
    auto height = (originalWidth > _aspectRatio * originalHeight)
        ? originalHeight
        : int(std::round(originalHeight / _aspectRatio));

    if ((width < originalWidth || height < originalHeight) && width && height) {
      width &= ~int(1);
      height &= ~int(1);
      const auto left = (originalWidth - width) / 2;
      const auto top = (originalHeight - height) / 2;

      rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer adaptedWidth:width adaptedHeight:height cropWidth:width cropHeight:height cropX:left cropY:top];

      rtcPixelBuffer.shouldBeMirrored = YES;

      CVPixelBufferRef outputPixelBufferRef = NULL;
      OSType pixelFormat = CVPixelBufferGetPixelFormatType(rtcPixelBuffer.pixelBuffer);
      CVPixelBufferCreate(NULL, width, height, pixelFormat, NULL, &outputPixelBufferRef);
      if (outputPixelBufferRef) {
        int bufferSize = [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:width height:height];
        if (_croppingBuffer.size() < bufferSize) {
          _croppingBuffer.resize(bufferSize);
        }
        if ([rtcPixelBuffer cropAndScaleTo:outputPixelBufferRef withTempBuffer:_croppingBuffer.data()]) {
          rtcPixelBuffer = [[TGRTCCVPixelBuffer alloc] initWithPixelBuffer:outputPixelBufferRef];
          rtcPixelBuffer.shouldBeMirrored = YES;
        }
        CVPixelBufferRelease(outputPixelBufferRef);
      }
    }
  }
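  // Unlike the iOS path, the macOS crop targets _aspectRatio directly as a
  // width/height ratio (clamped to at least 0.7 in
  // setPreferredCaptureAspectRatio). As an illustration only: a 1920x1080
  // capture with _aspectRatio = 1.0 would be center-cropped to 1080x1080,
  // with left = (1920 - 1080) / 2 = 420 and top = 0.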
|
||||
|
||||
if (!_isPaused && _uncroppedSink) {
|
||||
int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
|
||||
kNanosecondsPerSecond;
|
||||
RTCVideoFrame *frame = [[RTCVideoFrame alloc] initWithBuffer:rtcPixelBuffer
|
||||
rotation:_rotation
|
||||
timeStampNs:timeStampNs];
|
||||
|
||||
const int64_t timestamp_us = frame.timeStampNs / rtc::kNumNanosecsPerMicrosec;
|
||||
|
||||
rtc::scoped_refptr<webrtc::VideoFrameBuffer> buffer;
|
||||
buffer = new rtc::RefCountedObject<webrtc::ObjCFrameBuffer>(frame.buffer);
|
||||
|
||||
webrtc::VideoRotation rotation = static_cast<webrtc::VideoRotation>(frame.rotation);
|
||||
|
||||
_uncroppedSink->OnFrame(webrtc::VideoFrame::Builder()
|
||||
.set_video_frame_buffer(buffer)
|
||||
.set_rotation(rotation)
|
||||
.set_timestamp_us(timestamp_us)
|
||||
.build());
|
||||
}
|
||||
|
||||
|
||||
int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
|
||||
kNanosecondsPerSecond;
|
||||
RTCVideoFrame *videoFrame = [[RTCVideoFrame alloc] initWithBuffer:rtcPixelBuffer
|
||||
rotation:_rotation
|
||||
timeStampNs:timeStampNs];
|
||||
if (!_isPaused) {
|
||||
getObjCVideoSource(_source)->OnCapturedFrame(videoFrame);
|
||||
}
|
||||
_skippedFrame = MIN(_skippedFrame + 1, 16);
|
||||
}
|
||||
|
||||
- (void)captureOutput:(AVCaptureOutput *)captureOutput
|
||||
didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
|
||||
fromConnection:(AVCaptureConnection *)connection {
|
||||
NSString *droppedReason =
|
||||
(__bridge NSString *)CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_DroppedFrameReason, nil);
|
||||
RTCLogError(@"Dropped sample buffer. Reason: %@", droppedReason);
|
||||
}
|
||||
|
||||
#pragma mark - AVCaptureSession notifications
|
||||
|
||||
- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
|
||||
|
||||
}
|
||||
|
||||
- (void)handleCaptureSessionInterruptionEnded:(NSNotification *)notification {
|
||||
RTCLog(@"Capture session interruption ended.");
|
||||
}
|
||||
|
||||
- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
|
||||
NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
|
||||
RTCLogError(@"Capture session runtime error: %@", error);
|
||||
|
||||
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
|
||||
block:^{
|
||||
[self handleFatalError];
|
||||
}];
|
||||
}
|
||||
|
||||
- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
|
||||
RTCLog(@"Capture session started.");
|
||||
|
||||
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
|
||||
block:^{
|
||||
// If we successfully restarted after an unknown error,
|
||||
// allow future retries on fatal errors.
|
||||
self->_hasRetriedOnFatalError = NO;
|
||||
}];
|
||||
|
||||
|
||||
_inForegroundValue = true;
|
||||
[self updateIsActiveValue];
|
||||
}
|
||||
|
||||
- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
|
||||
RTCLog(@"Capture session stopped.");
|
||||
_inForegroundValue = false;
|
||||
[self updateIsActiveValue];
|
||||
|
||||
}
|
||||
|
||||
- (void)handleFatalError {
|
||||
[RTCDispatcher
|
||||
dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
|
||||
block:^{
|
||||
if (!self->_hasRetriedOnFatalError) {
|
||||
RTCLogWarning(@"Attempting to recover from fatal capture error.");
|
||||
        [self handleNonFatalError];
        self->_warmupFrameCount = 0;
        self->_hasRetriedOnFatalError = YES;
      } else {
        RTCLogError(@"Previous fatal error recovery failed.");
      }
    }];
}

- (void)handleNonFatalError {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 RTCLog(@"Restarting capture session after error.");
                                 if (self->_isRunning) {
                                   self->_warmupFrameCount = 0;
                                   [self->_captureSession startRunning];
                                 }
                               }];
}

#pragma mark - UIApplication notifications

- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
  [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                               block:^{
                                 if (self->_isRunning && !self->_captureSession.isRunning) {
                                   RTCLog(@"Restarting capture session on active.");
                                   self->_warmupFrameCount = 0;
                                   [self->_captureSession startRunning];
                                 }
                               }];
}

#pragma mark - Private

- (dispatch_queue_t)frameQueue {
  if (!_frameQueue) {
    _frameQueue =
        dispatch_queue_create("org.webrtc.cameravideocapturer.video", DISPATCH_QUEUE_SERIAL);
    dispatch_set_target_queue(_frameQueue,
                              dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
  }
  return _frameQueue;
}

- (BOOL)setupCaptureSession:(AVCaptureSession *)captureSession {
  NSAssert(_captureSession == nil, @"Setup capture session called twice.");
  _captureSession = captureSession;

  [self setupVideoDataOutput];
  // Add the output.
  if (![_captureSession canAddOutput:_videoDataOutput]) {
    RTCLogError(@"Video data output unsupported.");
    return NO;
  }
  [_captureSession addOutput:_videoDataOutput];

  return YES;
}

- (void)setupVideoDataOutput {
  NSAssert(_videoDataOutput == nil, @"Setup video data output called twice.");
  AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];

  // `videoDataOutput.availableVideoCVPixelFormatTypes` returns the pixel formats supported by the
  // device with the most efficient output format first. Find the first format that we support.
  NSSet<NSNumber *> *supportedPixelFormats = [RTCCVPixelBuffer supportedPixelFormats];
  NSMutableOrderedSet *availablePixelFormats =
      [NSMutableOrderedSet orderedSetWithArray:videoDataOutput.availableVideoCVPixelFormatTypes];
  [availablePixelFormats intersectSet:supportedPixelFormats];
  NSNumber *pixelFormat = availablePixelFormats.firstObject;
  NSAssert(pixelFormat, @"Output device has no supported formats.");

  _preferredOutputPixelFormat = [pixelFormat unsignedIntValue];
  _outputPixelFormat = _preferredOutputPixelFormat;
  videoDataOutput.videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : pixelFormat};
  videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
  [videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
  _videoDataOutput = videoDataOutput;
}

- (void)updateVideoDataOutputPixelFormat:(AVCaptureDeviceFormat *)format {
  FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(format.formatDescription);
  if (![[RTCCVPixelBuffer supportedPixelFormats] containsObject:@(mediaSubType)]) {
    mediaSubType = _preferredOutputPixelFormat;
  }

  if (mediaSubType != _outputPixelFormat) {
    _outputPixelFormat = mediaSubType;
    _videoDataOutput.videoSettings =
        @{ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(mediaSubType) };
  }

  AVCaptureConnection *connection = [_videoDataOutput connectionWithMediaType:AVMediaTypeVideo];
  if ([connection isVideoMirroringSupported]) {
    [connection setVideoMirrored:YES];
  }
}

#pragma mark - Private, called inside capture queue

- (void)updateDeviceCaptureFormat:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps {
  NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
           @"updateDeviceCaptureFormat must be called on the capture queue.");
  @try {
    _currentDevice.activeFormat = format;
    _currentDevice.activeVideoMinFrameDuration = CMTimeMake(1, (int32_t)fps);
  } @catch (NSException *exception) {
    RTCLogError(@"Failed to set active format!\n User info:%@", exception.userInfo);
    return;
  }
}

- (void)reconfigureCaptureSessionInput {
  NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
           @"reconfigureCaptureSessionInput must be called on the capture queue.");
  NSError *error = nil;
  AVCaptureDeviceInput *input =
      [AVCaptureDeviceInput deviceInputWithDevice:_currentDevice error:&error];
  if (!input) {
    RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
    return;
  }
  [_captureSession beginConfiguration];
  for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
    [_captureSession removeInput:oldInput];
  }
  if ([_captureSession canAddInput:input]) {
    [_captureSession addInput:input];
  } else {
    RTCLogError(@"Cannot add camera as an input to the session.");
  }
  [_captureSession commitConfiguration];
}

@end
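
Aside: setupVideoDataOutput above negotiates the capture pixel format by intersecting the device's efficiency-ordered format list with the formats RTCCVPixelBuffer can wrap, while keeping the device's ordering. The same ordered-intersection idea as a standalone C++ sketch (hypothetical names, not part of the diff):

#include <cstdint>
#include <optional>
#include <unordered_set>
#include <vector>

// Pick the first device-preferred format that the renderer also supports.
// `devicePreferred` is assumed sorted most-efficient-first, matching
// AVCaptureVideoDataOutput.availableVideoCVPixelFormatTypes.
std::optional<uint32_t> pickPixelFormat(
		const std::vector<uint32_t> &devicePreferred,
		const std::unordered_set<uint32_t> &supported) {
	for (uint32_t format : devicePreferred) {
		if (supported.count(format)) {
			return format; // Ordered intersection: device order wins.
		}
	}
	return std::nullopt; // Caller must handle "no common format".
}

NSMutableOrderedSet gives the Objective-C code this behavior for free; the loop makes the ordering guarantee explicit.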

@@ -1,33 +0,0 @@
#ifndef TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H
#define TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H

#include "VideoCapturerInterface.h"

#include "sdk/objc/native/src/objc_video_track_source.h"
#include "api/video_track_source_proxy.h"

@interface VideoCapturerInterfaceImplHolder : NSObject

@property (nonatomic) void *reference;

@end

namespace tgcalls {

class VideoCapturerInterfaceImpl : public VideoCapturerInterface {
public:
	VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::pair<int, int> &outResolution);
	~VideoCapturerInterfaceImpl() override;

	void setState(VideoState state) override;
	void setPreferredCaptureAspectRatio(float aspectRatio) override;
	void setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;

private:
	rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;
	VideoCapturerInterfaceImplHolder *_implReference;
};

} // namespace tgcalls

#endif

@@ -1,271 +0,0 @@
#include "VideoCapturerInterfaceImpl.h"

#include "absl/strings/match.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/webrtc_media_engine.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/field_trial.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/video_bitrate_allocation.h"

#include "sdk/objc/native/api/video_encoder_factory.h"
#include "sdk/objc/native/api/video_decoder_factory.h"

#include "sdk/objc/api/RTCVideoRendererAdapter.h"
#include "sdk/objc/native/api/video_frame.h"
#include "api/media_types.h"

#ifndef WEBRTC_IOS
#import "VideoCameraCapturerMac.h"
#else
#import "VideoCameraCapturer.h"
#endif
#import <AVFoundation/AVFoundation.h>

#import "VideoCaptureInterface.h"

@interface VideoCapturerInterfaceImplSourceDescription : NSObject

@property (nonatomic, readonly) bool isFrontCamera;
@property (nonatomic, strong, readonly, nonnull) AVCaptureDevice *device;
@property (nonatomic, strong, readonly, nonnull) AVCaptureDeviceFormat *format;

@end

@implementation VideoCapturerInterfaceImplSourceDescription

- (instancetype)initWithIsFrontCamera:(bool)isFrontCamera device:(AVCaptureDevice * _Nonnull)device format:(AVCaptureDeviceFormat * _Nonnull)format {
	self = [super init];
	if (self != nil) {
		_isFrontCamera = isFrontCamera;
		_device = device;
		_format = format;
	}
	return self;
}

@end

@interface VideoCapturerInterfaceImplReference : NSObject {
	VideoCameraCapturer *_videoCapturer;
}

@end

@implementation VideoCapturerInterfaceImplReference

+ (AVCaptureDevice *)selectCaptureDevice:(bool)useFrontCamera {
	AVCaptureDevice *selectedCamera = nil;

#ifdef WEBRTC_IOS
	AVCaptureDevice *frontCamera = nil;
	AVCaptureDevice *backCamera = nil;
	for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) {
		if (device.position == AVCaptureDevicePositionFront) {
			frontCamera = device;
		} else if (device.position == AVCaptureDevicePositionBack) {
			backCamera = device;
		}
	}
	if (useFrontCamera && frontCamera != nil) {
		selectedCamera = frontCamera;
	} else {
		selectedCamera = backCamera;
	}
#else
	NSArray<AVCaptureDevice *> *devices = [VideoCameraCapturer captureDevices];
	for (int i = 0; i < devices.count; i++) {
		if ([_videoCapturer deviceIsCaptureCompitable:devices[i]]) {
			selectedCamera = devices[i];
			break;
		}
	}
#endif

	return selectedCamera;
}

+ (AVCaptureDeviceFormat *)selectCaptureDeviceFormatForDevice:(AVCaptureDevice *)selectedCamera {
	NSArray<AVCaptureDeviceFormat *> *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:selectedCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) {
		int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width;
		int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width;
		return width1 < width2 ? NSOrderedAscending : NSOrderedDescending;
	}];

	AVCaptureDeviceFormat *bestFormat = sortedFormats.firstObject;

	bool didSelectPreferredFormat = false;
#ifdef WEBRTC_IOS
	for (AVCaptureDeviceFormat *format in sortedFormats) {
		CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
		if (dimensions.width == 1280 && dimensions.height == 720) {
			if (format.videoFieldOfView > 60.0f && format.videoSupportedFrameRateRanges.lastObject.maxFrameRate == 30) {
				didSelectPreferredFormat = true;
				bestFormat = format;
				break;
			}
		}
	}
#endif
	if (!didSelectPreferredFormat) {
		for (AVCaptureDeviceFormat *format in sortedFormats) {
			CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
			if (dimensions.width >= 1000 || dimensions.height >= 1000) {
				bestFormat = format;
				break;
			}
		}
	}

	if (bestFormat == nil) {
		assert(false);
		return nil;
	}

	AVFrameRateRange *frameRateRange = [[bestFormat.videoSupportedFrameRateRanges sortedArrayUsingComparator:^NSComparisonResult(AVFrameRateRange *lhs, AVFrameRateRange *rhs) {
		if (lhs.maxFrameRate < rhs.maxFrameRate) {
			return NSOrderedAscending;
		} else {
			return NSOrderedDescending;
		}
	}] lastObject];

	if (frameRateRange == nil) {
		assert(false);
		return nil;
	}

	return bestFormat;
}

+ (VideoCapturerInterfaceImplSourceDescription *)selectCapturerDescription:(bool)useFrontCamera {
	AVCaptureDevice *selectedCamera = [VideoCapturerInterfaceImplReference selectCaptureDevice:useFrontCamera];

	if (selectedCamera == nil) {
		return nil;
	}

	AVCaptureDeviceFormat *bestFormat = [VideoCapturerInterfaceImplReference selectCaptureDeviceFormatForDevice:selectedCamera];

	if (bestFormat == nil) {
		return nil;
	}

	return [[VideoCapturerInterfaceImplSourceDescription alloc] initWithIsFrontCamera:useFrontCamera device:selectedCamera format:bestFormat];
}

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source sourceDescription:(VideoCapturerInterfaceImplSourceDescription *)sourceDescription isActiveUpdated:(void (^)(bool))isActiveUpdated {
	self = [super init];
	if (self != nil) {
		assert([NSThread isMainThread]);

#ifdef WEBRTC_IOS
		_videoCapturer = [[VideoCameraCapturer alloc] initWithSource:source useFrontCamera:sourceDescription.isFrontCamera isActiveUpdated:isActiveUpdated];
#else
		_videoCapturer = [[VideoCameraCapturer alloc] initWithSource:source isActiveUpdated:isActiveUpdated];
#endif

		[_videoCapturer startCaptureWithDevice:sourceDescription.device format:sourceDescription.format fps:30];
	}
	return self;
}

- (void)dealloc {
	assert([NSThread isMainThread]);

	[_videoCapturer stopCapture];
}

- (void)setIsEnabled:(bool)isEnabled {
	[_videoCapturer setIsEnabled:isEnabled];
}

- (void)setUncroppedSink:(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink {
	[_videoCapturer setUncroppedSink:sink];
}

- (void)setPreferredCaptureAspectRatio:(float)aspectRatio {
	[_videoCapturer setPreferredCaptureAspectRatio:aspectRatio];
}

@end

@implementation VideoCapturerInterfaceImplHolder

@end

namespace tgcalls {

VideoCapturerInterfaceImpl::VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::pair<int, int> &outResolution) :
_source(source) {
	VideoCapturerInterfaceImplSourceDescription *sourceDescription = [VideoCapturerInterfaceImplReference selectCapturerDescription:useFrontCamera];

	CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(sourceDescription.format.formatDescription);
#ifdef WEBRTC_IOS
	outResolution.first = dimensions.height;
	outResolution.second = dimensions.width;
#else
	outResolution.first = dimensions.width;
	outResolution.second = dimensions.height;
#endif

	_implReference = [[VideoCapturerInterfaceImplHolder alloc] init];
	VideoCapturerInterfaceImplHolder *implReference = _implReference;
	dispatch_async(dispatch_get_main_queue(), ^{
		VideoCapturerInterfaceImplReference *value = [[VideoCapturerInterfaceImplReference alloc] initWithSource:source sourceDescription:sourceDescription isActiveUpdated:^(bool isActive) {
			stateUpdated(isActive ? VideoState::Active : VideoState::Paused);
		}];
		if (value != nil) {
			implReference.reference = (void *)CFBridgingRetain(value);
		}
	});
}

VideoCapturerInterfaceImpl::~VideoCapturerInterfaceImpl() {
	VideoCapturerInterfaceImplHolder *implReference = _implReference;
	dispatch_async(dispatch_get_main_queue(), ^{
		if (implReference.reference != nil) {
			CFBridgingRelease(implReference.reference);
		}
	});
}

void VideoCapturerInterfaceImpl::setState(VideoState state) {
	VideoCapturerInterfaceImplHolder *implReference = _implReference;
	dispatch_async(dispatch_get_main_queue(), ^{
		if (implReference.reference != nil) {
			VideoCapturerInterfaceImplReference *reference = (__bridge VideoCapturerInterfaceImplReference *)implReference.reference;
			[reference setIsEnabled:(state == VideoState::Active)];
		}
	});
}

void VideoCapturerInterfaceImpl::setPreferredCaptureAspectRatio(float aspectRatio) {
	VideoCapturerInterfaceImplHolder *implReference = _implReference;
	dispatch_async(dispatch_get_main_queue(), ^{
		if (implReference.reference != nil) {
			VideoCapturerInterfaceImplReference *reference = (__bridge VideoCapturerInterfaceImplReference *)implReference.reference;
			[reference setPreferredCaptureAspectRatio:aspectRatio];
		}
	});
}

void VideoCapturerInterfaceImpl::setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
	VideoCapturerInterfaceImplHolder *implReference = _implReference;
	dispatch_async(dispatch_get_main_queue(), ^{
		if (implReference.reference != nil) {
			VideoCapturerInterfaceImplReference *reference = (__bridge VideoCapturerInterfaceImplReference *)implReference.reference;
			[reference setUncroppedSink:sink];
		}
	});
}

} // namespace tgcalls
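
Aside: the C++ side above never touches the Objective-C capturer directly. It parks a CFBridgingRetain'ed pointer inside a holder object and routes every access, including the final release, through the main dispatch queue, so queue order substitutes for locking. A rough C++ analogue of that lifetime scheme, with hypothetical names (a sketch, not part of the diff):

#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

// Minimal serial queue standing in for dispatch_get_main_queue(): tasks run
// FIFO on one thread, so create/use/destroy of the held object cannot race.
class SerialQueue {
public:
	SerialQueue() : _worker([this] { loop(); }) {}
	~SerialQueue() {
		async(nullptr); // An empty task doubles as the stop sentinel.
		_worker.join();
	}
	void async(std::function<void()> task) {
		{
			std::lock_guard<std::mutex> lock(_mutex);
			_tasks.push(std::move(task));
		}
		_cv.notify_one();
	}

private:
	void loop() {
		for (;;) {
			std::function<void()> task;
			{
				std::unique_lock<std::mutex> lock(_mutex);
				_cv.wait(lock, [this] { return !_tasks.empty(); });
				task = std::move(_tasks.front());
				_tasks.pop();
			}
			if (!task) {
				return;
			}
			task();
		}
	}

	std::mutex _mutex;
	std::condition_variable _cv;
	std::queue<std::function<void()>> _tasks;
	std::thread _worker;
};

// The holder decouples the owner's lifetime from the payload's: the owner
// only ever enqueues work against the shared holder, mirroring
// VideoCapturerInterfaceImplHolder plus CFBridgingRetain/CFBridgingRelease.
struct Holder {
	void *reference = nullptr;
};

class QueueBoundOwner {
public:
	explicit QueueBoundOwner(SerialQueue &queue)
	: _queue(queue), _holder(std::make_shared<Holder>()) {
		auto holder = _holder;
		_queue.async([holder] { holder->reference = new int(42); }); // stand-in payload
	}
	~QueueBoundOwner() {
		auto holder = _holder;
		// Runs after every previously queued use of the payload.
		_queue.async([holder] {
			delete static_cast<int *>(holder->reference);
			holder->reference = nullptr;
		});
	}

private:
	SerialQueue &_queue;
	std::shared_ptr<Holder> _holder;
};

The same ordering argument is why the destructor in the deleted file can enqueue the release and return immediately: no later task can observe the dead pointer.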

@@ -1,34 +0,0 @@
#ifndef TGCALLS_VIDEO_METAL_VIEW_H
#define TGCALLS_VIDEO_METAL_VIEW_H
#ifdef WEBRTC_IOS
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

#import "api/media_stream_interface.h"

#include <memory>

@class RTCVideoFrame;

@interface VideoMetalView : UIView

+ (bool)isSupported;

@property(nonatomic) UIViewContentMode videoContentMode;
@property(nonatomic, getter=isEnabled) BOOL enabled;
@property(nonatomic, nullable) NSValue* rotationOverride;

@property (nonatomic, readwrite) int internalOrientation;

- (void)setSize:(CGSize)size;
- (void)renderFrame:(nullable RTCVideoFrame *)frame;

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;
- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived;
- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated;
- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated;

@end

#endif //WEBRTC_IOS
#endif

@@ -1,389 +0,0 @@
#import "VideoMetalView.h"

#import <Metal/Metal.h>
#import <MetalKit/MetalKit.h>

#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "TGRTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"

#import "api/video/video_sink_interface.h"
#import "api/media_stream_interface.h"
#import "rtc_base/time_utils.h"

#import "RTCMTLI420Renderer.h"
#import "RTCMTLNV12Renderer.h"
#import "RTCMTLRGBRenderer.h"

#define MTKViewClass NSClassFromString(@"MTKView")
#define RTCMTLNV12RendererClass NSClassFromString(@"RTCMTLNV12Renderer")
#define RTCMTLI420RendererClass NSClassFromString(@"RTCMTLI420Renderer")
#define RTCMTLRGBRendererClass NSClassFromString(@"RTCMTLRGBRenderer")

namespace {

static RTCVideoFrame *customToObjCVideoFrame(const webrtc::VideoFrame &frame, RTCVideoRotation &rotation) {
	rotation = RTCVideoRotation(frame.rotation());
	RTCVideoFrame *videoFrame =
		[[RTCVideoFrame alloc] initWithBuffer:webrtc::ToObjCVideoFrameBuffer(frame.video_frame_buffer())
									 rotation:rotation
								  timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
	videoFrame.timeStamp = frame.timestamp();

	return videoFrame;
}

class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
	VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation)) {
		_frameReceived = [frameReceived copy];
	}

	void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
		RTCVideoRotation rotation = RTCVideoRotation_90;
		RTCVideoFrame* videoFrame = customToObjCVideoFrame(nativeVideoFrame, rotation);

		CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);

		if (_frameReceived) {
			_frameReceived(currentSize, videoFrame, rotation);
		}
	}

private:
	void (^_frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation);
};

}

@interface VideoMetalView () <MTKViewDelegate> {
	RTCMTLI420Renderer *_rendererI420;
	RTCMTLNV12Renderer *_rendererNV12;
	RTCMTLRGBRenderer *_rendererRGB;
	MTKView *_metalView;
	RTCVideoFrame *_videoFrame;
	CGSize _videoFrameSize;
	int64_t _lastFrameTimeNs;

	CGSize _currentSize;
	std::shared_ptr<VideoRendererAdapterImpl> _sink;

	void (^_onFirstFrameReceived)();
	bool _firstFrameReceivedReported;

	void (^_onOrientationUpdated)(int);

	void (^_onIsMirroredUpdated)(bool);

	bool _didSetShouldBeMirrored;
	bool _shouldBeMirrored;
}

@end

@implementation VideoMetalView

+ (bool)isSupported {
	static bool value;
	static dispatch_once_t onceToken;
	dispatch_once(&onceToken, ^{
		id<MTLDevice> device = MTLCreateSystemDefaultDevice();
		value = device != nil;
	});
	return value;
}

- (instancetype)initWithFrame:(CGRect)frameRect {
	self = [super initWithFrame:frameRect];
	if (self) {
		[self configure];

		_currentSize = CGSizeZero;
		//_rotationOverride = @(RTCVideoRotation_90);

		__weak VideoMetalView *weakSelf = self;
		_sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame, RTCVideoRotation rotation) {
			dispatch_async(dispatch_get_main_queue(), ^{
				__strong VideoMetalView *strongSelf = weakSelf;
				if (strongSelf == nil) {
					return;
				}
				if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
					strongSelf->_currentSize = size;
					[strongSelf setSize:size];
				}

				int mappedValue = 0;
				switch (rotation) {
					case RTCVideoRotation_90:
						mappedValue = 0;
						break;
					case RTCVideoRotation_180:
						mappedValue = 1;
						break;
					case RTCVideoRotation_270:
						mappedValue = 2;
						break;
					default:
						mappedValue = 0;
						break;
				}
				[strongSelf setInternalOrientation:mappedValue];

				[strongSelf renderFrame:videoFrame];
			});
		}));
	}
	return self;
}

- (BOOL)isEnabled {
	return !_metalView.paused;
}

- (void)setEnabled:(BOOL)enabled {
	_metalView.paused = !enabled;
}

- (UIViewContentMode)videoContentMode {
	return _metalView.contentMode;
}

- (void)setVideoContentMode:(UIViewContentMode)mode {
	_metalView.contentMode = mode;
}

#pragma mark - Private

+ (BOOL)isMetalAvailable {
	return MTLCreateSystemDefaultDevice() != nil;
}

+ (MTKView *)createMetalView:(CGRect)frame {
	return [[MTKViewClass alloc] initWithFrame:frame];
}

+ (RTCMTLNV12Renderer *)createNV12Renderer {
	return [[RTCMTLNV12RendererClass alloc] init];
}

+ (RTCMTLI420Renderer *)createI420Renderer {
	return [[RTCMTLI420RendererClass alloc] init];
}

+ (RTCMTLRGBRenderer *)createRGBRenderer {
	return [[RTCMTLRGBRendererClass alloc] init];
}

- (void)configure {
	NSAssert([VideoMetalView isMetalAvailable], @"Metal not available on this device");

	_metalView = [VideoMetalView createMetalView:self.bounds];
	_metalView.delegate = self;
	_metalView.contentMode = UIViewContentModeScaleAspectFill;
	[self addSubview:_metalView];
	_videoFrameSize = CGSizeZero;
}

- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
	[super setMultipleTouchEnabled:multipleTouchEnabled];
	_metalView.multipleTouchEnabled = multipleTouchEnabled;
}

- (void)layoutSubviews {
	[super layoutSubviews];

	CGRect bounds = self.bounds;
	_metalView.frame = bounds;
	if (!CGSizeEqualToSize(_videoFrameSize, CGSizeZero)) {
		_metalView.drawableSize = [self drawableSize];
	} else {
		_metalView.drawableSize = bounds.size;
	}
}

#pragma mark - MTKViewDelegate methods

- (void)drawInMTKView:(nonnull MTKView *)view {
	NSAssert(view == _metalView, @"Receiving draw callbacks from foreign instance.");
	RTCVideoFrame *videoFrame = _videoFrame;
	// Skip rendering if we've already rendered this frame.
	if (!videoFrame || videoFrame.timeStampNs == _lastFrameTimeNs) {
		return;
	}

	if (CGRectIsEmpty(view.bounds)) {
		return;
	}

	RTCMTLRenderer *renderer;
	if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
		RTCCVPixelBuffer *buffer = (RTCCVPixelBuffer*)videoFrame.buffer;

		if ([buffer isKindOfClass:[TGRTCCVPixelBuffer class]]) {
			bool shouldBeMirrored = ((TGRTCCVPixelBuffer *)buffer).shouldBeMirrored;
			if (shouldBeMirrored != _shouldBeMirrored) {
				_shouldBeMirrored = shouldBeMirrored;
				if (_shouldBeMirrored) {
					_metalView.transform = CGAffineTransformMakeScale(-1.0f, 1.0f);
				} else {
					_metalView.transform = CGAffineTransformIdentity;
				}

				if (_didSetShouldBeMirrored) {
					if (_onIsMirroredUpdated) {
						_onIsMirroredUpdated(_shouldBeMirrored);
					}
				} else {
					_didSetShouldBeMirrored = true;
				}
			}
		}

		const OSType pixelFormat = CVPixelBufferGetPixelFormatType(buffer.pixelBuffer);
		if (pixelFormat == kCVPixelFormatType_32BGRA || pixelFormat == kCVPixelFormatType_32ARGB) {
			if (!_rendererRGB) {
				_rendererRGB = [VideoMetalView createRGBRenderer];
				if (![_rendererRGB addRenderingDestination:_metalView]) {
					_rendererRGB = nil;
					RTCLogError(@"Failed to create RGB renderer");
					return;
				}
			}
			renderer = _rendererRGB;
		} else {
			if (!_rendererNV12) {
				_rendererNV12 = [VideoMetalView createNV12Renderer];
				if (![_rendererNV12 addRenderingDestination:_metalView]) {
					_rendererNV12 = nil;
					RTCLogError(@"Failed to create NV12 renderer");
					return;
				}
			}
			renderer = _rendererNV12;
		}
	} else {
		if (!_rendererI420) {
			_rendererI420 = [VideoMetalView createI420Renderer];
			if (![_rendererI420 addRenderingDestination:_metalView]) {
				_rendererI420 = nil;
				RTCLogError(@"Failed to create I420 renderer");
				return;
			}
		}
		renderer = _rendererI420;
	}

	renderer.rotationOverride = _rotationOverride;

	[renderer drawFrame:videoFrame];
	_lastFrameTimeNs = videoFrame.timeStampNs;
}

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}

#pragma mark -

- (void)setRotationOverride:(NSValue *)rotationOverride {
	_rotationOverride = rotationOverride;

	_metalView.drawableSize = [self drawableSize];
	[self setNeedsLayout];
}

- (RTCVideoRotation)frameRotation {
	if (_rotationOverride) {
		RTCVideoRotation rotation;
		if (@available(iOS 11, *)) {
			[_rotationOverride getValue:&rotation size:sizeof(rotation)];
		} else {
			[_rotationOverride getValue:&rotation];
		}
		return rotation;
	}

	return _videoFrame.rotation;
}

- (CGSize)drawableSize {
	// Flip width/height if the rotations are not the same.
	CGSize videoFrameSize = _videoFrameSize;
	RTCVideoRotation frameRotation = [self frameRotation];

	BOOL useLandscape =
		(frameRotation == RTCVideoRotation_0) || (frameRotation == RTCVideoRotation_180);
	BOOL sizeIsLandscape = (_videoFrame.rotation == RTCVideoRotation_0) ||
		(_videoFrame.rotation == RTCVideoRotation_180);

	if (useLandscape == sizeIsLandscape) {
		return videoFrameSize;
	} else {
		return CGSizeMake(videoFrameSize.height, videoFrameSize.width);
	}
}

#pragma mark - RTCVideoRenderer

- (void)setSize:(CGSize)size {
	assert([NSThread isMainThread]);

	_videoFrameSize = size;
	CGSize drawableSize = [self drawableSize];

	_metalView.drawableSize = drawableSize;
	[self setNeedsLayout];
	//[strongSelf.delegate videoView:self didChangeVideoSize:size];
}

- (void)renderFrame:(nullable RTCVideoFrame *)frame {
	assert([NSThread isMainThread]);

	if (!_firstFrameReceivedReported && _onFirstFrameReceived) {
		_firstFrameReceivedReported = true;
		_onFirstFrameReceived();
	}

	if (!self.isEnabled) {
		return;
	}

	if (frame == nil) {
		RTCLogInfo(@"Incoming frame is nil. Exiting render callback.");
		return;
	}
	_videoFrame = frame;
}

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
	assert([NSThread isMainThread]);

	return _sink;
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived {
	_onFirstFrameReceived = [onFirstFrameReceived copy];
	_firstFrameReceivedReported = false;
}

- (void)setInternalOrientation:(int)internalOrientation {
	if (_internalOrientation != internalOrientation) {
		_internalOrientation = internalOrientation;
		if (_onOrientationUpdated) {
			_onOrientationUpdated(internalOrientation);
		}
	}
}

- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated {
	_onOrientationUpdated = [onOrientationUpdated copy];
}

- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated {
	_onIsMirroredUpdated = [onIsMirroredUpdated copy];
}

@end
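
Aside: drawableSize above only ever swaps width and height. It compares whether the effective rotation (override, if set, else native) and the frame's own rotation agree about landscape versus portrait, and flips the buffer size when they disagree. A standalone check of that rule (a sketch, not part of the diff):

#include <cassert>
#include <utility>

enum class Rotation { R0 = 0, R90 = 90, R180 = 180, R270 = 270 };

// Mirrors -[VideoMetalView drawableSize]: keep the buffer size when the
// effective and native rotations agree on landscape-vs-portrait, flip it
// otherwise.
std::pair<int, int> drawableSize(std::pair<int, int> frameSize,
		Rotation effective, Rotation native) {
	const bool useLandscape = effective == Rotation::R0 || effective == Rotation::R180;
	const bool sizeIsLandscape = native == Rotation::R0 || native == Rotation::R180;
	return useLandscape == sizeIsLandscape
		? frameSize
		: std::make_pair(frameSize.second, frameSize.first);
}

int main() {
	// A 1280x720 buffer forced to render at 90 degrees becomes 720x1280.
	const auto rotated = drawableSize({1280, 720}, Rotation::R90, Rotation::R0);
	assert(rotated.first == 720 && rotated.second == 1280);
	// Agreement on orientation: the size passes through untouched.
	const auto straight = drawableSize({1280, 720}, Rotation::R180, Rotation::R0);
	assert(straight.first == 1280 && straight.second == 720);
	return 0;
}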

@@ -1,36 +0,0 @@
#ifndef TGCALLS_VIDEO_METAL_VIEW_MAC_H
#define TGCALLS_VIDEO_METAL_VIEW_MAC_H
#ifndef WEBRTC_IOS
#import <Foundation/Foundation.h>
#import <AppKit/AppKit.h>

#import "api/media_stream_interface.h"

#include <memory>

@class RTCVideoFrame;

@interface VideoMetalView : NSView

+ (bool)isSupported;

@property(nonatomic) CALayerContentsGravity _Nullable videoContentMode;
@property(nonatomic, getter=isEnabled) BOOL enabled;
@property(nonatomic, nullable) NSValue* rotationOverride;

@property (nonatomic, readwrite) int internalOrientation;

- (void)setSize:(CGSize)size;
- (void)renderFrame:(nullable RTCVideoFrame *)frame;

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;
- (void)setOnFirstFrameReceived:(void (^ _Nullable)(float))onFirstFrameReceived;
- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated;
- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated;
- (void)setIsForceMirrored:(BOOL)forceMirrored;
@end

#endif // !WEBRTC_IOS
#endif

@@ -1,377 +0,0 @@
#import "VideoMetalViewMac.h"
#import <Metal/Metal.h>
#import <MetalKit/MetalKit.h>
#import "TGRTCCVPixelBuffer.h"
#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"
#include "sdk/objc/native/src/objc_frame_buffer.h"

#import "api/video/video_sink_interface.h"
#import "api/media_stream_interface.h"
#import "rtc_base/time_utils.h"

#import "RTCMTLI420Renderer.h"

#define MTKViewClass NSClassFromString(@"MTKView")
#define RTCMTLI420RendererClass NSClassFromString(@"RTCMTLI420Renderer")

namespace {

static RTCVideoFrame *customToObjCVideoFrame(const webrtc::VideoFrame &frame, RTCVideoRotation &rotation) {
	rotation = RTCVideoRotation(frame.rotation());
	RTCVideoFrame *videoFrame =
		[[RTCVideoFrame alloc] initWithBuffer:webrtc::ToObjCVideoFrameBuffer(frame.video_frame_buffer())
									 rotation:rotation
								  timeStampNs:frame.timestamp_us() * rtc::kNumNanosecsPerMicrosec];
	videoFrame.timeStamp = frame.timestamp();

	return videoFrame;
}

class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
	VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation)) {
		_frameReceived = [frameReceived copy];
	}

	void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
		RTCVideoRotation rotation = RTCVideoRotation_0;
		RTCVideoFrame* videoFrame = customToObjCVideoFrame(nativeVideoFrame, rotation);

		CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);

		if (_frameReceived) {
			_frameReceived(currentSize, videoFrame, rotation);
		}
	}

private:
	void (^_frameReceived)(CGSize, RTCVideoFrame *, RTCVideoRotation);
};

}

@interface VideoMetalView () <MTKViewDelegate> {
	RTCMTLI420Renderer *_rendererI420;

	MTKView *_metalView;
	RTCVideoFrame *_videoFrame;
	CGSize _videoFrameSize;
	int64_t _lastFrameTimeNs;

	CGSize _currentSize;
	std::shared_ptr<VideoRendererAdapterImpl> _sink;

	void (^_onFirstFrameReceived)(float);
	bool _firstFrameReceivedReported;
	void (^_onOrientationUpdated)(int);
	void (^_onIsMirroredUpdated)(bool);

	bool _didSetShouldBeMirrored;
	bool _shouldBeMirrored;
	bool _forceMirrored;
}

@end

@implementation VideoMetalView

+ (bool)isSupported {
	return [VideoMetalView isMetalAvailable];
}

- (instancetype)initWithFrame:(CGRect)frameRect {
	self = [super initWithFrame:frameRect];
	if (self) {
		[self configure];

		_currentSize = CGSizeZero;

		__weak VideoMetalView *weakSelf = self;
		_sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame, RTCVideoRotation rotation) {
			dispatch_async(dispatch_get_main_queue(), ^{
				__strong VideoMetalView *strongSelf = weakSelf;
				if (strongSelf == nil) {
					return;
				}
				if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
					strongSelf->_currentSize = size;
					[strongSelf setSize:size];
				}

				int mappedValue = 0;
				switch (rotation) {
					case RTCVideoRotation_90:
						mappedValue = 0;
						break;
					case RTCVideoRotation_180:
						mappedValue = 1;
						break;
					case RTCVideoRotation_270:
						mappedValue = 2;
						break;
					default:
						mappedValue = 0;
						break;
				}
				[strongSelf setInternalOrientation:mappedValue];

				[strongSelf renderFrame:videoFrame];
			});
		}));
	}
	return self;
}

- (BOOL)isEnabled {
	return !_metalView.paused;
}

- (void)setEnabled:(BOOL)enabled {
	_metalView.paused = !enabled;
}

- (CALayerContentsGravity)videoContentMode {
	return _metalView.layer.contentsGravity;
}

- (void)setVideoContentMode:(CALayerContentsGravity)mode {
	_metalView.layer.contentsGravity = mode;
}

#pragma mark - Private

+ (BOOL)isMetalAvailable {
	return MTLCreateSystemDefaultDevice() != nil;
}

+ (MTKView *)createMetalView:(CGRect)frame {
	return [[MTKViewClass alloc] initWithFrame:frame];
}

+ (RTCMTLI420Renderer *)createI420Renderer {
	return [[RTCMTLI420RendererClass alloc] init];
}

- (void)configure {
	NSAssert([VideoMetalView isMetalAvailable], @"Metal not available on this device");
	self.wantsLayer = YES;
	self.layerContentsRedrawPolicy = NSViewLayerContentsRedrawDuringViewResize;
	_metalView = [VideoMetalView createMetalView:self.bounds];
	_metalView.delegate = self;
	_metalView.layer.cornerRadius = 4;
	_metalView.layer.backgroundColor = [NSColor clearColor].CGColor;
	_metalView.layer.contentsGravity = kCAGravityResizeAspectFill; //UIViewContentModeScaleAspectFill;
	[self addSubview:_metalView];
	_videoFrameSize = CGSizeZero;
	// _metalView.layer.affineTransform = CGAffineTransformMakeScale(-1.0, -1.0);
}

- (void)layout {
	[super layout];

	if (_shouldBeMirrored || _forceMirrored) {
		_metalView.layer.anchorPoint = NSMakePoint(1, 0);
		_metalView.layer.affineTransform = CGAffineTransformMakeScale(-1, 1);
		// _metalView.layer.transform = CATransform3DMakeScale(-1, 1, 1);
	} else {
		_metalView.layer.anchorPoint = NSMakePoint(0, 0);
		_metalView.layer.affineTransform = CGAffineTransformIdentity;
		//_metalView.layer.transform = CATransform3DIdentity;
	}

	CGRect bounds = self.bounds;
	_metalView.frame = bounds;
	if (!CGSizeEqualToSize(_videoFrameSize, CGSizeZero)) {
		_metalView.drawableSize = [self drawableSize];
	} else {
		_metalView.drawableSize = bounds.size;
	}
}

#pragma mark - MTKViewDelegate methods

- (void)drawInMTKView:(nonnull MTKView *)view {
	NSAssert(view == _metalView, @"Receiving draw callbacks from foreign instance.");
	RTCVideoFrame *videoFrame = _videoFrame;
	// Skip rendering if we've already rendered this frame.
	if (!videoFrame || videoFrame.timeStampNs == _lastFrameTimeNs) {
		return;
	}

	if (CGRectIsEmpty(view.bounds)) {
		return;
	}

	RTCMTLRenderer *renderer;

	if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
		RTCCVPixelBuffer *buffer = (RTCCVPixelBuffer*)videoFrame.buffer;

		if ([buffer isKindOfClass:[TGRTCCVPixelBuffer class]]) {
			bool shouldBeMirrored = ((TGRTCCVPixelBuffer *)buffer).shouldBeMirrored;
			if (shouldBeMirrored != _shouldBeMirrored) {
				_shouldBeMirrored = shouldBeMirrored;

				if (shouldBeMirrored || _forceMirrored) {
					_metalView.layer.anchorPoint = NSMakePoint(1, 0);
					_metalView.layer.affineTransform = CGAffineTransformMakeScale(-1, 1);
					// _metalView.layer.transform = CATransform3DMakeScale(-1, 1, 1);
				} else {
					_metalView.layer.anchorPoint = NSMakePoint(0, 0);
					_metalView.layer.affineTransform = CGAffineTransformIdentity;
					//_metalView.layer.transform = CATransform3DIdentity;
				}

				if (_didSetShouldBeMirrored) {
					if (_onIsMirroredUpdated) {
						_onIsMirroredUpdated(_shouldBeMirrored);
					}
				} else {
					_didSetShouldBeMirrored = true;
				}
			}
		}
	}
	if (!_rendererI420) {
		_rendererI420 = [VideoMetalView createI420Renderer];
		if (![_rendererI420 addRenderingDestination:_metalView]) {
			_rendererI420 = nil;
			RTCLogError(@"Failed to create I420 renderer");
			return;
		}
	}
	renderer = _rendererI420;

	renderer.rotationOverride = _rotationOverride;
	[renderer drawFrame:videoFrame];
	_lastFrameTimeNs = videoFrame.timeStampNs;

	if (!_firstFrameReceivedReported && _onFirstFrameReceived) {
		_firstFrameReceivedReported = true;
		_onFirstFrameReceived((float)videoFrame.width / (float)videoFrame.height);
	}
}

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}

#pragma mark -

- (void)setRotationOverride:(NSValue *)rotationOverride {
	_rotationOverride = rotationOverride;

	_metalView.drawableSize = [self drawableSize];
	[self setNeedsLayout:YES];
}

- (RTCVideoRotation)rtcFrameRotation {
	if (_rotationOverride) {
		RTCVideoRotation rotation;
		if (@available(macOS 10.13, *)) {
			[_rotationOverride getValue:&rotation size:sizeof(rotation)];
		} else {
			[_rotationOverride getValue:&rotation];
		}
		return rotation;
	}

	return _videoFrame.rotation;
}

- (CGSize)drawableSize {
	// Flip width/height if the rotations are not the same.
	CGSize videoFrameSize = _videoFrameSize;
	RTCVideoRotation frameRotation = [self rtcFrameRotation];

	BOOL useLandscape =
		(frameRotation == RTCVideoRotation_0) || (frameRotation == RTCVideoRotation_180);
	BOOL sizeIsLandscape = (_videoFrame.rotation == RTCVideoRotation_0) ||
		(_videoFrame.rotation == RTCVideoRotation_180);

	if (useLandscape == sizeIsLandscape) {
		return videoFrameSize;
	} else {
		return CGSizeMake(videoFrameSize.height, videoFrameSize.width);
	}
}

#pragma mark - RTCVideoRenderer

- (void)setSize:(CGSize)size {
	assert([NSThread isMainThread]);

	_videoFrameSize = size;
	CGSize drawableSize = [self drawableSize];

	_metalView.drawableSize = drawableSize;
	[self setNeedsLayout:YES];
	//[strongSelf.delegate videoView:self didChangeVideoSize:size];
}

- (void)renderFrame:(nullable RTCVideoFrame *)frame {
	assert([NSThread isMainThread]);

	if (!self.isEnabled) {
		return;
	}

	if (frame == nil) {
		RTCLogInfo(@"Incoming frame is nil. Exiting render callback.");
		return;
	}
	_videoFrame = frame;
}

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
	assert([NSThread isMainThread]);

	return _sink;
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)(float))onFirstFrameReceived {
	_onFirstFrameReceived = [onFirstFrameReceived copy];
	_firstFrameReceivedReported = false;
}

- (void)setInternalOrientation:(int)internalOrientation {
	if (_internalOrientation != internalOrientation) {
		_internalOrientation = internalOrientation;
		if (_onOrientationUpdated) {
			_onOrientationUpdated(internalOrientation);
		}
	}
}

- (void)internalSetOnOrientationUpdated:(void (^ _Nullable)(int))onOrientationUpdated {
	_onOrientationUpdated = [onOrientationUpdated copy];
}

- (void)internalSetOnIsMirroredUpdated:(void (^ _Nullable)(bool))onIsMirroredUpdated {
	_onIsMirroredUpdated = [onIsMirroredUpdated copy];
}

- (void)setIsForceMirrored:(BOOL)forceMirrored {
	_forceMirrored = forceMirrored;
	[self setNeedsLayout:YES];
}

@end
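
Aside: the macOS view mirrors by flipping the layer with scale(-1, 1) and moving the anchor point to the right edge, so the flipped layer still covers the same rectangle; the net effect on a pixel at horizontal position x is x -> width - x. A standalone sketch of that arithmetic (hypothetical types, not part of the diff):

#include <cassert>

// Minimal stand-in for a CGPoint-style type.
struct Vec2 { double x, y; };

// Mirror a point horizontally inside a layer of width `w`, which is what
// anchorPoint=(1,0) + CGAffineTransformMakeScale(-1, 1) achieves: x -> w - x.
Vec2 mirrorX(Vec2 p, double w) { return {w - p.x, p.y}; }

int main() {
	// In a 640-wide layer, the left edge maps onto the right edge and
	// the centre stays put, so the flipped image still fills the frame.
	assert(mirrorX({0, 10}, 640).x == 640);
	assert(mirrorX({320, 10}, 640).x == 320);
	return 0;
}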

@@ -1,40 +0,0 @@
#include "DesktopInterface.h"

#include "platform/tdesktop/VideoCapturerInterfaceImpl.h"
#include "platform/tdesktop/VideoCapturerTrackSource.h"

#include "api/video_codecs/builtin_video_encoder_factory.h"
#include "api/video_codecs/builtin_video_decoder_factory.h"
#include "api/video_track_source_proxy.h"

namespace tgcalls {

std::unique_ptr<webrtc::VideoEncoderFactory> DesktopInterface::makeVideoEncoderFactory() {
	return webrtc::CreateBuiltinVideoEncoderFactory();
}

std::unique_ptr<webrtc::VideoDecoderFactory> DesktopInterface::makeVideoDecoderFactory() {
	return webrtc::CreateBuiltinVideoDecoderFactory();
}

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> DesktopInterface::makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) {
	const auto videoTrackSource = VideoCapturerTrackSource::Create();
	return videoTrackSource
		? webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, videoTrackSource)
		: nullptr;
}

bool DesktopInterface::supportsEncoding(const std::string &codecName) {
	return (codecName == cricket::kH264CodecName)
		|| (codecName == cricket::kVp8CodecName);
}

std::unique_ptr<VideoCapturerInterface> DesktopInterface::makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) {
	return std::make_unique<VideoCapturerInterfaceImpl>(source, useFrontCamera, stateUpdated);
}

std::unique_ptr<PlatformInterface> CreatePlatformInterface() {
	return std::make_unique<DesktopInterface>();
}

} // namespace tgcalls
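
Aside: supportsEncoding above whitelists H.264 and VP8 by comparing against cricket's codec-name constants. The same check in a standalone sketch, with the constants inlined as stand-ins (not part of the diff):

#include <algorithm>
#include <array>
#include <string>

// Stand-ins for cricket::kH264CodecName and cricket::kVp8CodecName.
constexpr std::array<const char *, 2> kSupportedCodecs = {"H264", "VP8"};

bool supportsEncoding(const std::string &codecName) {
	// Linear scan is fine: the whitelist has two entries.
	return std::find(kSupportedCodecs.begin(), kSupportedCodecs.end(),
			codecName) != kSupportedCodecs.end();
}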

@@ -1,21 +0,0 @@
#ifndef TGCALLS_DESKTOP_INTERFACE_H
#define TGCALLS_DESKTOP_INTERFACE_H

#include "platform/PlatformInterface.h"
#include "VideoCapturerInterface.h"

namespace tgcalls {

class DesktopInterface : public PlatformInterface {
public:
	std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory() override;
	std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory() override;
	bool supportsEncoding(const std::string &codecName) override;
	rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) override;
	std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated, std::shared_ptr<PlatformContext> platformContext) override;
};

} // namespace tgcalls

#endif // TGCALLS_DESKTOP_INTERFACE_H

@@ -1,212 +0,0 @@
#include "VideoCameraCapturer.h"

#include "api/video/i420_buffer.h"
#include "api/video/video_frame_buffer.h"
#include "api/video/video_rotation.h"
#include "modules/video_capture/video_capture_factory.h"
#include "rtc_base/checks.h"
#include "rtc_base/logging.h"

#include <stdint.h>
#include <cmath>
#include <memory>
#include <algorithm>

namespace tgcalls {

VideoCameraCapturer::VideoCameraCapturer(const CreateTag &) {
}

VideoCameraCapturer::~VideoCameraCapturer() {
	destroy();
}

bool VideoCameraCapturer::init(
		size_t width,
		size_t height,
		size_t target_fps,
		size_t capture_device_index) {
	std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> device_info(
		webrtc::VideoCaptureFactory::CreateDeviceInfo());

	char device_name[256];
	char unique_name[256];
	if (device_info->GetDeviceName(static_cast<uint32_t>(capture_device_index),
			device_name, sizeof(device_name), unique_name,
			sizeof(unique_name)) != 0) {
		destroy();
		return false;
	}

	_module = webrtc::VideoCaptureFactory::Create(unique_name);
	if (!_module) {
		return false;
	}
	_module->RegisterCaptureDataCallback(this);

	device_info->GetCapability(_module->CurrentDeviceName(), 0, _capability);

	_capability.width = static_cast<int32_t>(width);
	_capability.height = static_cast<int32_t>(height);
	_capability.maxFPS = static_cast<int32_t>(target_fps);
	_capability.videoType = webrtc::VideoType::kI420;

	if (_module->StartCapture(_capability) != 0) {
		destroy();
		return false;
	}

	RTC_CHECK(_module->CaptureStarted());

	return true;
}

void VideoCameraCapturer::setState(VideoState state) {
	if (_state == state) {
		return;
	}
	_state = state;
	if (_state == VideoState::Inactive) {
		_module->StopCapture();
	} else {
		_module->StartCapture(_capability);
	}
}

void VideoCameraCapturer::setPreferredCaptureAspectRatio(float aspectRatio) {
	_aspectRatio = aspectRatio;
}

std::unique_ptr<VideoCameraCapturer> VideoCameraCapturer::Create(
		size_t width,
		size_t height,
		size_t target_fps,
		size_t capture_device_index) {
	auto result = std::make_unique<VideoCameraCapturer>(CreateTag{});
	if (!result->init(width, height, target_fps, capture_device_index)) {
		RTC_LOG(LS_WARNING)
			<< "Failed to create VideoCameraCapturer("
			<< "w = " << width << ", "
			<< "h = " << height << ", "
			<< "fps = " << target_fps << ")";
		return nullptr;
	}
	return result;
}

void VideoCameraCapturer::destroy() {
	if (!_module) {
		return;
	}

	_module->StopCapture();
	_module->DeRegisterCaptureDataCallback();
	_module = nullptr;
}

void VideoCameraCapturer::OnFrame(const webrtc::VideoFrame &frame) {
	if (_state != VideoState::Active) {
		return;
	}
	//int cropped_width = 0;
	//int cropped_height = 0;
	//int out_width = 0;
	//int out_height = 0;

	//if (!_videoAdapter.AdaptFrameResolution(
	//	frame.width(), frame.height(), frame.timestamp_us() * 1000,
	//	&cropped_width, &cropped_height, &out_width, &out_height)) {
	//	// Drop frame in order to respect frame rate constraint.
	//	return;
	//}
	//if (out_height != frame.height() || out_width != frame.width()) {
	//	// Video adapter has requested a down-scale. Allocate a new buffer and
	//	// return scaled version.
	//	// For simplicity, only scale here without cropping.
	//	rtc::scoped_refptr<webrtc::I420Buffer> scaled_buffer =
	//		webrtc::I420Buffer::Create(out_width, out_height);
	//	scaled_buffer->ScaleFrom(*frame.video_frame_buffer()->ToI420());
	//	webrtc::VideoFrame::Builder new_frame_builder =
	//		webrtc::VideoFrame::Builder()
	//			.set_video_frame_buffer(scaled_buffer)
	//			.set_rotation(webrtc::kVideoRotation_0)
	//			.set_timestamp_us(frame.timestamp_us())
	//			.set_id(frame.id());
	//	if (frame.has_update_rect()) {
	//		webrtc::VideoFrame::UpdateRect new_rect = frame.update_rect().ScaleWithFrame(
	//			frame.width(), frame.height(), 0, 0, frame.width(), frame.height(),
	//			out_width, out_height);
	//		new_frame_builder.set_update_rect(new_rect);
	//	}
	//	_broadcaster.OnFrame(new_frame_builder.build());

	//} else {
	//	// No adaptations needed, just return the frame as is.
	//	_broadcaster.OnFrame(frame);
	//}

	if (_aspectRatio <= 0.001) {
		_broadcaster.OnFrame(frame);
		return;
	}
	const auto originalWidth = frame.width();
	const auto originalHeight = frame.height();
	auto width = (originalWidth > _aspectRatio * originalHeight)
		? int(std::round(_aspectRatio * originalHeight))
		: originalWidth;
	auto height = (originalWidth > _aspectRatio * originalHeight)
		? originalHeight
		: int(std::round(originalHeight / _aspectRatio));
	if ((width >= originalWidth && height >= originalHeight) || !width || !height) {
		_broadcaster.OnFrame(frame);
		return;
	}

	width &= ~int(1);
	height &= ~int(1);
	const auto left = (originalWidth - width) / 2;
	const auto top = (originalHeight - height) / 2;
	rtc::scoped_refptr<webrtc::I420Buffer> croppedBuffer =
		webrtc::I420Buffer::Create(width, height);
	croppedBuffer->CropAndScaleFrom(
		*frame.video_frame_buffer()->ToI420(),
		left,
		top,
		width,
		height);
	webrtc::VideoFrame::Builder croppedBuilder =
		webrtc::VideoFrame::Builder()
			.set_video_frame_buffer(croppedBuffer)
			.set_rotation(webrtc::kVideoRotation_0)
			.set_timestamp_us(frame.timestamp_us())
			.set_id(frame.id());
	if (frame.has_update_rect()) {
		croppedBuilder.set_update_rect(frame.update_rect().ScaleWithFrame(
			frame.width(),
			frame.height(),
			left,
			top,
			width,
			height,
			width,
			height));
	}
	_broadcaster.OnFrame(croppedBuilder.build());
}

void VideoCameraCapturer::AddOrUpdateSink(
		rtc::VideoSinkInterface<webrtc::VideoFrame> *sink,
		const rtc::VideoSinkWants &wants) {
	_broadcaster.AddOrUpdateSink(sink, wants);
	updateVideoAdapter();
}

void VideoCameraCapturer::RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame> *sink) {
	_broadcaster.RemoveSink(sink);
	updateVideoAdapter();
}

void VideoCameraCapturer::updateVideoAdapter() {
	//_videoAdapter.OnSinkWants(_broadcaster.wants());
}

} // namespace tgcalls
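
Aside: the aspect-ratio crop in OnFrame keeps the dimension that already satisfies the preferred ratio, derives the other from it, centres the crop, and masks both sizes to even numbers so the I420 chroma planes stay whole. One detail worth flagging: the portrait branch of the deleted code computes the new height as originalHeight / _aspectRatio; with the ratio defined as width over height, the conventional form is originalWidth / _aspectRatio, which is what this standalone sketch uses (a sketch, not part of the diff):

#include <cassert>
#include <cmath>
#include <utility>

// Centre-crop (w, h) toward a preferred width/height ratio, forcing even
// dimensions so I420 chroma subsampling keeps whole pixels.
std::pair<int, int> cropToAspect(int w, int h, float ratio) {
	int outW = w;
	int outH = h;
	if (w > ratio * h) {
		outW = int(std::round(ratio * h)); // Frame too wide: trim width.
	} else {
		outH = int(std::round(w / ratio)); // Frame too tall: trim height.
	}
	return {outW & ~1, outH & ~1};
}

int main() {
	// 640x480 cropped to 16:9 keeps the full width and trims the height.
	auto [w, h] = cropToAspect(640, 480, 16.0f / 9.0f);
	assert(w == 640 && h == 360);
	// 1920x800 cropped to 16:9 keeps the height and trims the width.
	auto [w2, h2] = cropToAspect(1920, 800, 16.0f / 9.0f);
	assert(w2 == 1422 && h2 == 800);
	return 0;
}

The deleted code is partially protected either way: if the computed crop is not strictly smaller than the frame, it falls through and broadcasts the frame unmodified.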

@@ -1,65 +0,0 @@
#ifndef TGCALLS_VIDEO_CAMERA_CAPTURER_H
#define TGCALLS_VIDEO_CAMERA_CAPTURER_H

#include "api/scoped_refptr.h"
#include "api/video/video_frame.h"
#include "api/video/video_source_interface.h"
#include "media/base/video_adapter.h"
#include "media/base/video_broadcaster.h"
#include "modules/video_capture/video_capture.h"

#include "VideoCaptureInterface.h"

#include <memory>
#include <vector>
#include <stddef.h>

namespace tgcalls {

class VideoCameraCapturer :
	public rtc::VideoSourceInterface<webrtc::VideoFrame>,
	public rtc::VideoSinkInterface<webrtc::VideoFrame> {
private:
	enum CreateTag {
	};

public:
	VideoCameraCapturer(const CreateTag &);
	~VideoCameraCapturer();

	static std::unique_ptr<VideoCameraCapturer> Create(size_t width,
		size_t height,
		size_t target_fps,
		size_t capture_device_index);

	void setState(VideoState state);
	void setPreferredCaptureAspectRatio(float aspectRatio);

	void AddOrUpdateSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink,
		const rtc::VideoSinkWants& wants) override;
	void RemoveSink(rtc::VideoSinkInterface<webrtc::VideoFrame>* sink) override;

	void OnFrame(const webrtc::VideoFrame &frame) override;

private:
	bool init(size_t width,
		size_t height,
		size_t target_fps,
		size_t capture_device_index);
	void destroy();
	void updateVideoAdapter();

	rtc::VideoBroadcaster _broadcaster;
	//cricket::VideoAdapter _videoAdapter;

	rtc::scoped_refptr<webrtc::VideoCaptureModule> _module;
	webrtc::VideoCaptureCapability _capability;

	VideoState _state = VideoState::Active;
	float _aspectRatio = 0.;
};

} // namespace tgcalls

#endif

@@ -1,52 +0,0 @@
#include "VideoCapturerInterfaceImpl.h"

#include "VideoCapturerTrackSource.h"
#include "VideoCameraCapturer.h"

#include "api/video_track_source_proxy.h"

namespace tgcalls {
namespace {

static VideoCameraCapturer *GetCapturer(
		const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
	const auto proxy = static_cast<webrtc::VideoTrackSourceProxy*>(nativeSource.get());
	const auto internal = static_cast<VideoCapturerTrackSource*>(proxy->internal());
	return internal->capturer();
}

} // namespace

VideoCapturerInterfaceImpl::VideoCapturerInterfaceImpl(
		rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source,
		bool useFrontCamera,
		std::function<void(VideoState)> stateUpdated)
: _source(source)
, _stateUpdated(stateUpdated) {
}

VideoCapturerInterfaceImpl::~VideoCapturerInterfaceImpl() {
}

void VideoCapturerInterfaceImpl::setState(VideoState state) {
	GetCapturer(_source)->setState(state);
	if (_stateUpdated) {
		_stateUpdated(state);
	}
}

void VideoCapturerInterfaceImpl::setPreferredCaptureAspectRatio(float aspectRatio) {
	GetCapturer(_source)->setPreferredCaptureAspectRatio(aspectRatio);
}

void VideoCapturerInterfaceImpl::setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
	if (_uncroppedSink != nullptr) {
		_source->RemoveSink(_uncroppedSink.get());
	}
	_uncroppedSink = sink;
	if (_uncroppedSink != nullptr) {
		_source->AddOrUpdateSink(_uncroppedSink.get(), rtc::VideoSinkWants());
	}
}

} // namespace tgcalls
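
Aside: setUncroppedOutput above swaps sinks in remove-then-add order, so the source never holds a stale registration and replacing or clearing the slot is always safe. The pattern in a generic standalone sketch (hypothetical types, not part of the diff):

#include <memory>
#include <set>

// Hypothetical minimal source/sink pair, standing in for the WebRTC types.
struct Sink { virtual ~Sink() = default; };

struct Source {
	std::set<Sink *> sinks;
	void add(Sink *s) { sinks.insert(s); }
	void remove(Sink *s) { sinks.erase(s); }
};

// A slot that owns at most one registered sink; replacing it always
// unregisters the old one first, so `source.sinks` never leaks an entry.
class SinkSlot {
public:
	explicit SinkSlot(Source &source) : _source(source) {}
	~SinkSlot() { set(nullptr); }
	void set(std::shared_ptr<Sink> sink) {
		if (_sink) {
			_source.remove(_sink.get());
		}
		_sink = std::move(sink);
		if (_sink) {
			_source.add(_sink.get());
		}
	}

private:
	Source &_source;
	std::shared_ptr<Sink> _sink;
};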

@@ -1,28 +0,0 @@
#ifndef TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H
#define TGCALLS_VIDEO_CAPTURER_INTERFACE_IMPL_H

#include "VideoCapturerInterface.h"

#include "api/media_stream_interface.h"

namespace tgcalls {

class VideoCapturerInterfaceImpl final : public VideoCapturerInterface {
public:
	VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(VideoState)> stateUpdated);
	~VideoCapturerInterfaceImpl() override;

	void setState(VideoState state) override;
	void setPreferredCaptureAspectRatio(float aspectRatio) override;
	void setUncroppedOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;

private:
	rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;
	std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _uncroppedSink;
	std::function<void(VideoState)> _stateUpdated;
};

} // namespace tgcalls

#endif

@@ -1,46 +0,0 @@
#include "VideoCapturerTrackSource.h"

#include "VideoCameraCapturer.h"

#include "modules/video_capture/video_capture_factory.h"

namespace tgcalls {

rtc::scoped_refptr<VideoCapturerTrackSource> VideoCapturerTrackSource::Create() {
	const size_t kWidth = 640;
	const size_t kHeight = 480;
	const size_t kFps = 30;

	std::unique_ptr<webrtc::VideoCaptureModule::DeviceInfo> info(
		webrtc::VideoCaptureFactory::CreateDeviceInfo());
	if (!info) {
		return nullptr;
	}
	int num_devices = info->NumberOfDevices();

	for (int i = 0; i < num_devices; ++i) {
		if (auto capturer = VideoCameraCapturer::Create(kWidth, kHeight, kFps, i)) {
			return new rtc::RefCountedObject<VideoCapturerTrackSource>(
				CreateTag{},
				std::move(capturer));
		}
	}
	return nullptr;
}

VideoCapturerTrackSource::VideoCapturerTrackSource(
	const CreateTag &,
	std::unique_ptr<VideoCameraCapturer> capturer) :
VideoTrackSource(/*remote=*/false),
_capturer(std::move(capturer)) {
}

VideoCameraCapturer *VideoCapturerTrackSource::capturer() const {
	return _capturer.get();
}

rtc::VideoSourceInterface<webrtc::VideoFrame>* VideoCapturerTrackSource::source() {
	return _capturer.get();
}

} // namespace tgcalls
@@ -1,34 +0,0 @@
#ifndef TGCALLS_VIDEO_CAPTURER_TRACK_SOURCE_H
#define TGCALLS_VIDEO_CAPTURER_TRACK_SOURCE_H

#include "pc/video_track_source.h"
#include "VideoCameraCapturer.h"

namespace tgcalls {

class VideoCameraCapturer;

class VideoCapturerTrackSource : public webrtc::VideoTrackSource {
private:
	struct CreateTag {
	};

public:
	static rtc::scoped_refptr<VideoCapturerTrackSource> Create();

	VideoCapturerTrackSource(
		const CreateTag &,
		std::unique_ptr<VideoCameraCapturer> capturer);

	VideoCameraCapturer *capturer() const;

private:
	rtc::VideoSourceInterface<webrtc::VideoFrame> *source() override;

	std::unique_ptr<VideoCameraCapturer> _capturer;

};

} // namespace tgcalls

#endif
@@ -239,7 +239,6 @@ public:
	_remoteBatteryLevelIsLowUpdated(descriptor.remoteBatteryLevelIsLowUpdated),
	_remotePrefferedAspectRatioUpdated(descriptor.remotePrefferedAspectRatioUpdated),
	_videoCapture(descriptor.videoCapture),
	_localPreferredVideoAspectRatio(descriptor.config.preferredAspectRatio),
	_state(State::Reconnecting),
	_videoState(_videoCapture ? VideoState::Active : VideoState::Inactive),
	_platformContext(descriptor.platformContext) {

@@ -437,6 +436,9 @@ public:
	}
	beginSendingVideo();
}

void setRequestedVideoAspect(float aspect) {
}

void receiveSignalingData(const std::vector<uint8_t> &data) {
	if (true) {

@@ -863,7 +865,7 @@ private:
		});
	});

	_localVideoTrack = _nativeFactory->CreateVideoTrack("video0", videoCaptureImpl->_videoSource);
	_localVideoTrack = _nativeFactory->CreateVideoTrack("video0", videoCaptureImpl->source());
	_peerConnection->AddTrack(_localVideoTrack, _streamIds);
	for (auto &it : _peerConnection->GetTransceivers()) {
		if (it->media_type() == cricket::MediaType::MEDIA_TYPE_VIDEO) {

@@ -970,6 +972,12 @@ void InstanceImplReference::setVideoCapture(std::shared_ptr<VideoCaptureInterfac
	});
}

void InstanceImplReference::setRequestedVideoAspect(float aspect) {
	internal_->perform(RTC_FROM_HERE, [aspect](InstanceImplReferenceInternal *internal) {
		internal->setRequestedVideoAspect(aspect);
	});
}

void InstanceImplReference::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
	internal_->perform(RTC_FROM_HERE, [sink](InstanceImplReferenceInternal *internal) {
		internal->setIncomingVideoOutput(sink);
@@ -18,6 +18,10 @@ public:
	void setNetworkType(NetworkType networkType) override;
	void setMuteMicrophone(bool muteMicrophone) override;
	void setVideoCapture(std::shared_ptr<VideoCaptureInterface> videoCapture) override;
	void setRequestedVideoAspect(float aspect) override;
	bool supportsVideo() override {
		return true;
	}
	void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override;
	void setAudioOutputGainControlEnabled(bool enabled) override;
	void setEchoCancellationStrength(int strength) override;
@@ -18,7 +18,7 @@ public class BuildVars {
    public static boolean LOGS_ENABLED = false;
    public static boolean USE_CLOUD_STRINGS = true;
    public static boolean CHECK_UPDATES = true;
    public static int BUILD_VERSION = 2092;
    public static int BUILD_VERSION = 2094;
    public static String BUILD_VERSION_STRING = "7.1.0";
    public static int APP_ID = 4;
    public static String APP_HASH = "014b35b6184100b085b0d0572f9b5103";
@@ -5269,7 +5269,7 @@ public class MessageObject {
        if (scheduled && message.date < ConnectionsManager.getInstance(currentAccount).getCurrentTime() - 60) {
            return false;
        }
        if (chat != null && (chat.left || chat.kicked)) {
        if (chat != null && (chat.left || chat.kicked) && (!chat.megagroup || !chat.has_link)) {
            return false;
        }
        if (message == null || message.peer_id == null || message.media != null && (isRoundVideoDocument(message.media.document) || isStickerDocument(message.media.document) || isAnimatedStickerDocument(message.media.document, true) || isLocationMessage(message)) || message.action != null && !(message.action instanceof TLRPC.TL_messageActionEmpty) || isForwardedMessage(message) || message.via_bot_id != 0 || message.id < 0) {
@@ -3842,7 +3842,7 @@ public class MessagesController extends BaseController implements NotificationCe
            }
        }
    } else {
        markDialogMessageAsDeleted(messages, channelId);
        markDialogMessageAsDeleted(messages, -channelId);
    }
    getMessagesStorage().markMessagesAsDeleted(messages, true, channelId, forAll, false);
    getMessagesStorage().updateDialogsWithDeletedMessages(messages, null, true, channelId);

@@ -10769,10 +10769,10 @@ public class MessagesController extends BaseController implements NotificationCe
        }
        if (chat_id != 0) {
            chat = chatsDict.get(chat_id);
            if (chat == null) {
            if (chat == null || chat.min) {
                chat = getChat(chat_id);
            }
            if (chat == null) {
            if (chat == null || chat.min) {
                chat = getMessagesStorage().getChatSync(chat_id);
                putChat(chat, true);
            }

@@ -10932,7 +10932,7 @@ public class MessagesController extends BaseController implements NotificationCe
            messages.put(message.dialog_id, arr);
        }
        arr.add(obj);
        if ((!obj.isOut() || obj.messageOwner.from_scheduled) && obj.isUnread()) {
        if ((!obj.isOut() || obj.messageOwner.from_scheduled) && obj.isUnread() && !ChatObject.isNotInChat(chat) && chat.min) {
            if (pushMessages == null) {
                pushMessages = new ArrayList<>();
            }
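Note on the chat.min checks above: a "min" chat is the stripped-down stub the server attaches to updates, and it is now treated like a cache miss. A condensed sketch of the resulting lookup order (names taken from the hunk; illustrative only):

    // Fall through from the update's chat dictionary to the in-memory
    // cache, then to storage, whenever the entry is absent or only "min".
    TLRPC.Chat chat = chatsDict.get(chat_id);
    if (chat == null || chat.min) {
        chat = getChat(chat_id);
    }
    if (chat == null || chat.min) {
        chat = getMessagesStorage().getChatSync(chat_id);
        putChat(chat, true);
    }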
@@ -2771,7 +2771,7 @@ public class MessagesStorage extends BaseController {
        getChatsInternal(TextUtils.join(",", chatsToLoad), chats);
        for (int a = 0; a < chats.size(); a++) {
            TLRPC.Chat chat = chats.get(a);
            if (chat != null && (ChatObject.isNotInChat(chat) || chat.migrated_to != null)) {
            if (chat != null && (ChatObject.isNotInChat(chat) || chat.min || chat.migrated_to != null)) {
                long did = -chat.id;
                database.executeFast("UPDATE dialogs SET unread_count = 0 WHERE did = " + did).stepThis().dispose();
                database.executeFast(String.format(Locale.US, "UPDATE messages SET read_state = 3 WHERE uid = %d AND mid > 0 AND read_state IN(0,2) AND out = 0", did)).stepThis().dispose();
@@ -1221,6 +1221,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
        }
        int lower_id = (int) peer;
        int sendResult = 0;
        int myId = getUserConfig().getClientUserId();
        if (lower_id != 0) {
            final TLRPC.Peer peer_id = getMessagesController().getPeer((int) peer);
            boolean isMegagroup = false;

@@ -1229,6 +1230,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
            boolean canSendMedia = true;
            boolean canSendPolls = true;
            boolean canSendPreview = true;
            String rank = null;
            int linkedToGroup = 0;
            TLRPC.Chat chat;
            if (lower_id > 0) {

@@ -1264,7 +1266,6 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
            LongSparseArray<TLRPC.Message> messagesByRandomIds = new LongSparseArray<>();
            TLRPC.InputPeer inputPeer = getMessagesController().getInputPeer(lower_id);
            long lastDialogId = 0;
            int myId = getUserConfig().getClientUserId();
            final boolean toMyself = peer == myId;
            long lastGroupedId;
            for (int a = 0; a < messages.size(); a++) {

@@ -1294,7 +1295,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                }

                final TLRPC.Message newMsg = new TLRPC.TL_message();
                boolean forwardFromSaved = msgObj.getDialogId() == myId && msgObj.isFromUser() && msgObj.messageOwner.from_id.user_id == getUserConfig().getClientUserId();
                boolean forwardFromSaved = msgObj.getDialogId() == myId && msgObj.isFromUser() && msgObj.messageOwner.from_id.user_id == myId;
                if (msgObj.isForwarded()) {
                    newMsg.fwd_from = new TLRPC.TL_messageFwdHeader();
                    if ((msgObj.messageOwner.fwd_from.flags & 1) != 0) {

@@ -1446,16 +1447,20 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                if (peer_id.channel_id != 0 && !isMegagroup) {
                    if (isSignature) {
                        newMsg.from_id = new TLRPC.TL_peerUser();
                        newMsg.from_id.user_id = getUserConfig().getClientUserId();
                        newMsg.from_id.user_id = myId;
                    } else {
                        newMsg.from_id = peer_id;
                    }
                    newMsg.post = true;
                } else if (ChatObject.shouldSendAnonymously(chat)) {
                    newMsg.from_id = peer_id;
                    if (rank != null) {
                        newMsg.post_author = rank;
                        newMsg.flags |= 65536;
                    }
                } else {
                    newMsg.from_id = new TLRPC.TL_peerUser();
                    newMsg.from_id.user_id = getUserConfig().getClientUserId();
                    newMsg.from_id.user_id = myId;
                    newMsg.flags |= TLRPC.MESSAGE_FLAG_HAS_FROM_ID;
                }
                if (newMsg.random_id == 0) {

@@ -1609,6 +1614,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                final int oldId = newMsgObj1.id;
                final ArrayList<TLRPC.Message> sentMessages = new ArrayList<>();
                sentMessages.add(message);
                msgObj1.messageOwner.post_author = message.post_author;
                updateMediaPaths(msgObj1, message, message.id, null, true);
                int existFlags = msgObj1.getMediaExistanceFlags();
                newMsgObj1.id = message.id;

@@ -2583,9 +2589,11 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
        boolean isChannel = false;
        boolean forceNoSoundVideo = false;
        boolean anonymously = false;
        String rank = null;
        int linkedToGroup = 0;
        TLRPC.EncryptedChat encryptedChat = null;
        TLRPC.InputPeer sendToPeer = lower_id != 0 ? getMessagesController().getInputPeer(lower_id) : null;
        int myId = getUserConfig().getClientUserId();
        if (lower_id == 0) {
            encryptedChat = getMessagesController().getEncryptedChat(high_id);
            if (encryptedChat == null) {

@@ -2886,9 +2894,13 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                newMsg.from_id.channel_id = sendToPeer.channel_id;
            } else if (anonymously) {
                newMsg.from_id = getMessagesController().getPeer(lower_id);
                if (rank != null) {
                    newMsg.post_author = rank;
                    newMsg.flags |= 65536;
                }
            } else {
                newMsg.from_id = new TLRPC.TL_peerUser();
                newMsg.from_id.user_id = getUserConfig().getClientUserId();
                newMsg.from_id.user_id = myId;
                newMsg.flags |= TLRPC.MESSAGE_FLAG_HAS_FROM_ID;
            }
            getUserConfig().saveConfig(false);

@@ -2925,7 +2937,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                    newMsg.post = true;
                    if (chat.signatures) {
                        newMsg.from_id = new TLRPC.TL_peerUser();
                        newMsg.from_id.user_id = getUserConfig().getClientUserId();
                        newMsg.from_id.user_id = myId;
                    }
                }
            }

@@ -2975,7 +2987,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
            }
        } else {
            newMsg.peer_id = new TLRPC.TL_peerUser();
            if (encryptedChat.participant_id == getUserConfig().getClientUserId()) {
            if (encryptedChat.participant_id == myId) {
                newMsg.peer_id.user_id = encryptedChat.admin_id;
            } else {
                newMsg.peer_id.user_id = encryptedChat.participant_id;

@@ -4786,6 +4798,7 @@ public class SendMessagesHelper extends BaseController implements NotificationCe
                }
                message.unread = value < message.id;
            }
            msgObj.messageOwner.post_author = message.post_author;
            updateMediaPaths(msgObj, message, message.id, originalPath, false);
            existFlags = msgObj.getMediaExistanceFlags();
            newMsgObj.id = message.id;
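The SendMessagesHelper hunks above converge on one sender-selection rule; a condensed sketch, assuming the same locals the hunks use (peer_id, chat, rank, isSignature, and the newly cached myId):

    int myId = getUserConfig().getClientUserId();   // cached once per send
    if (peer_id.channel_id != 0 && !isMegagroup) {
        if (isSignature) {
            newMsg.from_id = new TLRPC.TL_peerUser();
            newMsg.from_id.user_id = myId;          // signed broadcast post
        } else {
            newMsg.from_id = peer_id;
        }
        newMsg.post = true;
    } else if (ChatObject.shouldSendAnonymously(chat)) {
        newMsg.from_id = peer_id;                   // send as the group itself
        if (rank != null) {
            newMsg.post_author = rank;              // surface the admin rank as author
            newMsg.flags |= 65536;
        }
    } else {
        newMsg.from_id = new TLRPC.TL_peerUser();
        newMsg.from_id.user_id = myId;
        newMsg.flags |= TLRPC.MESSAGE_FLAG_HAS_FROM_ID;
    }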
@@ -15,7 +15,7 @@ import java.util.List;

public final class Instance {

    public static final List<String> AVAILABLE_VERSIONS = Build.VERSION.SDK_INT >= 18 ? Arrays.asList("2.7.7", "2.4.4") : Arrays.asList("2.4.4");
    public static final List<String> AVAILABLE_VERSIONS = Build.VERSION.SDK_INT >= 18 ? Arrays.asList(/*"3.0.0", */"2.7.7", "2.4.4") : Arrays.asList("2.4.4");

    public static final int AUDIO_STATE_MUTED = 0;
    public static final int AUDIO_STATE_ACTIVE = 1;

@@ -153,9 +153,11 @@ public final class Instance {
        public final boolean enableAgc;
        public final boolean enableCallUpgrade;
        public final String logPath;
        public final String statsLogPath;
        public final int maxApiLayer;
        public final boolean enableSm;

        public Config(double initializationTimeout, double receiveTimeout, int dataSaving, boolean enableP2p, boolean enableAec, boolean enableNs, boolean enableAgc, boolean enableCallUpgrade, String logPath, int maxApiLayer) {
        public Config(double initializationTimeout, double receiveTimeout, int dataSaving, boolean enableP2p, boolean enableAec, boolean enableNs, boolean enableAgc, boolean enableCallUpgrade, boolean enableSm, String logPath, String statsLogPath, int maxApiLayer) {
            this.initializationTimeout = initializationTimeout;
            this.receiveTimeout = receiveTimeout;
            this.dataSaving = dataSaving;

@@ -165,7 +167,9 @@ public final class Instance {
            this.enableAgc = enableAgc;
            this.enableCallUpgrade = enableCallUpgrade;
            this.logPath = logPath;
            this.statsLogPath = statsLogPath;
            this.maxApiLayer = maxApiLayer;
            this.enableSm = enableSm;
        }

        @Override

@@ -180,7 +184,9 @@ public final class Instance {
                    ", enableAgc=" + enableAgc +
                    ", enableCallUpgrade=" + enableCallUpgrade +
                    ", logPath='" + logPath + '\'' +
                    ", statsLogPath='" + statsLogPath + '\'' +
                    ", maxApiLayer=" + maxApiLayer +
                    ", enableSm=" + enableSm +
                    '}';
        }
    }

@@ -328,6 +334,7 @@ public final class Instance {

        public final boolean useSystemNs;
        public final boolean useSystemAec;
        public final boolean enableStunMarking;
        public final double hangupUiTimeout;

        public final boolean enable_vp8_encoder;

@@ -345,6 +352,7 @@ public final class Instance {
            this.jsonObject = jsonObject;
            this.useSystemNs = jsonObject.optBoolean("use_system_ns", true);
            this.useSystemAec = jsonObject.optBoolean("use_system_aec", true);
            this.enableStunMarking = jsonObject.optBoolean("voip_enable_stun_marking", false);
            this.hangupUiTimeout = jsonObject.optDouble("hangup_ui_timeout", 5);

            this.enable_vp8_encoder = jsonObject.optBoolean("enable_vp8_encoder", true);
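For reference, a minimal sketch of a call site for the widened constructor; serverConfig, logFilePath and call are assumed from a VoIPService-like caller (see the hunks below), and the literal values are placeholders:

    // Argument order of the new constructor:
    // initializationTimeout, receiveTimeout, dataSaving, enableP2p, enableAec,
    // enableNs, enableAgc, enableCallUpgrade, enableSm, logPath, statsLogPath, maxApiLayer
    Instance.Config config = new Instance.Config(
            initializationTimeout, receiveTimeout, voipDataSaving,
            call.p2p_allowed, enableAec, enableNs,
            /* enableAgc */ true, /* enableCallUpgrade */ false,
            serverConfig.enableStunMarking,        // new: STUN marking flag from server config
            logFilePath, /* statsLogPath */ "",    // new: separate stats log path
            call.protocol.max_layer);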
@@ -93,9 +93,9 @@ public class NativeInstance {
    }

    private static native long makeNativeInstance(String version, NativeInstance instance, Instance.Config config, String persistentStateFilePath, Instance.Endpoint[] endpoints, Instance.Proxy proxy, int networkType, Instance.EncryptionKey encryptionKey, VideoSink remoteSink, long videoCapturer, float aspectRatio);
    public static native long createVideoCapturer(VideoSink localSink);
    public static native long createVideoCapturer(VideoSink localSink, boolean front);
    public static native void setVideoStateCapturer(long videoCapturer, int videoState);
    public static native void switchCameraCapturer(long videoCapturer);
    public static native void switchCameraCapturer(long videoCapturer, boolean front);
    public static native void destroyVideoCapturer(long videoCapturer);

    public native void setGlobalServerConfig(String serverConfigJson);

@@ -111,8 +111,8 @@ public class NativeInstance {
    public native Instance.TrafficStats getTrafficStats();
    public native byte[] getPersistentState();
    private native void stopNative();
    public native void setupOutgoingVideo(VideoSink localSink);
    public native void switchCamera();
    public native void setupOutgoingVideo(VideoSink localSink, boolean front);
    public native void switchCamera(boolean front);
    public native void setVideoState(int videoState);
    public native void onSignalingDataReceive(byte[] data);
}
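A minimal usage sketch of the camera-aware natives above, assuming a VideoSink named localSink and a caller-tracked boolean isFrontFaceCamera (both appear in the VoIPService hunks below):

    // Create the capturer already pointed at the requested camera.
    long capturer = NativeInstance.createVideoCapturer(localSink, isFrontFaceCamera);
    NativeInstance.setVideoStateCapturer(capturer, Instance.VIDEO_STATE_ACTIVE);
    // Switching now names the target facing instead of blindly toggling.
    NativeInstance.switchCameraCapturer(capturer, !isFrontFaceCamera);
    // Release the native object when done.
    NativeInstance.destroyVideoCapturer(capturer);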
@@ -174,7 +174,7 @@ public class VoIPService extends VoIPBaseService {
        }

        if (videoCall) {
            videoCapturer = NativeInstance.createVideoCapturer(localSink);
            videoCapturer = NativeInstance.createVideoCapturer(localSink, isFrontFaceCamera);
            videoState = Instance.VIDEO_STATE_ACTIVE;
            if (!isBtHeadsetConnected && !isHeadsetPlugged) {
                setAudioOutput(0);

@@ -219,7 +219,7 @@ public class VoIPService extends VoIPBaseService {
            isVideoAvailable = true;
        }
        if (videoCall && (Build.VERSION.SDK_INT < 23 || checkSelfPermission(Manifest.permission.CAMERA) == PackageManager.PERMISSION_GRANTED)) {
            videoCapturer = NativeInstance.createVideoCapturer(localSink);
            videoCapturer = NativeInstance.createVideoCapturer(localSink, isFrontFaceCamera);
            videoState = Instance.VIDEO_STATE_ACTIVE;
        } else {
            videoState = Instance.VIDEO_STATE_INACTIVE;

@@ -629,18 +629,18 @@ public class VoIPService extends VoIPBaseService {
        if (tgVoip == null) {
            return;
        }
        tgVoip.setupOutgoingVideo(localSink);
        tgVoip.setupOutgoingVideo(localSink, isFrontFaceCamera);
    }

    public void switchCamera() {
        if (tgVoip == null || switchingCamera) {
            if (videoCapturer != 0 && !switchingCamera) {
                NativeInstance.switchCameraCapturer(videoCapturer);
                NativeInstance.switchCameraCapturer(videoCapturer, !isFrontFaceCamera);
            }
            return;
        }
        switchingCamera = true;
        tgVoip.switchCamera();
        tgVoip.switchCamera(!isFrontFaceCamera);
    }

    public void setVideoState(int videoState) {

@@ -649,7 +649,7 @@ public class VoIPService extends VoIPBaseService {
            this.videoState = videoState;
            NativeInstance.setVideoStateCapturer(videoCapturer, videoState);
        } else if (videoState == Instance.VIDEO_STATE_ACTIVE && currentState != STATE_BUSY && currentState != STATE_ENDED) {
            videoCapturer = NativeInstance.createVideoCapturer(localSink);
            videoCapturer = NativeInstance.createVideoCapturer(localSink, isFrontFaceCamera);
            this.videoState = Instance.VIDEO_STATE_ACTIVE;
        }
        return;

@@ -1085,8 +1085,9 @@ public class VoIPService extends VoIPBaseService {
        final Instance.ServerConfig serverConfig = Instance.getGlobalServerConfig();
        final boolean enableAec = !(sysAecAvailable && serverConfig.useSystemAec);
        final boolean enableNs = !(sysNsAvailable && serverConfig.useSystemNs);
        final String logFilePath = BuildVars.DEBUG_VERSION ? VoIPHelper.getLogFilePath("voip" + call.id) : VoIPHelper.getLogFilePath(call.id);
        final Instance.Config config = new Instance.Config(initializationTimeout, receiveTimeout, voipDataSaving, call.p2p_allowed, enableAec, enableNs, true, false, logFilePath, call.protocol.max_layer);
        final String logFilePath = BuildVars.DEBUG_VERSION ? VoIPHelper.getLogFilePath("voip" + call.id) : VoIPHelper.getLogFilePath(call.id, false);
        final String statisLogFilePath = "";
        final Instance.Config config = new Instance.Config(initializationTimeout, receiveTimeout, voipDataSaving, call.p2p_allowed, enableAec, enableNs, true, false, serverConfig.enableStunMarking, logFilePath, statisLogFilePath, call.protocol.max_layer);

        // persistent state
        final String persistentStateFilePath = new File(ApplicationLoader.applicationContext.getFilesDir(), "voip_persistent_state.json").getAbsolutePath();
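Condensed sketch of the switch-camera dispatch introduced above: before the native instance exists, the raw capturer handle is used; once tgVoip is live, the instance method is used; both receive the negated current facing.

    public void switchCamera() {
        if (tgVoip == null || switchingCamera) {
            // Pre-call (or mid-switch): act on the bare capturer handle, if any.
            if (videoCapturer != 0 && !switchingCamera) {
                NativeInstance.switchCameraCapturer(videoCapturer, !isFrontFaceCamera);
            }
            return;
        }
        switchingCamera = true;
        tgVoip.switchCamera(!isFrontFaceCamera);   // request the other camera
    }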
@@ -145,6 +145,7 @@ public class AvatarPreviewer {
    public static enum MenuItem {
        OPEN_PROFILE("OpenProfile", R.string.OpenProfile, R.drawable.msg_openprofile),
        OPEN_CHANNEL("OpenChannel2", R.string.OpenChannel2, R.drawable.msg_channel),
        OPEN_GROUP("OpenGroup2", R.string.OpenGroup2, R.drawable.msg_discussion),
        SEND_MESSAGE("SendMessage", R.string.SendMessage, R.drawable.msg_discussion),
        MENTION("Mention", R.string.Mention, R.drawable.msg_mention);

@@ -537,6 +537,8 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
    private boolean hintButtonVisible;
    private float hintButtonProgress;

    private String lastPostAuthor;

    private boolean hasPsaHint;
    private int psaHelpX;
    private int psaHelpY;

@@ -2770,7 +2772,8 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        boolean messageIdChanged = currentMessageObject == null || currentMessageObject.getId() != messageObject.getId();
        boolean messageChanged = currentMessageObject != messageObject || messageObject.forceUpdate;
        boolean dataChanged = currentMessageObject != null && currentMessageObject.getId() == messageObject.getId() && lastSendState == MessageObject.MESSAGE_SEND_STATE_EDITING && messageObject.isSent()
                || currentMessageObject == messageObject && (isUserDataChanged() || photoNotSet);
                || currentMessageObject == messageObject && (isUserDataChanged() || photoNotSet)
                || lastPostAuthor != messageObject.messageOwner.post_author;
        boolean groupChanged = groupedMessages != currentMessagesGroup;
        boolean pollChanged = false;
        if (drawCommentButton || drawSideButton == 3 && !((hasDiscussion && messageObject.isLinkedToChat(linkedChatId) || isRepliesChat) && (currentPosition == null || currentPosition.siblingHeights == null && (currentPosition.flags & MessageObject.POSITION_FLAG_BOTTOM) != 0 || currentPosition.siblingHeights != null && (currentPosition.flags & MessageObject.POSITION_FLAG_TOP) == 0))) {

@@ -2826,6 +2829,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        currentMessageObject = messageObject;
        currentMessagesGroup = groupedMessages;
        lastTime = -2;
        lastPostAuthor = messageObject.messageOwner.post_author;
        isHighlightedAnimated = false;
        widthBeforeNewTimeLine = -1;
        if (currentMessagesGroup != null && (currentMessagesGroup.posArray.size() > 1)) {

@@ -3006,7 +3010,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        if (isRepliesChat) {
            comment = LocaleController.getString("ViewInChat", R.string.ViewInChat);
        } else {
            comment = commentCount == 0 ? LocaleController.getString("LeaveAComment", R.string.LeaveAComment) : LocaleController.formatPluralString("Comments", commentCount).toLowerCase();
            comment = commentCount == 0 ? LocaleController.getString("LeaveAComment", R.string.LeaveAComment) : LocaleController.formatPluralString("CommentsCount", commentCount);
            if (commentCount != 0 && !messageObject.messageOwner.replies.recent_repliers.isEmpty()) {
                createCommentUI();
                int size = messageObject.messageOwner.replies.recent_repliers.size();

@@ -4019,7 +4023,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                namesOffset -= AndroidUtilities.dp(1);
            }
        } else if (messageObject.type == 12) {
            drawName = messageObject.isFromGroup() && messageObject.isOutOwner() && messageObject.isMegagroup();
            drawName = messageObject.isFromGroup() && messageObject.isMegagroup();
            drawForwardedName = !isRepliesChat;
            drawPhotoImage = true;
            photoImage.setRoundRadius(AndroidUtilities.dp(22));

@@ -4102,7 +4106,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
            }
        } else if (messageObject.type == 2) {
            drawForwardedName = !isRepliesChat;
            drawName = messageObject.isFromGroup() && messageObject.isOutOwner() && messageObject.isMegagroup();
            drawName = messageObject.isFromGroup() && messageObject.isMegagroup();
            if (AndroidUtilities.isTablet()) {
                backgroundWidth = Math.min(AndroidUtilities.getMinTabletSide() - AndroidUtilities.dp(drawAvatar ? 102 : 50), AndroidUtilities.dp(270));
            } else {

@@ -4117,7 +4121,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                namesOffset -= AndroidUtilities.dp(1);
            }
        } else if (messageObject.type == 14) {
            drawName = messageObject.isFromGroup() && messageObject.isOutOwner() && messageObject.isMegagroup();
            drawName = messageObject.isFromGroup() && messageObject.isMegagroup();
            if (AndroidUtilities.isTablet()) {
                backgroundWidth = Math.min(AndroidUtilities.getMinTabletSide() - AndroidUtilities.dp(drawAvatar ? 102 : 50), AndroidUtilities.dp(270));
            } else {

@@ -4397,7 +4401,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        } else {
            drawForwardedName = messageObject.messageOwner.fwd_from != null && !messageObject.isAnyKindOfSticker();
            if (!messageObject.isAnyKindOfSticker() && messageObject.type != MessageObject.TYPE_ROUND_VIDEO) {
                drawName = messageObject.isFromGroup() && messageObject.isOutOwner() && messageObject.isMegagroup();
                drawName = messageObject.isFromGroup() && messageObject.isMegagroup();
            }
            mediaBackground = isMedia = messageObject.type != 9;
            drawImageButton = true;

@@ -5268,6 +5272,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        }
        if (currentMessageObject.isDice()) {
            totalHeight += AndroidUtilities.dp(21);
            additionalTimeOffsetY = AndroidUtilities.dp(21);
        }

        int additionalTop = 0;

@@ -6764,7 +6769,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                diff -= timeWidth + AndroidUtilities.dp(4 + (currentMessageObject.isOutOwner() ? 20 : 0));
            }
            if (diff > 0) {
                textX += diff;
                textX += diff - getExtraTimeX();
            }
        }
        if (transitionParams.animateChangeProgress != 1.0f && transitionParams.animateMessageText) {

@@ -8610,7 +8615,11 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        if (messageObject.scheduled) {
            signString = null;
        } else if (messageObject.messageOwner.post_author != null) {
            signString = messageObject.messageOwner.post_author.replace("\n", "");
            if (isMegagroup && messageObject.getFromChatId() == messageObject.getDialogId()) {
                signString = null;
            } else {
                signString = messageObject.messageOwner.post_author.replace("\n", "");
            }
        } else if (messageObject.messageOwner.fwd_from != null && messageObject.messageOwner.fwd_from.post_author != null) {
            signString = messageObject.messageOwner.fwd_from.post_author.replace("\n", "");
        } else if (!messageObject.isOutOwner() && fromId > 0 && messageObject.messageOwner.post) {

@@ -8841,6 +8850,10 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
            adminString = LocaleController.getString("DiscussChannel", R.string.DiscussChannel);
            adminWidth = (int) Math.ceil(Theme.chat_adminPaint.measureText(adminString));
            nameWidth -= adminWidth;
        } else if (isMegagroup && currentChat != null && messageObject.messageOwner.post_author != null && currentChat.id == -currentMessageObject.getFromChatId()) {
            adminString = messageObject.messageOwner.post_author.replace("\n", "");
            adminWidth = (int) Math.ceil(Theme.chat_adminPaint.measureText(adminString));
            nameWidth -= adminWidth;
        } else if (currentUser != null && !currentMessageObject.isOutOwner() && !currentMessageObject.isAnyKindOfSticker() && currentMessageObject.type != 5 && delegate != null && (adminLabel = delegate.getAdminRank(currentUser.id)) != null) {
            if (adminLabel.length() == 0) {
                adminLabel = LocaleController.getString("ChatAdmin", R.string.ChatAdmin);

@@ -9946,6 +9959,10 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        return drawNameLayout && nameLayout != null;
    }

    public boolean isAdminLayoutChanged() {
        return !TextUtils.equals(lastPostAuthor, currentMessageObject.messageOwner.post_author);
    }

    public void drawNamesLayout(Canvas canvas, float alpha) {
        long newAnimationTime = SystemClock.elapsedRealtime();
        long dt = newAnimationTime - lastNamesAnimationTime;

@@ -10017,13 +10034,29 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
            }
            nameY = AndroidUtilities.dp(drawPinnedTop ? 9 : 10);
        }
        canvas.translate(nameX, nameY);
        float nx;
        if (transitionParams.animateSign) {
            nx = transitionParams.animateNameX + (nameX - transitionParams.animateNameX) * transitionParams.animateChangeProgress;
        } else {
            nx = nameX;
        }
        canvas.translate(nx, nameY);
        nameLayout.draw(canvas);
        canvas.restore();
        if (adminLayout != null) {
            Theme.chat_adminPaint.setColor(Theme.getColor(isDrawSelectionBackground() ? Theme.key_chat_adminSelectedText : Theme.key_chat_adminText));
            int color = Theme.getColor(isDrawSelectionBackground() ? Theme.key_chat_adminSelectedText : Theme.key_chat_adminText);
            Theme.chat_adminPaint.setColor(color);
            canvas.save();
            canvas.translate(backgroundDrawableLeft + backgroundDrawableRight - AndroidUtilities.dp(11) - adminLayout.getLineWidth(0), nameY + AndroidUtilities.dp(0.5f));
            float ax;
            if (!mediaBackground && currentMessageObject.isOutOwner()) {
                ax = backgroundDrawableLeft + backgroundDrawableRight - AndroidUtilities.dp(17) - adminLayout.getLineWidth(0);
            } else {
                ax = backgroundDrawableLeft + backgroundDrawableRight - AndroidUtilities.dp(11) - adminLayout.getLineWidth(0);
            }
            canvas.translate(ax, nameY + AndroidUtilities.dp(0.5f));
            if (transitionParams.animateSign) {
                Theme.chat_adminPaint.setAlpha((int) (Color.alpha(color) * transitionParams.animateChangeProgress));
            }
            adminLayout.draw(canvas);
            canvas.restore();
        }

@@ -11934,6 +11967,7 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        private final int INSTANT_VIEW = 499;
        private final int SHARE = 498;
        private final int REPLY = 497;
        private final int COMMENT = 496;
        private Path linkPath = new Path();
        private RectF rectF = new RectF();
        private Rect rect = new Rect();

@@ -12104,6 +12138,9 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                if (drawInstantView && !instantButtonRect.isEmpty()) {
                    info.addChild(ChatMessageCell.this, INSTANT_VIEW);
                }
                if (commentLayout != null) {
                    info.addChild(ChatMessageCell.this, COMMENT);
                }
                if (drawSideButton == 1) {
                    info.addChild(ChatMessageCell.this, SHARE);
                }

@@ -12259,6 +12296,21 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                rect.offset(pos[0], pos[1]);
                info.setBoundsInScreen(rect);
                info.setClickable(true);
            } else if (virtualViewId == COMMENT) {
                info.setClassName("android.widget.Button");
                info.setEnabled(true);
                if (commentLayout != null) {
                    info.setText(commentLayout.getText());
                }
                info.addAction(AccessibilityNodeInfo.ACTION_CLICK);
                rect.set(commentButtonRect);
                info.setBoundsInParent(rect);
                if (accessibilityVirtualViewBounds.get(virtualViewId) == null || !accessibilityVirtualViewBounds.get(virtualViewId).equals(rect)) {
                    accessibilityVirtualViewBounds.put(virtualViewId, new Rect(rect));
                }
                rect.offset(pos[0], pos[1]);
                info.setBoundsInScreen(rect);
                info.setClickable(true);
            }
            info.setFocusable(true);
            info.setVisibleToUser(true);

@@ -12318,6 +12370,10 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                if (delegate != null && (!isThreadChat || currentMessageObject.getReplyTopMsgId() != 0) && currentMessageObject.hasValidReplyMessageObject()) {
                    delegate.didPressReplyMessage(ChatMessageCell.this, currentMessageObject.getReplyMsgId());
                }
            } else if (virtualViewId == COMMENT) {
                if (delegate != null) {
                    delegate.didPressCommentButton(ChatMessageCell.this);
                }
            }
        } else if (action == AccessibilityNodeInfo.ACTION_LONG_CLICK) {
            ClickableSpan link = getLinkById(virtualViewId);

@@ -12410,6 +12466,10 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
        private int animateCommentUnreadX;
        private boolean animateCommentDrawUnread;

        private boolean animateSign;
        private float animateNameX;
        private String lastSignMessage;

        public boolean imageChangeBoundsTransition;
        public int deltaLeft;
        public int deltaRight;

@@ -12494,6 +12554,8 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                lastRepliesLayout = repliesLayout;
            }

            lastSignMessage = lastPostAuthor;

            lastButtonX = buttonX;
            lastButtonY = buttonY;
        }

@@ -12573,6 +12635,12 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate
                changed = true;
            }

            if (!TextUtils.equals(lastSignMessage, lastPostAuthor)) {
                animateSign = true;
                animateNameX = nameX;
                changed = true;
            }

            return changed;
        }

@@ -12620,6 +12688,8 @@ public class ChatMessageCell extends BaseCell implements SeekBar.SeekBarDelegate

            animateComments = false;
            animateCommentsLayout = null;

            animateSign = false;
        }

        public boolean supportChangeAnimation() {
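Taken together, the ChatMessageCell hunks wire a small cross-fade for edited post_author signatures; a sketch of the flow, using only names from the hunks above:

    // 1) Snapshot the author when the message is (re)bound.
    lastPostAuthor = messageObject.messageOwner.post_author;
    // 2) TransitionParams keeps the previous value and arms the animation
    //    when it changes:
    if (!TextUtils.equals(lastSignMessage, lastPostAuthor)) {
        animateSign = true;       // cross-fade the admin/sign layout
        animateNameX = nameX;     // remember where the name used to start
    }
    // 3) While animating, drawNamesLayout() interpolates the name X and
    //    ramps the admin label alpha with animateChangeProgress.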
@@ -210,6 +210,7 @@ import java.io.File;
import java.io.FileWriter;
import java.net.URLDecoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collections;
import java.util.HashMap;

@@ -10751,6 +10752,9 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                if (messagesDict[loadIndex].indexOfKey(messageId) >= 0) {
                    continue;
                }
                if (threadMessageId != 0 && obj.messageOwner instanceof TLRPC.TL_messageEmpty) {
                    continue;
                }
                if (currentEncryptedChat != null && obj.messageOwner.stickerVerified == 0) {
                    getMediaDataController().verifyAnimatedStickerMessage(obj.messageOwner);
                }

@@ -11536,6 +11540,9 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                if (action instanceof TLRPC.TL_messageActionChannelMigrateFrom) {
                    continue;
                }
                if (threadMessageId != 0 && obj.messageOwner instanceof TLRPC.TL_messageEmpty) {
                    continue;
                }
                if (threadMessageObject != null && obj.isReply()) {
                    int mid = obj.getReplyAnyMsgId();
                    if (threadMessageObject.getId() == mid) {

@@ -11553,10 +11560,12 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                    last_message_id = Math.min(last_message_id, messageId);
                }

                if (obj.messageOwner.mentioned && obj.isContentUnread()) {
                    newMentionsCount++;
                if (threadMessageId == 0) {
                    if (obj.messageOwner.mentioned && obj.isContentUnread()) {
                        newMentionsCount++;
                    }
                    newUnreadMessageCount++;
                }
                newUnreadMessageCount++;
                if (obj.type == 10 || obj.type == 11) {
                    updateChat = true;
                }

@@ -11634,6 +11643,9 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                if (action instanceof TLRPC.TL_messageActionChannelMigrateFrom) {
                    continue;
                }
                if (threadMessageId != 0 && obj.messageOwner instanceof TLRPC.TL_messageEmpty) {
                    continue;
                }
                if (threadMessageObject != null && threadMessageObject.messageOwner.replies != null && obj.isReply()) {
                    int mid = obj.getReplyAnyMsgId();
                    if (threadMessageObject.getId() == mid) {

@@ -11834,10 +11846,12 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                        changeBoundAnimator.start();
                    }
                }
                if (!obj.isOut() && obj.messageOwner.mentioned && obj.isContentUnread()) {
                    newMentionsCount++;
                if (threadMessageId == 0) {
                    if (!obj.isOut() && obj.messageOwner.mentioned && obj.isContentUnread()) {
                        newMentionsCount++;
                    }
                    newUnreadMessageCount++;
                }
                newUnreadMessageCount++;
                if (obj.type == 10 || obj.type == 11) {
                    updateChat = true;
                }

@@ -12406,11 +12420,13 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                    for (int a = 0, N = removedIndexes.size(); a < N; a++) {
                        chatAdapter.notifyItemRemoved(removedIndexes.get(a));
                    }
                    removeUnreadPlane(false);
                    if (!isThreadChat() || messages.size() <= 3) {
                        removeUnreadPlane(false);
                    }
                    chatAdapter.notifyItemRangeChanged(chatAdapter.messagesStartRow, messages.size());
                }
                updateVisibleRows();
            } else {
            } else if (threadMessageId == 0) {
                first_unread_id = 0;
                last_message_id = 0;
                createUnreadMessageAfterId = 0;

@@ -12715,7 +12731,15 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
        } else if (id == NotificationCenter.removeAllMessagesFromDialog) {
            long did = (Long) args[0];
            if (dialog_id == did) {
                clearHistory((Boolean) args[1]);
                if (threadMessageId != 0) {
                    if (forwardEndReached[0]) {
                        forwardEndReached[0] = false;
                        chatAdapter.notifyItemInserted(0);
                    }
                    MessagesController.getInstance(currentAccount).addToViewsQueue(threadMessageObject);
                } else {
                    clearHistory((Boolean) args[1]);
                }
            }
        } else if (id == NotificationCenter.screenshotTook) {
            updateInformationForScreenshotDetector();

@@ -16892,7 +16916,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                    break;
                }
                case 27: {
                    openDiscussionMessageChat(currentChat.id, selectedObject.getId(), 0, -1, 0, null);
                    openDiscussionMessageChat(currentChat.id, null, selectedObject.getId(), 0, -1, 0, null);
                    break;
                }
                case 100: {

@@ -17438,7 +17462,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
    private boolean savedNoHistory;
    private boolean savedNoDiscussion;

    private void processLoadedDiscussionMessage(boolean noDiscussion, TLRPC.TL_messages_discussionMessage discussionMessage, boolean noHistory, TLRPC.messages_Messages history, int maxReadId, MessageObject fallbackMessage, Runnable progressRunnable, TLRPC.TL_messages_getDiscussionMessage req, TLRPC.Chat originalChat, int highlightMsgId) {
    private void processLoadedDiscussionMessage(boolean noDiscussion, TLRPC.TL_messages_discussionMessage discussionMessage, boolean noHistory, TLRPC.messages_Messages history, int maxReadId, MessageObject fallbackMessage, Runnable progressRunnable, TLRPC.TL_messages_getDiscussionMessage req, TLRPC.Chat originalChat, int highlightMsgId, MessageObject originalMessage) {
        if (!noDiscussion && discussionMessage == null || noDiscussion || !noHistory && history == null) {
            return;
        }

@@ -17468,6 +17492,9 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
        if (highlightMsgId != 0) {
            chatActivity.highlightMessageId = highlightMsgId;
        }
        if (originalMessage != null && originalMessage.messageOwner.replies != null && chatActivity.threadMessageObject.messageOwner.replies != null) {
            originalMessage.messageOwner.replies.replies = chatActivity.threadMessageObject.messageOwner.replies.replies;
        }
        presentFragment(chatActivity);
        chatOpened = true;
        if (history != null) {

@@ -17499,7 +17526,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
        }
    }

    private void openDiscussionMessageChat(int chatId, int messageId, int linkedChatId, int maxReadId, int highlightMsgId, MessageObject fallbackMessage) {
    private void openDiscussionMessageChat(int chatId, MessageObject originalMessage, int messageId, int linkedChatId, int maxReadId, int highlightMsgId, MessageObject fallbackMessage) {
        TLRPC.Chat chat = getMessagesController().getChat(chatId);
        TLRPC.TL_messages_getDiscussionMessage req = new TLRPC.TL_messages_getDiscussionMessage();
        req.peer = MessagesController.getInputPeer(chat);

@@ -17552,7 +17579,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                } else {
                    savedNoHistory = true;
                }
                processLoadedDiscussionMessage(savedNoDiscussion, savedDiscussionMessage, savedNoHistory, savedHistory, maxReadId, fallbackMessage, progressRunnable, req, chat, highlightMsgId);
                processLoadedDiscussionMessage(savedNoDiscussion, savedDiscussionMessage, savedNoHistory, savedHistory, maxReadId, fallbackMessage, progressRunnable, req, chat, highlightMsgId, originalMessage);
            }));
            getConnectionsManager().bindRequestToGuid(commentMessagesRequestId, classGuid);
        } else {

@@ -17571,7 +17598,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
            } else {
                savedNoDiscussion = true;
            }
            processLoadedDiscussionMessage(savedNoDiscussion, savedDiscussionMessage, savedNoHistory, savedHistory, maxReadId, fallbackMessage, progressRunnable, req, chat, highlightMsgId);
            processLoadedDiscussionMessage(savedNoDiscussion, savedDiscussionMessage, savedNoHistory, savedHistory, maxReadId, fallbackMessage, progressRunnable, req, chat, highlightMsgId, originalMessage);
        }));
        getConnectionsManager().bindRequestToGuid(commentRequestId, classGuid);
        AndroidUtilities.runOnUIThread(progressRunnable, 500);

@@ -18192,7 +18219,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                MessageObject messageObject = cell.getMessageObject();
                if ((UserObject.isReplyUser(currentUser) || UserObject.isUserSelf(currentUser)) && messageObject.messageOwner.fwd_from.saved_from_peer != null) {
                    if (UserObject.isReplyUser(currentUser) && messageObject.messageOwner.reply_to != null && messageObject.messageOwner.reply_to.reply_to_top_id != 0) {
                        openDiscussionMessageChat(messageObject.messageOwner.reply_to.reply_to_peer_id.channel_id, messageObject.messageOwner.reply_to.reply_to_top_id, 0, -1, messageObject.messageOwner.fwd_from.saved_from_msg_id, messageObject);
                        openDiscussionMessageChat(messageObject.messageOwner.reply_to.reply_to_peer_id.channel_id, null, messageObject.messageOwner.reply_to.reply_to_top_id, 0, -1, messageObject.messageOwner.fwd_from.saved_from_msg_id, messageObject);
                    } else {
                        openOriginalReplyChat(messageObject);
                    }

@@ -18249,7 +18276,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                processRowSelect(cell, true, touchX, touchY);
                return;
            }
            openChannel(cell, chat, postId);
            openChat(cell, chat, postId);
        }

        @Override

@@ -18341,7 +18368,11 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
        @Override
        public boolean didLongPressChannelAvatar(ChatMessageCell cell, TLRPC.Chat chat, int postId, float touchX, float touchY) {
            if (isAvatarPreviewerEnabled()) {
                final AvatarPreviewer.MenuItem[] menuItems = {AvatarPreviewer.MenuItem.OPEN_PROFILE, AvatarPreviewer.MenuItem.OPEN_CHANNEL};
                AvatarPreviewer.MenuItem[] menuItems = {AvatarPreviewer.MenuItem.OPEN_PROFILE};
                if (currentChat == null || currentChat.id != chat.id || isThreadChat()) {
                    menuItems = Arrays.copyOf(menuItems, 2);
                    menuItems[1] = chat.broadcast ? AvatarPreviewer.MenuItem.OPEN_CHANNEL : AvatarPreviewer.MenuItem.OPEN_GROUP;
                }
                final TLRPC.ChatFull chatFull = getMessagesController().getChatFull(chat.id);
                final AvatarPreviewer.Data data;
                if (chatFull != null) {

@@ -18355,8 +18386,9 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                    case OPEN_PROFILE:
                        openProfile(chat);
                        break;
                    case OPEN_GROUP:
                    case OPEN_CHANNEL:
                        openChannel(cell, chat, 0);
                        openChat(cell, chat, 0);
                        break;
                }
            });

@@ -18395,7 +18427,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
            }
        }

        private void openChannel(ChatMessageCell cell, TLRPC.Chat chat, int postId) {
        private void openChat(ChatMessageCell cell, TLRPC.Chat chat, int postId) {
            if (currentChat == null || chat.id != currentChat.id || isThreadChat()) {
                Bundle args = new Bundle();
                args.putInt("chat_id", chat.id);

@@ -18683,7 +18715,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                maxReadId = -1;
                linkedChatId = 0;
            }
            openDiscussionMessageChat(currentChat.id, message.getId(), linkedChatId, maxReadId, 0, null);
            openDiscussionMessageChat(currentChat.id, message, message.getId(), linkedChatId, maxReadId, 0, null);
        }

        @Override

@@ -18892,7 +18924,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                if (pinnedBottom && fromId < 0 && currentChat.megagroup) {
                    pinnedBottom = false;
                }
            } else if (UserObject.isUserSelf(currentUser)) {
            } else if (UserObject.isUserSelf(currentUser) || UserObject.isReplyUser(currentUser)) {
                if (message.isPrivateForward() || nextMessage.isPrivateForward()) {
                    pinnedBottom = false;
                } else {

@@ -18911,7 +18943,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                if (pinnedTop && fromId < 0 && currentChat.megagroup) {
                    pinnedTop = false;
                }
            } else if (UserObject.isUserSelf(currentUser)) {
            } else if (UserObject.isUserSelf(currentUser) || UserObject.isReplyUser(currentUser)) {
                if (message.isPrivateForward() || prevMessage.isPrivateForward()) {
                    pinnedTop = false;
                } else {

@@ -19180,7 +19212,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
            View child = chatListView.getChildAt(a);
            if (child instanceof ChatMessageCell) {
                ChatMessageCell cell = (ChatMessageCell) child;
                if (cell.getMessageObject() == messageObject) {
                if (cell.getMessageObject() == messageObject && !cell.isAdminLayoutChanged()) {
                    cell.setMessageObject(messageObject, cell.getCurrentMessagesGroup(), cell.isPinnedBottom(), cell.isPinnedTop());
                    return cell;
                }

@@ -19360,7 +19392,7 @@ public class ChatActivity extends BaseFragment implements NotificationCenter.Not
                int threadId = Utilities.parseInt(data.getQueryParameter("thread"));
                if (channelId == currentChat.id && messageId != 0) {
                    if (threadId != 0) {
                        openDiscussionMessageChat(currentChat.id, threadId, 0, -1, 0, null);
                        openDiscussionMessageChat(currentChat.id, null, threadId, 0, -1, 0, null);
                    } else {
                        showScrollToMessageError = true;
                        scrollToMessageId(messageId, fromMessageId, true, 0, false);
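Note on the widened openDiscussionMessageChat(...) signature: the originating channel post now rides along so its reply counter can be kept in sync with the opened thread. A sketch of the two kinds of call sites, as in the hunks above:

    // From a channel post: pass the message itself.
    openDiscussionMessageChat(currentChat.id, message, message.getId(), linkedChatId, maxReadId, 0, null);
    // From a deep link or menu action with no originating object: pass null.
    openDiscussionMessageChat(currentChat.id, null, threadId, 0, -1, 0, null);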
@@ -1692,7 +1692,11 @@ public class AlertsCreator {
        }
    });
    final NumberPicker.OnValueChangeListener onValueChangeListener = (picker, oldVal, newVal) -> {
        container.performHapticFeedback(HapticFeedbackConstants.KEYBOARD_TAP, HapticFeedbackConstants.FLAG_IGNORE_GLOBAL_SETTING);
        try {
            container.performHapticFeedback(HapticFeedbackConstants.KEYBOARD_TAP, HapticFeedbackConstants.FLAG_IGNORE_GLOBAL_SETTING);
        } catch (Exception ignore) {

        }
        checkScheduleDate(buttonTextView, selfUserId == dialogId, dayPicker, hourPicker, minutePicker);
    };
    dayPicker.setOnValueChangedListener(onValueChangeListener);

@@ -1894,7 +1898,11 @@ public class AlertsCreator {
    dayPicker.setWrapSelectorWheel(false);
    dayPicker.setFormatter(value -> "" + value);
    final NumberPicker.OnValueChangeListener onValueChangeListener = (picker, oldVal, newVal) -> {
        container.performHapticFeedback(HapticFeedbackConstants.KEYBOARD_TAP, HapticFeedbackConstants.FLAG_IGNORE_GLOBAL_SETTING);
        try {
            container.performHapticFeedback(HapticFeedbackConstants.KEYBOARD_TAP, HapticFeedbackConstants.FLAG_IGNORE_GLOBAL_SETTING);
        } catch (Exception ignore) {

        }
        checkCalendarDate(minDate, dayPicker, monthPicker, yearPicker);
    };
    dayPicker.setOnValueChangedListener(onValueChangeListener);
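The new guard presumably covers devices whose haptics implementation throws from performHapticFeedback; as a reusable helper it might look like this (hypothetical name, not part of the change):

    static void safeKeyboardTap(View view) {
        try {
            view.performHapticFeedback(HapticFeedbackConstants.KEYBOARD_TAP,
                    HapticFeedbackConstants.FLAG_IGNORE_GLOBAL_SETTING);
        } catch (Exception ignore) {
            // best-effort: the tap feedback is non-essential
        }
    }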
@@ -445,31 +445,30 @@ public class ChatAvatarContainer extends FrameLayout implements NotificationCent
        }
    } else {
        if (parentFragment.isThreadChat()) {
            if (titleTextView.getTag() == null) {
                return;
            }
            titleTextView.setTag(null);
            subtitleTextView.setVisibility(VISIBLE);
            if (titleAnimation != null) {
                titleAnimation.cancel();
                titleAnimation = null;
            }
            if (animated) {
                titleAnimation = new AnimatorSet();
                titleAnimation.playTogether(
                        ObjectAnimator.ofFloat(titleTextView, View.TRANSLATION_Y, 0),
                        ObjectAnimator.ofFloat(subtitleTextView, View.ALPHA, 1.0f));
                titleAnimation.addListener(new AnimatorListenerAdapter() {
                    @Override
                    public void onAnimationEnd(Animator animation) {
                        titleAnimation = null;
                    }
                });
                titleAnimation.setDuration(180);
                titleAnimation.start();
            } else {
                titleTextView.setTranslationY(0.0f);
                subtitleTextView.setAlpha(1.0f);
            if (titleTextView.getTag() != null) {
                titleTextView.setTag(null);
                subtitleTextView.setVisibility(VISIBLE);
                if (titleAnimation != null) {
                    titleAnimation.cancel();
                    titleAnimation = null;
                }
                if (animated) {
                    titleAnimation = new AnimatorSet();
                    titleAnimation.playTogether(
                            ObjectAnimator.ofFloat(titleTextView, View.TRANSLATION_Y, 0),
                            ObjectAnimator.ofFloat(subtitleTextView, View.ALPHA, 1.0f));
                    titleAnimation.addListener(new AnimatorListenerAdapter() {
                        @Override
                        public void onAnimationEnd(Animator animation) {
                            titleAnimation = null;
                        }
                    });
                    titleAnimation.setDuration(180);
                    titleAnimation.start();
                } else {
                    titleTextView.setTranslationY(0.0f);
                    subtitleTextView.setAlpha(1.0f);
                }
            }
        }
        newSubtitle = printString;
@@ -147,6 +147,7 @@ public class SearchViewPager extends ViewPagerFixed implements FilteredSearchVie
        emptyView.subtitle.setVisibility(View.GONE);
        emptyView.setVisibility(View.GONE);
        emptyView.addView(loadingView, 0);
        emptyView.showProgress(true, false);

        searchContainer.addView(emptyView);
        searchListView.setEmptyView(emptyView);
@@ -517,7 +517,7 @@ public class VoIPHelper {
                c.get(Calendar.MINUTE), c.get(Calendar.SECOND), name)).getAbsolutePath();
    }

    public static String getLogFilePath(long callId) {
    public static String getLogFilePath(long callId, boolean stats) {
        final File logsDir = getLogsDir();
        if (!BuildVars.DEBUG_VERSION) {
            final File[] _logs = logsDir.listFiles();
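Existing callers must now say which log they want; the boolean is the new stats flag, so the release-build call log becomes (as in the VoIPService hunk above):

    final String logFilePath = BuildVars.DEBUG_VERSION
            ? VoIPHelper.getLogFilePath("voip" + call.id)
            : VoIPHelper.getLogFilePath(call.id, false);   // false = regular call log, not stats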
@@ -51,6 +51,7 @@ import android.view.View;
import android.view.ViewConfiguration;
import android.view.ViewGroup;
import android.view.ViewOutlineProvider;
import android.view.ViewParent;
import android.view.ViewTreeObserver;
import android.view.WindowManager;
import android.view.animation.AccelerateDecelerateInterpolator;

@@ -2771,7 +2772,7 @@ public class DialogsActivity extends BaseFragment implements NotificationCenter.
        floatingButtonContainer.setContentDescription(LocaleController.getString("NewMessageTitle", R.string.NewMessageTitle));
        floatingButtonContainer.addView(floatingButton, LayoutHelper.createFrame((Build.VERSION.SDK_INT >= 21 ? 56 : 60), (Build.VERSION.SDK_INT >= 21 ? 56 : 60), Gravity.LEFT | Gravity.TOP, 10, 0, 10, 0));


        searchTabsView = null;
        if (searchString != null) {
            showSearch(true, false);
            actionBar.openSearchField(searchString, false);

@@ -2997,7 +2998,6 @@ public class DialogsActivity extends BaseFragment implements NotificationCenter.
        }

        updateFilterTabs(false);
        searchTabsView = null;

        return fragmentView;
    }

@@ -3654,7 +3654,10 @@ public class DialogsActivity extends BaseFragment implements NotificationCenter.
                contentView.addView(searchTabsView, LayoutHelper.createFrame(LayoutHelper.MATCH_PARENT, 44));
            }
        } else if (searchTabsView != null && onlyDialogsAdapter) {
            ((ContentView) fragmentView).removeView(searchTabsView);
            ViewParent parent = searchTabsView.getParent();
            if (parent instanceof ViewGroup) {
                ((ViewGroup) parent).removeView(searchTabsView);
            }
            searchTabsView = null;
        }
@@ -228,6 +228,7 @@
    <string name="GoToSettings">GO TO SETTINGS</string>
    <string name="OpenProfile">Open Profile</string>
    <string name="OpenChannel2">Open Channel</string>
    <string name="OpenGroup2">Open Group</string>
    <string name="SendMessage">Send Message</string>
    <string name="Mention">Mention</string>
    <string name="NotificationsMutedHint">Notifications muted</string>

@@ -3611,6 +3612,12 @@
    <string name="Comments_few">%1$d Comments</string>
    <string name="Comments_many">%1$d Comments</string>
    <string name="Comments_other">%1$d Comments</string>
    <string name="CommentsCount_zero">%1$d comments</string>
    <string name="CommentsCount_one">%1$d comment</string>
    <string name="CommentsCount_two">%1$d comments</string>
    <string name="CommentsCount_few">%1$d comments</string>
    <string name="CommentsCount_many">%1$d comments</string>
    <string name="CommentsCount_other">%1$d comments</string>
    <!--accessibility descriptions-->
    <string name="AccDescrGroup">Group</string>
    <string name="AccDescrChannel">Channel</string>
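The lowercase CommentsCount_* plurals let the comment button drop its manual toLowerCase(); per the ChatMessageCell hunk above, the label is now built as:

    String comment = commentCount == 0
            ? LocaleController.getString("LeaveAComment", R.string.LeaveAComment)
            : LocaleController.formatPluralString("CommentsCount", commentCount);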