diff --git a/resources/icons/ui/video-call.png b/resources/icons/ui/video-call.png
new file mode 100644
index 00000000..f40ce022
Binary files /dev/null and b/resources/icons/ui/video-call.png differ
diff --git a/resources/qml/ActiveCallBar.qml b/resources/qml/ActiveCallBar.qml
index 9344738e..49b5d059 100644
--- a/resources/qml/ActiveCallBar.qml
+++ b/resources/qml/ActiveCallBar.qml
@@ -10,6 +10,12 @@ Rectangle {
color: "#2ECC71"
implicitHeight: rowLayout.height + 8
+ MouseArea {
+ anchors.fill: parent
+ onClicked: if (TimelineManager.onVideoCall)
+ stackLayout.currentIndex = stackLayout.currentIndex ? 0 : 1;
+ }
+
RowLayout {
id: rowLayout
@@ -33,7 +39,8 @@ Rectangle {
Image {
Layout.preferredWidth: 24
Layout.preferredHeight: 24
- source: "qrc:/icons/icons/ui/place-call.png"
+ source: TimelineManager.onVideoCall ?
+ "qrc:/icons/icons/ui/video-call.png" : "qrc:/icons/icons/ui/place-call.png"
}
Label {
@@ -58,9 +65,12 @@ Rectangle {
callStateLabel.text = "00:00";
var d = new Date();
callTimer.startTime = Math.floor(d.getTime() / 1000);
+ if (TimelineManager.onVideoCall)
+ stackLayout.currentIndex = 1;
break;
case WebRTCState.DISCONNECTED:
callStateLabel.text = "";
+ stackLayout.currentIndex = 0;
}
}
diff --git a/resources/qml/TimelineView.qml b/resources/qml/TimelineView.qml
index ab0148e9..d69d5568 100644
--- a/resources/qml/TimelineView.qml
+++ b/resources/qml/TimelineView.qml
@@ -4,7 +4,7 @@ import "./emoji"
import QtGraphicalEffects 1.0
import QtQuick 2.9
import QtQuick.Controls 2.3
-import QtQuick.Layouts 1.2
+import QtQuick.Layouts 1.3
import QtQuick.Window 2.2
import im.nheko 1.0
import im.nheko.EmojiModel 1.0
@@ -282,144 +282,157 @@ Page {
}
- ListView {
- id: chat
+ StackLayout {
+ id: stackLayout
+ currentIndex: 0
- property int delegateMaxWidth: (Settings.timelineMaxWidth > 100 && (parent.width - Settings.timelineMaxWidth) > scrollbar.width * 2) ? Settings.timelineMaxWidth : (parent.width - scrollbar.width * 2)
-
- visible: TimelineManager.timeline != null
- cacheBuffer: 400
- Layout.fillWidth: true
- Layout.fillHeight: true
- model: TimelineManager.timeline
- boundsBehavior: Flickable.StopAtBounds
- pixelAligned: true
- spacing: 4
- verticalLayoutDirection: ListView.BottomToTop
- onCountChanged: {
- if (atYEnd)
- model.currentIndex = 0;
-
- } // Mark last event as read, since we are at the bottom
-
- ScrollHelper {
- flickable: parent
- anchors.fill: parent
- }
-
- Shortcut {
- sequence: StandardKey.MoveToPreviousPage
- onActivated: {
- chat.contentY = chat.contentY - chat.height / 2;
- chat.returnToBounds();
+ Connections {
+ target: TimelineManager
+ function onActiveTimelineChanged() {
+ stackLayout.currentIndex = 0;
}
}
- Shortcut {
- sequence: StandardKey.MoveToNextPage
- onActivated: {
- chat.contentY = chat.contentY + chat.height / 2;
- chat.returnToBounds();
- }
- }
+ ListView {
+ id: chat
- Shortcut {
- sequence: StandardKey.Cancel
- onActivated: chat.model.reply = undefined
- }
+ property int delegateMaxWidth: (Settings.timelineMaxWidth > 100 && (parent.width - Settings.timelineMaxWidth) > scrollbar.width * 2) ? Settings.timelineMaxWidth : (parent.width - scrollbar.width * 2)
- Shortcut {
- sequence: "Alt+Up"
- onActivated: chat.model.reply = chat.model.indexToId(chat.model.reply ? chat.model.idToIndex(chat.model.reply) + 1 : 0)
- }
+ visible: TimelineManager.timeline != null
+ cacheBuffer: 400
+ Layout.fillWidth: true
+ Layout.fillHeight: true
+ model: TimelineManager.timeline
+ boundsBehavior: Flickable.StopAtBounds
+ pixelAligned: true
+ spacing: 4
+ verticalLayoutDirection: ListView.BottomToTop
+ onCountChanged: {
+ if (atYEnd)
+ model.currentIndex = 0;
- Shortcut {
- sequence: "Alt+Down"
- onActivated: {
- var idx = chat.model.reply ? chat.model.idToIndex(chat.model.reply) - 1 : -1;
- chat.model.reply = idx >= 0 ? chat.model.indexToId(idx) : undefined;
- }
- }
+ } // Mark last event as read, since we are at the bottom
- Component {
- id: userProfileComponent
-
- UserProfile {
+ ScrollHelper {
+ flickable: parent
+ anchors.fill: parent
}
- }
+ Shortcut {
+ sequence: StandardKey.MoveToPreviousPage
+ onActivated: {
+ chat.contentY = chat.contentY - chat.height / 2;
+ chat.returnToBounds();
+ }
+ }
- section {
- property: "section"
- }
+ Shortcut {
+ sequence: StandardKey.MoveToNextPage
+ onActivated: {
+ chat.contentY = chat.contentY + chat.height / 2;
+ chat.returnToBounds();
+ }
+ }
- Component {
- id: sectionHeader
+ Shortcut {
+ sequence: StandardKey.Cancel
+ onActivated: chat.model.reply = undefined
+ }
- Column {
- property var modelData
- property string section
- property string nextSection
+ Shortcut {
+ sequence: "Alt+Up"
+ onActivated: chat.model.reply = chat.model.indexToId(chat.model.reply ? chat.model.idToIndex(chat.model.reply) + 1 : 0)
+ }
- topPadding: 4
- bottomPadding: 4
- spacing: 8
- visible: !!modelData
- width: parent.width
- height: (section.includes(" ") ? dateBubble.height + 8 + userName.height : userName.height) + 8
+ Shortcut {
+ sequence: "Alt+Down"
+ onActivated: {
+ var idx = chat.model.reply ? chat.model.idToIndex(chat.model.reply) - 1 : -1;
+ chat.model.reply = idx >= 0 ? chat.model.indexToId(idx) : undefined;
+ }
+ }
- Label {
- id: dateBubble
-
- anchors.horizontalCenter: parent ? parent.horizontalCenter : undefined
- visible: section.includes(" ")
- text: chat.model.formatDateSeparator(modelData.timestamp)
- color: colors.text
- height: fontMetrics.height * 1.4
- width: contentWidth * 1.2
- horizontalAlignment: Text.AlignHCenter
- verticalAlignment: Text.AlignVCenter
-
- background: Rectangle {
- radius: parent.height / 2
- color: colors.base
- }
+ Component {
+ id: userProfileComponent
+ UserProfile {
}
- Row {
- height: userName.height
+ }
+
+ section {
+ property: "section"
+ }
+
+ Component {
+ id: sectionHeader
+
+ Column {
+ property var modelData
+ property string section
+ property string nextSection
+
+ topPadding: 4
+ bottomPadding: 4
spacing: 8
-
- Avatar {
- width: avatarSize
- height: avatarSize
- url: chat.model.avatarUrl(modelData.userId).replace("mxc://", "image://MxcImage/")
- displayName: modelData.userName
- userid: modelData.userId
-
- MouseArea {
- anchors.fill: parent
- onClicked: chat.model.openUserProfile(modelData.userId)
- cursorShape: Qt.PointingHandCursor
- propagateComposedEvents: true
- }
-
- }
+ visible: !!modelData
+ width: parent.width
+ height: (section.includes(" ") ? dateBubble.height + 8 + userName.height : userName.height) + 8
Label {
- id: userName
+ id: dateBubble
- text: TimelineManager.escapeEmoji(modelData.userName)
- color: TimelineManager.userColor(modelData.userId, colors.window)
- textFormat: Text.RichText
+ anchors.horizontalCenter: parent ? parent.horizontalCenter : undefined
+ visible: section.includes(" ")
+ text: chat.model.formatDateSeparator(modelData.timestamp)
+ color: colors.text
+ height: fontMetrics.height * 1.4
+ width: contentWidth * 1.2
+ horizontalAlignment: Text.AlignHCenter
+ verticalAlignment: Text.AlignVCenter
+
+ background: Rectangle {
+ radius: parent.height / 2
+ color: colors.base
+ }
+
+ }
+
+ Row {
+ height: userName.height
+ spacing: 8
+
+ Avatar {
+ width: avatarSize
+ height: avatarSize
+ url: chat.model.avatarUrl(modelData.userId).replace("mxc://", "image://MxcImage/")
+ displayName: modelData.userName
+ userid: modelData.userId
+
+ MouseArea {
+ anchors.fill: parent
+ onClicked: chat.model.openUserProfile(modelData.userId)
+ cursorShape: Qt.PointingHandCursor
+ propagateComposedEvents: true
+ }
+
+ }
+
+ Label {
+ id: userName
+
+ text: TimelineManager.escapeEmoji(modelData.userName)
+ color: TimelineManager.userColor(modelData.userId, colors.window)
+ textFormat: Text.RichText
+
+ MouseArea {
+ anchors.fill: parent
+ Layout.alignment: Qt.AlignHCenter
+ onClicked: chat.model.openUserProfile(modelData.userId)
+ cursorShape: Qt.PointingHandCursor
+ propagateComposedEvents: true
+ }
- MouseArea {
- anchors.fill: parent
- Layout.alignment: Qt.AlignHCenter
- onClicked: chat.model.openUserProfile(modelData.userId)
- cursorShape: Qt.PointingHandCursor
- propagateComposedEvents: true
}
}
@@ -428,62 +441,67 @@ Page {
}
- }
-
- ScrollBar.vertical: ScrollBar {
- id: scrollbar
- }
-
- delegate: Item {
- id: wrapper
-
- // This would normally be previousSection, but our model's order is inverted.
- property bool sectionBoundary: (ListView.nextSection != "" && ListView.nextSection !== ListView.section) || model.index === chat.count - 1
- property Item section
-
- anchors.horizontalCenter: parent ? parent.horizontalCenter : undefined
- width: chat.delegateMaxWidth
- height: section ? section.height + timelinerow.height : timelinerow.height
- onSectionBoundaryChanged: {
- if (sectionBoundary) {
- var properties = {
- "modelData": model.dump,
- "section": ListView.section,
- "nextSection": ListView.nextSection
- };
- section = sectionHeader.createObject(wrapper, properties);
- } else {
- section.destroy();
- section = null;
- }
+ ScrollBar.vertical: ScrollBar {
+ id: scrollbar
}
- TimelineRow {
- id: timelinerow
+ delegate: Item {
+ id: wrapper
- y: section ? section.y + section.height : 0
- }
-
- Connections {
- function onMovementEnded() {
- if (y + height + 2 * chat.spacing > chat.contentY + chat.height && y < chat.contentY + chat.height)
- chat.model.currentIndex = index;
+ // This would normally be previousSection, but our model's order is inverted.
+ property bool sectionBoundary: (ListView.nextSection != "" && ListView.nextSection !== ListView.section) || model.index === chat.count - 1
+ property Item section
+ anchors.horizontalCenter: parent ? parent.horizontalCenter : undefined
+ width: chat.delegateMaxWidth
+ height: section ? section.height + timelinerow.height : timelinerow.height
+ onSectionBoundaryChanged: {
+ if (sectionBoundary) {
+ var properties = {
+ "modelData": model.dump,
+ "section": ListView.section,
+ "nextSection": ListView.nextSection
+ };
+ section = sectionHeader.createObject(wrapper, properties);
+ } else {
+ section.destroy();
+ section = null;
+ }
}
- target: chat
+ TimelineRow {
+ id: timelinerow
+
+ y: section ? section.y + section.height : 0
+ }
+
+ Connections {
+ function onMovementEnded() {
+ if (y + height + 2 * chat.spacing > chat.contentY + chat.height && y < chat.contentY + chat.height)
+ chat.model.currentIndex = index;
+
+ }
+
+ target: chat
+ }
+
+ }
+
+ footer: BusyIndicator {
+ anchors.horizontalCenter: parent.horizontalCenter
+ running: chat.model && chat.model.paginationInProgress
+ height: 50
+ width: 50
+ z: 3
}
}
- footer: BusyIndicator {
- anchors.horizontalCenter: parent.horizontalCenter
- running: chat.model && chat.model.paginationInProgress
- height: 50
- width: 50
- z: 3
+ Loader {
+ id: videoCallLoader
+ source: TimelineManager.onVideoCall ? "VideoCall.qml" : ""
+ onLoaded: TimelineManager.setVideoCallItem()
}
-
}
Item {
diff --git a/resources/qml/VideoCall.qml b/resources/qml/VideoCall.qml
new file mode 100644
index 00000000..69fc1a2b
--- /dev/null
+++ b/resources/qml/VideoCall.qml
@@ -0,0 +1,7 @@
+import QtQuick 2.9
+
+import org.freedesktop.gstreamer.GLVideoItem 1.0
+
+GstGLVideoItem {
+ objectName: "videoCallItem"
+}
diff --git a/resources/res.qrc b/resources/res.qrc
index 87216e30..dc5c9969 100644
--- a/resources/res.qrc
+++ b/resources/res.qrc
@@ -74,6 +74,7 @@
icons/ui/end-call.png
icons/ui/microphone-mute.png
icons/ui/microphone-unmute.png
+ icons/ui/video-call.png
icons/emoji-categories/people.png
icons/emoji-categories/people@2x.png
@@ -130,6 +131,7 @@
qml/Reactions.qml
qml/ScrollHelper.qml
qml/TimelineRow.qml
+ qml/VideoCall.qml
qml/emoji/EmojiButton.qml
qml/emoji/EmojiPicker.qml
qml/UserProfile.qml
diff --git a/src/CallManager.cpp b/src/CallManager.cpp
index b1d1a75a..4cd98a9f 100644
--- a/src/CallManager.cpp
+++ b/src/CallManager.cpp
@@ -25,9 +25,6 @@ Q_DECLARE_METATYPE(mtx::responses::TurnServer)
using namespace mtx::events;
using namespace mtx::events::msg;
-// https://github.com/vector-im/riot-web/issues/10173
-#define STUN_SERVER "stun://turn.matrix.org:3478"
-
namespace {
std::vector<std::string>
getTurnURIs(const mtx::responses::TurnServer &turnServer);
@@ -43,6 +40,8 @@ CallManager::CallManager(QSharedPointer<UserSettings> userSettings)
qRegisterMetaType();
qRegisterMetaType();
+ session_.setSettings(userSettings);
+
connect(
&session_,
&WebRTCSession::offerCreated,
@@ -128,30 +127,29 @@ CallManager::CallManager(QSharedPointer userSettings)
}
void
-CallManager::sendInvite(const QString &roomid)
+CallManager::sendInvite(const QString &roomid, bool isVideo)
{
if (onActiveCall())
return;
auto roomInfo = cache::singleRoomInfo(roomid.toStdString());
if (roomInfo.member_count != 2) {
- emit ChatPage::instance()->showNotification(
- "Voice calls are limited to 1:1 rooms.");
+ emit ChatPage::instance()->showNotification("Calls are limited to 1:1 rooms.");
return;
}
std::string errorMessage;
- if (!session_.init(&errorMessage)) {
+ if (!session_.havePlugins(false, &errorMessage) ||
+ (isVideo && !session_.havePlugins(true, &errorMessage))) {
emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
return;
}
roomid_ = roomid;
- session_.setStunServer(settings_->useStunServer() ? STUN_SERVER : "");
session_.setTurnServers(turnURIs_);
-
generateCallID();
- nhlog::ui()->debug("WebRTC: call id: {} - creating invite", callid_);
+ nhlog::ui()->debug(
+ "WebRTC: call id: {} - creating {} invite", callid_, isVideo ? "video" : "voice");
std::vector<RoomMember> members(cache::getMembers(roomid.toStdString()));
const RoomMember &callee =
members.front().user_id == utils::localUser() ? members.back() : members.front();
@@ -159,10 +157,12 @@ CallManager::sendInvite(const QString &roomid)
callPartyAvatarUrl_ = QString::fromStdString(roomInfo.avatar_url);
emit newCallParty();
playRingtone("qrc:/media/media/ringback.ogg", true);
- if (!session_.createOffer()) {
+ if (!session_.createOffer(isVideo)) {
emit ChatPage::instance()->showNotification("Problem setting up call.");
endCall();
}
+ if (isVideo)
+ emit newVideoCallState();
}
namespace {
@@ -242,7 +242,7 @@ CallManager::handleEvent(const RoomEvent<CallInvite> &callInviteEvent)
return;
auto roomInfo = cache::singleRoomInfo(callInviteEvent.room_id);
- if (onActiveCall() || roomInfo.member_count != 2 || isVideo) {
+ if (onActiveCall() || roomInfo.member_count != 2) {
emit newMessage(QString::fromStdString(callInviteEvent.room_id),
CallHangUp{callInviteEvent.content.call_id,
0,
@@ -266,10 +266,11 @@ CallManager::handleEvent(const RoomEvent<CallInvite> &callInviteEvent)
QString::fromStdString(roomInfo.name),
QString::fromStdString(roomInfo.avatar_url),
settings_,
+ isVideo,
MainWindow::instance());
- connect(dialog, &dialogs::AcceptCall::accept, this, [this, callInviteEvent]() {
+ connect(dialog, &dialogs::AcceptCall::accept, this, [this, callInviteEvent, isVideo]() {
MainWindow::instance()->hideOverlay();
- answerInvite(callInviteEvent.content);
+ answerInvite(callInviteEvent.content, isVideo);
});
connect(dialog, &dialogs::AcceptCall::reject, this, [this]() {
MainWindow::instance()->hideOverlay();
@@ -279,19 +280,18 @@ CallManager::handleEvent(const RoomEvent<CallInvite> &callInviteEvent)
}
void
-CallManager::answerInvite(const CallInvite &invite)
+CallManager::answerInvite(const CallInvite &invite, bool isVideo)
{
stopRingtone();
std::string errorMessage;
- if (!session_.init(&errorMessage)) {
+ if (!session_.havePlugins(false, &errorMessage) ||
+ (isVideo && !session_.havePlugins(true, &errorMessage))) {
emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
hangUp();
return;
}
- session_.setStunServer(settings_->useStunServer() ? STUN_SERVER : "");
session_.setTurnServers(turnURIs_);
-
if (!session_.acceptOffer(invite.sdp)) {
emit ChatPage::instance()->showNotification("Problem setting up call.");
hangUp();
@@ -299,6 +299,8 @@ CallManager::answerInvite(const CallInvite &invite)
}
session_.acceptICECandidates(remoteICECandidates_);
remoteICECandidates_.clear();
+ if (isVideo)
+ emit newVideoCallState();
}
void
@@ -384,7 +386,10 @@ CallManager::endCall()
{
stopRingtone();
clear();
+ bool isVideo = session_.isVideo();
session_.end();
+ if (isVideo)
+ emit newVideoCallState();
}
void
diff --git a/src/CallManager.h b/src/CallManager.h
index 640230a4..c3afa155 100644
--- a/src/CallManager.h
+++ b/src/CallManager.h
@@ -26,7 +26,7 @@ class CallManager : public QObject
public:
CallManager(QSharedPointer<UserSettings>);
- void sendInvite(const QString &roomid);
+ void sendInvite(const QString &roomid, bool isVideo);
void hangUp(
mtx::events::msg::CallHangUp::Reason = mtx::events::msg::CallHangUp::Reason::User);
bool onActiveCall() const;
@@ -43,6 +43,7 @@ signals:
void newMessage(const QString &roomid, const mtx::events::msg::CallAnswer &);
void newMessage(const QString &roomid, const mtx::events::msg::CallHangUp &);
void newCallParty();
+ void newVideoCallState();
void turnServerRetrieved(const mtx::responses::TurnServer &);
private slots:
@@ -67,7 +68,7 @@ private:
void handleEvent(const mtx::events::RoomEvent &);
void handleEvent(const mtx::events::RoomEvent &);
void handleEvent(const mtx::events::RoomEvent &);
- void answerInvite(const mtx::events::msg::CallInvite &);
+ void answerInvite(const mtx::events::msg::CallInvite &, bool isVideo);
void generateCallID();
void clear();
void endCall();
diff --git a/src/ChatPage.cpp b/src/ChatPage.cpp
index 8e93c0f4..e0ac31ab 100644
--- a/src/ChatPage.cpp
+++ b/src/ChatPage.cpp
@@ -437,7 +437,7 @@ ChatPage::ChatPage(QSharedPointer<UserSettings> userSettings, QWidget *parent)
} else {
if (auto roomInfo = cache::singleRoomInfo(current_room_.toStdString());
roomInfo.member_count != 2) {
- showNotification("Voice calls are limited to 1:1 rooms.");
+ showNotification("Calls are limited to 1:1 rooms.");
} else {
std::vector<RoomMember> members(
cache::getMembers(current_room_.toStdString()));
@@ -452,7 +452,10 @@ ChatPage::ChatPage(QSharedPointer<UserSettings> userSettings, QWidget *parent)
userSettings_,
MainWindow::instance());
connect(dialog, &dialogs::PlaceCall::voice, this, [this]() {
- callManager_.sendInvite(current_room_);
+ callManager_.sendInvite(current_room_, false);
+ });
+ connect(dialog, &dialogs::PlaceCall::video, this, [this]() {
+ callManager_.sendInvite(current_room_, true);
});
utils::centerWidget(dialog, MainWindow::instance());
dialog->show();
diff --git a/src/UserSettingsPage.cpp b/src/UserSettingsPage.cpp
index 7d81e663..f04193c9 100644
--- a/src/UserSettingsPage.cpp
+++ b/src/UserSettingsPage.cpp
@@ -42,6 +42,7 @@
#include "Olm.h"
#include "UserSettingsPage.h"
#include "Utils.h"
+#include "WebRTCSession.h"
#include "ui/FlatButton.h"
#include "ui/ToggleButton.h"
@@ -77,8 +78,11 @@ UserSettings::load()
presence_ =
settings.value("user/presence", QVariant::fromValue(Presence::AutomaticPresence))
.value<Presence>();
- useStunServer_ = settings.value("user/use_stun_server", false).toBool();
- defaultAudioSource_ = settings.value("user/default_audio_source", QString()).toString();
+ microphone_ = settings.value("user/microphone", QString()).toString();
+ camera_ = settings.value("user/camera", QString()).toString();
+ cameraResolution_ = settings.value("user/camera_resolution", QString()).toString();
+ cameraFrameRate_ = settings.value("user/camera_frame_rate", QString()).toString();
+ useStunServer_ = settings.value("user/use_stun_server", false).toBool();
applyTheme();
}
@@ -292,12 +296,42 @@ UserSettings::setUseStunServer(bool useStunServer)
}
void
-UserSettings::setDefaultAudioSource(const QString &defaultAudioSource)
+UserSettings::setMicrophone(QString microphone)
{
- if (defaultAudioSource == defaultAudioSource_)
+ if (microphone == microphone_)
return;
- defaultAudioSource_ = defaultAudioSource;
- emit defaultAudioSourceChanged(defaultAudioSource);
+ microphone_ = microphone;
+ emit microphoneChanged(microphone);
+ save();
+}
+
+void
+UserSettings::setCamera(QString camera)
+{
+ if (camera == camera_)
+ return;
+ camera_ = camera;
+ emit cameraChanged(camera);
+ save();
+}
+
+void
+UserSettings::setCameraResolution(QString resolution)
+{
+ if (resolution == cameraResolution_)
+ return;
+ cameraResolution_ = resolution;
+ emit cameraResolutionChanged(resolution);
+ save();
+}
+
+void
+UserSettings::setCameraFrameRate(QString frameRate)
+{
+ if (frameRate == cameraFrameRate_)
+ return;
+ cameraFrameRate_ = frameRate;
+ emit cameraFrameRateChanged(frameRate);
save();
}
@@ -386,8 +420,11 @@ UserSettings::save()
settings.setValue("font_family", font_);
settings.setValue("emoji_font_family", emojiFont_);
settings.setValue("presence", QVariant::fromValue(presence_));
+ settings.setValue("microphone", microphone_);
+ settings.setValue("camera", camera_);
+ settings.setValue("camera_resolution", cameraResolution_);
+ settings.setValue("camera_frame_rate", cameraFrameRate_);
settings.setValue("use_stun_server", useStunServer_);
- settings.setValue("default_audio_source", defaultAudioSource_);
settings.endGroup();
@@ -458,6 +495,10 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
fontSizeCombo_ = new QComboBox{this};
fontSelectionCombo_ = new QComboBox{this};
emojiFontSelectionCombo_ = new QComboBox{this};
+ microphoneCombo_ = new QComboBox{this};
+ cameraCombo_ = new QComboBox{this};
+ cameraResolutionCombo_ = new QComboBox{this};
+ cameraFrameRateCombo_ = new QComboBox{this};
timelineMaxWidthSpin_ = new QSpinBox{this};
if (!settings_->tray())
@@ -645,6 +686,14 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
formLayout_->addRow(callsLabel);
formLayout_->addRow(new HorizontalLine{this});
+ boxWrap(tr("Microphone"), microphoneCombo_);
+ boxWrap(tr("Camera"), cameraCombo_);
+ boxWrap(tr("Camera resolution"), cameraResolutionCombo_);
+ boxWrap(tr("Camera frame rate"), cameraFrameRateCombo_);
+ microphoneCombo_->setSizeAdjustPolicy(QComboBox::AdjustToContents);
+ cameraCombo_->setSizeAdjustPolicy(QComboBox::AdjustToContents);
+ cameraResolutionCombo_->setSizeAdjustPolicy(QComboBox::AdjustToContents);
+ cameraFrameRateCombo_->setSizeAdjustPolicy(QComboBox::AdjustToContents);
boxWrap(tr("Allow fallback call assist server"),
useStunServer_,
tr("Will use turn.matrix.org as assist when your home server does not offer one."));
@@ -698,6 +747,38 @@ UserSettingsPage::UserSettingsPage(QSharedPointer<UserSettings> settings, QWidge
connect(emojiFontSelectionCombo_,
static_cast<void (QComboBox::*)(const QString &)>(&QComboBox::currentTextChanged),
[this](const QString &family) { settings_->setEmojiFontFamily(family.trimmed()); });
+
+ connect(microphoneCombo_,
+ static_cast<void (QComboBox::*)(const QString &)>(&QComboBox::currentTextChanged),
+ [this](const QString &microphone) { settings_->setMicrophone(microphone); });
+
+ connect(cameraCombo_,
+ static_cast<void (QComboBox::*)(const QString &)>(&QComboBox::currentTextChanged),
+ [this](const QString &camera) {
+ settings_->setCamera(camera);
+ std::vector<std::string> resolutions =
+ WebRTCSession::instance().getResolutions(camera.toStdString());
+ cameraResolutionCombo_->clear();
+ for (const auto &resolution : resolutions)
+ cameraResolutionCombo_->addItem(QString::fromStdString(resolution));
+ });
+
+ connect(cameraResolutionCombo_,
+ static_cast<void (QComboBox::*)(const QString &)>(&QComboBox::currentTextChanged),
+ [this](const QString &resolution) {
+ settings_->setCameraResolution(resolution);
+ std::vector<std::string> frameRates =
+ WebRTCSession::instance().getFrameRates(settings_->camera().toStdString(),
+ resolution.toStdString());
+ cameraFrameRateCombo_->clear();
+ for (const auto &frameRate : frameRates)
+ cameraFrameRateCombo_->addItem(QString::fromStdString(frameRate));
+ });
+
+ connect(cameraFrameRateCombo_,
+ static_cast<void (QComboBox::*)(const QString &)>(&QComboBox::currentTextChanged),
+ [this](const QString &frameRate) { settings_->setCameraFrameRate(frameRate); });
+
connect(trayToggle_, &Toggle::toggled, this, [this](bool disabled) {
settings_->setTray(!disabled);
if (disabled) {
@@ -807,6 +888,26 @@ UserSettingsPage::showEvent(QShowEvent *)
enlargeEmojiOnlyMessages_->setState(!settings_->enlargeEmojiOnlyMessages());
deviceIdValue_->setText(QString::fromStdString(http::client()->device_id()));
timelineMaxWidthSpin_->setValue(settings_->timelineMaxWidth());
+
+ WebRTCSession::instance().refreshDevices();
+ auto mics =
+ WebRTCSession::instance().getDeviceNames(false, settings_->microphone().toStdString());
+ microphoneCombo_->clear();
+ for (const auto &m : mics)
+ microphoneCombo_->addItem(QString::fromStdString(m));
+
+ auto cameraResolution = settings_->cameraResolution();
+ auto cameraFrameRate = settings_->cameraFrameRate();
+
+ auto cameras =
+ WebRTCSession::instance().getDeviceNames(true, settings_->camera().toStdString());
+ cameraCombo_->clear();
+ for (const auto &c : cameras)
+ cameraCombo_->addItem(QString::fromStdString(c));
+
+ utils::restoreCombobox(cameraResolutionCombo_, cameraResolution);
+ utils::restoreCombobox(cameraFrameRateCombo_, cameraFrameRate);
+
useStunServer_->setState(!settings_->useStunServer());
deviceFingerprintValue_->setText(
diff --git a/src/UserSettingsPage.h b/src/UserSettingsPage.h
index e947bfae..9d291303 100644
--- a/src/UserSettingsPage.h
+++ b/src/UserSettingsPage.h
@@ -73,8 +73,12 @@ class UserSettings : public QObject
Q_PROPERTY(Presence presence READ presence WRITE setPresence NOTIFY presenceChanged)
Q_PROPERTY(
bool useStunServer READ useStunServer WRITE setUseStunServer NOTIFY useStunServerChanged)
- Q_PROPERTY(QString defaultAudioSource READ defaultAudioSource WRITE setDefaultAudioSource
- NOTIFY defaultAudioSourceChanged)
+ Q_PROPERTY(QString microphone READ microphone WRITE setMicrophone NOTIFY microphoneChanged)
+ Q_PROPERTY(QString camera READ camera WRITE setCamera NOTIFY cameraChanged)
+ Q_PROPERTY(QString cameraResolution READ cameraResolution WRITE setCameraResolution NOTIFY
+ cameraResolutionChanged)
+ Q_PROPERTY(QString cameraFrameRate READ cameraFrameRate WRITE setCameraFrameRate NOTIFY
+ cameraFrameRateChanged)
public:
UserSettings();
@@ -111,8 +115,11 @@ public:
void setAvatarCircles(bool state);
void setDecryptSidebar(bool state);
void setPresence(Presence state);
+ void setMicrophone(QString microphone);
+ void setCamera(QString camera);
+ void setCameraResolution(QString resolution);
+ void setCameraFrameRate(QString frameRate);
void setUseStunServer(bool state);
- void setDefaultAudioSource(const QString &deviceName);
QString theme() const { return !theme_.isEmpty() ? theme_ : defaultTheme_; }
bool messageHoverHighlight() const { return messageHoverHighlight_; }
@@ -138,8 +145,11 @@ public:
QString font() const { return font_; }
QString emojiFont() const { return emojiFont_; }
Presence presence() const { return presence_; }
+ QString microphone() const { return microphone_; }
+ QString camera() const { return camera_; }
+ QString cameraResolution() const { return cameraResolution_; }
+ QString cameraFrameRate() const { return cameraFrameRate_; }
bool useStunServer() const { return useStunServer_; }
- QString defaultAudioSource() const { return defaultAudioSource_; }
signals:
void groupViewStateChanged(bool state);
@@ -162,8 +172,11 @@ signals:
void fontChanged(QString state);
void emojiFontChanged(QString state);
void presenceChanged(Presence state);
+ void microphoneChanged(QString microphone);
+ void cameraChanged(QString camera);
+ void cameraResolutionChanged(QString resolution);
+ void cameraFrameRateChanged(QString frameRate);
void useStunServerChanged(bool state);
- void defaultAudioSourceChanged(const QString &deviceName);
private:
// Default to system theme if QT_QPA_PLATFORMTHEME var is set.
@@ -191,8 +204,11 @@ private:
QString font_;
QString emojiFont_;
Presence presence_;
+ QString microphone_;
+ QString camera_;
+ QString cameraResolution_;
+ QString cameraFrameRate_;
bool useStunServer_;
- QString defaultAudioSource_;
};
class HorizontalLine : public QFrame
@@ -256,6 +272,10 @@ private:
QComboBox *fontSizeCombo_;
QComboBox *fontSelectionCombo_;
QComboBox *emojiFontSelectionCombo_;
+ QComboBox *microphoneCombo_;
+ QComboBox *cameraCombo_;
+ QComboBox *cameraResolutionCombo_;
+ QComboBox *cameraFrameRateCombo_;
QSpinBox *timelineMaxWidthSpin_;
diff --git a/src/WebRTCSession.cpp b/src/WebRTCSession.cpp
index 1c11f750..177bdf7a 100644
--- a/src/WebRTCSession.cpp
+++ b/src/WebRTCSession.cpp
@@ -1,7 +1,16 @@
#include
+#include
+#include
#include
+#include
+#include
+#include
+#include
+#include
+#include
#include "Logging.h"
+#include "UserSettingsPage.h"
#include "WebRTCSession.h"
#ifdef GSTREAMER_AVAILABLE
@@ -15,6 +24,9 @@ extern "C"
}
#endif
+// https://github.com/vector-im/riot-web/issues/10173
+constexpr std::string_view STUN_SERVER = "stun://turn.matrix.org:3478";
+
Q_DECLARE_METATYPE(webrtc::State)
using webrtc::State;
@@ -39,7 +51,7 @@ WebRTCSession::init(std::string *errorMessage)
GError *error = nullptr;
if (!gst_init_check(nullptr, nullptr, &error)) {
- std::string strError = std::string("WebRTC: failed to initialise GStreamer: ");
+ std::string strError("WebRTC: failed to initialise GStreamer: ");
if (error) {
strError += error->message;
g_error_free(error);
@@ -50,51 +62,14 @@ WebRTCSession::init(std::string *errorMessage)
return false;
}
+ initialised_ = true;
gchar *version = gst_version_string();
- std::string gstVersion(version);
+ nhlog::ui()->info("WebRTC: initialised {}", version);
g_free(version);
- nhlog::ui()->info("WebRTC: initialised " + gstVersion);
-
- // GStreamer Plugins:
- // Base: audioconvert, audioresample, opus, playback, volume
- // Good: autodetect, rtpmanager
- // Bad: dtls, srtp, webrtc
- // libnice [GLib]: nice
- initialised_ = true;
- std::string strError = gstVersion + ": Missing plugins: ";
- const gchar *needed[] = {"audioconvert",
- "audioresample",
- "autodetect",
- "dtls",
- "nice",
- "opus",
- "playback",
- "rtpmanager",
- "srtp",
- "volume",
- "webrtc",
- nullptr};
- GstRegistry *registry = gst_registry_get();
- for (guint i = 0; i < g_strv_length((gchar **)needed); i++) {
- GstPlugin *plugin = gst_registry_find_plugin(registry, needed[i]);
- if (!plugin) {
- strError += std::string(needed[i]) + " ";
- initialised_ = false;
- continue;
- }
- gst_object_unref(plugin);
- }
-
- if (initialised_) {
#if GST_CHECK_VERSION(1, 18, 0)
- startDeviceMonitor();
+ startDeviceMonitor();
#endif
- } else {
- nhlog::ui()->error(strError);
- if (errorMessage)
- *errorMessage = strError;
- }
- return initialised_;
+ return true;
#else
(void)errorMessage;
return false;
@@ -103,37 +78,154 @@ WebRTCSession::init(std::string *errorMessage)
#ifdef GSTREAMER_AVAILABLE
namespace {
-bool isoffering_;
+
+struct AudioSource
+{
+ std::string name;
+ GstDevice *device;
+};
+
+struct VideoSource
+{
+ struct Caps
+ {
+ std::string resolution;
+ std::vector frameRates;
+ };
+ std::string name;
+ GstDevice *device;
+ std::vector caps;
+};
+
std::string localsdp_;
std::vector localcandidates_;
-std::vector<std::pair<std::string, GstDevice *>> audioSources_;
+bool haveAudioStream_;
+bool haveVideoStream_;
+std::vector<AudioSource> audioSources_;
+std::vector<VideoSource> videoSources_;
+
+using FrameRate = std::pair<int, int>;
+std::optional<FrameRate>
+getFrameRate(const GValue *value)
+{
+ if (GST_VALUE_HOLDS_FRACTION(value)) {
+ gint num = gst_value_get_fraction_numerator(value);
+ gint den = gst_value_get_fraction_denominator(value);
+ return FrameRate{num, den};
+ }
+ return std::nullopt;
+}
+
+void
+addFrameRate(std::vector<std::string> &rates, const FrameRate &rate)
+{
+ constexpr double minimumFrameRate = 15.0;
+ if (static_cast(rate.first) / rate.second >= minimumFrameRate)
+ rates.push_back(std::to_string(rate.first) + "/" + std::to_string(rate.second));
+}
+
+std::pair<int, int>
+tokenise(std::string_view str, char delim)
+{
+ std::pair<int, int> ret;
+ auto pos = str.find_first_of(delim);
+ auto s = str.data();
+ std::from_chars(s, s + pos, ret.first);
+ std::from_chars(s + pos + 1, s + str.size(), ret.second);
+ return ret;
+}
void
addDevice(GstDevice *device)
{
- if (device) {
- gchar *name = gst_device_get_display_name(device);
- nhlog::ui()->debug("WebRTC: device added: {}", name);
+ if (!device)
+ return;
+
+ gchar *name = gst_device_get_display_name(device);
+ gchar *type = gst_device_get_device_class(device);
+ bool isVideo = !std::strncmp(type, "Video", 5);
+ g_free(type);
+ nhlog::ui()->debug("WebRTC: {} device added: {}", isVideo ? "video" : "audio", name);
+ if (!isVideo) {
audioSources_.push_back({name, device});
g_free(name);
+ return;
}
+
+ GstCaps *gstcaps = gst_device_get_caps(device);
+ if (!gstcaps) {
+ nhlog::ui()->debug("WebRTC: unable to get caps for {}", name);
+ g_free(name);
+ return;
+ }
+
+ VideoSource source{name, device, {}};
+ g_free(name);
+ guint nCaps = gst_caps_get_size(gstcaps);
+ for (guint i = 0; i < nCaps; ++i) {
+ GstStructure *structure = gst_caps_get_structure(gstcaps, i);
+ const gchar *name = gst_structure_get_name(structure);
+ if (!std::strcmp(name, "video/x-raw")) {
+ gint widthpx, heightpx;
+ if (gst_structure_get(structure,
+ "width",
+ G_TYPE_INT,
+ &widthpx,
+ "height",
+ G_TYPE_INT,
+ &heightpx,
+ nullptr)) {
+ VideoSource::Caps caps;
+ caps.resolution =
+ std::to_string(widthpx) + "x" + std::to_string(heightpx);
+ const GValue *value =
+ gst_structure_get_value(structure, "framerate");
+ if (auto fr = getFrameRate(value); fr)
+ addFrameRate(caps.frameRates, *fr);
+ else if (GST_VALUE_HOLDS_LIST(value)) {
+ guint nRates = gst_value_list_get_size(value);
+ for (guint j = 0; j < nRates; ++j) {
+ const GValue *rate =
+ gst_value_list_get_value(value, j);
+ if (auto fr = getFrameRate(rate); fr)
+ addFrameRate(caps.frameRates, *fr);
+ }
+ }
+ if (!caps.frameRates.empty())
+ source.caps.push_back(std::move(caps));
+ }
+ }
+ }
+ gst_caps_unref(gstcaps);
+ videoSources_.push_back(std::move(source));
}
#if GST_CHECK_VERSION(1, 18, 0)
+template<typename T>
+bool
+removeDevice(T &sources, GstDevice *device, bool changed)
+{
+ if (auto it = std::find_if(sources.begin(),
+ sources.end(),
+ [device](const auto &s) { return s.device == device; });
+ it != sources.end()) {
+ nhlog::ui()->debug(std::string("WebRTC: device ") +
+ (changed ? "changed: " : "removed: ") + "{}",
+ it->name);
+ gst_object_unref(device);
+ sources.erase(it);
+ return true;
+ }
+ return false;
+}
+
void
removeDevice(GstDevice *device, bool changed)
{
if (device) {
- if (auto it = std::find_if(audioSources_.begin(),
- audioSources_.end(),
- [device](const auto &s) { return s.second == device; });
- it != audioSources_.end()) {
- nhlog::ui()->debug(std::string("WebRTC: device ") +
- (changed ? "changed: " : "removed: ") + "{}",
- it->first);
- gst_object_unref(device);
- audioSources_.erase(it);
- }
+ if (removeDevice(audioSources_, device, changed) ||
+ removeDevice(videoSources_, device, changed))
+ return;
}
}
#endif
@@ -194,7 +286,7 @@ parseSDP(const std::string &sdp, GstWebRTCSDPType type)
return gst_webrtc_session_description_new(type, msg);
} else {
nhlog::ui()->error("WebRTC: failed to parse remote session description");
- gst_object_unref(msg);
+ gst_sdp_message_free(msg);
return nullptr;
}
}
@@ -250,7 +342,7 @@ iceGatheringStateChanged(GstElement *webrtc,
g_object_get(webrtc, "ice-gathering-state", &newState, nullptr);
if (newState == GST_WEBRTC_ICE_GATHERING_STATE_COMPLETE) {
nhlog::ui()->debug("WebRTC: GstWebRTCICEGatheringState -> Complete");
- if (isoffering_) {
+ if (WebRTCSession::instance().isOffering()) {
emit WebRTCSession::instance().offerCreated(localsdp_, localcandidates_);
emit WebRTCSession::instance().stateChanged(State::OFFERSENT);
} else {
@@ -266,7 +358,7 @@ gboolean
onICEGatheringCompletion(gpointer timerid)
{
*(guint *)(timerid) = 0;
- if (isoffering_) {
+ if (WebRTCSession::instance().isOffering()) {
emit WebRTCSession::instance().offerCreated(localsdp_, localcandidates_);
emit WebRTCSession::instance().stateChanged(State::OFFERSENT);
} else {
@@ -286,25 +378,25 @@ addLocalICECandidate(GstElement *webrtc G_GNUC_UNUSED,
nhlog::ui()->debug("WebRTC: local candidate: (m-line:{}):{}", mlineIndex, candidate);
#if GST_CHECK_VERSION(1, 18, 0)
- localcandidates_.push_back({"audio", (uint16_t)mlineIndex, candidate});
+ localcandidates_.push_back({std::string() /*max-bundle*/, (uint16_t)mlineIndex, candidate});
return;
#else
if (WebRTCSession::instance().state() >= State::OFFERSENT) {
emit WebRTCSession::instance().newICECandidate(
- {"audio", (uint16_t)mlineIndex, candidate});
+ {std::string() /*max-bundle*/, (uint16_t)mlineIndex, candidate});
return;
}
- localcandidates_.push_back({"audio", (uint16_t)mlineIndex, candidate});
+ localcandidates_.push_back({std::string() /*max-bundle*/, (uint16_t)mlineIndex, candidate});
// GStreamer v1.16: webrtcbin's notify::ice-gathering-state triggers
// GST_WEBRTC_ICE_GATHERING_STATE_COMPLETE too early. Fixed in v1.18.
- // Use a 100ms timeout in the meantime
+ // Use a 1s timeout in the meantime
static guint timerid = 0;
if (timerid)
g_source_remove(timerid);
- timerid = g_timeout_add(100, onICEGatheringCompletion, &timerid);
+ timerid = g_timeout_add(1000, onICEGatheringCompletion, &timerid);
#endif
}
@@ -329,40 +421,166 @@ iceConnectionStateChanged(GstElement *webrtc,
}
}
-void
-linkNewPad(GstElement *decodebin G_GNUC_UNUSED, GstPad *newpad, GstElement *pipe)
+// https://gitlab.freedesktop.org/gstreamer/gst-plugins-bad/-/issues/1164
+struct KeyFrameRequestData
{
- GstCaps *caps = gst_pad_get_current_caps(newpad);
- if (!caps)
+ GstElement *pipe = nullptr;
+ GstElement *decodebin = nullptr;
+ gint packetsLost = 0;
+ guint timerid = 0;
+ std::string statsField;
+} keyFrameRequestData_;
+
+void
+sendKeyFrameRequest()
+{
+ GstPad *sinkpad = gst_element_get_static_pad(keyFrameRequestData_.decodebin, "sink");
+ if (!gst_pad_push_event(sinkpad,
+ gst_event_new_custom(GST_EVENT_CUSTOM_UPSTREAM,
+ gst_structure_new_empty("GstForceKeyUnit"))))
+ nhlog::ui()->error("WebRTC: key frame request failed");
+ else
+ nhlog::ui()->debug("WebRTC: sent key frame request");
+
+ gst_object_unref(sinkpad);
+}
+
+void
+testPacketLoss_(GstPromise *promise, gpointer G_GNUC_UNUSED)
+{
+ const GstStructure *reply = gst_promise_get_reply(promise);
+ gint packetsLost = 0;
+ GstStructure *rtpStats;
+ if (!gst_structure_get(reply,
+ keyFrameRequestData_.statsField.c_str(),
+ GST_TYPE_STRUCTURE,
+ &rtpStats,
+ nullptr)) {
+ nhlog::ui()->error("WebRTC: get-stats: no field: {}",
+ keyFrameRequestData_.statsField);
+ gst_promise_unref(promise);
return;
+ }
+ gst_structure_get_int(rtpStats, "packets-lost", &packetsLost);
+ gst_structure_free(rtpStats);
+ gst_promise_unref(promise);
+ if (packetsLost > keyFrameRequestData_.packetsLost) {
+ nhlog::ui()->debug("WebRTC: inbound video lost packet count: {}", packetsLost);
+ keyFrameRequestData_.packetsLost = packetsLost;
+ sendKeyFrameRequest();
+ }
+}
- const gchar *name = gst_structure_get_name(gst_caps_get_structure(caps, 0));
- gst_caps_unref(caps);
+gboolean
+testPacketLoss(gpointer G_GNUC_UNUSED)
+{
+ if (keyFrameRequestData_.pipe) {
+ GstElement *webrtc =
+ gst_bin_get_by_name(GST_BIN(keyFrameRequestData_.pipe), "webrtcbin");
+ GstPromise *promise =
+ gst_promise_new_with_change_func(testPacketLoss_, nullptr, nullptr);
+ g_signal_emit_by_name(webrtc, "get-stats", nullptr, promise);
+ gst_object_unref(webrtc);
+ return TRUE;
+ }
+ return FALSE;
+}
- GstPad *queuepad = nullptr;
- if (g_str_has_prefix(name, "audio")) {
+#if GST_CHECK_VERSION(1, 18, 0)
+void
+setWaitForKeyFrame(GstBin *decodebin G_GNUC_UNUSED, GstElement *element, gpointer G_GNUC_UNUSED)
+{
+ if (!std::strcmp(
+ gst_plugin_feature_get_name(GST_PLUGIN_FEATURE(gst_element_get_factory(element))),
+ "rtpvp8depay"))
+ g_object_set(element, "wait-for-keyframe", TRUE, nullptr);
+}
+#endif
+
+void
+linkNewPad(GstElement *decodebin, GstPad *newpad, GstElement *pipe)
+{
+ GstPad *sinkpad = gst_element_get_static_pad(decodebin, "sink");
+ GstCaps *sinkcaps = gst_pad_get_current_caps(sinkpad);
+ const GstStructure *structure = gst_caps_get_structure(sinkcaps, 0);
+
+ gchar *mediaType = nullptr;
+ guint ssrc = 0;
+ gst_structure_get(
+ structure, "media", G_TYPE_STRING, &mediaType, "ssrc", G_TYPE_UINT, &ssrc, nullptr);
+ gst_caps_unref(sinkcaps);
+ gst_object_unref(sinkpad);
+
+ WebRTCSession *session = &WebRTCSession::instance();
+ GstElement *queue = gst_element_factory_make("queue", nullptr);
+ if (!std::strcmp(mediaType, "audio")) {
nhlog::ui()->debug("WebRTC: received incoming audio stream");
- GstElement *queue = gst_element_factory_make("queue", nullptr);
+ haveAudioStream_ = true;
GstElement *convert = gst_element_factory_make("audioconvert", nullptr);
GstElement *resample = gst_element_factory_make("audioresample", nullptr);
GstElement *sink = gst_element_factory_make("autoaudiosink", nullptr);
+
gst_bin_add_many(GST_BIN(pipe), queue, convert, resample, sink, nullptr);
gst_element_link_many(queue, convert, resample, sink, nullptr);
gst_element_sync_state_with_parent(queue);
gst_element_sync_state_with_parent(convert);
gst_element_sync_state_with_parent(resample);
gst_element_sync_state_with_parent(sink);
- queuepad = gst_element_get_static_pad(queue, "sink");
+ } else if (!std::strcmp(mediaType, "video")) {
+ nhlog::ui()->debug("WebRTC: received incoming video stream");
+ if (!session->getVideoItem()) {
+ g_free(mediaType);
+ gst_object_unref(queue);
+ nhlog::ui()->error("WebRTC: video call item not set");
+ return;
+ }
+ haveVideoStream_ = true;
+ keyFrameRequestData_.statsField =
+ std::string("rtp-inbound-stream-stats_") + std::to_string(ssrc);
+ GstElement *videoconvert = gst_element_factory_make("videoconvert", nullptr);
+ GstElement *glupload = gst_element_factory_make("glupload", nullptr);
+ GstElement *glcolorconvert = gst_element_factory_make("glcolorconvert", nullptr);
+ GstElement *qmlglsink = gst_element_factory_make("qmlglsink", nullptr);
+ GstElement *glsinkbin = gst_element_factory_make("glsinkbin", nullptr);
+ g_object_set(qmlglsink, "widget", session->getVideoItem(), nullptr);
+ g_object_set(glsinkbin, "sink", qmlglsink, nullptr);
+
+ gst_bin_add_many(
+ GST_BIN(pipe), queue, videoconvert, glupload, glcolorconvert, glsinkbin, nullptr);
+ gst_element_link_many(
+ queue, videoconvert, glupload, glcolorconvert, glsinkbin, nullptr);
+ gst_element_sync_state_with_parent(queue);
+ gst_element_sync_state_with_parent(videoconvert);
+ gst_element_sync_state_with_parent(glupload);
+ gst_element_sync_state_with_parent(glcolorconvert);
+ gst_element_sync_state_with_parent(glsinkbin);
+ } else {
+ g_free(mediaType);
+ gst_object_unref(queue);
+ nhlog::ui()->error("WebRTC: unknown pad type: {}", GST_PAD_NAME(newpad));
+ return;
}
+ GstPad *queuepad = gst_element_get_static_pad(queue, "sink");
if (queuepad) {
if (GST_PAD_LINK_FAILED(gst_pad_link(newpad, queuepad)))
nhlog::ui()->error("WebRTC: unable to link new pad");
else {
- emit WebRTCSession::instance().stateChanged(State::CONNECTED);
+ if (!session->isVideo() ||
+ (haveAudioStream_ &&
+ (haveVideoStream_ || session->isRemoteVideoRecvOnly()))) {
+ emit session->stateChanged(State::CONNECTED);
+ if (haveVideoStream_) {
+ keyFrameRequestData_.pipe = pipe;
+ keyFrameRequestData_.decodebin = decodebin;
+ keyFrameRequestData_.timerid =
+ g_timeout_add_seconds(3, testPacketLoss, nullptr);
+ }
+ }
}
gst_object_unref(queuepad);
}
+ g_free(mediaType);
}
void
@@ -373,7 +591,12 @@ addDecodeBin(GstElement *webrtc G_GNUC_UNUSED, GstPad *newpad, GstElement *pipe)
nhlog::ui()->debug("WebRTC: received incoming stream");
GstElement *decodebin = gst_element_factory_make("decodebin", nullptr);
+ // hardware decoding needs investigation; eg rendering fails if vaapi plugin installed
+ g_object_set(decodebin, "force-sw-decoders", TRUE, nullptr);
g_signal_connect(decodebin, "pad-added", G_CALLBACK(linkNewPad), pipe);
+#if GST_CHECK_VERSION(1, 18, 0)
+ g_signal_connect(decodebin, "element-added", G_CALLBACK(setWaitForKeyFrame), pipe);
+#endif
gst_bin_add(GST_BIN(pipe), decodebin);
gst_element_sync_state_with_parent(decodebin);
GstPad *sinkpad = gst_element_get_static_pad(decodebin, "sink");
@@ -382,51 +605,134 @@ addDecodeBin(GstElement *webrtc G_GNUC_UNUSED, GstPad *newpad, GstElement *pipe)
gst_object_unref(sinkpad);
}
-std::string::const_iterator
-findName(const std::string &sdp, const std::string &name)
+bool
+strstr_(std::string_view str1, std::string_view str2)
{
- return std::search(
- sdp.cbegin(),
- sdp.cend(),
- name.cbegin(),
- name.cend(),
- [](unsigned char c1, unsigned char c2) { return std::tolower(c1) == std::tolower(c2); });
-}
-
-int
-getPayloadType(const std::string &sdp, const std::string &name)
-{
- // eg a=rtpmap:111 opus/48000/2
- auto e = findName(sdp, name);
- if (e == sdp.cend()) {
- nhlog::ui()->error("WebRTC: remote offer - " + name + " attribute missing");
- return -1;
- }
-
- if (auto s = sdp.rfind(':', e - sdp.cbegin()); s == std::string::npos) {
- nhlog::ui()->error("WebRTC: remote offer - unable to determine " + name +
- " payload type");
- return -1;
- } else {
- ++s;
- try {
- return std::stoi(std::string(sdp, s, e - sdp.cbegin() - s));
- } catch (...) {
- nhlog::ui()->error("WebRTC: remote offer - unable to determine " + name +
- " payload type");
- }
- }
- return -1;
-}
+ return std::search(str1.cbegin(),
+ str1.cend(),
+ str2.cbegin(),
+ str2.cend(),
+ [](unsigned char c1, unsigned char c2) {
+ return std::tolower(c1) == std::tolower(c2);
+ }) != str1.cend();
}
bool
-WebRTCSession::createOffer()
+getMediaAttributes(const GstSDPMessage *sdp,
+ const char *mediaType,
+ const char *encoding,
+ int &payloadType,
+ bool &recvOnly)
{
- isoffering_ = true;
+ payloadType = -1;
+ recvOnly = false;
+ for (guint mlineIndex = 0; mlineIndex < gst_sdp_message_medias_len(sdp); ++mlineIndex) {
+ const GstSDPMedia *media = gst_sdp_message_get_media(sdp, mlineIndex);
+ if (!std::strcmp(gst_sdp_media_get_media(media), mediaType)) {
+ recvOnly = gst_sdp_media_get_attribute_val(media, "recvonly") != nullptr;
+ const gchar *rtpval = nullptr;
+ for (guint n = 0; n == 0 || rtpval; ++n) {
+ rtpval = gst_sdp_media_get_attribute_val_n(media, "rtpmap", n);
+ if (rtpval && strstr_(rtpval, encoding)) {
+ payloadType = std::atoi(rtpval);
+ break;
+ }
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+template<typename T>
+std::vector<std::string>
+deviceNames(T &sources, const std::string &defaultDevice)
+{
+ std::vector<std::string> ret;
+ ret.reserve(sources.size());
+ std::transform(sources.cbegin(),
+ sources.cend(),
+ std::back_inserter(ret),
+ [](const auto &s) { return s.name; });
+
+ // move default device to top of the list
+ if (auto it = std::find_if(ret.begin(),
+ ret.end(),
+ [&defaultDevice](const auto &s) { return s == defaultDevice; });
+ it != ret.end())
+ std::swap(ret.front(), *it);
+
+ return ret;
+}
+
+}
+
+bool
+WebRTCSession::havePlugins(bool isVideo, std::string *errorMessage)
+{
+ if (!initialised_ && !init(errorMessage))
+ return false;
+ if (!isVideo && haveVoicePlugins_)
+ return true;
+ if (isVideo && haveVideoPlugins_)
+ return true;
+
+ const gchar *voicePlugins[] = {"audioconvert",
+ "audioresample",
+ "autodetect",
+ "dtls",
+ "nice",
+ "opus",
+ "playback",
+ "rtpmanager",
+ "srtp",
+ "volume",
+ "webrtc",
+ nullptr};
+
+ const gchar *videoPlugins[] = {"opengl", "qmlgl", "rtp", "videoconvert", "vpx", nullptr};
+
+ std::string strError("Missing GStreamer plugins: ");
+ const gchar **needed = isVideo ? videoPlugins : voicePlugins;
+ bool &havePlugins = isVideo ? haveVideoPlugins_ : haveVoicePlugins_;
+ havePlugins = true;
+ GstRegistry *registry = gst_registry_get();
+ for (guint i = 0; i < g_strv_length((gchar **)needed); i++) {
+ GstPlugin *plugin = gst_registry_find_plugin(registry, needed[i]);
+ if (!plugin) {
+ havePlugins = false;
+ strError += std::string(needed[i]) + " ";
+ continue;
+ }
+ gst_object_unref(plugin);
+ }
+ if (!havePlugins) {
+ nhlog::ui()->error(strError);
+ if (errorMessage)
+ *errorMessage = strError;
+ return false;
+ }
+
+ if (isVideo) {
+ // load qmlglsink to register GStreamer's GstGLVideoItem QML type
+ GstElement *qmlglsink = gst_element_factory_make("qmlglsink", nullptr);
+ gst_object_unref(qmlglsink);
+ }
+ return true;
+}
+
+bool
+WebRTCSession::createOffer(bool isVideo)
+{
+ isOffering_ = true;
+ isVideo_ = isVideo;
+ isRemoteVideoRecvOnly_ = false;
+ videoItem_ = nullptr;
+ haveAudioStream_ = false;
+ haveVideoStream_ = false;
localsdp_.clear();
localcandidates_.clear();
- return startPipeline(111); // a dynamic opus payload type
+ return startPipeline(111, isVideo ? 96 : -1); // dynamic payload types
}
bool
@@ -436,19 +742,42 @@ WebRTCSession::acceptOffer(const std::string &sdp)
if (state_ != State::DISCONNECTED)
return false;
- isoffering_ = false;
+ isOffering_ = false;
+ isRemoteVideoRecvOnly_ = false;
+ videoItem_ = nullptr;
+ haveAudioStream_ = false;
+ haveVideoStream_ = false;
localsdp_.clear();
localcandidates_.clear();
- int opusPayloadType = getPayloadType(sdp, "opus");
- if (opusPayloadType == -1)
- return false;
-
GstWebRTCSessionDescription *offer = parseSDP(sdp, GST_WEBRTC_SDP_TYPE_OFFER);
if (!offer)
return false;
- if (!startPipeline(opusPayloadType)) {
+ int opusPayloadType;
+ bool recvOnly;
+ if (getMediaAttributes(offer->sdp, "audio", "opus", opusPayloadType, recvOnly)) {
+ if (opusPayloadType == -1) {
+ nhlog::ui()->error("WebRTC: remote audio offer - no opus encoding");
+ gst_webrtc_session_description_free(offer);
+ return false;
+ }
+ } else {
+ nhlog::ui()->error("WebRTC: remote offer - no audio media");
+ gst_webrtc_session_description_free(offer);
+ return false;
+ }
+
+ int vp8PayloadType;
+ isVideo_ =
+ getMediaAttributes(offer->sdp, "video", "vp8", vp8PayloadType, isRemoteVideoRecvOnly_);
+ if (isVideo_ && vp8PayloadType == -1) {
+ nhlog::ui()->error("WebRTC: remote video offer - no vp8 encoding");
+ gst_webrtc_session_description_free(offer);
+ return false;
+ }
+
+ if (!startPipeline(opusPayloadType, vp8PayloadType)) {
gst_webrtc_session_description_free(offer);
return false;
}
@@ -473,6 +802,13 @@ WebRTCSession::acceptAnswer(const std::string &sdp)
return false;
}
+ if (isVideo_) {
+ int unused;
+ if (!getMediaAttributes(
+ answer->sdp, "video", "vp8", unused, isRemoteVideoRecvOnly_))
+ isRemoteVideoRecvOnly_ = true;
+ }
+
g_signal_emit_by_name(webrtc_, "set-remote-description", answer, nullptr);
gst_webrtc_session_description_free(answer);
return true;
@@ -497,21 +833,23 @@ WebRTCSession::acceptICECandidates(
}
bool
-WebRTCSession::startPipeline(int opusPayloadType)
+WebRTCSession::startPipeline(int opusPayloadType, int vp8PayloadType)
{
if (state_ != State::DISCONNECTED)
return false;
emit stateChanged(State::INITIATING);
- if (!createPipeline(opusPayloadType))
+ if (!createPipeline(opusPayloadType, vp8PayloadType)) {
+ end();
return false;
+ }
webrtc_ = gst_bin_get_by_name(GST_BIN(pipe_), "webrtcbin");
- if (!stunServer_.empty()) {
- nhlog::ui()->info("WebRTC: setting STUN server: {}", stunServer_);
- g_object_set(webrtc_, "stun-server", stunServer_.c_str(), nullptr);
+ if (settings_->useStunServer()) {
+ nhlog::ui()->info("WebRTC: setting STUN server: {}", STUN_SERVER);
+ g_object_set(webrtc_, "stun-server", STUN_SERVER, nullptr);
}
for (const auto &uri : turnServers_) {
@@ -523,7 +861,7 @@ WebRTCSession::startPipeline(int opusPayloadType)
nhlog::ui()->warn("WebRTC: no TURN server provided");
// generate the offer when the pipeline goes to PLAYING
- if (isoffering_)
+ if (isOffering_)
g_signal_connect(
webrtc_, "on-negotiation-needed", G_CALLBACK(::createOffer), nullptr);
@@ -562,20 +900,19 @@ WebRTCSession::startPipeline(int opusPayloadType)
}
bool
-WebRTCSession::createPipeline(int opusPayloadType)
+WebRTCSession::createPipeline(int opusPayloadType, int vp8PayloadType)
{
- if (audioSources_.empty()) {
- nhlog::ui()->error("WebRTC: no audio sources");
+ auto it = std::find_if(audioSources_.cbegin(), audioSources_.cend(), [this](const auto &s) {
+ return s.name == settings_->microphone().toStdString();
+ });
+ if (it == audioSources_.cend()) {
+ nhlog::ui()->error("WebRTC: unknown microphone: {}",
+ settings_->microphone().toStdString());
return false;
}
+ nhlog::ui()->debug("WebRTC: microphone: {}", it->name);
- if (audioSourceIndex_ < 0 || (size_t)audioSourceIndex_ >= audioSources_.size()) {
- nhlog::ui()->error("WebRTC: invalid audio source index");
- return false;
- }
-
- GstElement *source =
- gst_device_create_element(audioSources_[audioSourceIndex_].second, nullptr);
+ GstElement *source = gst_device_create_element(it->device, nullptr);
GstElement *volume = gst_element_factory_make("volume", "srclevel");
GstElement *convert = gst_element_factory_make("audioconvert", nullptr);
GstElement *resample = gst_element_factory_make("audioresample", nullptr);
@@ -627,10 +964,103 @@ WebRTCSession::createPipeline(int opusPayloadType)
capsfilter,
webrtcbin,
nullptr)) {
- nhlog::ui()->error("WebRTC: failed to link pipeline elements");
- end();
+ nhlog::ui()->error("WebRTC: failed to link audio pipeline elements");
return false;
}
+ return isVideo_ ? addVideoPipeline(vp8PayloadType) : true;
+}
+
+bool
+WebRTCSession::addVideoPipeline(int vp8PayloadType)
+{
+ // allow incoming video calls despite localUser having no webcam
+ if (videoSources_.empty())
+ return !isOffering_;
+
+ auto it = std::find_if(videoSources_.cbegin(), videoSources_.cend(), [this](const auto &s) {
+ return s.name == settings_->camera().toStdString();
+ });
+ if (it == videoSources_.cend()) {
+ nhlog::ui()->error("WebRTC: unknown camera: {}", settings_->camera().toStdString());
+ return false;
+ }
+
+ std::string resSetting = settings_->cameraResolution().toStdString();
+ const std::string &res = resSetting.empty() ? it->caps.front().resolution : resSetting;
+ std::string frSetting = settings_->cameraFrameRate().toStdString();
+ const std::string &fr = frSetting.empty() ? it->caps.front().frameRates.front() : frSetting;
+ auto resolution = tokenise(res, 'x');
+ auto frameRate = tokenise(fr, '/');
+ nhlog::ui()->debug("WebRTC: camera: {}", it->name);
+ nhlog::ui()->debug("WebRTC: camera resolution: {}x{}", resolution.first, resolution.second);
+ nhlog::ui()->debug("WebRTC: camera frame rate: {}/{}", frameRate.first, frameRate.second);
+
+ GstElement *source = gst_device_create_element(it->device, nullptr);
+ GstElement *capsfilter = gst_element_factory_make("capsfilter", nullptr);
+ GstCaps *caps = gst_caps_new_simple("video/x-raw",
+ "width",
+ G_TYPE_INT,
+ resolution.first,
+ "height",
+ G_TYPE_INT,
+ resolution.second,
+ "framerate",
+ GST_TYPE_FRACTION,
+ frameRate.first,
+ frameRate.second,
+ nullptr);
+ g_object_set(capsfilter, "caps", caps, nullptr);
+ gst_caps_unref(caps);
+
+ GstElement *convert = gst_element_factory_make("videoconvert", nullptr);
+ GstElement *queue1 = gst_element_factory_make("queue", nullptr);
+ GstElement *vp8enc = gst_element_factory_make("vp8enc", nullptr);
+ g_object_set(vp8enc, "deadline", 1, nullptr);
+ g_object_set(vp8enc, "error-resilient", 1, nullptr);
+
+ GstElement *rtp = gst_element_factory_make("rtpvp8pay", nullptr);
+ GstElement *queue2 = gst_element_factory_make("queue", nullptr);
+ GstElement *rtpcapsfilter = gst_element_factory_make("capsfilter", nullptr);
+ GstCaps *rtpcaps = gst_caps_new_simple("application/x-rtp",
+ "media",
+ G_TYPE_STRING,
+ "video",
+ "encoding-name",
+ G_TYPE_STRING,
+ "VP8",
+ "payload",
+ G_TYPE_INT,
+ vp8PayloadType,
+ nullptr);
+ g_object_set(rtpcapsfilter, "caps", rtpcaps, nullptr);
+ gst_caps_unref(rtpcaps);
+
+ gst_bin_add_many(GST_BIN(pipe_),
+ source,
+ capsfilter,
+ convert,
+ queue1,
+ vp8enc,
+ rtp,
+ queue2,
+ rtpcapsfilter,
+ nullptr);
+
+ GstElement *webrtcbin = gst_bin_get_by_name(GST_BIN(pipe_), "webrtcbin");
+ if (!gst_element_link_many(source,
+ capsfilter,
+ convert,
+ queue1,
+ vp8enc,
+ rtp,
+ queue2,
+ rtpcapsfilter,
+ webrtcbin,
+ nullptr)) {
+ nhlog::ui()->error("WebRTC: failed to link video pipeline elements");
+ return false;
+ }
+ gst_object_unref(webrtcbin);
return true;
}
@@ -665,6 +1095,7 @@ void
WebRTCSession::end()
{
nhlog::ui()->debug("WebRTC: ending session");
+ keyFrameRequestData_ = KeyFrameRequestData{};
if (pipe_) {
gst_element_set_state(pipe_, GST_STATE_NULL);
gst_object_unref(pipe_);
@@ -672,7 +1103,11 @@ WebRTCSession::end()
g_source_remove(busWatchId_);
busWatchId_ = 0;
}
- webrtc_ = nullptr;
+ webrtc_ = nullptr;
+ isVideo_ = false;
+ isOffering_ = false;
+ isRemoteVideoRecvOnly_ = false;
+ videoItem_ = nullptr;
if (state_ != State::DISCONNECTED)
emit stateChanged(State::DISCONNECTED);
}
@@ -690,6 +1125,9 @@ WebRTCSession::startDeviceMonitor()
GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
gst_caps_unref(caps);
+ caps = gst_caps_new_empty_simple("video/x-raw");
+ gst_device_monitor_add_filter(monitor, "Video/Source", caps);
+ gst_caps_unref(caps);
GstBus *bus = gst_device_monitor_get_bus(monitor);
gst_bus_add_watch(bus, newBusMessage, nullptr);
@@ -700,12 +1138,14 @@ WebRTCSession::startDeviceMonitor()
}
}
}
-
-#else
+#endif
void
WebRTCSession::refreshDevices()
{
+#if GST_CHECK_VERSION(1, 18, 0)
+ return;
+#else
if (!initialised_)
return;
@@ -715,47 +1155,77 @@ WebRTCSession::refreshDevices()
GstCaps *caps = gst_caps_new_empty_simple("audio/x-raw");
gst_device_monitor_add_filter(monitor, "Audio/Source", caps);
gst_caps_unref(caps);
+ caps = gst_caps_new_empty_simple("video/x-raw");
+ gst_device_monitor_add_filter(monitor, "Video/Source", caps);
+ gst_caps_unref(caps);
}
- std::for_each(audioSources_.begin(), audioSources_.end(), [](const auto &s) {
- gst_object_unref(s.second);
- });
- audioSources_.clear();
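+ // unref the GstDevice handles before rebuilding both the audio and video source lists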
+ auto clearDevices = [](auto &sources) {
+ std::for_each(
+ sources.begin(), sources.end(), [](auto &s) { gst_object_unref(s.device); });
+ sources.clear();
+ };
+ clearDevices(audioSources_);
+ clearDevices(videoSources_);
+
GList *devices = gst_device_monitor_get_devices(monitor);
if (devices) {
- audioSources_.reserve(g_list_length(devices));
for (GList *l = devices; l != nullptr; l = l->next)
addDevice(GST_DEVICE_CAST(l->data));
g_list_free(devices);
}
-}
#endif
+}
std::vector<std::string>
-WebRTCSession::getAudioSourceNames(const std::string &defaultDevice)
+WebRTCSession::getDeviceNames(bool isVideo, const std::string &defaultDevice) const
{
-#if !GST_CHECK_VERSION(1, 18, 0)
- refreshDevices();
-#endif
- // move default device to top of the list
- if (auto it = std::find_if(audioSources_.begin(),
- audioSources_.end(),
- [&](const auto &s) { return s.first == defaultDevice; });
- it != audioSources_.end())
- std::swap(audioSources_.front(), *it);
+ return isVideo ? deviceNames(videoSources_, defaultDevice)
+ : deviceNames(audioSources_, defaultDevice);
+}
+std::vector<std::string>
+WebRTCSession::getResolutions(const std::string &cameraName) const
+{
std::vector<std::string> ret;
- ret.reserve(audioSources_.size());
- std::for_each(audioSources_.cbegin(), audioSources_.cend(), [&](const auto &s) {
- ret.push_back(s.first);
- });
+ if (auto it = std::find_if(videoSources_.cbegin(),
+ videoSources_.cend(),
+ [&cameraName](const auto &s) { return s.name == cameraName; });
+ it != videoSources_.cend()) {
+ ret.reserve(it->caps.size());
+ for (const auto &c : it->caps)
+ ret.push_back(c.resolution);
+ }
return ret;
}
+std::vector<std::string>
+WebRTCSession::getFrameRates(const std::string &cameraName, const std::string &resolution) const
+{
+ if (auto i = std::find_if(videoSources_.cbegin(),
+ videoSources_.cend(),
+ [&](const auto &s) { return s.name == cameraName; });
+ i != videoSources_.cend()) {
+ if (auto j =
+ std::find_if(i->caps.cbegin(),
+ i->caps.cend(),
+ [&](const auto &s) { return s.resolution == resolution; });
+ j != i->caps.cend())
+ return j->frameRates;
+ }
+ return {};
+}
+
#else
bool
-WebRTCSession::createOffer()
+WebRTCSession::havePlugins(bool, std::string *)
+{
+ return false;
+}
+
+bool
+WebRTCSession::createOffer(bool)
{
return false;
}
@@ -776,18 +1246,6 @@ void
WebRTCSession::acceptICECandidates(const std::vector<mtx::events::msg::CallCandidates::Candidate> &)
{}
-bool
-WebRTCSession::startPipeline(int)
-{
- return false;
-}
-
-bool
-WebRTCSession::createPipeline(int)
-{
- return false;
-}
-
bool
WebRTCSession::isMicMuted() const
{
@@ -808,14 +1266,21 @@ void
WebRTCSession::refreshDevices()
{}
-void
-WebRTCSession::startDeviceMonitor()
-{}
-
std::vector<std::string>
-WebRTCSession::getAudioSourceNames(const std::string &)
+WebRTCSession::getDeviceNames(bool, const std::string &) const
{
return {};
}
+std::vector<std::string>
+WebRTCSession::getResolutions(const std::string &) const
+{
+ return {};
+}
+
+std::vector<std::string>
+WebRTCSession::getFrameRates(const std::string &, const std::string &) const
+{
+ return {};
+}
#endif
diff --git a/src/WebRTCSession.h b/src/WebRTCSession.h
index 83cabf5c..d5e195a8 100644
--- a/src/WebRTCSession.h
+++ b/src/WebRTCSession.h
@@ -4,10 +4,13 @@
#include
#include
+#include
#include "mtx/events/voip.hpp"
typedef struct _GstElement GstElement;
+class QQuickItem;
+class UserSettings;
namespace webrtc {
Q_NAMESPACE
@@ -39,10 +42,13 @@ public:
return instance;
}
- bool init(std::string *errorMessage = nullptr);
+ bool havePlugins(bool isVideo, std::string *errorMessage = nullptr);
webrtc::State state() const { return state_; }
+ bool isVideo() const { return isVideo_; }
+ bool isOffering() const { return isOffering_; }
+ bool isRemoteVideoRecvOnly() const { return isRemoteVideoRecvOnly_; }
- bool createOffer();
+ bool createOffer(bool isVideo);
bool acceptOffer(const std::string &sdp);
bool acceptAnswer(const std::string &sdp);
void acceptICECandidates(const std::vector<mtx::events::msg::CallCandidates::Candidate> &);
@@ -51,11 +57,18 @@ public:
bool toggleMicMute();
void end();
- void setStunServer(const std::string &stunServer) { stunServer_ = stunServer; }
+ void setSettings(QSharedPointer<UserSettings> settings) { settings_ = settings; }
void setTurnServers(const std::vector<std::string> &uris) { turnServers_ = uris; }
- std::vector<std::string> getAudioSourceNames(const std::string &defaultDevice);
- void setAudioSource(int audioDeviceIndex) { audioSourceIndex_ = audioDeviceIndex; }
+ void refreshDevices();
+ std::vector<std::string> getDeviceNames(bool isVideo,
+ const std::string &defaultDevice) const;
+ std::vector<std::string> getResolutions(const std::string &cameraName) const;
+ std::vector<std::string> getFrameRates(const std::string &cameraName,
+ const std::string &resolution) const;
+
+ void setVideoItem(QQuickItem *item) { videoItem_ = item; }
+ QQuickItem *getVideoItem() const { return videoItem_; }
signals:
void offerCreated(const std::string &sdp,
@@ -71,18 +84,24 @@ private slots:
private:
WebRTCSession();
- bool initialised_ = false;
- webrtc::State state_ = webrtc::State::DISCONNECTED;
- GstElement *pipe_ = nullptr;
- GstElement *webrtc_ = nullptr;
- unsigned int busWatchId_ = 0;
- std::string stunServer_;
+ bool initialised_ = false;
+ bool haveVoicePlugins_ = false;
+ bool haveVideoPlugins_ = false;
+ webrtc::State state_ = webrtc::State::DISCONNECTED;
+ bool isVideo_ = false;
+ bool isOffering_ = false;
+ bool isRemoteVideoRecvOnly_ = false;
+ QQuickItem *videoItem_ = nullptr;
+ GstElement *pipe_ = nullptr;
+ GstElement *webrtc_ = nullptr;
+ unsigned int busWatchId_ = 0;
+ QSharedPointer<UserSettings> settings_;
std::vector<std::string> turnServers_;
- int audioSourceIndex_ = -1;
- bool startPipeline(int opusPayloadType);
- bool createPipeline(int opusPayloadType);
- void refreshDevices();
+ bool init(std::string *errorMessage = nullptr);
+ bool startPipeline(int opusPayloadType, int vp8PayloadType);
+ bool createPipeline(int opusPayloadType, int vp8PayloadType);
+ bool addVideoPipeline(int vp8PayloadType);
void startDeviceMonitor();
public:
diff --git a/src/dialogs/AcceptCall.cpp b/src/dialogs/AcceptCall.cpp
index 2b47b7dc..8323e9ff 100644
--- a/src/dialogs/AcceptCall.cpp
+++ b/src/dialogs/AcceptCall.cpp
@@ -19,23 +19,32 @@ AcceptCall::AcceptCall(const QString &caller,
const QString &roomName,
const QString &avatarUrl,
QSharedPointer<UserSettings> settings,
+ bool isVideo,
QWidget *parent)
: QWidget(parent)
{
std::string errorMessage;
- if (!WebRTCSession::instance().init(&errorMessage)) {
+ WebRTCSession *session = &WebRTCSession::instance();
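+ // voice plugins are required for any call; the video plugins are checked below only for video calls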
+ if (!session->havePlugins(false, &errorMessage)) {
emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
emit close();
return;
}
- audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
- settings->defaultAudioSource().toStdString());
- if (audioDevices_.empty()) {
- emit ChatPage::instance()->showNotification(
- "Incoming call: No audio sources found.");
+ if (isVideo && !session->havePlugins(true, &errorMessage)) {
+ emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
emit close();
return;
}
+ session->refreshDevices();
+ microphones_ = session->getDeviceNames(false, settings->microphone().toStdString());
+ if (microphones_.empty()) {
+ emit ChatPage::instance()->showNotification(
+ tr("Incoming call: No microphone found."));
+ emit close();
+ return;
+ }
+ if (isVideo)
+ cameras_ = session->getDeviceNames(true, settings->camera().toStdString());
setAutoFillBackground(true);
setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
@@ -77,9 +86,10 @@ AcceptCall::AcceptCall(const QString &caller,
const int iconSize = 22;
QLabel *callTypeIndicator = new QLabel(this);
callTypeIndicator->setPixmap(
- QIcon(":/icons/icons/ui/place-call.png").pixmap(QSize(iconSize * 2, iconSize * 2)));
+ QIcon(isVideo ? ":/icons/icons/ui/video-call.png" : ":/icons/icons/ui/place-call.png")
+ .pixmap(QSize(iconSize * 2, iconSize * 2)));
- QLabel *callTypeLabel = new QLabel("Voice Call", this);
+ QLabel *callTypeLabel = new QLabel(isVideo ? tr("Video Call") : tr("Voice Call"), this);
labelFont.setPointSizeF(f.pointSizeF() * 1.1);
callTypeLabel->setFont(labelFont);
callTypeLabel->setAlignment(Qt::AlignCenter);
@@ -88,7 +98,8 @@ AcceptCall::AcceptCall(const QString &caller,
buttonLayout->setSpacing(18);
acceptBtn_ = new QPushButton(tr("Accept"), this);
acceptBtn_->setDefault(true);
- acceptBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
+ acceptBtn_->setIcon(
+ QIcon(isVideo ? ":/icons/icons/ui/video-call.png" : ":/icons/icons/ui/place-call.png"));
acceptBtn_->setIconSize(QSize(iconSize, iconSize));
rejectBtn_ = new QPushButton(tr("Reject"), this);
@@ -97,18 +108,17 @@ AcceptCall::AcceptCall(const QString &caller,
buttonLayout->addWidget(acceptBtn_);
buttonLayout->addWidget(rejectBtn_);
- auto deviceLayout = new QHBoxLayout;
- auto audioLabel = new QLabel(this);
- audioLabel->setPixmap(
- QIcon(":/icons/icons/ui/microphone-unmute.png").pixmap(QSize(iconSize, iconSize)));
+ microphoneCombo_ = new QComboBox(this);
+ for (const auto &m : microphones_)
+ microphoneCombo_->addItem(QIcon(":/icons/icons/ui/microphone-unmute.png"),
+ QString::fromStdString(m));
- auto deviceList = new QComboBox(this);
- for (const auto &d : audioDevices_)
- deviceList->addItem(QString::fromStdString(d));
-
- deviceLayout->addStretch();
- deviceLayout->addWidget(audioLabel);
- deviceLayout->addWidget(deviceList);
+ if (!cameras_.empty()) {
+ cameraCombo_ = new QComboBox(this);
+ for (const auto &c : cameras_)
+ cameraCombo_->addItem(QIcon(":/icons/icons/ui/video-call.png"),
+ QString::fromStdString(c));
+ }
if (displayNameLabel)
layout->addWidget(displayNameLabel, 0, Qt::AlignCenter);
@@ -117,12 +127,17 @@ AcceptCall::AcceptCall(const QString &caller,
layout->addWidget(callTypeIndicator, 0, Qt::AlignCenter);
layout->addWidget(callTypeLabel, 0, Qt::AlignCenter);
layout->addLayout(buttonLayout);
- layout->addLayout(deviceLayout);
+ layout->addWidget(microphoneCombo_);
+ if (cameraCombo_)
+ layout->addWidget(cameraCombo_);
- connect(acceptBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
- WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
- settings->setDefaultAudioSource(
- QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
+ connect(acceptBtn_, &QPushButton::clicked, this, [this, settings, session]() {
+ settings->setMicrophone(
+ QString::fromStdString(microphones_[microphoneCombo_->currentIndex()]));
+ if (cameraCombo_) {
+ settings->setCamera(
+ QString::fromStdString(cameras_[cameraCombo_->currentIndex()]));
+ }
emit accept();
emit close();
});
@@ -131,4 +146,5 @@ AcceptCall::AcceptCall(const QString &caller,
emit close();
});
}
+
}
diff --git a/src/dialogs/AcceptCall.h b/src/dialogs/AcceptCall.h
index 5db8fcfa..00616c53 100644
--- a/src/dialogs/AcceptCall.h
+++ b/src/dialogs/AcceptCall.h
@@ -6,6 +6,7 @@
#include
#include
+class QComboBox;
class QPushButton;
class QString;
class UserSettings;
@@ -22,6 +23,7 @@ public:
const QString &roomName,
const QString &avatarUrl,
QSharedPointer<UserSettings> settings,
+ bool isVideo,
QWidget *parent = nullptr);
signals:
@@ -29,8 +31,12 @@ signals:
void reject();
private:
- QPushButton *acceptBtn_;
- QPushButton *rejectBtn_;
- std::vector<std::string> audioDevices_;
+ QPushButton *acceptBtn_ = nullptr;
+ QPushButton *rejectBtn_ = nullptr;
+ QComboBox *microphoneCombo_ = nullptr;
+ QComboBox *cameraCombo_ = nullptr;
+ std::vector<std::string> microphones_;
+ std::vector<std::string> cameras_;
};
+
}
diff --git a/src/dialogs/PlaceCall.cpp b/src/dialogs/PlaceCall.cpp
index 8acdbe88..3dd01acb 100644
--- a/src/dialogs/PlaceCall.cpp
+++ b/src/dialogs/PlaceCall.cpp
@@ -23,18 +23,20 @@ PlaceCall::PlaceCall(const QString &callee,
: QWidget(parent)
{
std::string errorMessage;
- if (!WebRTCSession::instance().init(&errorMessage)) {
+ WebRTCSession *session = &WebRTCSession::instance();
+ if (!session->havePlugins(false, &errorMessage)) {
emit ChatPage::instance()->showNotification(QString::fromStdString(errorMessage));
emit close();
return;
}
- audioDevices_ = WebRTCSession::instance().getAudioSourceNames(
- settings->defaultAudioSource().toStdString());
- if (audioDevices_.empty()) {
- emit ChatPage::instance()->showNotification("No audio sources found.");
+ session->refreshDevices();
+ microphones_ = session->getDeviceNames(false, settings->microphone().toStdString());
+ if (microphones_.empty()) {
+ emit ChatPage::instance()->showNotification(tr("No microphone found."));
emit close();
return;
}
+ cameras_ = session->getDeviceNames(true, settings->camera().toStdString());
setAutoFillBackground(true);
setWindowFlags(Qt::Tool | Qt::WindowStaysOnTopHint);
@@ -56,48 +58,74 @@ PlaceCall::PlaceCall(const QString &callee,
avatar->setImage(avatarUrl);
else
avatar->setLetter(utils::firstChar(roomName));
- const int iconSize = 18;
- voiceBtn_ = new QPushButton(tr("Voice"), this);
+
+ voiceBtn_ = new QPushButton(tr("Voice"), this);
voiceBtn_->setIcon(QIcon(":/icons/icons/ui/place-call.png"));
- voiceBtn_->setIconSize(QSize(iconSize, iconSize));
+ voiceBtn_->setIconSize(QSize(iconSize_, iconSize_));
voiceBtn_->setDefault(true);
+
+ if (!cameras_.empty()) {
+ videoBtn_ = new QPushButton(tr("Video"), this);
+ videoBtn_->setIcon(QIcon(":/icons/icons/ui/video-call.png"));
+ videoBtn_->setIconSize(QSize(iconSize_, iconSize_));
+ }
cancelBtn_ = new QPushButton(tr("Cancel"), this);
buttonLayout->addWidget(avatar);
buttonLayout->addStretch();
buttonLayout->addWidget(voiceBtn_);
+ if (videoBtn_)
+ buttonLayout->addWidget(videoBtn_);
buttonLayout->addWidget(cancelBtn_);
QString name = displayName.isEmpty() ? callee : displayName;
- QLabel *label = new QLabel("Place a call to " + name + "?", this);
+ QLabel *label = new QLabel(tr("Place a call to ") + name + "?", this);
- auto deviceLayout = new QHBoxLayout;
- auto audioLabel = new QLabel(this);
- audioLabel->setPixmap(QIcon(":/icons/icons/ui/microphone-unmute.png")
- .pixmap(QSize(iconSize * 1.2, iconSize * 1.2)));
+ microphoneCombo_ = new QComboBox(this);
+ for (const auto &m : microphones_)
+ microphoneCombo_->addItem(QIcon(":/icons/icons/ui/microphone-unmute.png"),
+ QString::fromStdString(m));
- auto deviceList = new QComboBox(this);
- for (const auto &d : audioDevices_)
- deviceList->addItem(QString::fromStdString(d));
-
- deviceLayout->addStretch();
- deviceLayout->addWidget(audioLabel);
- deviceLayout->addWidget(deviceList);
+ if (videoBtn_) {
+ cameraCombo_ = new QComboBox(this);
+ for (const auto &c : cameras_)
+ cameraCombo_->addItem(QIcon(":/icons/icons/ui/video-call.png"),
+ QString::fromStdString(c));
+ }
layout->addWidget(label);
layout->addLayout(buttonLayout);
- layout->addLayout(deviceLayout);
+ layout->addStretch();
+ layout->addWidget(microphoneCombo_);
+ if (videoBtn_)
+ layout->addWidget(cameraCombo_);
- connect(voiceBtn_, &QPushButton::clicked, this, [this, deviceList, settings]() {
- WebRTCSession::instance().setAudioSource(deviceList->currentIndex());
- settings->setDefaultAudioSource(
- QString::fromStdString(audioDevices_[deviceList->currentIndex()]));
+ connect(voiceBtn_, &QPushButton::clicked, this, [this, settings, session]() {
+ settings->setMicrophone(
+ QString::fromStdString(microphones_[microphoneCombo_->currentIndex()]));
emit voice();
emit close();
});
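+ // the video plugin check is deferred until the user actually chooses a video call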
+ if (videoBtn_)
+ connect(videoBtn_, &QPushButton::clicked, this, [this, settings, session]() {
+ std::string error;
+ if (!session->havePlugins(true, &error)) {
+ emit ChatPage::instance()->showNotification(
+ QString::fromStdString(error));
+ emit close();
+ return;
+ }
+ settings->setMicrophone(
+ QString::fromStdString(microphones_[microphoneCombo_->currentIndex()]));
+ settings->setCamera(
+ QString::fromStdString(cameras_[cameraCombo_->currentIndex()]));
+ emit video();
+ emit close();
+ });
connect(cancelBtn_, &QPushButton::clicked, this, [this]() {
emit cancel();
emit close();
});
}
+
}
diff --git a/src/dialogs/PlaceCall.h b/src/dialogs/PlaceCall.h
index e178afc4..e042258f 100644
--- a/src/dialogs/PlaceCall.h
+++ b/src/dialogs/PlaceCall.h
@@ -6,6 +6,7 @@
#include
#include
+class QComboBox;
class QPushButton;
class QString;
class UserSettings;
@@ -26,11 +27,18 @@ public:
signals:
void voice();
+ void video();
void cancel();
private:
- QPushButton *voiceBtn_;
- QPushButton *cancelBtn_;
- std::vector<std::string> audioDevices_;
+ const int iconSize_ = 18;
+ QPushButton *voiceBtn_ = nullptr;
+ QPushButton *videoBtn_ = nullptr;
+ QPushButton *cancelBtn_ = nullptr;
+ QComboBox *microphoneCombo_ = nullptr;
+ QComboBox *cameraCombo_ = nullptr;
+ std::vector<std::string> microphones_;
+ std::vector<std::string> cameras_;
};
+
}
diff --git a/src/timeline/TimelineViewManager.cpp b/src/timeline/TimelineViewManager.cpp
index 7c81ca8f..353f7065 100644
--- a/src/timeline/TimelineViewManager.cpp
+++ b/src/timeline/TimelineViewManager.cpp
@@ -242,6 +242,17 @@ TimelineViewManager::TimelineViewManager(QSharedPointer userSettin
&TimelineViewManager::callStateChanged);
connect(
callManager_, &CallManager::newCallParty, this, &TimelineViewManager::callPartyChanged);
+ connect(callManager_,
+ &CallManager::newVideoCallState,
+ this,
+ &TimelineViewManager::videoCallChanged);
+}
+
+void
+TimelineViewManager::setVideoCallItem()
+{
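+ // locate the QML item named "videoCallItem" and hand it to the WebRTC session so call video can be rendered there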
+ WebRTCSession::instance().setVideoItem(
+ view->rootObject()->findChild<QQuickItem *>("videoCallItem"));
}
void
diff --git a/src/timeline/TimelineViewManager.h b/src/timeline/TimelineViewManager.h
index 9a2a6467..1a2d4c4e 100644
--- a/src/timeline/TimelineViewManager.h
+++ b/src/timeline/TimelineViewManager.h
@@ -36,6 +36,7 @@ class TimelineViewManager : public QObject
Q_PROPERTY(
bool isNarrowView MEMBER isNarrowView_ READ isNarrowView NOTIFY narrowViewChanged)
Q_PROPERTY(webrtc::State callState READ callState NOTIFY callStateChanged)
+ Q_PROPERTY(bool onVideoCall READ onVideoCall NOTIFY videoCallChanged)
Q_PROPERTY(QString callPartyName READ callPartyName NOTIFY callPartyChanged)
Q_PROPERTY(QString callPartyAvatarUrl READ callPartyAvatarUrl NOTIFY callPartyChanged)
Q_PROPERTY(bool isMicMuted READ isMicMuted NOTIFY micMuteChanged)
@@ -55,6 +56,8 @@ public:
Q_INVOKABLE bool isInitialSync() const { return isInitialSync_; }
bool isNarrowView() const { return isNarrowView_; }
webrtc::State callState() const { return WebRTCSession::instance().state(); }
+ bool onVideoCall() const { return WebRTCSession::instance().isVideo(); }
+ Q_INVOKABLE void setVideoCallItem();
QString callPartyName() const { return callManager_->callPartyName(); }
QString callPartyAvatarUrl() const { return callManager_->callPartyAvatarUrl(); }
bool isMicMuted() const { return WebRTCSession::instance().isMicMuted(); }
@@ -89,6 +92,7 @@ signals:
void showRoomList();
void narrowViewChanged();
void callStateChanged(webrtc::State);
+ void videoCallChanged();
void callPartyChanged();
void micMuteChanged();