61 Commits

Author SHA1 Message Date
ouwou
67e924e538 display users in voice in channel list 2023-01-06 18:40:11 -05:00
ouwou
c4590f8b23 start voice info box 2023-01-03 22:52:41 -05:00
ouwou
dff93e103a actually go to connected state and transmit data 2023-01-03 19:57:38 -05:00
ouwou
02583b8512 re-add ssrc map 2023-01-03 19:47:56 -05:00
ouwou
4740965f4c rewrite DiscordVoiceClient and stuff 2023-01-03 19:01:33 -05:00
ouwou
6ff2563e36 move ixwebsocket to fork 2022-12-26 22:13:09 -05:00
ouwou
afaba05293 actually reconstruct websocket on voice connect 2022-11-15 15:38:39 -05:00
ouwou
929ebf1a60 mess with some websocket stuff to try and fix things
to be honest, im not sure what ive done here. whatever memory i have of the issue i was trying to fix has long disappeared by the time im committing this. theres still some issues with being force disconnected and i really dont understand it
ill figure it out eventually... maybe :/
2022-11-15 02:15:21 -05:00
ouwou
38c5230a1d add window to change more stuff with opus 2022-11-14 01:28:07 -05:00
ouwou
e2784cd97b model stuff to track active device
also minor refactor
2022-11-09 19:03:53 -05:00
ouwou
0471688732 add ability to set capture device 2022-11-08 04:01:54 -05:00
ouwou
f97a6ff266 fix up CI:
add libhandy as dependency
change ubuntu actions environment
update nlohmann/json to latest release
add preprocessor checks
2022-11-08 02:32:45 -05:00
ouwou
28c3ec417f add spdlog to ci 2022-11-07 21:53:05 -05:00
ouwou
f8f9a907c9 add basic combobox to choose output device, start using spdlog 2022-11-05 02:32:43 -04:00
ouwou
cb690b6def only enable microphone when in a voice channel 2022-10-24 22:10:50 -04:00
ouwou
f751037717 Merge branch 'master' into voice 2022-10-24 02:48:57 -04:00
ouwou
e888306272 add gain slider (how 2 loudmic? 🤓) 2022-10-21 01:23:37 -04:00
ouwou
848e75f577 use new volume meter for other users 2022-10-20 02:18:01 -04:00
ouwou
e2110c22ee store user data from voice state updates 2022-10-18 18:34:14 -04:00
ouwou
cf53831b2a decay capture meter faster 2022-10-18 18:34:02 -04:00
ouwou
88f2e63eeb custom draw capture volume with gate indicator 2022-10-18 02:53:11 -04:00
ouwou
5a3bce7498 basic voice gate 2022-10-18 01:47:43 -04:00
ouwou
621beb1344 basic volume meters 2022-10-16 23:12:26 -04:00
ouwou
17e7478bb4 add user row on voice connect 2022-10-12 01:51:32 -04:00
ouwou
78a5b9599c remove user from list on disconnect 2022-10-10 00:27:47 -04:00
ouwou
5588c46d14 Merge branch 'master' into voice 2022-10-09 23:01:09 -04:00
ouwou
c30d17ebb2 show avatar in voice window 2022-10-07 20:43:41 -04:00
ouwou
fd9d1ffb33 Merge branch 'master' into voice 2022-10-07 20:34:02 -04:00
ouwou
dfcfe4353a center voice window 2022-10-05 22:01:23 -04:00
ouwou
9edac78380 put voice member list in a scrolled window 2022-10-05 21:54:07 -04:00
ouwou
d2c9985c57 one mutex is enough 2022-10-05 21:39:18 -04:00
ouwou
92c70bda08 add per user volume slider 2022-10-05 18:43:44 -04:00
ouwou
9dc2e863e8 temp mindeps build fix 2022-10-04 02:08:48 -04:00
ouwou
9394ac9b93 support voice text channels 2022-10-03 16:05:31 -04:00
ouwou
05acb8c857 try and handle voice socket closure properly maybe 2022-10-03 00:16:56 -04:00
ouwou
d8d9f1b857 close voice window on context menu disconnect 2022-10-02 02:50:48 -04:00
ouwou
e08e3106d6 rudimentary dm voice call support 2022-10-01 17:46:10 -04:00
ouwou
3e3afde223 try to fix mindeps build 2022-09-30 01:23:58 -04:00
ouwou
0438b11c91 dont dispatch udp to main 2022-09-30 01:09:51 -04:00
ouwou
f8ae99ee7b fix crash on disconnect 2022-09-29 22:47:10 -04:00
ouwou
b735feb901 add udp keepalive 2022-09-29 22:47:00 -04:00
ouwou
dc127d15fb display user list, client side mute 2022-09-29 21:46:15 -04:00
ouwou
a96d96b3aa basic mute/deafen 2022-09-28 22:10:36 -04:00
ouwou
d57d822aa9 manage decoders with ssrc updates 2022-09-28 20:44:52 -04:00
ouwou
a79b2d418e synchronize ws close/open to creating thread 2022-09-28 20:44:33 -04:00
ouwou
0571a05497 Merge branch 'master' into voice 2022-09-27 00:36:11 -04:00
ouwou
90437de2c0 make voice disconnect/reconnect work 2022-09-06 03:29:13 -04:00
ouwou
654e225093 try to fix shutdown with select 2022-09-06 03:25:24 -04:00
ouwou
e93b8715f9 basic voice capture + transmission 2022-09-05 02:21:37 -04:00
ouwou
b7fffb8691 fix min call 2022-09-02 01:47:13 -04:00
ouwou
0a04985678 make compile work if voice support is disabled 2022-09-02 01:25:33 -04:00
ouwou
9c8d9e54fe handle multiple speakers properly 2022-09-02 00:38:59 -04:00
ouwou
1f4070e52f basic buffering (i think) 2022-08-31 20:42:14 -04:00
ouwou
2e9beaaa30 dont send preferred region 2022-08-31 20:03:24 -04:00
ouwou
21d640cea3 try to fix mindeps run 2022-08-31 17:55:03 -04:00
ouwou
352c0fd1c1 fix opus include path (pt 2) 2022-08-31 17:07:43 -04:00
ouwou
12a5fcfcd3 fix opus include path 2022-08-31 16:58:17 -04:00
ouwou
f2f8afa368 fix compilation maybe 2022-08-31 16:44:30 -04:00
ouwou
c393cc9707 add deps to ci 2022-08-31 03:01:48 -04:00
ouwou
0fa33915da rudimentary voice implementation 2022-08-31 01:51:02 -04:00
ouwou
634f51fb41 add miniaudio submodule 2022-08-28 16:58:09 -04:00
38 changed files with 3352 additions and 70 deletions

View File

@@ -1,6 +1,6 @@
name: Abaddon CI
on: [push, pull_request]
on: [ push, pull_request ]
jobs:
msys2:
@@ -8,8 +8,8 @@ jobs:
runs-on: windows-latest
strategy:
matrix:
buildtype: [Debug, RelWithDebInfo, MinSizeRel]
mindeps: [false]
buildtype: [ Debug, RelWithDebInfo, MinSizeRel ]
mindeps: [ false ]
include:
- buildtype: RelWithDebInfo
mindeps: true
@@ -37,6 +37,7 @@ jobs:
mingw-w64-x86_64-curl
mingw-w64-x86_64-zlib
mingw-w64-x86_64-gtkmm3
mingw-w64-x86_64-spdlog
if_false: >-
git
make
@@ -49,6 +50,9 @@ jobs:
mingw-w64-x86_64-zlib
mingw-w64-x86_64-gtkmm3
mingw-w64-x86_64-libhandy
mingw-w64-x86_64-opus
mingw-w64-x86_64-libsodium
mingw-w64-x86_64-spdlog
- name: Setup MSYS2 (2)
uses: msys2/setup-msys2@v2
@@ -57,10 +61,20 @@ jobs:
update: true
install: ${{ steps.setupmsys.outputs.value }}
- name: Build
run: |
cmake -GNinja -Bbuild -DCMAKE_BUILD_TYPE=${{ matrix.buildtype }}
cmake --build build
- name: Build (1)
uses: haya14busa/action-cond@v1
id: buildcmd
with:
cond: ${{ matrix.mindeps == true }}
if_true: |
cmake -GNinja -Bbuild -DUSE_LIBHANDY=OFF -DENABLE_VOICE=OFF -DCMAKE_BUILD_TYPE=${{ matrix.buildtype }}
cmake --build build
if_false: |
cmake -GNinja -Bbuild -DCMAKE_BUILD_TYPE=${{ matrix.buildtype }}
cmake --build build
- name: Build (2)
run: ${{ steps.buildcmd.outputs.value }}
- name: Setup Artifact
run: |
@@ -105,7 +119,7 @@ jobs:
runs-on: macos-latest
strategy:
matrix:
buildtype: [Debug, RelWithDebInfo]
buildtype: [ Debug, RelWithDebInfo ]
steps:
- uses: actions/checkout@v1
with:
@@ -119,6 +133,10 @@ jobs:
brew install gtkmm3
brew install nlohmann-json
brew install jpeg
brew install opus
brew install libsodium
brew install spdlog
brew install libhandy
- name: Build
uses: lukka/run-cmake@v3
@@ -141,10 +159,10 @@ jobs:
linux:
name: linux-${{ matrix.buildtype }}
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
strategy:
matrix:
buildtype: [Debug, RelWithDebInfo, MinSizeRel]
buildtype: [ Debug, RelWithDebInfo, MinSizeRel ]
steps:
- uses: actions/checkout@v1
with:
@@ -160,7 +178,7 @@ jobs:
cd deps
git clone https://github.com/nlohmann/json
cd json
git checkout db78ac1d7716f56fc9f1b030b715f872f93964e4
git checkout bc889afb4c5bf1c0d8ee29ef35eaaf4c8bef8a5d
mkdir build
cd build
cmake ..
@@ -168,6 +186,10 @@ jobs:
sudo make install
sudo apt-get install libgtkmm-3.0-dev
sudo apt-get install libcurl4-gnutls-dev
sudo apt-get install libopus-dev
sudo apt-get install libsodium-dev
sudo apt-get install libspdlog-dev
sudo apt-get install libhandy-1-dev
- name: Build
uses: lukka/run-cmake@v3

5
.gitmodules vendored
View File

@@ -3,4 +3,7 @@
url = https://github.com/tschoonj/GTK-for-Windows-Runtime-Environment-Installer
[submodule "subprojects/ixwebsocket"]
path = subprojects/ixwebsocket
url = https://github.com/machinezone/ixwebsocket
url = https://github.com/ouwou/ixwebsocket
[submodule "subprojects/miniaudio"]
path = subprojects/miniaudio
url = https://github.com/mackron/miniaudio

View File

@@ -8,6 +8,7 @@ set(CMAKE_CXX_STANDARD 17)
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
option(USE_LIBHANDY "Enable features that require libhandy (default)" ON)
option(ENABLE_VOICE "Enable voice suppport" ON)
find_package(nlohmann_json REQUIRED)
find_package(CURL)
@@ -89,6 +90,9 @@ if (Fontconfig_FOUND)
target_link_libraries(abaddon Fontconfig::Fontconfig)
endif ()
find_package(spdlog REQUIRED)
target_link_libraries(abaddon spdlog::spdlog)
target_link_libraries(abaddon ${SQLite3_LIBRARIES})
target_link_libraries(abaddon ${GTKMM_LIBRARIES})
target_link_libraries(abaddon ${CURL_LIBRARIES})
@@ -106,3 +110,18 @@ if (USE_LIBHANDY)
target_compile_definitions(abaddon PRIVATE WITH_LIBHANDY)
endif ()
endif ()
if (ENABLE_VOICE)
target_compile_definitions(abaddon PRIVATE WITH_VOICE)
find_package(PkgConfig)
target_include_directories(abaddon PUBLIC subprojects/miniaudio)
pkg_check_modules(Opus REQUIRED IMPORTED_TARGET opus)
target_link_libraries(abaddon PkgConfig::Opus)
pkg_check_modules(libsodium REQUIRED IMPORTED_TARGET libsodium)
target_link_libraries(abaddon PkgConfig::libsodium)
target_link_libraries(abaddon ${CMAKE_DL_LIBS})
endif ()

View File

@@ -47,6 +47,7 @@
/bin/libpng16-16.dll
/bin/libpsl-5.dll
/bin/libsigc-2.0-0.dll
/bin/libspdlog.dll
/bin/libsqlite3-0.dll
/bin/libssh2-1.dll
/bin/libssl-1_1-x64.dll

View File

@@ -44,7 +44,7 @@ has to be separate to allow main.css to override certain things
background: @secondary_color;
}
.app-popup list {
.app-window list, .app-popup list {
background: @secondary_color;
}
@@ -87,3 +87,11 @@ has to be separate to allow main.css to override certain things
.app-window colorswatch {
box-shadow: 0 1px rgba(0, 0, 0, 0);
}
.app-window scale {
padding-top: 0px;
padding-bottom: 0px;
margin-top: 0px;
margin-bottom: 0px;
color: @text_color;
}

View File

@@ -360,3 +360,21 @@
background-color: #dd3300;
margin-left: 1px;
}
.voice-info {
background-color: #0B0B0B;
padding: 5px;
border: 1px solid #202020;
}
.voice-info-disconnect-image {
color: #DDDDDD;
}
.voice-info-status {
font-weight: bold;
}
.voice-info-location {
}

View File

@@ -1,8 +1,12 @@
#include <gtkmm.h>
#include <memory>
#include <spdlog/spdlog.h>
#include <spdlog/cfg/env.h>
#include <spdlog/sinks/stdout_color_sinks.h>
#include <string>
#include <algorithm>
#include "platform.hpp"
#include "audio/manager.hpp"
#include "discord/discord.hpp"
#include "dialogs/token.hpp"
#include "dialogs/editmessage.hpp"
@@ -16,6 +20,7 @@
#include "windows/profilewindow.hpp"
#include "windows/pinnedwindow.hpp"
#include "windows/threadswindow.hpp"
#include "windows/voicewindow.hpp"
#include "startup.hpp"
#ifdef WITH_LIBHANDY
@@ -36,7 +41,8 @@ Abaddon::Abaddon()
std::string ua = GetSettings().UserAgent;
m_discord.SetUserAgent(ua);
m_discord.signal_gateway_ready().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnReady));
// todo rename funcs
m_discord.signal_gateway_ready_supplemental().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnReady));
m_discord.signal_message_create().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnMessageCreate));
m_discord.signal_message_delete().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnMessageDelete));
m_discord.signal_message_update().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnMessageUpdate));
@@ -48,6 +54,16 @@ Abaddon::Abaddon()
m_discord.signal_thread_update().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnThreadUpdate));
m_discord.signal_message_sent().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnMessageSent));
m_discord.signal_disconnected().connect(sigc::mem_fun(*this, &Abaddon::DiscordOnDisconnect));
#ifdef WITH_VOICE
m_discord.signal_voice_connected().connect(sigc::mem_fun(*this, &Abaddon::OnVoiceConnected));
m_discord.signal_voice_disconnected().connect(sigc::mem_fun(*this, &Abaddon::OnVoiceDisconnected));
m_discord.signal_voice_speaking().connect([this](const VoiceSpeakingData &m) {
printf("%llu has ssrc %u\n", (uint64_t)m.UserID, m.SSRC);
m_audio->AddSSRC(m.SSRC);
});
#endif
m_discord.signal_channel_accessibility_changed().connect([this](Snowflake id, bool accessible) {
if (!accessible)
m_channels_requested.erase(id);
@@ -228,6 +244,16 @@ int Abaddon::StartGTK() {
return 1;
}
#ifdef WITH_VOICE
m_audio = std::make_unique<AudioManager>();
if (!m_audio->OK()) {
Gtk::MessageDialog dlg(*m_main_window, "The audio engine could not be initialized!", false, Gtk::MESSAGE_ERROR, Gtk::BUTTONS_OK, true);
dlg.set_position(Gtk::WIN_POS_CENTER);
dlg.run();
return 1;
}
#endif
// store must be checked before this can be called
m_main_window->UpdateComponents();
@@ -247,6 +273,11 @@ int Abaddon::StartGTK() {
m_main_window->GetChannelList()->signal_action_guild_leave().connect(sigc::mem_fun(*this, &Abaddon::ActionLeaveGuild));
m_main_window->GetChannelList()->signal_action_guild_settings().connect(sigc::mem_fun(*this, &Abaddon::ActionGuildSettings));
#ifdef WITH_VOICE
m_main_window->GetChannelList()->signal_action_join_voice_channel().connect(sigc::mem_fun(*this, &Abaddon::ActionJoinVoiceChannel));
m_main_window->GetChannelList()->signal_action_disconnect_voice().connect(sigc::mem_fun(*this, &Abaddon::ActionDisconnectVoice));
#endif
m_main_window->GetChatWindow()->signal_action_message_edit().connect(sigc::mem_fun(*this, &Abaddon::ActionChatEditMessage));
m_main_window->GetChatWindow()->signal_action_chat_submit().connect(sigc::mem_fun(*this, &Abaddon::ActionChatInputSubmit));
m_main_window->GetChatWindow()->signal_action_chat_load_history().connect(sigc::mem_fun(*this, &Abaddon::ActionChatLoadHistory));
@@ -398,6 +429,64 @@ void Abaddon::DiscordOnThreadUpdate(const ThreadUpdateData &data) {
}
}
#ifdef WITH_VOICE
void Abaddon::OnVoiceConnected() {
m_audio->StartCaptureDevice();
auto *wnd = new VoiceWindow(m_discord.GetVoiceChannelID());
m_voice_window = wnd;
wnd->signal_mute().connect([this](bool is_mute) {
m_discord.SetVoiceMuted(is_mute);
m_audio->SetCapture(!is_mute);
});
wnd->signal_deafen().connect([this](bool is_deaf) {
m_discord.SetVoiceDeafened(is_deaf);
m_audio->SetPlayback(!is_deaf);
});
wnd->signal_gate().connect([this](double gate) {
m_audio->SetCaptureGate(gate);
});
wnd->signal_gain().connect([this](double gain) {
m_audio->SetCaptureGain(gain);
});
wnd->signal_mute_user_cs().connect([this](Snowflake id, bool is_mute) {
if (const auto ssrc = m_discord.GetSSRCOfUser(id); ssrc.has_value()) {
m_audio->SetMuteSSRC(*ssrc, is_mute);
}
});
wnd->signal_user_volume_changed().connect([this](Snowflake id, double volume) {
if (const auto ssrc = m_discord.GetSSRCOfUser(id); ssrc.has_value()) {
m_audio->SetVolumeSSRC(*ssrc, volume);
}
});
wnd->set_position(Gtk::WIN_POS_CENTER);
wnd->show();
wnd->signal_hide().connect([this, wnd]() {
m_discord.DisconnectFromVoice();
m_voice_window = nullptr;
delete wnd;
delete m_user_menu;
SetupUserMenu();
});
}
void Abaddon::OnVoiceDisconnected() {
m_audio->StopCaptureDevice();
m_audio->RemoveAllSSRCs();
if (m_voice_window != nullptr) {
m_voice_window->close();
}
}
#endif
SettingsManager::Settings &Abaddon::GetSettings() {
return m_settings.GetSettings();
}
@@ -916,6 +1005,16 @@ void Abaddon::ActionViewThreads(Snowflake channel_id) {
window->show();
}
#ifdef WITH_VOICE
void Abaddon::ActionJoinVoiceChannel(Snowflake channel_id) {
m_discord.ConnectToVoice(channel_id);
}
void Abaddon::ActionDisconnectVoice() {
m_discord.DisconnectFromVoice();
}
#endif
std::optional<Glib::ustring> Abaddon::ShowTextPrompt(const Glib::ustring &prompt, const Glib::ustring &title, const Glib::ustring &placeholder, Gtk::Window *window) {
TextInputDialog dlg(prompt, title, placeholder, window != nullptr ? *window : *m_main_window);
const auto code = dlg.run();
@@ -955,15 +1054,24 @@ EmojiResource &Abaddon::GetEmojis() {
return m_emojis;
}
#ifdef WITH_VOICE
AudioManager &Abaddon::GetAudio() {
return *m_audio;
}
#endif
void Abaddon::on_tray_click() {
m_main_window->set_visible(!m_main_window->is_visible());
}
void Abaddon::on_tray_menu_click() {
m_gtk_app->quit();
}
void Abaddon::on_tray_popup_menu(int button, int activate_time) {
m_tray->popup_menu_at_position(*m_tray_menu, button, activate_time);
}
void Abaddon::on_window_hide() {
if (!m_settings.GetSettings().HideToTray) {
m_gtk_app->quit();
@@ -994,6 +1102,12 @@ int main(int argc, char **argv) {
if (buf[0] != '1')
SetEnvironmentVariableA("GTK_CSD", "0");
#endif
spdlog::cfg::load_env_levels();
auto log_audio = spdlog::stdout_color_mt("audio");
auto log_voice = spdlog::stdout_color_mt("voice");
auto log_discord = spdlog::stdout_color_mt("discord");
Gtk::Main::init_gtkmm_internals(); // why???
return Abaddon::Get().StartGTK();
}

View File

@@ -12,6 +12,8 @@
#define APP_TITLE "Abaddon"
class AudioManager;
class Abaddon {
private:
Abaddon();
@@ -52,6 +54,11 @@ public:
void ActionViewPins(Snowflake channel_id);
void ActionViewThreads(Snowflake channel_id);
#ifdef WITH_VOICE
void ActionJoinVoiceChannel(Snowflake channel_id);
void ActionDisconnectVoice();
#endif
std::optional<Glib::ustring> ShowTextPrompt(const Glib::ustring &prompt, const Glib::ustring &title, const Glib::ustring &placeholder = "", Gtk::Window *window = nullptr);
bool ShowConfirm(const Glib::ustring &prompt, Gtk::Window *window = nullptr);
@@ -60,6 +67,10 @@ public:
ImageManager &GetImageManager();
EmojiResource &GetEmojis();
#ifdef WITH_VOICE
AudioManager &GetAudio();
#endif
std::string GetDiscordToken() const;
bool IsDiscordActive() const;
@@ -78,6 +89,11 @@ public:
void DiscordOnDisconnect(bool is_reconnecting, GatewayCloseCode close_code);
void DiscordOnThreadUpdate(const ThreadUpdateData &data);
#ifdef WITH_VOICE
void OnVoiceConnected();
void OnVoiceDisconnected();
#endif
SettingsManager::Settings &GetSettings();
Glib::RefPtr<Gtk::CssProvider> GetStyleProvider();
@@ -144,6 +160,11 @@ private:
ImageManager m_img_mgr;
EmojiResource m_emojis;
#ifdef WITH_VOICE
std::unique_ptr<AudioManager> m_audio;
Gtk::Window *m_voice_window = nullptr;
#endif
mutable std::mutex m_mutex;
Glib::RefPtr<Gtk::Application> m_gtk_app;
Glib::RefPtr<Gtk::CssProvider> m_css_provider;

121
src/audio/devices.cpp Normal file
View File

@@ -0,0 +1,121 @@
#ifdef WITH_VOICE
// clang-format off
#include "devices.hpp"
#include <cstring>
#include <spdlog/spdlog.h>
// clang-format on
// Builds the (initially empty) playback and capture list-store models that
// back the audio device-selection UI.
AudioDevices::AudioDevices()
    : m_playback(Gtk::ListStore::create(m_playback_columns))
    , m_capture(Gtk::ListStore::create(m_capture_columns)) {
}

// Model of available playback devices (columns: Name, DeviceID).
Glib::RefPtr<Gtk::ListStore> AudioDevices::GetPlaybackDeviceModel() {
    return m_playback;
}

// Model of available capture devices (columns: Name, DeviceID).
Glib::RefPtr<Gtk::ListStore> AudioDevices::GetCaptureDeviceModel() {
    return m_capture;
}
// Repopulates the playback and capture device models from miniaudio's
// enumeration results, recording the system default (and initially active)
// device row for each side.
//
// pPlayback/pCapture: arrays of device infos owned by the miniaudio context.
// playback_count/capture_count: lengths of those arrays.
void AudioDevices::SetDevices(ma_device_info *pPlayback, ma_uint32 playback_count, ma_device_info *pCapture, ma_uint32 capture_count) {
    // clear() invalidates iterators from any previous enumeration, so reset the
    // cached default/active iterators up front instead of leaving them dangling
    // if this is called more than once.
    m_playback->clear();
    m_default_playback_iter = Gtk::TreeModel::iterator();
    m_active_playback_iter = Gtk::TreeModel::iterator();
    for (ma_uint32 i = 0; i < playback_count; i++) {
        auto &d = pPlayback[i];
        auto row = *m_playback->append();
        row[m_playback_columns.Name] = d.name;
        row[m_playback_columns.DeviceID] = d.id;
        if (d.isDefault) {
            m_default_playback_iter = row;
            SetActivePlaybackDevice(row);
        }
    }
    m_capture->clear();
    m_default_capture_iter = Gtk::TreeModel::iterator();
    m_active_capture_iter = Gtk::TreeModel::iterator();
    for (ma_uint32 i = 0; i < capture_count; i++) {
        auto &d = pCapture[i];
        auto row = *m_capture->append();
        row[m_capture_columns.Name] = d.name;
        row[m_capture_columns.DeviceID] = d.id;
        if (d.isDefault) {
            m_default_capture_iter = row;
            SetActiveCaptureDevice(row);
        }
    }
    if (!m_default_playback_iter) {
        spdlog::get("audio")->warn("No default playback device found");
    }
    if (!m_default_capture_iter) {
        spdlog::get("audio")->warn("No default capture device found");
    }
}
// Extracts the miniaudio device ID stored in a playback-model row.
// Returns std::nullopt for an invalid (end/empty) iterator.
std::optional<ma_device_id> AudioDevices::GetPlaybackDeviceIDFromModel(const Gtk::TreeModel::iterator &iter) const {
    if (!iter) return std::nullopt;
    return static_cast<ma_device_id>((*iter)[m_playback_columns.DeviceID]);
}

// Capture-side counterpart of GetPlaybackDeviceIDFromModel.
std::optional<ma_device_id> AudioDevices::GetCaptureDeviceIDFromModel(const Gtk::TreeModel::iterator &iter) const {
    if (!iter) return std::nullopt;
    return static_cast<ma_device_id>((*iter)[m_capture_columns.DeviceID]);
}
// ID of the system-default playback device, if one was seen in SetDevices.
std::optional<ma_device_id> AudioDevices::GetDefaultPlayback() const {
    if (!m_default_playback_iter) return std::nullopt;
    return static_cast<ma_device_id>((*m_default_playback_iter)[m_playback_columns.DeviceID]);
}

// ID of the system-default capture device, if one was seen in SetDevices.
std::optional<ma_device_id> AudioDevices::GetDefaultCapture() const {
    if (!m_default_capture_iter) return std::nullopt;
    return static_cast<ma_device_id>((*m_default_capture_iter)[m_capture_columns.DeviceID]);
}
// Records which playback-model row the user has selected as active.
void AudioDevices::SetActivePlaybackDevice(const Gtk::TreeModel::iterator &iter) {
    m_active_playback_iter = iter;
}

// Records which capture-model row the user has selected as active.
void AudioDevices::SetActiveCaptureDevice(const Gtk::TreeModel::iterator &iter) {
    m_active_capture_iter = iter;
}

// Currently-active playback row (may be invalid if never set).
Gtk::TreeModel::iterator AudioDevices::GetActivePlaybackDevice() {
    return m_active_playback_iter;
}

// Currently-active capture row (may be invalid if never set).
Gtk::TreeModel::iterator AudioDevices::GetActiveCaptureDevice() {
    return m_active_capture_iter;
}
// Registers the playback model's columns with the ColumnRecord base.
AudioDevices::PlaybackColumns::PlaybackColumns() {
    add(Name);
    add(DeviceID);
}

// Registers the capture model's columns with the ColumnRecord base.
AudioDevices::CaptureColumns::CaptureColumns() {
    add(Name);
    add(DeviceID);
}
#endif

58
src/audio/devices.hpp Normal file
View File

@@ -0,0 +1,58 @@
#pragma once
#ifdef WITH_VOICE
// clang-format off
#include <gtkmm/liststore.h>
#include <miniaudio.h>
#include <optional>
// clang-format on
// Owns the Gtk::ListStore models that back the playback/capture device
// selection UI, and tracks the default and currently-active device rows.
class AudioDevices {
public:
    AudioDevices();

    // Models for the device comboboxes (columns: Name, DeviceID).
    Glib::RefPtr<Gtk::ListStore> GetPlaybackDeviceModel();
    Glib::RefPtr<Gtk::ListStore> GetCaptureDeviceModel();

    // Repopulates both models from miniaudio enumeration results.
    void SetDevices(ma_device_info *pPlayback, ma_uint32 playback_count, ma_device_info *pCapture, ma_uint32 capture_count);

    // Row -> ma_device_id helpers; std::nullopt for an invalid iterator.
    [[nodiscard]] std::optional<ma_device_id> GetPlaybackDeviceIDFromModel(const Gtk::TreeModel::iterator &iter) const;
    [[nodiscard]] std::optional<ma_device_id> GetCaptureDeviceIDFromModel(const Gtk::TreeModel::iterator &iter) const;

    // System-default device IDs, if a default was seen during SetDevices.
    [[nodiscard]] std::optional<ma_device_id> GetDefaultPlayback() const;
    [[nodiscard]] std::optional<ma_device_id> GetDefaultCapture() const;

    // Active (user-selected) device row tracking.
    void SetActivePlaybackDevice(const Gtk::TreeModel::iterator &iter);
    void SetActiveCaptureDevice(const Gtk::TreeModel::iterator &iter);
    Gtk::TreeModel::iterator GetActivePlaybackDevice();
    Gtk::TreeModel::iterator GetActiveCaptureDevice();

private:
    // Column layout for the playback model.
    class PlaybackColumns : public Gtk::TreeModel::ColumnRecord {
    public:
        PlaybackColumns();

        Gtk::TreeModelColumn<Glib::ustring> Name;
        Gtk::TreeModelColumn<ma_device_id> DeviceID;
    };

    PlaybackColumns m_playback_columns;
    Glib::RefPtr<Gtk::ListStore> m_playback;
    Gtk::TreeModel::iterator m_active_playback_iter;
    Gtk::TreeModel::iterator m_default_playback_iter;

    // Column layout for the capture model.
    class CaptureColumns : public Gtk::TreeModel::ColumnRecord {
    public:
        CaptureColumns();

        Gtk::TreeModelColumn<Glib::ustring> Name;
        Gtk::TreeModelColumn<ma_device_id> DeviceID;
    };

    CaptureColumns m_capture_columns;
    Glib::RefPtr<Gtk::ListStore> m_capture;
    Gtk::TreeModel::iterator m_active_capture_iter;
    Gtk::TreeModel::iterator m_default_capture_iter;
};
#endif

466
src/audio/manager.cpp Normal file
View File

@@ -0,0 +1,466 @@
#ifdef WITH_VOICE
// clang-format off
#ifdef _WIN32
#include <winsock2.h>
#endif
#include "manager.hpp"
#include <array>
#include <glibmm/main.h>
#include <spdlog/spdlog.h>
#define MINIAUDIO_IMPLEMENTATION
#include <miniaudio.h>
#include <opus.h>
#include <cstring>
// clang-format on
// Strips the RTP one-byte-header extension (RFC 8285 "0xBEDE" profile) from a
// received voice payload, if one is present.
//
// buf:       start of the payload (RTP fixed header already removed)
// num_bytes: bytes available at buf
// outlen:    receives the length of the returned payload
//
// Returns a pointer just past the extension block, or buf unchanged when no
// extension is present or the header is malformed/truncated.
const uint8_t *StripRTPExtensionHeader(const uint8_t *buf, int num_bytes, size_t &outlen) {
    // Need the 4-byte preamble (0xBE 0xDE + 16-bit word count) before reading it.
    if (buf != nullptr && num_bytes > 4 && buf[0] == 0xbe && buf[1] == 0xde) {
        // Length field counts 32-bit words of extension data after the preamble.
        const size_t offset = 4 + 4 * static_cast<size_t>((buf[2] << 8) | buf[3]);
        // Reject lengths that run past the packet; the original code would
        // underflow outlen (size_t wrap) and return an out-of-bounds pointer.
        if (offset <= static_cast<size_t>(num_bytes)) {
            outlen = num_bytes - offset;
            return buf + offset;
        }
    }
    outlen = num_bytes;
    return buf;
}
// miniaudio playback callback: additively mixes the queued PCM of every known
// SSRC into the output buffer. Runs on the audio thread; takes the manager's
// mutex to guard m_sources / m_volume_ssrc against the threads that feed them.
void data_callback(ma_device *pDevice, void *pOutput, const void *pInput, ma_uint32 frameCount) {
    AudioManager *mgr = reinterpret_cast<AudioManager *>(pDevice->pUserData);
    if (mgr == nullptr) return;
    std::lock_guard<std::mutex> _(mgr->m_mutex);
    auto *pOutputF32 = static_cast<float *>(pOutput);
    for (auto &[ssrc, pair] : mgr->m_sources) {
        // Per-user volume multiplier; unity when none has been set for this SSRC.
        double volume = 1.0;
        if (const auto vol_it = mgr->m_volume_ssrc.find(ssrc); vol_it != mgr->m_volume_ssrc.end()) {
            volume = vol_it->second;
        }
        auto &buf = pair.first;
        // Stereo interleaved: frameCount frames -> at most frameCount * 2 samples.
        const size_t n = std::min(static_cast<size_t>(buf.size()), static_cast<size_t>(frameCount * 2ULL));
        for (size_t i = 0; i < n; i++) {
            // Mix in, converting s16 samples to normalized floats.
            pOutputF32[i] += volume * buf[i] / 32768.F;
        }
        // Drop the consumed samples from this source's queue.
        buf.erase(buf.begin(), buf.begin() + n);
    }
}
// miniaudio capture callback: forwards captured PCM to the AudioManager
// instance stashed in the device's user-data pointer.
void capture_data_callback(ma_device *pDevice, void *pOutput, const void *pInput, ma_uint32 frameCount) {
    auto *manager = reinterpret_cast<AudioManager *>(pDevice->pUserData);
    if (manager != nullptr) {
        manager->OnCapturedPCM(static_cast<const int16_t *>(pInput), frameCount);
    }
}
// Initializes the audio stack: opus encoder, miniaudio context, the playback
// device (started immediately) and the capture device (initialized here but
// only started on voice connect via StartCaptureDevice). On any failure sets
// m_ok = false so the caller can check OK() and bail out.
AudioManager::AudioManager() {
    m_ok = true;
    int err;
    // 48 kHz stereo VoIP-tuned encoder for outgoing audio, 64 kbps default.
    m_encoder = opus_encoder_create(48000, 2, OPUS_APPLICATION_VOIP, &err);
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("failed to initialize opus encoder: {}", err);
        m_ok = false;
        return;
    }
    opus_encoder_ctl(m_encoder, OPUS_SET_BITRATE(64000));
    if (ma_context_init(nullptr, 0, nullptr, &m_context) != MA_SUCCESS) {
        spdlog::get("audio")->error("failed to initialize context");
        m_ok = false;
        return;
    }
    spdlog::get("audio")->info("Audio backend: {}", ma_get_backend_name(m_context.backend));
    // Populate the device models so the system defaults can be picked below.
    Enumerate();
    // Playback: 48 kHz stereo f32; mixing happens in data_callback.
    m_playback_config = ma_device_config_init(ma_device_type_playback);
    m_playback_config.playback.format = ma_format_f32;
    m_playback_config.playback.channels = 2;
    m_playback_config.sampleRate = 48000;
    m_playback_config.dataCallback = data_callback;
    m_playback_config.pUserData = this;
    // Prefer the system-default device when enumeration found one; m_playback_id
    // must outlive the config since the config stores a pointer to it.
    if (const auto playback_id = m_devices.GetDefaultPlayback(); playback_id.has_value()) {
        m_playback_id = *playback_id;
        m_playback_config.playback.pDeviceID = &m_playback_id;
    }
    if (ma_device_init(&m_context, &m_playback_config, &m_playback_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("failed to initialize playback device");
        m_ok = false;
        return;
    }
    if (ma_device_start(&m_playback_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("failed to start playback");
        ma_device_uninit(&m_playback_device);
        m_ok = false;
        return;
    }
    // Capture: 48 kHz stereo s16 in 480-frame (10 ms) periods, matching the
    // opus frame size used on the send path.
    m_capture_config = ma_device_config_init(ma_device_type_capture);
    m_capture_config.capture.format = ma_format_s16;
    m_capture_config.capture.channels = 2;
    m_capture_config.sampleRate = 48000;
    m_capture_config.periodSizeInFrames = 480;
    m_capture_config.dataCallback = capture_data_callback;
    m_capture_config.pUserData = this;
    if (const auto capture_id = m_devices.GetDefaultCapture(); capture_id.has_value()) {
        m_capture_id = *capture_id;
        m_capture_config.capture.pDeviceID = &m_capture_id;
    }
    if (ma_device_init(&m_context, &m_capture_config, &m_capture_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("failed to initialize capture device");
        m_ok = false;
        return;
    }
    char playback_device_name[MA_MAX_DEVICE_NAME_LENGTH + 1];
    ma_device_get_name(&m_playback_device, ma_device_type_playback, playback_device_name, sizeof(playback_device_name), nullptr);
    spdlog::get("audio")->info("using {} as playback device", playback_device_name);
    char capture_device_name[MA_MAX_DEVICE_NAME_LENGTH + 1];
    ma_device_get_name(&m_capture_device, ma_device_type_capture, capture_device_name, sizeof(capture_device_name), nullptr);
    spdlog::get("audio")->info("using {} as capture device", capture_device_name);
    // Periodically decay the UI volume meters (every 40 ms) on the main loop.
    Glib::signal_timeout().connect(sigc::mem_fun(*this, &AudioManager::DecayVolumeMeters), 40);
}
// Tears down both devices before the context they were created from, then
// frees all per-SSRC opus decoders via RemoveAllSSRCs.
AudioManager::~AudioManager() {
    ma_device_uninit(&m_playback_device);
    ma_device_uninit(&m_capture_device);
    ma_context_uninit(&m_context);
    RemoveAllSSRCs();
}
// Registers a decoder and playback queue for a newly seen RTP source.
// No-op when the SSRC is already known.
void AudioManager::AddSSRC(uint32_t ssrc) {
    std::lock_guard<std::mutex> guard(m_mutex);
    if (m_sources.find(ssrc) != m_sources.end()) return;
    int error;
    auto *decoder = opus_decoder_create(48000, 2, &error);
    m_sources.insert(std::make_pair(ssrc, std::make_pair(std::deque<int16_t> {}, decoder)));
}
// Destroys the decoder for the given SSRC and forgets its playback queue.
void AudioManager::RemoveSSRC(uint32_t ssrc) {
    std::lock_guard<std::mutex> guard(m_mutex);
    const auto it = m_sources.find(ssrc);
    if (it == m_sources.end()) return;
    opus_decoder_destroy(it->second.second);
    m_sources.erase(it);
}
// Destroys every per-SSRC decoder and clears the source map (voice disconnect).
void AudioManager::RemoveAllSSRCs() {
    spdlog::get("audio")->info("removing all ssrc");
    std::lock_guard<std::mutex> guard(m_mutex);
    for (auto it = m_sources.begin(); it != m_sources.end(); ++it) {
        opus_decoder_destroy(it->second.second);
    }
    m_sources.clear();
}
// Points the capture path at a caller-owned buffer for encoded opus output.
// NOTE(review): ownership and lifetime stay with the caller — presumably the
// voice client; confirm the buffer outlives active capture.
void AudioManager::SetOpusBuffer(uint8_t *ptr) {
    m_opus_buffer = ptr;
}
// Decodes an incoming opus packet for the given SSRC and queues the resulting
// PCM for mixing in the playback callback. Drops the packet when playback is
// disabled, the source is client-side muted, or the SSRC is unknown.
void AudioManager::FeedMeOpus(uint32_t ssrc, const std::vector<uint8_t> &data) {
    if (!m_should_playback) return;
    std::lock_guard<std::mutex> guard(m_mutex);
    if (m_muted_ssrcs.find(ssrc) != m_muted_ssrcs.end()) return;
    const auto it = m_sources.find(ssrc);
    if (it == m_sources.end()) return;
    size_t payload_size = 0;
    const auto *payload = StripRTPExtensionHeader(data.data(), static_cast<int>(data.size()), payload_size);
    // Scratch decode buffer sized for opus's maximum frame (120 ms @ 48 kHz, stereo).
    static std::array<opus_int16, 120 * 48 * 2> pcm;
    const int frames = opus_decode(it->second.second, payload, static_cast<opus_int32>(payload_size), pcm.data(), 120 * 48, 0);
    if (frames > 0) {
        UpdateReceiveVolume(ssrc, pcm.data(), frames);
        auto &queue = it->second.first;
        queue.insert(queue.end(), pcm.begin(), pcm.begin() + frames * 2);
    }
}
// Starts the capture device; invoked on voice connect so the microphone is
// only live while in a voice channel.
void AudioManager::StartCaptureDevice() {
    if (ma_device_start(&m_capture_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to start capture device");
    }
}

// Stops the capture device; invoked on voice disconnect.
void AudioManager::StopCaptureDevice() {
    if (ma_device_stop(&m_capture_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to stop capture device");
    }
}
// Switches playback to the device referenced by the given model row: tears
// down the current device, rebuilds the config against the new ID, and starts
// the new device immediately.
void AudioManager::SetPlaybackDevice(const Gtk::TreeModel::iterator &iter) {
    spdlog::get("audio")->debug("Setting new playback device");
    const auto device_id = m_devices.GetPlaybackDeviceIDFromModel(iter);
    if (!device_id) {
        spdlog::get("audio")->error("Requested ID from iterator is invalid");
        return;
    }
    m_devices.SetActivePlaybackDevice(iter);
    // m_playback_id must be updated before uninit/init since the config holds
    // a pointer to it.
    m_playback_id = *device_id;
    ma_device_uninit(&m_playback_device);
    // Same parameters as the constructor: 48 kHz stereo f32 via data_callback.
    m_playback_config = ma_device_config_init(ma_device_type_playback);
    m_playback_config.playback.format = ma_format_f32;
    m_playback_config.playback.channels = 2;
    m_playback_config.playback.pDeviceID = &m_playback_id;
    m_playback_config.sampleRate = 48000;
    m_playback_config.dataCallback = data_callback;
    m_playback_config.pUserData = this;
    if (ma_device_init(&m_context, &m_playback_config, &m_playback_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to initialize new device");
        return;
    }
    if (ma_device_start(&m_playback_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to start new device");
        return;
    }
}
// Switches capture to the device referenced by the given model row: tears
// down the current device, rebuilds the config against the new ID, and starts
// the new device immediately.
void AudioManager::SetCaptureDevice(const Gtk::TreeModel::iterator &iter) {
    spdlog::get("audio")->debug("Setting new capture device");
    const auto device_id = m_devices.GetCaptureDeviceIDFromModel(iter);
    if (!device_id) {
        spdlog::get("audio")->error("Requested ID from iterator is invalid");
        return;
    }
    m_devices.SetActiveCaptureDevice(iter);
    // m_capture_id must be updated before uninit/init since the config holds
    // a pointer to it.
    m_capture_id = *device_id;
    ma_device_uninit(&m_capture_device);
    // Same parameters as the constructor: 48 kHz stereo s16, 480-frame periods.
    m_capture_config = ma_device_config_init(ma_device_type_capture);
    m_capture_config.capture.format = ma_format_s16;
    m_capture_config.capture.channels = 2;
    m_capture_config.capture.pDeviceID = &m_capture_id;
    m_capture_config.sampleRate = 48000;
    m_capture_config.periodSizeInFrames = 480;
    m_capture_config.dataCallback = capture_data_callback;
    m_capture_config.pUserData = this;
    if (ma_device_init(&m_context, &m_capture_config, &m_capture_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to initialize new device");
        return;
    }
    // technically this should probably try and check old state but if you are in the window to change it then you are connected
    if (ma_device_start(&m_capture_device) != MA_SUCCESS) {
        spdlog::get("audio")->error("Failed to start new device");
        return;
    }
}
// Enables/disables use of captured audio (client-side mute toggle).
void AudioManager::SetCapture(bool capture) {
    m_should_capture = capture;
}

// Enables/disables playback mixing (client-side deafen toggle).
void AudioManager::SetPlayback(bool playback) {
    m_should_playback = playback;
}

// Sets the voice-gate threshold; the UI passes a percentage, stored as 0..1.
void AudioManager::SetCaptureGate(double gate) {
    m_capture_gate = gate * 0.01;
}

// Sets the gain multiplier applied to captured audio.
void AudioManager::SetCaptureGain(double gain) {
    m_capture_gain = gain;
}
// Client-side mutes (or unmutes) a single remote source by SSRC.
void AudioManager::SetMuteSSRC(uint32_t ssrc, bool mute) {
    std::lock_guard<std::mutex> guard(m_mutex);
    if (!mute) {
        m_muted_ssrcs.erase(ssrc);
    } else {
        m_muted_ssrcs.insert(ssrc);
    }
}
// Sets the per-user playback volume. `volume` arrives as a percentage and is
// mapped through an exponential curve normalized so that 100% -> 1.0.
void AudioManager::SetVolumeSSRC(uint32_t ssrc, double volume) {
    std::lock_guard<std::mutex> guard(m_mutex);
    constexpr const double E = 2.71828182845904523536;
    const double fraction = volume * 0.01;
    m_volume_ssrc[ssrc] = (std::exp(fraction) - 1) / (E - 1);
}
// Changes the Opus application mode (VOIP / AUDIO / RESTRICTED_LOWDELAY).
// Opus cannot change the application on a live encoder, so the encoder is
// destroyed and recreated at 48 kHz stereo, carrying the old bitrate over.
void AudioManager::SetEncodingApplication(int application) {
    std::lock_guard<std::mutex> _(m_enc_mutex);
    int prev_bitrate = 64000; // fallback if the old bitrate cannot be read
    if (int err = opus_encoder_ctl(m_encoder, OPUS_GET_BITRATE(&prev_bitrate)); err != OPUS_OK) {
        spdlog::get("audio")->error("Failed to get old bitrate when reinitializing: {}", err);
    }
    opus_encoder_destroy(m_encoder);
    // renamed from `err` which was shadowed by the if-init declarations above/below
    int create_err = 0;
    m_encoder = opus_encoder_create(48000, 2, application, &create_err);
    if (create_err != OPUS_OK) {
        // NOTE(review): on failure m_encoder is null here; subsequent
        // opus_encoder_ctl/opus_encode calls would operate on a null encoder.
        // Consider a recovery path — TODO confirm intended behavior.
        spdlog::get("audio")->critical("opus_encoder_create failed: {}", create_err);
        return;
    }
    if (int err = opus_encoder_ctl(m_encoder, OPUS_SET_BITRATE(prev_bitrate)); err != OPUS_OK) {
        spdlog::get("audio")->error("Failed to set bitrate when reinitializing: {}", err);
    }
}
// Returns the encoder's current OPUS_APPLICATION_* value.
int AudioManager::GetEncodingApplication() {
    std::lock_guard<std::mutex> guard(m_enc_mutex);
    int application = OPUS_APPLICATION_VOIP; // fallback if the query fails
    const int err = opus_encoder_ctl(m_encoder, OPUS_GET_APPLICATION(&application));
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("opus_encoder_ctl(OPUS_GET_APPLICATION) failed: {}", err);
    }
    return application;
}
// Hints the encoder toward voice or music tuning (an OPUS_SIGNAL_* value).
void AudioManager::SetSignalHint(int signal) {
    std::lock_guard<std::mutex> guard(m_enc_mutex);
    const int err = opus_encoder_ctl(m_encoder, OPUS_SET_SIGNAL(signal));
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("opus_encoder_ctl(OPUS_SET_SIGNAL) failed: {}", err);
    }
}
// Returns the encoder's current OPUS_SIGNAL_* hint.
int AudioManager::GetSignalHint() {
    std::lock_guard<std::mutex> guard(m_enc_mutex);
    int hint = OPUS_AUTO; // fallback if the query fails
    const int err = opus_encoder_ctl(m_encoder, OPUS_GET_SIGNAL(&hint));
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("opus_encoder_ctl(OPUS_GET_SIGNAL) failed: {}", err);
    }
    return hint;
}
// Applies a new target bitrate (bits per second) to the encoder.
void AudioManager::SetBitrate(int bitrate) {
    std::lock_guard<std::mutex> guard(m_enc_mutex);
    const int err = opus_encoder_ctl(m_encoder, OPUS_SET_BITRATE(bitrate));
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("opus_encoder_ctl(OPUS_SET_BITRATE) failed: {}", err);
    }
}
// Returns the encoder's current target bitrate in bits per second.
int AudioManager::GetBitrate() {
    std::lock_guard<std::mutex> guard(m_enc_mutex);
    int bitrate = 64000; // fallback if the query fails
    const int err = opus_encoder_ctl(m_encoder, OPUS_GET_BITRATE(&bitrate));
    if (err != OPUS_OK) {
        spdlog::get("audio")->error("opus_encoder_ctl(OPUS_GET_BITRATE) failed: {}", err);
    }
    return bitrate;
}
void AudioManager::Enumerate() {
ma_device_info *pPlaybackDeviceInfo;
ma_uint32 playbackDeviceCount;
ma_device_info *pCaptureDeviceInfo;
ma_uint32 captureDeviceCount;
spdlog::get("audio")->debug("Enumerating devices");
if (ma_context_get_devices(
&m_context,
&pPlaybackDeviceInfo,
&playbackDeviceCount,
&pCaptureDeviceInfo,
&captureDeviceCount) != MA_SUCCESS) {
spdlog::get("audio")->error("Failed to enumerate devices");
return;
}
spdlog::get("audio")->debug("Found {} playback devices and {} capture devices", playbackDeviceCount, captureDeviceCount);
m_devices.SetDevices(pPlaybackDeviceInfo, playbackDeviceCount, pCaptureDeviceInfo, captureDeviceCount);
}
// Handles one captured 10 ms stereo frame: applies gain with clamping,
// updates the capture peak meter, gates out frames below the noise-gate
// threshold, then Opus-encodes into m_opus_buffer and signals listeners
// with the payload size.
void AudioManager::OnCapturedPCM(const int16_t *pcm, ma_uint32 frames) {
    if (m_opus_buffer == nullptr || !m_should_capture) return;
    const double gain = m_capture_gain;
    // Copy so gain can be applied without mutating the device's buffer.
    std::vector<int16_t> new_pcm(pcm, pcm + frames * 2);
    for (auto &val : new_pcm) {
        const int32_t unclamped = static_cast<int32_t>(val * gain);
        val = std::clamp(unclamped, INT16_MIN, INT16_MAX);
    }
    UpdateCaptureVolume(new_pcm.data(), frames);
    // Noise gate: drop the frame entirely when below the threshold.
    if (m_capture_peak_meter / 32768.0 < m_capture_gate) return;
    int payload_len;
    {
        // RAII guard instead of manual lock()/unlock() so the mutex is
        // released on every exit path.
        std::lock_guard<std::mutex> guard(m_enc_mutex);
        payload_len = opus_encode(m_encoder, new_pcm.data(), 480, static_cast<unsigned char *>(m_opus_buffer), 1275);
    }
    if (payload_len < 0) {
        spdlog::get("audio")->error("encoding error: {}", payload_len);
    } else {
        m_signal_opus_packet.emit(payload_len);
    }
}
// Raises the per-SSRC volume meter to the peak amplitude seen in this frame,
// normalized to [0, 1]. Only the left channel (even sample indices) is
// sampled; DecayVolumeMeters() pulls the meter back down over time.
void AudioManager::UpdateReceiveVolume(uint32_t ssrc, const int16_t *pcm, int frames) {
    std::lock_guard<std::mutex> _(m_vol_mtx);
    auto &meter = m_volumes[ssrc];
    for (int i = 0; i < frames * 2; i += 2) {
        // amp is already non-negative; the redundant second std::abs was removed
        const int amp = std::abs(pcm[i]);
        meter = std::max(meter, amp / 32768.0);
    }
}
// Raises the capture peak meter to the loudest left-channel sample in this
// frame (raw 16-bit amplitude). The meter decays in DecayVolumeMeters().
void AudioManager::UpdateCaptureVolume(const int16_t *pcm, ma_uint32 frames) {
    for (ma_uint32 i = 0; i < frames * 2; i += 2) {
        const int sample = std::abs(pcm[i]);
        const int current = m_capture_peak_meter.load(std::memory_order_relaxed);
        m_capture_peak_meter = std::max(current, sample);
    }
}
// Periodic timer callback: decays the capture meter and every per-SSRC meter
// toward zero, clamping at the floor. Returns true to keep the timer alive.
bool AudioManager::DecayVolumeMeters() {
    m_capture_peak_meter -= 600;
    if (m_capture_peak_meter < 0) m_capture_peak_meter = 0;
    std::lock_guard<std::mutex> guard(m_vol_mtx);
    for (auto &entry : m_volumes) {
        entry.second = std::max(0.0, entry.second - 0.01);
    }
    return true;
}
// Whether audio initialization succeeded; set during construction.
bool AudioManager::OK() const {
return m_ok;
}
// Capture peak meter normalized from raw 16-bit amplitude to [0, 1].
double AudioManager::GetCaptureVolumeLevel() const noexcept {
    return m_capture_peak_meter.load() / 32768.0;
}
// Current meter level for a remote source, or 0.0 if the SSRC is unknown.
double AudioManager::GetSSRCVolumeLevel(uint32_t ssrc) const noexcept {
    std::lock_guard<std::mutex> _(m_vol_mtx);
    const auto it = m_volumes.find(ssrc);
    return it != m_volumes.end() ? it->second : 0.0;
}
// Accessor for the playback/capture device model (used by settings UI).
AudioDevices &AudioManager::GetDevices() {
return m_devices;
}
// Signal emitted with the payload size after each frame is Opus-encoded
// into the buffer registered via SetOpusBuffer().
AudioManager::type_signal_opus_packet AudioManager::signal_opus_packet() {
return m_signal_opus_packet;
}
#endif

120
src/audio/manager.hpp Normal file
View File

@@ -0,0 +1,120 @@
#pragma once
#ifdef WITH_VOICE
// clang-format off
#include <array>
#include <atomic>
#include <deque>
#include <gtkmm/treemodel.h>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include <miniaudio.h>
#include <opus.h>
#include <sigc++/sigc++.h>
#include "devices.hpp"
// clang-format on
// Owns the voice audio pipeline: miniaudio playback/capture devices, the
// Opus encoder for outgoing audio, per-SSRC decoders/buffers for incoming
// audio, and the volume/mute/meter state the UI reads.
class AudioManager {
public:
AudioManager();
~AudioManager();
// Register/unregister remote sources by their RTP SSRC.
void AddSSRC(uint32_t ssrc);
void RemoveSSRC(uint32_t ssrc);
void RemoveAllSSRCs();
// Destination buffer that encoded Opus payloads are written into.
void SetOpusBuffer(uint8_t *ptr);
// Feed a received Opus packet for the given source.
void FeedMeOpus(uint32_t ssrc, const std::vector<uint8_t> &data);
void StartCaptureDevice();
void StopCaptureDevice();
// Switch devices to the row selected in the devices model.
void SetPlaybackDevice(const Gtk::TreeModel::iterator &iter);
void SetCaptureDevice(const Gtk::TreeModel::iterator &iter);
void SetCapture(bool capture);
void SetPlayback(bool playback);
// gate: percentage threshold below which captured frames are dropped.
void SetCaptureGate(double gate);
// gain: linear multiplier applied to captured samples.
void SetCaptureGain(double gain);
void SetMuteSSRC(uint32_t ssrc, bool mute);
// volume: 0-100 slider value, mapped to an exponential gain curve.
void SetVolumeSSRC(uint32_t ssrc, double volume);
// Opus encoder settings (application changes recreate the encoder).
void SetEncodingApplication(int application);
[[nodiscard]] int GetEncodingApplication();
void SetSignalHint(int signal);
[[nodiscard]] int GetSignalHint();
void SetBitrate(int bitrate);
[[nodiscard]] int GetBitrate();
// Refresh the device lists from the miniaudio context.
void Enumerate();
[[nodiscard]] bool OK() const;
// Meter levels normalized to [0, 1] for UI volume meters.
[[nodiscard]] double GetCaptureVolumeLevel() const noexcept;
[[nodiscard]] double GetSSRCVolumeLevel(uint32_t ssrc) const noexcept;
[[nodiscard]] AudioDevices &GetDevices();
private:
// Invoked from the miniaudio capture callback with one 10 ms frame.
void OnCapturedPCM(const int16_t *pcm, ma_uint32 frames);
void UpdateReceiveVolume(uint32_t ssrc, const int16_t *pcm, int frames);
void UpdateCaptureVolume(const int16_t *pcm, ma_uint32 frames);
// Raw 16-bit peak amplitude; written from the capture thread.
std::atomic<int> m_capture_peak_meter = 0;
// Timer callback decaying all meters; returns true to stay scheduled.
bool DecayVolumeMeters();
// miniaudio C callbacks need access to private state via pUserData.
friend void data_callback(ma_device *, void *, const void *, ma_uint32);
friend void capture_data_callback(ma_device *, void *, const void *, ma_uint32);
std::thread m_thread;
bool m_ok;
// playback
ma_device m_playback_device;
ma_device_config m_playback_config;
// Must outlive the device: the config stores a pointer to this ID.
ma_device_id m_playback_id;
// capture
ma_device m_capture_device;
ma_device_config m_capture_config;
ma_device_id m_capture_id;
ma_context m_context;
// Guards m_sources, m_muted_ssrcs, m_volume_ssrc.
mutable std::mutex m_mutex;
// Guards m_encoder.
mutable std::mutex m_enc_mutex;
// Per-SSRC pending PCM queue and its Opus decoder.
std::unordered_map<uint32_t, std::pair<std::deque<int16_t>, OpusDecoder *>> m_sources;
OpusEncoder *m_encoder;
// Not owned; set via SetOpusBuffer.
uint8_t *m_opus_buffer = nullptr;
std::atomic<bool> m_should_capture = true;
std::atomic<bool> m_should_playback = true;
std::atomic<double> m_capture_gate = 0.0;
std::atomic<double> m_capture_gain = 1.0;
std::unordered_set<uint32_t> m_muted_ssrcs;
std::unordered_map<uint32_t, double> m_volume_ssrc;
// Guards m_volumes (the per-SSRC meter levels).
mutable std::mutex m_vol_mtx;
std::unordered_map<uint32_t, double> m_volumes;
AudioDevices m_devices;
public:
using type_signal_opus_packet = sigc::signal<void(int payload_size)>;
type_signal_opus_packet signal_opus_packet();
private:
type_signal_opus_packet m_signal_opus_packet;
};
#endif

View File

@@ -20,9 +20,17 @@ ChannelList::ChannelList()
#ifdef WITH_LIBHANDY
, m_menu_channel_open_tab("Open in New _Tab", true)
, m_menu_dm_open_tab("Open in New _Tab", true)
#endif
#ifdef WITH_VOICE
, m_menu_voice_channel_join("_Join", true)
, m_menu_voice_channel_disconnect("_Disconnect", true)
#endif
, m_menu_dm_copy_id("_Copy ID", true)
, m_menu_dm_close("") // changes depending on if group or not
#ifdef WITH_VOICE
, m_menu_dm_join_voice("Join _Voice", true)
, m_menu_dm_disconnect_voice("_Disconnect Voice", true)
#endif
, m_menu_thread_copy_id("_Copy ID", true)
, m_menu_thread_leave("_Leave", true)
, m_menu_thread_archive("_Archive", true)
@@ -36,7 +44,11 @@ ChannelList::ChannelList()
const auto type = row[m_columns.m_type];
// text channels should not be allowed to be collapsed
// maybe they should be but it seems a little difficult to handle expansion to permit this
#ifdef WITH_VOICE
if (type != RenderType::TextChannel && type != RenderType::VoiceChannel) {
#else
if (type != RenderType::TextChannel) {
#endif
if (row[m_columns.m_expanded]) {
m_view.collapse_row(path);
row[m_columns.m_expanded] = false;
@@ -46,7 +58,11 @@ ChannelList::ChannelList()
}
}
#ifdef WITH_VOICE
if (type == RenderType::TextChannel || type == RenderType::DM || type == RenderType::Thread || type == RenderType::VoiceChannel) {
#else
if (type == RenderType::TextChannel || type == RenderType::DM || type == RenderType::Thread) {
#endif
const auto id = static_cast<Snowflake>(row[m_columns.m_id]);
m_signal_action_channel_item_select.emit(id);
Abaddon::Get().GetDiscordClient().MarkChannelAsRead(id, [](...) {});
@@ -161,6 +177,21 @@ ChannelList::ChannelList()
m_menu_channel.append(m_menu_channel_copy_id);
m_menu_channel.show_all();
#ifdef WITH_VOICE
m_menu_voice_channel_join.signal_activate().connect([this]() {
const auto id = static_cast<Snowflake>((*m_model->get_iter(m_path_for_menu))[m_columns.m_id]);
m_signal_action_join_voice_channel.emit(id);
});
m_menu_voice_channel_disconnect.signal_activate().connect([this]() {
m_signal_action_disconnect_voice.emit();
});
m_menu_voice_channel.append(m_menu_voice_channel_join);
m_menu_voice_channel.append(m_menu_voice_channel_disconnect);
m_menu_voice_channel.show_all();
#endif
m_menu_dm_copy_id.signal_activate().connect([this] {
Gtk::Clipboard::get()->set_text(std::to_string((*m_model->get_iter(m_path_for_menu))[m_columns.m_id]));
});
@@ -192,6 +223,17 @@ ChannelList::ChannelList()
#endif
m_menu_dm.append(m_menu_dm_toggle_mute);
m_menu_dm.append(m_menu_dm_close);
#ifdef WITH_VOICE
m_menu_dm_join_voice.signal_activate().connect([this]() {
const auto id = static_cast<Snowflake>((*m_model->get_iter(m_path_for_menu))[m_columns.m_id]);
m_signal_action_join_voice_channel.emit(id);
});
m_menu_dm_disconnect_voice.signal_activate().connect([this]() {
m_signal_action_disconnect_voice.emit();
});
m_menu_dm.append(m_menu_dm_join_voice);
m_menu_dm.append(m_menu_dm_disconnect_voice);
#endif
m_menu_dm.append(m_menu_dm_copy_id);
m_menu_dm.show_all();
@@ -579,7 +621,11 @@ Gtk::TreeModel::iterator ChannelList::AddGuild(const GuildData &guild) {
for (const auto &channel_ : *guild.Channels) {
const auto channel = discord.GetChannel(channel_.ID);
if (!channel.has_value()) continue;
#ifdef WITH_VOICE
if (channel->Type == ChannelType::GUILD_TEXT || channel->Type == ChannelType::GUILD_NEWS || channel->Type == ChannelType::GUILD_VOICE) {
#else
if (channel->Type == ChannelType::GUILD_TEXT || channel->Type == ChannelType::GUILD_NEWS) {
#endif
if (channel->ParentID.has_value())
categories[*channel->ParentID].push_back(*channel);
else
@@ -605,9 +651,31 @@ Gtk::TreeModel::iterator ChannelList::AddGuild(const GuildData &guild) {
m_tmp_channel_map[thread.ID] = CreateThreadRow(row.children(), thread);
};
auto add_voice_participants = [this, &discord](const ChannelData &channel, const Gtk::TreeNodeChildren &root) {
for (auto user_id : discord.GetUsersInVoiceChannel(channel.ID)) {
const auto user = discord.GetUser(user_id);
auto user_row = *m_model->append(root);
user_row[m_columns.m_type] = RenderType::VoiceParticipant;
user_row[m_columns.m_id] = user_id;
if (user.has_value()) {
user_row[m_columns.m_name] = user->GetEscapedName();
} else {
user_row[m_columns.m_name] = "<i>Unknown</i>";
}
}
};
for (const auto &channel : orphan_channels) {
auto channel_row = *m_model->append(guild_row.children());
channel_row[m_columns.m_type] = RenderType::TextChannel;
if (IsTextChannel(channel.Type))
channel_row[m_columns.m_type] = RenderType::TextChannel;
#ifdef WITH_VOICE
else {
channel_row[m_columns.m_type] = RenderType::VoiceChannel;
add_voice_participants(channel, channel_row->children());
}
#endif
channel_row[m_columns.m_id] = channel.ID;
channel_row[m_columns.m_name] = "#" + Glib::Markup::escape_text(*channel.Name);
channel_row[m_columns.m_sort] = *channel.Position + OrphanChannelSortOffset;
@@ -630,7 +698,14 @@ Gtk::TreeModel::iterator ChannelList::AddGuild(const GuildData &guild) {
for (const auto &channel : channels) {
auto channel_row = *m_model->append(cat_row.children());
channel_row[m_columns.m_type] = RenderType::TextChannel;
if (IsTextChannel(channel.Type))
channel_row[m_columns.m_type] = RenderType::TextChannel;
#ifdef WITH_VOICE
else {
channel_row[m_columns.m_type] = RenderType::VoiceChannel;
add_voice_participants(channel, channel_row->children());
}
#endif
channel_row[m_columns.m_id] = channel.ID;
channel_row[m_columns.m_name] = "#" + Glib::Markup::escape_text(*channel.Name);
channel_row[m_columns.m_sort] = *channel.Position;
@@ -732,7 +807,11 @@ bool ChannelList::SelectionFunc(const Glib::RefPtr<Gtk::TreeModel> &model, const
m_last_selected = m_model->get_path(row);
auto type = (*m_model->get_iter(path))[m_columns.m_type];
#ifdef WITH_VOICE
return type == RenderType::TextChannel || type == RenderType::DM || type == RenderType::Thread || type == RenderType::VoiceChannel;
#else
return type == RenderType::TextChannel || type == RenderType::DM || type == RenderType::Thread;
#endif
}
void ChannelList::AddPrivateChannels() {
@@ -856,6 +935,12 @@ bool ChannelList::OnButtonPressEvent(GdkEventButton *ev) {
OnChannelSubmenuPopup();
m_menu_channel.popup_at_pointer(reinterpret_cast<GdkEvent *>(ev));
break;
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
OnVoiceChannelSubmenuPopup();
m_menu_voice_channel.popup_at_pointer(reinterpret_cast<GdkEvent *>(ev));
break;
#endif
case RenderType::DM: {
OnDMSubmenuPopup();
const auto channel = Abaddon::Get().GetDiscordClient().GetChannel(static_cast<Snowflake>(row[m_columns.m_id]));
@@ -947,14 +1032,41 @@ void ChannelList::OnChannelSubmenuPopup() {
m_menu_channel_toggle_mute.set_label("Mute");
}
#ifdef WITH_VOICE
void ChannelList::OnVoiceChannelSubmenuPopup() {
const auto iter = m_model->get_iter(m_path_for_menu);
if (!iter) return;
const auto id = static_cast<Snowflake>((*iter)[m_columns.m_id]);
auto &discord = Abaddon::Get().GetDiscordClient();
if (discord.IsVoiceConnected() || discord.IsVoiceConnecting()) {
m_menu_voice_channel_join.set_sensitive(false);
m_menu_voice_channel_disconnect.set_sensitive(discord.GetVoiceChannelID() == id);
} else {
m_menu_voice_channel_join.set_sensitive(true);
m_menu_voice_channel_disconnect.set_sensitive(false);
}
}
#endif
void ChannelList::OnDMSubmenuPopup() {
auto iter = m_model->get_iter(m_path_for_menu);
if (!iter) return;
const auto id = static_cast<Snowflake>((*iter)[m_columns.m_id]);
if (Abaddon::Get().GetDiscordClient().IsChannelMuted(id))
auto &discord = Abaddon::Get().GetDiscordClient();
if (discord.IsChannelMuted(id))
m_menu_dm_toggle_mute.set_label("Unmute");
else
m_menu_dm_toggle_mute.set_label("Mute");
#ifdef WITH_VOICE
if (discord.IsVoiceConnected() || discord.IsVoiceConnecting()) {
m_menu_dm_join_voice.set_sensitive(false);
m_menu_dm_disconnect_voice.set_sensitive(discord.GetVoiceChannelID() == id);
} else {
m_menu_dm_join_voice.set_sensitive(true);
m_menu_dm_disconnect_voice.set_sensitive(false);
}
#endif
}
void ChannelList::OnThreadSubmenuPopup() {
@@ -997,6 +1109,16 @@ ChannelList::type_signal_action_open_new_tab ChannelList::signal_action_open_new
}
#endif
#ifdef WITH_VOICE
ChannelList::type_signal_action_join_voice_channel ChannelList::signal_action_join_voice_channel() {
return m_signal_action_join_voice_channel;
}
ChannelList::type_signal_action_disconnect_voice ChannelList::signal_action_disconnect_voice() {
return m_signal_action_disconnect_voice;
}
#endif
ChannelList::ModelColumns::ModelColumns() {
add(m_type);
add(m_id);

View File

@@ -125,10 +125,20 @@ protected:
Gtk::MenuItem m_menu_channel_open_tab;
#endif
#ifdef WITH_VOICE
Gtk::Menu m_menu_voice_channel;
Gtk::MenuItem m_menu_voice_channel_join;
Gtk::MenuItem m_menu_voice_channel_disconnect;
#endif
Gtk::Menu m_menu_dm;
Gtk::MenuItem m_menu_dm_copy_id;
Gtk::MenuItem m_menu_dm_close;
Gtk::MenuItem m_menu_dm_toggle_mute;
#ifdef WITH_VOICE
Gtk::MenuItem m_menu_dm_join_voice;
Gtk::MenuItem m_menu_dm_disconnect_voice;
#endif
#ifdef WITH_LIBHANDY
Gtk::MenuItem m_menu_dm_open_tab;
@@ -148,6 +158,10 @@ protected:
void OnDMSubmenuPopup();
void OnThreadSubmenuPopup();
#ifdef WITH_VOICE
void OnVoiceChannelSubmenuPopup();
#endif
bool m_updating_listing = false;
Snowflake m_active_channel;
@@ -166,6 +180,14 @@ public:
type_signal_action_open_new_tab signal_action_open_new_tab();
#endif
#ifdef WITH_VOICE
using type_signal_action_join_voice_channel = sigc::signal<void, Snowflake>;
using type_signal_action_disconnect_voice = sigc::signal<void>;
type_signal_action_join_voice_channel signal_action_join_voice_channel();
type_signal_action_disconnect_voice signal_action_disconnect_voice();
#endif
type_signal_action_channel_item_select signal_action_channel_item_select();
type_signal_action_guild_leave signal_action_guild_leave();
type_signal_action_guild_settings signal_action_guild_settings();
@@ -178,4 +200,9 @@ private:
#ifdef WITH_LIBHANDY
type_signal_action_open_new_tab m_signal_action_open_new_tab;
#endif
#ifdef WITH_VOICE
type_signal_action_join_voice_channel m_signal_action_join_voice_channel;
type_signal_action_disconnect_voice m_signal_action_disconnect_voice;
#endif
};

View File

@@ -65,6 +65,12 @@ void CellRendererChannels::get_preferred_width_vfunc(Gtk::Widget &widget, int &m
return get_preferred_width_vfunc_channel(widget, minimum_width, natural_width);
case RenderType::Thread:
return get_preferred_width_vfunc_thread(widget, minimum_width, natural_width);
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
return get_preferred_width_vfunc_voice_channel(widget, minimum_width, natural_width);
case RenderType::VoiceParticipant:
return get_preferred_width_vfunc_voice_participant(widget, minimum_width, natural_width);
#endif
case RenderType::DMHeader:
return get_preferred_width_vfunc_dmheader(widget, minimum_width, natural_width);
case RenderType::DM:
@@ -82,6 +88,12 @@ void CellRendererChannels::get_preferred_width_for_height_vfunc(Gtk::Widget &wid
return get_preferred_width_for_height_vfunc_channel(widget, height, minimum_width, natural_width);
case RenderType::Thread:
return get_preferred_width_for_height_vfunc_thread(widget, height, minimum_width, natural_width);
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
return get_preferred_width_for_height_vfunc_voice_channel(widget, height, minimum_width, natural_width);
case RenderType::VoiceParticipant:
return get_preferred_width_for_height_vfunc_voice_participant(widget, height, minimum_width, natural_width);
#endif
case RenderType::DMHeader:
return get_preferred_width_for_height_vfunc_dmheader(widget, height, minimum_width, natural_width);
case RenderType::DM:
@@ -99,6 +111,12 @@ void CellRendererChannels::get_preferred_height_vfunc(Gtk::Widget &widget, int &
return get_preferred_height_vfunc_channel(widget, minimum_height, natural_height);
case RenderType::Thread:
return get_preferred_height_vfunc_thread(widget, minimum_height, natural_height);
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
return get_preferred_height_vfunc_voice_channel(widget, minimum_height, natural_height);
case RenderType::VoiceParticipant:
return get_preferred_height_vfunc_voice_participant(widget, minimum_height, natural_height);
#endif
case RenderType::DMHeader:
return get_preferred_height_vfunc_dmheader(widget, minimum_height, natural_height);
case RenderType::DM:
@@ -116,6 +134,12 @@ void CellRendererChannels::get_preferred_height_for_width_vfunc(Gtk::Widget &wid
return get_preferred_height_for_width_vfunc_channel(widget, width, minimum_height, natural_height);
case RenderType::Thread:
return get_preferred_height_for_width_vfunc_thread(widget, width, minimum_height, natural_height);
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
return get_preferred_height_for_width_vfunc_voice_channel(widget, width, minimum_height, natural_height);
case RenderType::VoiceParticipant:
return get_preferred_height_for_width_vfunc_voice_participant(widget, width, minimum_height, natural_height);
#endif
case RenderType::DMHeader:
return get_preferred_height_for_width_vfunc_dmheader(widget, width, minimum_height, natural_height);
case RenderType::DM:
@@ -133,6 +157,12 @@ void CellRendererChannels::render_vfunc(const Cairo::RefPtr<Cairo::Context> &cr,
return render_vfunc_channel(cr, widget, background_area, cell_area, flags);
case RenderType::Thread:
return render_vfunc_thread(cr, widget, background_area, cell_area, flags);
#ifdef WITH_VOICE
case RenderType::VoiceChannel:
return render_vfunc_voice_channel(cr, widget, background_area, cell_area, flags);
case RenderType::VoiceParticipant:
return render_vfunc_voice_participant(cr, widget, background_area, cell_area, flags);
#endif
case RenderType::DMHeader:
return render_vfunc_dmheader(cr, widget, background_area, cell_area, flags);
case RenderType::DM:
@@ -499,6 +529,76 @@ void CellRendererChannels::render_vfunc_thread(const Cairo::RefPtr<Cairo::Contex
}
}
#ifdef WITH_VOICE
// voice channel
void CellRendererChannels::get_preferred_width_vfunc_voice_channel(Gtk::Widget &widget, int &minimum_width, int &natural_width) const {
m_renderer_text.get_preferred_width(widget, minimum_width, natural_width);
}
void CellRendererChannels::get_preferred_width_for_height_vfunc_voice_channel(Gtk::Widget &widget, int height, int &minimum_width, int &natural_width) const {
m_renderer_text.get_preferred_width_for_height(widget, height, minimum_width, natural_width);
}
void CellRendererChannels::get_preferred_height_vfunc_voice_channel(Gtk::Widget &widget, int &minimum_height, int &natural_height) const {
m_renderer_text.get_preferred_height(widget, minimum_height, natural_height);
}
void CellRendererChannels::get_preferred_height_for_width_vfunc_voice_channel(Gtk::Widget &widget, int width, int &minimum_height, int &natural_height) const {
m_renderer_text.get_preferred_height_for_width(widget, width, minimum_height, natural_height);
}
void CellRendererChannels::render_vfunc_voice_channel(const Cairo::RefPtr<Cairo::Context> &cr, Gtk::Widget &widget, const Gdk::Rectangle &background_area, const Gdk::Rectangle &cell_area, Gtk::CellRendererState flags) {
Gtk::Requisition minimum_size, natural_size;
m_renderer_text.get_preferred_size(widget, minimum_size, natural_size);
const int text_x = background_area.get_x() + 21;
const int text_y = background_area.get_y() + background_area.get_height() / 2 - natural_size.height / 2;
const int text_w = natural_size.width;
const int text_h = natural_size.height;
Gdk::Rectangle text_cell_area(text_x, text_y, text_w, text_h);
m_renderer_text.property_foreground_rgba() = Gdk::RGBA("#0f0");
m_renderer_text.render(cr, widget, background_area, text_cell_area, flags);
m_renderer_text.property_foreground_set() = false;
}
// voice participant
void CellRendererChannels::get_preferred_width_vfunc_voice_participant(Gtk::Widget &widget, int &minimum_width, int &natural_width) const {
m_renderer_text.get_preferred_width(widget, minimum_width, natural_width);
}
void CellRendererChannels::get_preferred_width_for_height_vfunc_voice_participant(Gtk::Widget &widget, int height, int &minimum_width, int &natural_width) const {
m_renderer_text.get_preferred_width_for_height(widget, height, minimum_width, natural_width);
}
void CellRendererChannels::get_preferred_height_vfunc_voice_participant(Gtk::Widget &widget, int &minimum_height, int &natural_height) const {
m_renderer_text.get_preferred_height(widget, minimum_height, natural_height);
}
void CellRendererChannels::get_preferred_height_for_width_vfunc_voice_participant(Gtk::Widget &widget, int width, int &minimum_height, int &natural_height) const {
m_renderer_text.get_preferred_height_for_width(widget, width, minimum_height, natural_height);
}
void CellRendererChannels::render_vfunc_voice_participant(const Cairo::RefPtr<Cairo::Context> &cr, Gtk::Widget &widget, const Gdk::Rectangle &background_area, const Gdk::Rectangle &cell_area, Gtk::CellRendererState flags) {
Gtk::Requisition minimum_size, natural_size;
m_renderer_text.get_preferred_size(widget, minimum_size, natural_size);
const int text_x = background_area.get_x() + 27;
const int text_y = background_area.get_y() + background_area.get_height() / 2 - natural_size.height / 2;
const int text_w = natural_size.width;
const int text_h = natural_size.height;
Gdk::Rectangle text_cell_area(text_x, text_y, text_w, text_h);
m_renderer_text.property_foreground_rgba() = Gdk::RGBA("#f00");
m_renderer_text.render(cr, widget, background_area, text_cell_area, flags);
m_renderer_text.property_foreground_set() = false;
}
#endif
// dm header
void CellRendererChannels::get_preferred_width_vfunc_dmheader(Gtk::Widget &widget, int &minimum_width, int &natural_width) const {

View File

@@ -11,6 +11,12 @@ enum class RenderType : uint8_t {
TextChannel,
Thread,
// TODO: maybe enable anyways but without ability to join if no voice support
#ifdef WITH_VOICE
VoiceChannel,
VoiceParticipant,
#endif
DMHeader,
DM,
};
@@ -83,6 +89,30 @@ protected:
const Gdk::Rectangle &cell_area,
Gtk::CellRendererState flags);
#ifdef WITH_VOICE
// voice channel
void get_preferred_width_vfunc_voice_channel(Gtk::Widget &widget, int &minimum_width, int &natural_width) const;
void get_preferred_width_for_height_vfunc_voice_channel(Gtk::Widget &widget, int height, int &minimum_width, int &natural_width) const;
void get_preferred_height_vfunc_voice_channel(Gtk::Widget &widget, int &minimum_height, int &natural_height) const;
void get_preferred_height_for_width_vfunc_voice_channel(Gtk::Widget &widget, int width, int &minimum_height, int &natural_height) const;
void render_vfunc_voice_channel(const Cairo::RefPtr<Cairo::Context> &cr,
Gtk::Widget &widget,
const Gdk::Rectangle &background_area,
const Gdk::Rectangle &cell_area,
Gtk::CellRendererState flags);
// voice channel
void get_preferred_width_vfunc_voice_participant(Gtk::Widget &widget, int &minimum_width, int &natural_width) const;
void get_preferred_width_for_height_vfunc_voice_participant(Gtk::Widget &widget, int height, int &minimum_width, int &natural_width) const;
void get_preferred_height_vfunc_voice_participant(Gtk::Widget &widget, int &minimum_height, int &natural_height) const;
void get_preferred_height_for_width_vfunc_voice_participant(Gtk::Widget &widget, int width, int &minimum_height, int &natural_height) const;
void render_vfunc_voice_participant(const Cairo::RefPtr<Cairo::Context> &cr,
Gtk::Widget &widget,
const Gdk::Rectangle &background_area,
const Gdk::Rectangle &cell_area,
Gtk::CellRendererState flags);
#endif
// dm header
void get_preferred_width_vfunc_dmheader(Gtk::Widget &widget, int &minimum_width, int &natural_width) const;
void get_preferred_width_for_height_vfunc_dmheader(Gtk::Widget &widget, int height, int &minimum_width, int &natural_width) const;

View File

@@ -11,9 +11,9 @@ public:
protected:
Gtk::SizeRequestMode get_request_mode_vfunc() const override;
void get_preferred_width_vfunc(int &minimum_width, int &natural_width) const override;
void get_preferred_height_for_width_vfunc(int width, int &minimum_height, int &natural_height) const override;
void get_preferred_height_vfunc(int &minimum_height, int &natural_height) const override;
void get_preferred_width_for_height_vfunc(int height, int &minimum_width, int &natural_width) const override;
void get_preferred_height_vfunc(int &minimum_height, int &natural_height) const override;
void get_preferred_height_for_width_vfunc(int width, int &minimum_height, int &natural_height) const override;
void on_size_allocate(Gtk::Allocation &allocation) override;
void on_map() override;
void on_unmap() override;

View File

@@ -0,0 +1,89 @@
#include "voiceinfobox.hpp"
#include "abaddon.hpp"
#include "util.hpp"
// Status box shown while in voice: connection state, channel/guild name,
// and a clickable disconnect icon. Visibility and labels are driven by
// DiscordClient voice signals wired up here.
VoiceInfoBox::VoiceInfoBox()
: Gtk::Box(Gtk::ORIENTATION_HORIZONTAL)
, m_left(Gtk::ORIENTATION_VERTICAL) {
// Primary-click on the icon requests a voice disconnect.
m_disconnect_ev.signal_button_press_event().connect([this](GdkEventButton *ev) -> bool {
if (ev->type == GDK_BUTTON_PRESS && ev->button == GDK_BUTTON_PRIMARY) {
spdlog::get("discord")->debug("Request disconnect from info box");
Abaddon::Get().GetDiscordClient().DisconnectFromVoice();
return true;
}
return false;
});
AddPointerCursor(m_disconnect_ev);
get_style_context()->add_class("voice-info");
m_status.get_style_context()->add_class("voice-info-status");
m_location.get_style_context()->add_class("voice-info-location");
m_disconnect_img.get_style_context()->add_class("voice-info-disconnect-image");
// Placeholder text; real labels are set by the signal handlers below
// before the box is shown.
m_status.set_label("You shouldn't see me");
m_location.set_label("You shouldn't see me");
// On connect request: show the box and display "Channel / Guild", falling
// back to just the channel name (DMs) or "Unknown".
Abaddon::Get().GetDiscordClient().signal_voice_requested_connect().connect([this](Snowflake channel_id) {
show();
if (const auto channel = Abaddon::Get().GetDiscordClient().GetChannel(channel_id); channel.has_value() && channel->Name.has_value()) {
if (channel->GuildID.has_value()) {
if (const auto guild = Abaddon::Get().GetDiscordClient().GetGuild(*channel->GuildID); guild.has_value()) {
m_location.set_label(*channel->Name + " / " + guild->Name);
return;
}
}
m_location.set_label(*channel->Name);
return;
}
m_location.set_label("Unknown");
});
Abaddon::Get().GetDiscordClient().signal_voice_requested_disconnect().connect([this]() {
hide();
});
// Mirror the voice client's state machine into the status label.
Abaddon::Get().GetDiscordClient().signal_voice_client_state_update().connect([this](DiscordVoiceClient::State state) {
Glib::ustring label;
switch (state) {
case DiscordVoiceClient::State::ConnectingToWebsocket:
label = "Connecting";
break;
case DiscordVoiceClient::State::EstablishingConnection:
label = "Establishing connection";
break;
case DiscordVoiceClient::State::Connected:
label = "Connected";
break;
case DiscordVoiceClient::State::DisconnectedByServer:
case DiscordVoiceClient::State::DisconnectedByClient:
label = "Disconnected";
break;
default:
label = "Unknown";
break;
}
m_status.set_label(label);
});
m_status.set_ellipsize(Pango::ELLIPSIZE_END);
m_location.set_ellipsize(Pango::ELLIPSIZE_END);
m_disconnect_ev.add(m_disconnect_img);
m_disconnect_img.property_icon_name() = "call-stop-symbolic";
m_disconnect_img.property_icon_size() = 5;
m_disconnect_img.set_hexpand(true);
m_disconnect_img.set_halign(Gtk::ALIGN_END);
m_left.add(m_status);
m_left.add(m_location);
add(m_left);
add(m_disconnect_ev);
show_all_children();
}

View File

@@ -0,0 +1,19 @@
#pragma once
#include <gtkmm/box.h>
#include <gtkmm/eventbox.h>
#include <gtkmm/image.h>
#include <gtkmm/label.h>
// Horizontal box displaying voice connection status/location with a
// disconnect button; all wiring happens in the constructor.
class VoiceInfoBox : public Gtk::Box {
public:
VoiceInfoBox();
private:
// Vertical stack holding the status and location labels.
Gtk::Box m_left;
Gtk::Label m_status;
Gtk::Label m_location;
// EventBox wrapping the icon so it can receive click events.
Gtk::EventBox m_disconnect_ev;
Gtk::Image m_disconnect_img;
};

View File

@@ -0,0 +1,125 @@
#include "volumemeter.hpp"
#include <cstring>
// Custom-drawn horizontal volume meter widget.
VolumeMeter::VolumeMeter()
: Glib::ObjectBase("volumemeter")
, Gtk::Widget() {
// The widget draws into its own GdkWindow, created in on_realize().
set_has_window(true);
}
// Sets the filled fraction of the bar and schedules a redraw.
void VolumeMeter::SetVolume(double fraction) {
m_fraction = fraction;
queue_draw();
}
// Sets the position of the tick marker (as a fraction) and redraws.
void VolumeMeter::SetTick(double fraction) {
m_tick = fraction;
queue_draw();
}
// Toggles drawing of the tick marker.
// NOTE(review): unlike SetVolume/SetTick this does not queue_draw(), so the
// change only becomes visible on the next redraw — confirm intentional.
void VolumeMeter::SetShowTick(bool show) {
m_show_tick = show;
}
// Defer to the base widget's default size-request mode.
Gtk::SizeRequestMode VolumeMeter::get_request_mode_vfunc() const {
return Gtk::Widget::get_request_mode_vfunc();
}
// Report the currently allocated width as both minimum and natural,
// effectively letting the parent decide the width.
void VolumeMeter::get_preferred_width_vfunc(int &minimum_width, int &natural_width) const {
const int width = get_allocated_width();
minimum_width = natural_width = width;
}
void VolumeMeter::get_preferred_width_for_height_vfunc(int height, int &minimum_width, int &natural_width) const {
get_preferred_width_vfunc(minimum_width, natural_width);
}
// The meter is always a fixed 4px tall bar.
// Fix: removed the unused `get_allocated_height()` local (its value was never
// read) along with the placeholder comment.
void VolumeMeter::get_preferred_height_vfunc(int &minimum_height, int &natural_height) const {
    minimum_height = natural_height = 4;
}
// Height is independent of width; reuse the plain height computation.
void VolumeMeter::get_preferred_height_for_width_vfunc(int width, int &minimum_height, int &natural_height) const {
    get_preferred_height_vfunc(minimum_height, natural_height);
}

// Record the new allocation and keep our GdkWindow in sync with it.
void VolumeMeter::on_size_allocate(Gtk::Allocation &allocation) {
    set_allocation(allocation);

    if (m_window)
        m_window->move_resize(allocation.get_x(), allocation.get_y(), allocation.get_width(), allocation.get_height());
}

// No extra work on map/unmap; just chain to the base class.
void VolumeMeter::on_map() {
    Gtk::Widget::on_map();
}

void VolumeMeter::on_unmap() {
    Gtk::Widget::on_unmap();
}
// Create the widget's own GdkWindow (child of the parent window, input+output,
// sized to the current allocation) the first time we are realized.
void VolumeMeter::on_realize() {
    set_realized(true);

    if (!m_window) {
        GdkWindowAttr attributes;
        std::memset(&attributes, 0, sizeof(attributes));

        auto allocation = get_allocation();
        attributes.x = allocation.get_x();
        attributes.y = allocation.get_y();
        attributes.width = allocation.get_width();
        attributes.height = allocation.get_height();

        // only draw events are needed beyond whatever is already set
        attributes.event_mask = get_events() | Gdk::EXPOSURE_MASK;
        attributes.window_type = GDK_WINDOW_CHILD;
        attributes.wclass = GDK_INPUT_OUTPUT;

        m_window = Gdk::Window::create(get_parent_window(), &attributes, GDK_WA_X | GDK_WA_Y);
        set_window(m_window);
        // associate the GdkWindow with this widget so events are routed here
        m_window->set_user_data(gobj());
    }
}

// Drop our GdkWindow reference, then let the base class finish unrealizing.
void VolumeMeter::on_unrealize() {
    m_window.reset();
    Gtk::Widget::on_unrealize();
}
// Paints the meter as three overlapping bars drawn back-to-front: red at the
// full level, orange capped at 85% of the width, green capped at 70%. The
// visible result is green up to 70%, orange to 85%, red beyond. Optionally
// draws a 4px light-grey tick marker at m_tick.
bool VolumeMeter::on_draw(const Cairo::RefPtr<Cairo::Context> &cr) {
    const auto alloc = get_allocation();
    const auto w = alloc.get_width();
    const auto h = alloc.get_height();
    const double level_width = w * m_fraction;

    const auto fill_bar = [&cr, h](double r, double g, double b, double bar_width) {
        cr->set_source_rgb(r, g, b);
        cr->rectangle(0.0, 0.0, bar_width, h);
        cr->fill();
    };

    // later (shorter) bars paint over the earlier ones
    fill_bar(1.0, 0.0, 0.0, level_width);
    fill_bar(1.0, 0.5, 0.0, std::min(level_width, 0.85 * w));
    fill_bar(0.0, 1.0, 0.0, std::min(level_width, 0.7 * w));

    if (m_show_tick) {
        cr->set_source_rgb(0.8, 0.8, 0.8);
        cr->rectangle(w * m_tick, 0, 4, h);
        cr->fill();
    }

    return true;
}

View File

@@ -0,0 +1,31 @@
#pragma once
#include <gtkmm/widget.h>
// Custom-drawn horizontal volume meter (green/orange/red fill) with an
// optional tick marker; fractions are relative to the widget's width.
class VolumeMeter : public Gtk::Widget {
public:
    VolumeMeter();

    // Set the filled level; triggers a redraw.
    void SetVolume(double fraction);
    // Set the tick marker position; triggers a redraw.
    void SetTick(double fraction);
    // Enable/disable drawing of the tick marker.
    void SetShowTick(bool show);

protected:
    // Gtk::Widget custom-widget plumbing (sizing, realize/unrealize, draw).
    Gtk::SizeRequestMode get_request_mode_vfunc() const override;
    void get_preferred_width_vfunc(int &minimum_width, int &natural_width) const override;
    void get_preferred_width_for_height_vfunc(int height, int &minimum_width, int &natural_width) const override;
    void get_preferred_height_vfunc(int &minimum_height, int &natural_height) const override;
    void get_preferred_height_for_width_vfunc(int width, int &minimum_height, int &natural_height) const override;
    void on_size_allocate(Gtk::Allocation &allocation) override;
    void on_map() override;
    void on_unmap() override;
    void on_realize() override;
    void on_unrealize() override;
    bool on_draw(const Cairo::RefPtr<Cairo::Context> &cr) override;

private:
    Glib::RefPtr<Gdk::Window> m_window; // created in on_realize

    double m_fraction = 0.0;  // current level
    double m_tick = 0.0;      // tick marker position
    bool m_show_tick = false; // whether on_draw renders the tick
};

View File

@@ -1,6 +1,7 @@
#include "abaddon.hpp"
#include "discord.hpp"
#include "util.hpp"
#include <spdlog/spdlog.h>
#include <cinttypes>
#include <utility>
@@ -8,7 +9,8 @@ using namespace std::string_literals;
DiscordClient::DiscordClient(bool mem_store)
: m_decompress_buf(InflateChunkSize)
, m_store(mem_store) {
, m_store(mem_store)
, m_websocket("gateway-ws") {
m_msg_dispatch.connect(sigc::mem_fun(*this, &DiscordClient::MessageDispatch));
auto dispatch_cb = [this]() {
m_generic_mutex.lock();
@@ -23,6 +25,17 @@ DiscordClient::DiscordClient(bool mem_store)
m_websocket.signal_open().connect(sigc::mem_fun(*this, &DiscordClient::HandleSocketOpen));
m_websocket.signal_close().connect(sigc::mem_fun(*this, &DiscordClient::HandleSocketClose));
#ifdef WITH_VOICE
m_voice.signal_connected().connect(sigc::mem_fun(*this, &DiscordClient::OnVoiceConnected));
m_voice.signal_disconnected().connect(sigc::mem_fun(*this, &DiscordClient::OnVoiceDisconnected));
m_voice.signal_speaking().connect([this](const VoiceSpeakingData &data) {
m_signal_voice_speaking.emit(data);
});
m_voice.signal_state_update().connect([this](DiscordVoiceClient::State state) {
m_signal_voice_client_state_update.emit(state);
});
#endif
LoadEventMap();
}
@@ -1165,6 +1178,65 @@ void DiscordClient::AcceptVerificationGate(Snowflake guild_id, VerificationGateI
});
}
#ifdef WITH_VOICE
// Requests a voice connection by sending a VoiceStateUpdate for the channel
// over the main gateway. The voice client itself is started later, once the
// gateway replies with VOICE_STATE_UPDATE + VOICE_SERVER_UPDATE.
void DiscordClient::ConnectToVoice(Snowflake channel_id) {
    auto channel = GetChannel(channel_id);
    if (!channel.has_value()) return;
    m_voice_channel_id = channel_id;
    VoiceStateUpdateMessage m;
    if (channel->GuildID.has_value())
        m.GuildID = channel->GuildID;
    m.ChannelID = channel_id;
    m.PreferredRegion = "newark";
    m_websocket.Send(m);
    m_signal_voice_requested_connect.emit(channel_id);
}

// Stops the voice client and tells the gateway we left: a VoiceStateUpdate
// with no channel ID serializes channel_id as null, which means disconnect.
void DiscordClient::DisconnectFromVoice() {
    m_voice.Stop();
    VoiceStateUpdateMessage m;
    m_websocket.Send(m);
    m_signal_voice_requested_disconnect.emit();
}

// True once the voice client reports a fully established connection.
bool DiscordClient::IsVoiceConnected() const noexcept {
    return m_voice.IsConnected();
}

// True while the voice handshake is still in progress.
bool DiscordClient::IsVoiceConnecting() const noexcept {
    return m_voice.IsConnecting();
}

// The channel we last requested a voice connection to.
Snowflake DiscordClient::GetVoiceChannelID() const noexcept {
    return m_voice_channel_id;
}
// Returns the set of users currently tracked in the given voice channel
// (empty set if the channel is unknown).
// Fix: use find() instead of operator[], which permanently default-inserted
// an empty entry into the map for every unknown channel queried.
std::unordered_set<Snowflake> DiscordClient::GetUsersInVoiceChannel(Snowflake channel_id) {
    if (const auto it = m_voice_state_channel_users.find(channel_id); it != m_voice_state_channel_users.end()) {
        return it->second;
    }
    return {};
}
// The RTP SSRC the voice server associated with this user, if observed.
std::optional<uint32_t> DiscordClient::GetSSRCOfUser(Snowflake id) const {
    return m_voice.GetSSRCOfUser(id);
}
// The voice channel the given user is currently tracked in, if any.
std::optional<Snowflake> DiscordClient::GetVoiceState(Snowflake user_id) const {
    const auto it = m_voice_state_user_channel.find(user_id);
    if (it == m_voice_state_user_channel.end()) return std::nullopt;
    return it->second;
}
// Request self-mute; applied by sending a fresh voice state to the gateway.
void DiscordClient::SetVoiceMuted(bool is_mute) {
    m_mute_requested = is_mute;
    SendVoiceStateUpdate();
}

// Request self-deafen; applied by sending a fresh voice state to the gateway.
void DiscordClient::SetVoiceDeafened(bool is_deaf) {
    m_deaf_requested = is_deaf;
    SendVoiceStateUpdate();
}
#endif
void DiscordClient::SetReferringChannel(Snowflake id) {
if (!id.IsValid()) {
m_http.SetPersistentHeader("Referer", "https://discord.com/channels/@me");
@@ -1484,6 +1556,14 @@ void DiscordClient::HandleGatewayMessage(std::string str) {
case GatewayEvent::GUILD_MEMBERS_CHUNK: {
HandleGatewayGuildMembersChunk(m);
} break;
#ifdef WITH_VOICE
case GatewayEvent::VOICE_STATE_UPDATE: {
HandleGatewayVoiceStateUpdate(m);
} break;
case GatewayEvent::VOICE_SERVER_UPDATE: {
HandleGatewayVoiceServerUpdate(m);
} break;
#endif
}
} break;
default:
@@ -2094,6 +2174,61 @@ void DiscordClient::HandleGatewayGuildMembersChunk(const GatewayMessage &msg) {
m_store.EndTransaction();
}
#ifdef WITH_VOICE
// Handles gateway VOICE_STATE_UPDATE: for our own user it feeds the session
// ID to the voice client (and stops it when the server reports us out of a
// channel); for other users it opportunistically caches the attached
// user/member objects. In both cases it maintains the user<->channel tracking
// maps and emits the per-user connect/disconnect signals.
void DiscordClient::HandleGatewayVoiceStateUpdate(const GatewayMessage &msg) {
    VoiceState data = msg.Data;
    if (data.UserID == m_user_data.ID) {
        spdlog::get("discord")->debug("Voice session ID: {}", data.SessionID);
        m_voice.SetSessionID(data.SessionID);
        // channel_id = null means disconnect. stop cuz out of order maybe
        if (!data.ChannelID.has_value() && (m_voice.IsConnected() || m_voice.IsConnecting())) {
            m_voice.Stop();
        }
    } else {
        // cache the user/member objects carried on guild voice states
        if (data.GuildID.has_value() && data.Member.has_value()) {
            if (data.Member->User.has_value()) {
                m_store.SetUser(data.UserID, *data.Member->User);
            }
            m_store.SetGuildMember(*data.GuildID, data.UserID, *data.Member);
        }
    }

    if (data.ChannelID.has_value()) {
        // joined or moved; emit a disconnect for the old channel first
        const auto old_state = GetVoiceState(data.UserID);
        SetVoiceState(data.UserID, *data.ChannelID);
        if (old_state.has_value() && *old_state != *data.ChannelID) {
            m_signal_voice_user_disconnect.emit(data.UserID, *old_state);
        }
        // NOTE(review): connect is re-emitted even when the channel is unchanged
        m_signal_voice_user_connect.emit(data.UserID, *data.ChannelID);
    } else {
        const auto old_state = GetVoiceState(data.UserID);
        ClearVoiceState(data.UserID);
        if (old_state.has_value()) {
            m_signal_voice_user_disconnect.emit(data.UserID, *old_state);
        }
    }
}
// Handles gateway VOICE_SERVER_UPDATE: hands the voice endpoint, token and
// identifying IDs to the voice client and starts its connection.
void DiscordClient::HandleGatewayVoiceServerUpdate(const GatewayMessage &msg) {
    VoiceServerUpdateData data = msg.Data;
    spdlog::get("discord")->debug("Voice server endpoint: {}", data.Endpoint);
    spdlog::get("discord")->debug("Voice token: {}", data.Token);
    m_voice.SetEndpoint(data.Endpoint);
    m_voice.SetToken(data.Token);
    // the "server" is the guild when present, otherwise the channel
    // (presumably private/DM calls -- confirm)
    if (data.GuildID.has_value()) {
        m_voice.SetServerID(*data.GuildID);
    } else if (data.ChannelID.has_value()) {
        m_voice.SetServerID(*data.ChannelID);
    } else {
        spdlog::get("discord")->error("No guild or channel ID in voice server?");
    }
    m_voice.SetUserID(m_user_data.ID);
    m_voice.Start();
}
#endif
void DiscordClient::HandleGatewayReadySupplemental(const GatewayMessage &msg) {
ReadySupplementalData data = msg.Data;
for (const auto &p : data.MergedPresences.Friends) {
@@ -2110,6 +2245,17 @@ void DiscordClient::HandleGatewayReadySupplemental(const GatewayMessage &msg) {
m_user_to_status[p.UserID] = PresenceStatus::DND;
m_signal_presence_update.emit(*user, m_user_to_status.at(p.UserID));
}
#ifdef WITH_VOICE
for (const auto &g : data.Guilds) {
for (const auto &s : g.VoiceStates) {
if (s.ChannelID.has_value()) {
SetVoiceState(s.UserID, *s.ChannelID);
}
}
}
#endif
m_signal_gateway_ready_supplemental.emit();
}
void DiscordClient::HandleGatewayReconnect(const GatewayMessage &msg) {
@@ -2375,9 +2521,8 @@ void DiscordClient::SetSuperPropertiesFromIdentity(const IdentifyMessage &identi
void DiscordClient::HandleSocketOpen() {
}
void DiscordClient::HandleSocketClose(uint16_t code) {
printf("got socket close code: %d\n", code);
auto close_code = static_cast<GatewayCloseCode>(code);
void DiscordClient::HandleSocketClose(const ix::WebSocketCloseInfo &info) {
auto close_code = static_cast<GatewayCloseCode>(info.code);
auto cb = [this, close_code]() {
m_heartbeat_waiter.kill();
if (m_heartbeat_thread.joinable()) m_heartbeat_thread.join();
@@ -2542,6 +2687,44 @@ void DiscordClient::HandleReadyGuildSettings(const ReadyEventData &data) {
}
}
#ifdef WITH_VOICE
// Sends our current desired voice state (channel + requested mute/deafen)
// to the main gateway.
void DiscordClient::SendVoiceStateUpdate() {
    VoiceStateUpdateMessage msg;
    msg.ChannelID = m_voice_channel_id;
    const auto channel = GetChannel(m_voice_channel_id);
    if (channel.has_value() && channel->GuildID.has_value()) {
        msg.GuildID = *channel->GuildID;
    }

    msg.SelfMute = m_mute_requested;
    msg.SelfDeaf = m_deaf_requested;
    msg.SelfVideo = false;

    m_websocket.Send(msg);
}
// Records that user_id is now in channel_id, keeping both tracking maps
// (user->channel and channel->users) consistent.
// Fix: if the user was already tracked in a *different* channel (they moved
// without an intervening disconnect -- the only caller invokes this directly
// on channel change), remove them from the old channel's user set first;
// previously that set retained stale membership.
void DiscordClient::SetVoiceState(Snowflake user_id, Snowflake channel_id) {
    if (const auto it = m_voice_state_user_channel.find(user_id); it != m_voice_state_user_channel.end() && it->second != channel_id) {
        m_voice_state_channel_users[it->second].erase(user_id);
    }
    m_voice_state_user_channel[user_id] = channel_id;
    m_voice_state_channel_users[channel_id].insert(user_id);
}
// Removes a user from both voice tracking maps.
void DiscordClient::ClearVoiceState(Snowflake user_id) {
    if (const auto it = m_voice_state_user_channel.find(user_id); it != m_voice_state_user_channel.end()) {
        m_voice_state_channel_users[it->second].erase(user_id);
        // invalidated
        m_voice_state_user_channel.erase(user_id);
    }
}

// Relay the voice client's connection lifecycle to the public signals.
void DiscordClient::OnVoiceConnected() {
    m_signal_voice_connected.emit();
}

void DiscordClient::OnVoiceDisconnected() {
    m_signal_voice_disconnected.emit();
}
#endif
void DiscordClient::LoadEventMap() {
m_event_map["READY"] = GatewayEvent::READY;
m_event_map["MESSAGE_CREATE"] = GatewayEvent::MESSAGE_CREATE;
@@ -2587,12 +2770,18 @@ void DiscordClient::LoadEventMap() {
m_event_map["MESSAGE_ACK"] = GatewayEvent::MESSAGE_ACK;
m_event_map["USER_GUILD_SETTINGS_UPDATE"] = GatewayEvent::USER_GUILD_SETTINGS_UPDATE;
m_event_map["GUILD_MEMBERS_CHUNK"] = GatewayEvent::GUILD_MEMBERS_CHUNK;
m_event_map["VOICE_STATE_UPDATE"] = GatewayEvent::VOICE_STATE_UPDATE;
m_event_map["VOICE_SERVER_UPDATE"] = GatewayEvent::VOICE_SERVER_UPDATE;
}
DiscordClient::type_signal_gateway_ready DiscordClient::signal_gateway_ready() {
return m_signal_gateway_ready;
}
DiscordClient::type_signal_gateway_ready_supplemental DiscordClient::signal_gateway_ready_supplemental() {
return m_signal_gateway_ready_supplemental;
}
DiscordClient::type_signal_message_create DiscordClient::signal_message_create() {
return m_signal_message_create;
}
@@ -2796,3 +2985,37 @@ DiscordClient::type_signal_channel_accessibility_changed DiscordClient::signal_c
DiscordClient::type_signal_message_send_fail DiscordClient::signal_message_send_fail() {
return m_signal_message_send_fail;
}
#ifdef WITH_VOICE
// Accessors for the voice-related signals: client lifecycle, speaking
// indicators, per-user channel membership, connect/disconnect requests, and
// fine-grained client state changes.
DiscordClient::type_signal_voice_connected DiscordClient::signal_voice_connected() {
    return m_signal_voice_connected;
}

DiscordClient::type_signal_voice_disconnected DiscordClient::signal_voice_disconnected() {
    return m_signal_voice_disconnected;
}

DiscordClient::type_signal_voice_speaking DiscordClient::signal_voice_speaking() {
    return m_signal_voice_speaking;
}

DiscordClient::type_signal_voice_user_disconnect DiscordClient::signal_voice_user_disconnect() {
    return m_signal_voice_user_disconnect;
}

DiscordClient::type_signal_voice_user_connect DiscordClient::signal_voice_user_connect() {
    return m_signal_voice_user_connect;
}

DiscordClient::type_signal_voice_requested_connect DiscordClient::signal_voice_requested_connect() {
    return m_signal_voice_requested_connect;
}

DiscordClient::type_signal_voice_requested_disconnect DiscordClient::signal_voice_requested_disconnect() {
    return m_signal_voice_requested_disconnect;
}

DiscordClient::type_signal_voice_client_state_update DiscordClient::signal_voice_client_state_update() {
    return m_signal_voice_client_state_update;
}
#endif

View File

@@ -1,9 +1,11 @@
#pragma once
#include "websocket.hpp"
#include "chatsubmitparams.hpp"
#include "waiter.hpp"
#include "httpclient.hpp"
#include "objects.hpp"
#include "store.hpp"
#include "chatsubmitparams.hpp"
#include "voiceclient.hpp"
#include "websocket.hpp"
#include <sigc++/sigc++.h>
#include <nlohmann/json.hpp>
#include <thread>
@@ -18,31 +20,6 @@
#undef GetMessage
#endif
class HeartbeatWaiter {
public:
template<class R, class P>
bool wait_for(std::chrono::duration<R, P> const &time) const {
std::unique_lock<std::mutex> lock(m);
return !cv.wait_for(lock, time, [&] { return terminate; });
}
void kill() {
std::unique_lock<std::mutex> lock(m);
terminate = true;
cv.notify_all();
}
void revive() {
std::unique_lock<std::mutex> lock(m);
terminate = false;
}
private:
mutable std::condition_variable cv;
mutable std::mutex m;
bool terminate = false;
};
class Abaddon;
class DiscordClient {
friend class Abaddon;
@@ -203,6 +180,21 @@ public:
void GetVerificationGateInfo(Snowflake guild_id, const sigc::slot<void(std::optional<VerificationGateInfoObject>)> &callback);
void AcceptVerificationGate(Snowflake guild_id, VerificationGateInfoObject info, const sigc::slot<void(DiscordError code)> &callback);
#ifdef WITH_VOICE
void ConnectToVoice(Snowflake channel_id);
void DisconnectFromVoice();
// Is fully connected
[[nodiscard]] bool IsVoiceConnected() const noexcept;
[[nodiscard]] bool IsVoiceConnecting() const noexcept;
[[nodiscard]] Snowflake GetVoiceChannelID() const noexcept;
[[nodiscard]] std::unordered_set<Snowflake> GetUsersInVoiceChannel(Snowflake channel_id);
[[nodiscard]] std::optional<uint32_t> GetSSRCOfUser(Snowflake id) const;
[[nodiscard]] std::optional<Snowflake> GetVoiceState(Snowflake user_id) const;
void SetVoiceMuted(bool is_mute);
void SetVoiceDeafened(bool is_deaf);
#endif
void SetReferringChannel(Snowflake id);
void SetBuildNumber(uint32_t build_number);
@@ -285,6 +277,12 @@ private:
void HandleGatewayReadySupplemental(const GatewayMessage &msg);
void HandleGatewayReconnect(const GatewayMessage &msg);
void HandleGatewayInvalidSession(const GatewayMessage &msg);
#ifdef WITH_VOICE
void HandleGatewayVoiceStateUpdate(const GatewayMessage &msg);
void HandleGatewayVoiceServerUpdate(const GatewayMessage &msg);
#endif
void HeartbeatThread();
void SendIdentify();
void SendResume();
@@ -293,7 +291,7 @@ private:
void SetSuperPropertiesFromIdentity(const IdentifyMessage &identity);
void HandleSocketOpen();
void HandleSocketClose(uint16_t code);
void HandleSocketClose(const ix::WebSocketCloseInfo &info);
static bool CheckCode(const http::response_type &r);
static bool CheckCode(const http::response_type &r, int expected);
@@ -337,13 +335,33 @@ private:
std::thread m_heartbeat_thread;
std::atomic<int> m_last_sequence = -1;
std::atomic<int> m_heartbeat_msec = 0;
HeartbeatWaiter m_heartbeat_waiter;
Waiter m_heartbeat_waiter;
std::atomic<bool> m_heartbeat_acked = true;
bool m_reconnecting = false; // reconnecting either to resume or reidentify
bool m_wants_resume = false; // reconnecting specifically to resume
std::string m_session_id;
#ifdef WITH_VOICE
DiscordVoiceClient m_voice;
bool m_mute_requested = false;
bool m_deaf_requested = false;
Snowflake m_voice_channel_id;
// todo sql i guess
std::unordered_map<Snowflake, Snowflake> m_voice_state_user_channel;
std::unordered_map<Snowflake, std::unordered_set<Snowflake>> m_voice_state_channel_users;
void SendVoiceStateUpdate();
void SetVoiceState(Snowflake user_id, Snowflake channel_id);
void ClearVoiceState(Snowflake user_id);
void OnVoiceConnected();
void OnVoiceDisconnected();
#endif
mutable std::mutex m_msg_mutex;
Glib::Dispatcher m_msg_dispatch;
std::queue<std::string> m_msg_queue;
@@ -361,6 +379,7 @@ private:
// signals
public:
typedef sigc::signal<void> type_signal_gateway_ready;
typedef sigc::signal<void> type_signal_gateway_ready_supplemental;
typedef sigc::signal<void, Message> type_signal_message_create;
typedef sigc::signal<void, Snowflake, Snowflake> type_signal_message_delete;
typedef sigc::signal<void, Snowflake, Snowflake> type_signal_message_update;
@@ -416,7 +435,19 @@ public:
typedef sigc::signal<void> type_signal_connected;
typedef sigc::signal<void, std::string, float> type_signal_message_progress;
#ifdef WITH_VOICE
using type_signal_voice_connected = sigc::signal<void()>;
using type_signal_voice_disconnected = sigc::signal<void()>;
using type_signal_voice_speaking = sigc::signal<void(VoiceSpeakingData)>;
using type_signal_voice_user_disconnect = sigc::signal<void(Snowflake, Snowflake)>;
using type_signal_voice_user_connect = sigc::signal<void(Snowflake, Snowflake)>;
using type_signal_voice_requested_connect = sigc::signal<void(Snowflake)>;
using type_signal_voice_requested_disconnect = sigc::signal<void()>;
using type_signal_voice_client_state_update = sigc::signal<void(DiscordVoiceClient::State)>;
#endif
type_signal_gateway_ready signal_gateway_ready();
type_signal_gateway_ready_supplemental signal_gateway_ready_supplemental();
type_signal_message_create signal_message_create();
type_signal_message_delete signal_message_delete();
type_signal_message_update signal_message_update();
@@ -470,8 +501,20 @@ public:
type_signal_connected signal_connected();
type_signal_message_progress signal_message_progress();
#ifdef WITH_VOICE
type_signal_voice_connected signal_voice_connected();
type_signal_voice_disconnected signal_voice_disconnected();
type_signal_voice_speaking signal_voice_speaking();
type_signal_voice_user_disconnect signal_voice_user_disconnect();
type_signal_voice_user_connect signal_voice_user_connect();
type_signal_voice_requested_connect signal_voice_requested_connect();
type_signal_voice_requested_disconnect signal_voice_requested_disconnect();
type_signal_voice_client_state_update signal_voice_client_state_update();
#endif
protected:
type_signal_gateway_ready m_signal_gateway_ready;
type_signal_gateway_ready_supplemental m_signal_gateway_ready_supplemental;
type_signal_message_create m_signal_message_create;
type_signal_message_delete m_signal_message_delete;
type_signal_message_update m_signal_message_update;
@@ -524,4 +567,15 @@ protected:
type_signal_disconnected m_signal_disconnected;
type_signal_connected m_signal_connected;
type_signal_message_progress m_signal_message_progress;
#ifdef WITH_VOICE
type_signal_voice_connected m_signal_voice_connected;
type_signal_voice_disconnected m_signal_voice_disconnected;
type_signal_voice_speaking m_signal_voice_speaking;
type_signal_voice_user_disconnect m_signal_voice_user_disconnect;
type_signal_voice_user_connect m_signal_voice_user_connect;
type_signal_voice_requested_connect m_signal_voice_requested_connect;
type_signal_voice_requested_disconnect m_signal_voice_requested_disconnect;
type_signal_voice_client_state_update m_signal_voice_client_state_update;
#endif
};

View File

@@ -233,8 +233,14 @@ void from_json(const nlohmann::json &j, SupplementalMergedPresencesData &m) {
JS_D("friends", m.Friends);
}
void from_json(const nlohmann::json &j, SupplementalGuildEntry &m) {
JS_D("id", m.ID);
JS_D("voice_states", m.VoiceStates);
}
void from_json(const nlohmann::json &j, ReadySupplementalData &m) {
JS_D("merged_presences", m.MergedPresences);
JS_D("guilds", m.Guilds);
}
void to_json(nlohmann::json &j, const IdentifyProperties &m) {
@@ -640,3 +646,43 @@ void from_json(const nlohmann::json &j, GuildMembersChunkData &m) {
JS_D("members", m.Members);
JS_D("guild_id", m.GuildID);
}
#ifdef WITH_VOICE
// Serializes a gateway VoiceStateUpdate (op 4). guild_id/channel_id are
// emitted as explicit nulls when absent; a null channel_id means disconnect.
void to_json(nlohmann::json &j, const VoiceStateUpdateMessage &m) {
    j["op"] = GatewayOp::VoiceStateUpdate;
    if (m.GuildID.has_value())
        j["d"]["guild_id"] = *m.GuildID;
    else
        j["d"]["guild_id"] = nullptr;
    if (m.ChannelID.has_value())
        j["d"]["channel_id"] = *m.ChannelID;
    else
        j["d"]["channel_id"] = nullptr;
    j["d"]["self_mute"] = m.SelfMute;
    j["d"]["self_deaf"] = m.SelfDeaf;
    j["d"]["self_video"] = m.SelfVideo;
    // j["d"]["preferred_region"] = m.PreferredRegion;
}

// Deserializes VOICE_SERVER_UPDATE: token + endpoint, plus guild or channel ID.
void from_json(const nlohmann::json &j, VoiceServerUpdateData &m) {
    JS_D("token", m.Token);
    JS_D("endpoint", m.Endpoint);
    JS_ON("guild_id", m.GuildID);
    JS_ON("channel_id", m.ChannelID);
}
#endif

// Deserializes a voice state object. channel_id may be an explicit null
// (user not in a channel); member/self_stream may be absent.
void from_json(const nlohmann::json &j, VoiceState &m) {
    JS_ON("guild_id", m.GuildID);
    JS_N("channel_id", m.ChannelID);
    JS_D("deaf", m.IsDeafened);
    JS_D("mute", m.IsMuted);
    JS_D("self_deaf", m.IsSelfDeafened);
    JS_D("self_mute", m.IsSelfMuted);
    JS_D("self_video", m.IsSelfVideo);
    JS_O("self_stream", m.IsSelfStream);
    JS_D("suppress", m.IsSuppressed);
    JS_D("user_id", m.UserID);
    JS_ON("member", m.Member);
    JS_D("session_id", m.SessionID);
}

View File

@@ -100,6 +100,8 @@ enum class GatewayEvent : int {
MESSAGE_ACK,
USER_GUILD_SETTINGS_UPDATE,
GUILD_MEMBERS_CHUNK,
VOICE_STATE_UPDATE,
VOICE_SERVER_UPDATE,
};
enum class GatewayCloseCode : uint16_t {
@@ -352,8 +354,18 @@ struct SupplementalMergedPresencesData {
friend void from_json(const nlohmann::json &j, SupplementalMergedPresencesData &m);
};
struct VoiceState;

// One guild entry in READY_SUPPLEMENTAL; carries the guild's active voice states.
struct SupplementalGuildEntry {
    // std::vector<?> EmbeddedActivities;
    Snowflake ID;
    std::vector<VoiceState> VoiceStates;

    friend void from_json(const nlohmann::json &j, SupplementalGuildEntry &m);
};

// Payload of READY_SUPPLEMENTAL: merged presences plus per-guild voice states.
struct ReadySupplementalData {
    SupplementalMergedPresencesData MergedPresences;
    std::vector<SupplementalGuildEntry> Guilds;

    friend void from_json(const nlohmann::json &j, ReadySupplementalData &m);
};
@@ -864,3 +876,42 @@ struct GuildMembersChunkData {
friend void from_json(const nlohmann::json &j, GuildMembersChunkData &m);
};
#ifdef WITH_VOICE
// Gateway VoiceStateUpdate (op 4) payload sent to join/leave/update voice.
struct VoiceStateUpdateMessage {
    std::optional<Snowflake> GuildID;
    std::optional<Snowflake> ChannelID; // empty -> serialized as null (disconnect)
    bool SelfMute = false;
    bool SelfDeaf = false;
    bool SelfVideo = false;
    std::string PreferredRegion; // currently not serialized (see to_json)

    friend void to_json(nlohmann::json &j, const VoiceStateUpdateMessage &m);
};

// VOICE_SERVER_UPDATE payload: credentials + endpoint for the voice gateway.
struct VoiceServerUpdateData {
    std::string Token;
    std::string Endpoint;
    std::optional<Snowflake> GuildID;
    std::optional<Snowflake> ChannelID;

    friend void from_json(const nlohmann::json &j, VoiceServerUpdateData &m);
};
#endif

// A user's voice state as reported by the gateway.
struct VoiceState {
    std::optional<Snowflake> ChannelID; // empty = not in a voice channel
    bool IsDeafened;
    bool IsMuted;
    std::optional<Snowflake> GuildID;
    std::optional<GuildMember> Member;
    bool IsSelfDeafened;
    bool IsSelfMuted;
    bool IsSelfVideo;
    bool IsSelfStream = false; // optional in the payload, defaults to false
    std::string SessionID;
    bool IsSuppressed;
    Snowflake UserID;

    friend void from_json(const nlohmann::json &j, VoiceState &m);
};

544
src/discord/voiceclient.cpp Normal file
View File

@@ -0,0 +1,544 @@
#ifdef WITH_VOICE
// clang-format off
#include "voiceclient.hpp"
#include "json.hpp"
#include <sodium.h>
#include <spdlog/spdlog.h>
#include <spdlog/fmt/bin_to_hex.h>
#include "abaddon.hpp"
#include "audio/manager.hpp"
#ifdef _WIN32
#define S_ADDR(var) (var).sin_addr.S_un.S_addr
#define socklen_t int
#else
#define S_ADDR(var) (var).sin_addr.s_addr
#endif
// clang-format on
// UDP socket used for encrypted RTP voice data; starts with no open socket.
UDPSocket::UDPSocket()
    : m_socket(INVALID_SOCKET) {
}

UDPSocket::~UDPSocket() {
    Stop();
}
// Stores the voice server's address and creates the UDP socket used for RTP.
// Fix: std::string_view::data() is not guaranteed to be NUL-terminated, but
// inet_addr() requires a C string -- copy into a bounded local buffer first.
// (Current callers pass a std::string so this was latent, not observed.)
void UDPSocket::Connect(std::string_view ip, uint16_t port) {
    std::memset(&m_server, 0, sizeof(m_server));
    char ip_cstr[16] = {}; // big enough for any IPv4 dotted quad + NUL
    ip.copy(ip_cstr, sizeof(ip_cstr) - 1);
    m_server.sin_family = AF_INET;
    S_ADDR(m_server) = inet_addr(ip_cstr);
    m_server.sin_port = htons(port);
    m_socket = socket(AF_INET, SOCK_DGRAM, 0);
    // NOTE(review): this binds to the *remote* address, which generally fails
    // (harmlessly -- sendto/recvfrom still work); confirm whether connect()
    // or binding to INADDR_ANY was intended
    bind(m_socket, reinterpret_cast<sockaddr *>(&m_server), sizeof(m_server));
}
// Starts the background receive thread (emits signal_data per datagram).
void UDPSocket::Run() {
    m_running = true;
    m_thread = std::thread(&UDPSocket::ReadThread, this);
}

// Secret key used by SendEncrypted; comes from the voice session description.
void UDPSocket::SetSecretKey(std::array<uint8_t, 32> key) {
    m_secret_key = key;
}

// Our SSRC, written into every outgoing RTP header.
void UDPSocket::SetSSRC(uint32_t ssrc) {
    m_ssrc = ssrc;
}
// Builds an RTP packet (12-byte header with big-endian sequence, timestamp
// and SSRC), encrypts the payload in-place after the header with
// crypto_secretbox_easy, and sends it. The nonce is the RTP header
// zero-padded to crypto_secretbox's 24 bytes.
// Fix: the nonce is now a local buffer instead of a function-local `static`
// array, which was shared mutable state (unsafe if ever called from more
// than one thread). Behavior is identical: bytes 12..23 stay zero.
void UDPSocket::SendEncrypted(const uint8_t *data, size_t len) {
    m_sequence++;
    m_timestamp += 480; // samples per frame -- must match the encoder's frame size

    std::vector<uint8_t> rtp(12 + len + crypto_secretbox_MACBYTES, 0);
    rtp[0] = 0x80; // version 2
    rtp[1] = 0x78; // payload type 0x78
    rtp[2] = (m_sequence >> 8) & 0xFF;
    rtp[3] = (m_sequence >> 0) & 0xFF;
    rtp[4] = (m_timestamp >> 24) & 0xFF;
    rtp[5] = (m_timestamp >> 16) & 0xFF;
    rtp[6] = (m_timestamp >> 8) & 0xFF;
    rtp[7] = (m_timestamp >> 0) & 0xFF;
    rtp[8] = (m_ssrc >> 24) & 0xFF;
    rtp[9] = (m_ssrc >> 16) & 0xFF;
    rtp[10] = (m_ssrc >> 8) & 0xFF;
    rtp[11] = (m_ssrc >> 0) & 0xFF;

    std::array<uint8_t, 24> nonce = {};
    std::memcpy(nonce.data(), rtp.data(), 12);
    crypto_secretbox_easy(rtp.data() + 12, data, len, nonce.data(), m_secret_key.data());

    Send(rtp.data(), rtp.size());
}
// Convenience overload of SendEncrypted for vector payloads.
void UDPSocket::SendEncrypted(const std::vector<uint8_t> &data) {
    SendEncrypted(data.data(), data.size());
}

// Raw (unencrypted) send to the voice server; used for IP discovery.
void UDPSocket::Send(const uint8_t *data, size_t len) {
    sendto(m_socket, reinterpret_cast<const char *>(data), static_cast<int>(len), 0, reinterpret_cast<sockaddr *>(&m_server), sizeof(m_server));
}
// Blocking receive used during IP discovery (before the read thread runs):
// returns the first datagram that originates from the voice server
// (datagrams from other peers are discarded), or an empty vector on error.
// Fix: the receive buffer is a local instead of a function-local `static`
// array, which was shared mutable state across threads/instances.
std::vector<uint8_t> UDPSocket::Receive() {
    std::array<uint8_t, 4096> buf;
    while (true) {
        sockaddr_in from;
        socklen_t fromlen = sizeof(from);
        int n = recvfrom(m_socket, reinterpret_cast<char *>(buf.data()), sizeof(buf), 0, reinterpret_cast<sockaddr *>(&from), &fromlen);
        if (n < 0) {
            return {};
        } else if (S_ADDR(from) == S_ADDR(m_server) && from.sin_port == m_server.sin_port) {
            return { buf.begin(), buf.begin() + n };
        }
    }
}
// Closes the socket, clears the run flag and joins the read thread.
// NOTE: the socket is closed before joining; ReadThread's select() timeout
// bounds how long the join can take either way.
void UDPSocket::Stop() {
#ifdef _WIN32
    closesocket(m_socket);
#else
    close(m_socket);
#endif
    m_running = false;
    if (m_thread.joinable()) m_thread.join();
}
// Receive loop: polls the socket with a one-second select() timeout so
// m_running is re-checked regularly, and emits signal_data for datagrams
// coming from the voice server (others are ignored).
// Fixes:
//  * the receive buffer is a local instead of a function-local `static`
//    (shared mutable state, racy with Receive()'s buffer usage pattern).
//  * the timeout was expressed as tv_usec = 1000000; POSIX allows select()
//    to reject tv_usec >= 1e6 with EINVAL, so use tv_sec = 1 instead
//    (same one-second timeout).
void UDPSocket::ReadThread() {
    std::array<uint8_t, 4096> buf;
    timeval tv;
    while (m_running) {
        sockaddr_in from;
        socklen_t addrlen = sizeof(from);

        tv.tv_sec = 1;
        tv.tv_usec = 0;

        fd_set read_fds;
        FD_ZERO(&read_fds);
        FD_SET(m_socket, &read_fds);
        if (select(m_socket + 1, &read_fds, nullptr, nullptr, &tv) > 0) {
            int n = recvfrom(m_socket, reinterpret_cast<char *>(buf.data()), sizeof(buf), 0, reinterpret_cast<sockaddr *>(&from), &addrlen);
            if (n > 0 && S_ADDR(from) == S_ADDR(m_server) && from.sin_port == m_server.sin_port) {
                m_signal_data.emit({ buf.begin(), buf.begin() + n });
            }
        }
    }
}
// Emitted from ReadThread with each datagram received from the voice server.
UDPSocket::type_signal_data UDPSocket::signal_data() {
    return m_signal_data;
}
// Wires up the voice websocket, UDP socket and dispatcher. The audio-manager
// hookup is deferred to an idle callback (see comment below: calling
// Abaddon::Get() during construction would deadlock the singleton).
DiscordVoiceClient::DiscordVoiceClient()
    : m_state(State::DisconnectedByClient)
    , m_ws("voice-ws")
    , m_log(spdlog::get("voice")) {
    if (sodium_init() == -1) {
        m_log->critical("sodium_init() failed");
    }

    m_udp.signal_data().connect([this](const std::vector<uint8_t> &data) {
        OnUDPData(data);
    });

    m_ws.signal_open().connect(sigc::mem_fun(*this, &DiscordVoiceClient::OnWebsocketOpen));
    m_ws.signal_close().connect(sigc::mem_fun(*this, &DiscordVoiceClient::OnWebsocketClose));
    m_ws.signal_message().connect(sigc::mem_fun(*this, &DiscordVoiceClient::OnWebsocketMessage));

    m_dispatcher.connect(sigc::mem_fun(*this, &DiscordVoiceClient::OnDispatch));

    // idle or else singleton deadlock
    Glib::signal_idle().connect_once([this]() {
        auto &audio = Abaddon::Get().GetAudio();
        // the audio manager writes encoded opus packets into m_opus_buffer,
        // which we encrypt and send whenever we are connected
        audio.SetOpusBuffer(m_opus_buffer.data());
        audio.signal_opus_packet().connect([this](int payload_size) {
            if (IsConnected()) {
                m_udp.SendEncrypted(m_opus_buffer.data(), payload_size);
            }
        });
    });
}

DiscordVoiceClient::~DiscordVoiceClient() {
    if (IsConnected() || IsConnecting()) Stop();
}
// Begins a voice connection: revives the heartbeat/keepalive waiters and
// opens the voice gateway websocket (v7).
// NOTE(review): m_signal_connected is emitted here, at connection *start*,
// not once fully connected -- confirm listeners expect that.
void DiscordVoiceClient::Start() {
    SetState(State::ConnectingToWebsocket);
    m_heartbeat_waiter.revive();
    m_keepalive_waiter.revive();
    m_ws.StartConnection("wss://" + m_endpoint + "/?v=7");
    m_signal_connected.emit();
}

// Tears down the websocket, the UDP socket and both worker threads.
// No-op (with a warning) when neither connected nor connecting.
void DiscordVoiceClient::Stop() {
    if (!IsConnected() && !IsConnecting()) {
        m_log->warn("Requested stop while not connected (from {})", GetStateName(m_state));
        return;
    }

    SetState(State::DisconnectedByClient);
    m_ws.Stop(4014); // close code sent to the voice gateway
    m_udp.Stop();
    m_heartbeat_waiter.kill();
    if (m_heartbeat_thread.joinable()) m_heartbeat_thread.join();
    m_keepalive_waiter.kill();
    if (m_keepalive_thread.joinable()) m_keepalive_thread.join();
    m_signal_disconnected.emit();
}
// Session ID received via our own gateway VOICE_STATE_UPDATE.
void DiscordVoiceClient::SetSessionID(std::string_view session_id) {
    m_session_id = session_id;
}

// Voice gateway host from VOICE_SERVER_UPDATE.
void DiscordVoiceClient::SetEndpoint(std::string_view endpoint) {
    m_endpoint = endpoint;
}

// Voice token from VOICE_SERVER_UPDATE.
void DiscordVoiceClient::SetToken(std::string_view token) {
    m_token = token;
}

// Guild ID (or channel ID -- see HandleGatewayVoiceServerUpdate) used in Identify.
void DiscordVoiceClient::SetServerID(Snowflake id) {
    m_server_id = id;
}

// Our own user ID, used in Identify.
void DiscordVoiceClient::SetUserID(Snowflake id) {
    m_user_id = id;
}

// SSRC observed for a user via Speaking events, if any.
std::optional<uint32_t> DiscordVoiceClient::GetSSRCOfUser(Snowflake id) const {
    if (const auto it = m_ssrc_map.find(id); it != m_ssrc_map.end()) {
        return it->second;
    }
    return std::nullopt;
}

bool DiscordVoiceClient::IsConnected() const noexcept {
    return m_state == State::Connected;
}

bool DiscordVoiceClient::IsConnecting() const noexcept {
    return m_state == State::ConnectingToWebsocket || m_state == State::EstablishingConnection;
}
// Parses a voice gateway payload and dispatches it by opcode.
void DiscordVoiceClient::OnGatewayMessage(const std::string &str) {
    VoiceGatewayMessage msg = nlohmann::json::parse(str);
    switch (msg.Opcode) {
        case VoiceGatewayOp::Hello:
            HandleGatewayHello(msg);
            break;
        case VoiceGatewayOp::Ready:
            HandleGatewayReady(msg);
            break;
        case VoiceGatewayOp::SessionDescription:
            HandleGatewaySessionDescription(msg);
            break;
        case VoiceGatewayOp::Speaking:
            HandleGatewaySpeaking(msg);
            break;
        default:
            m_log->warn("Unhandled opcode: {}", static_cast<int>(msg.Opcode));
    }
}

// Human-readable state name, used in log messages.
const char *DiscordVoiceClient::GetStateName(State state) {
    switch (state) {
        case State::DisconnectedByClient:
            return "DisconnectedByClient";
        case State::DisconnectedByServer:
            return "DisconnectedByServer";
        case State::ConnectingToWebsocket:
            return "ConnectingToWebsocket";
        case State::EstablishingConnection:
            return "EstablishingConnection";
        case State::Connected:
            return "Connected";
        default:
            return "Unknown";
    }
}
void DiscordVoiceClient::HandleGatewayHello(const VoiceGatewayMessage &m) {
VoiceHelloData d = m.Data;
m_log->debug("Received hello: {}ms", d.HeartbeatInterval);
m_heartbeat_msec = d.HeartbeatInterval;
m_heartbeat_thread = std::thread(&DiscordVoiceClient::HeartbeatThread, this);
Identify();
}
void DiscordVoiceClient::HandleGatewayReady(const VoiceGatewayMessage &m) {
VoiceReadyData d = m.Data;
m_log->debug("Received ready: {}:{} (ssrc: {})", d.IP, d.Port, d.SSRC);
m_ip = d.IP;
m_port = d.Port;
m_ssrc = d.SSRC;
if (std::find(d.Modes.begin(), d.Modes.end(), "xsalsa20_poly1305") == d.Modes.end()) {
m_log->warn("xsalsa20_poly1305 not in modes");
}
m_udp.Connect(m_ip, m_port);
m_keepalive_thread = std::thread(&DiscordVoiceClient::KeepaliveThread, this);
Discovery();
}
void DiscordVoiceClient::HandleGatewaySessionDescription(const VoiceGatewayMessage &m) {
VoiceSessionDescriptionData d = m.Data;
m_log->debug("Received session description (mode: {}) (key: {:ns}) ", d.Mode, spdlog::to_hex(d.SecretKey.begin(), d.SecretKey.end()));
VoiceSpeakingMessage msg;
msg.Delay = 0;
msg.SSRC = m_ssrc;
msg.Speaking = VoiceSpeakingType::Microphone;
m_ws.Send(msg);
m_secret_key = d.SecretKey;
m_udp.SetSSRC(m_ssrc);
m_udp.SetSecretKey(m_secret_key);
m_udp.SendEncrypted({ 0xF8, 0xFF, 0xFE });
m_udp.Run();
SetState(State::Connected);
}
// Handles a Speaking payload: records the user -> SSRC association (used by
// GetSSRCOfUser to route decoded audio/meters) and re-emits it as a signal.
void DiscordVoiceClient::HandleGatewaySpeaking(const VoiceGatewayMessage &m) {
    VoiceSpeakingData d = m.Data;
    m_ssrc_map[d.UserID] = d.SSRC;
    m_signal_speaking.emit(d);
}
// Sends the voice gateway Identify message built from the credentials the
// owner supplied via SetServerID/SetUserID/SetSessionID/SetToken.
void DiscordVoiceClient::Identify() {
    VoiceIdentifyMessage msg;
    msg.ServerID = m_server_id;
    msg.UserID = m_user_id;
    msg.SessionID = m_session_id;
    msg.Token = m_token;
    msg.Video = true;
    m_ws.Send(msg);
}
// Performs Discord's IP discovery handshake over the voice UDP socket to
// learn our external IP/port (for NAT traversal), then reports the result to
// the gateway via SelectProtocol.
// Packet layout (both directions, 74 bytes total):
//   Type(2) | Length(2, = 70) | SSRC(4) | Address(64, NUL-terminated) | Port(2)
// All multi-byte fields are big-endian.
void DiscordVoiceClient::Discovery() {
    std::vector<uint8_t> payload;
    payload.reserve(74);
    // type: request (0x0001)
    payload.push_back(0x00);
    payload.push_back(0x01);
    // payload length (70)
    payload.push_back(0x00);
    payload.push_back(0x46);
    // ssrc
    payload.push_back((m_ssrc >> 24) & 0xFF);
    payload.push_back((m_ssrc >> 16) & 0xFF);
    payload.push_back((m_ssrc >> 8) & 0xFF);
    payload.push_back((m_ssrc >> 0) & 0xFF);
    // space for address and port
    for (int i = 0; i < 66; i++) payload.push_back(0x00);
    m_udp.Send(payload.data(), payload.size());
    constexpr int MAX_TRIES = 100;
    for (int i = 0; i < MAX_TRIES; i++) {
        const auto response = m_udp.Receive();
        // type 0x0002 marks the discovery response
        if (response.size() >= 74 && response[0] == 0x00 && response[1] == 0x02) {
            const char *ip = reinterpret_cast<const char *>(response.data() + 8);
            // Port occupies bytes 72..73 of the 74-byte packet. The previous
            // code read response[73]/response[74]: off by one, indexing past
            // the end of a minimum-size packet and dropping the high byte.
            const auto port = static_cast<uint16_t>((response[72] << 8) | response[73]);
            m_log->info("Discovered IP and port: {}:{}", ip, port);
            SelectProtocol(ip, port);
            break;
        } else {
            m_log->error("Received non-discovery packet after sending request (try {}/{})", i + 1, MAX_TRIES);
        }
    }
}
// Tells the voice gateway which transport and encryption mode to use, along
// with the externally-visible address/port obtained from IP discovery.
void DiscordVoiceClient::SelectProtocol(const char *ip, uint16_t port) {
    VoiceSelectProtocolMessage msg;
    msg.Protocol = "udp";
    msg.Address = ip;
    msg.Port = port;
    msg.Mode = "xsalsa20_poly1305";
    m_ws.Send(msg);
}
// Websocket open callback: the socket is up, so move to the
// EstablishingConnection state while the voice handshake proceeds.
void DiscordVoiceClient::OnWebsocketOpen() {
    m_log->info("Websocket opened");
    SetState(State::EstablishingConnection);
}
// Websocket close callback: logs the close code/reason, noting which side
// initiated the close.
void DiscordVoiceClient::OnWebsocketClose(const ix::WebSocketCloseInfo &info) {
    if (!info.remote) {
        m_log->debug("Websocket closed (local): {} ({})", info.code, info.reason);
    } else {
        m_log->debug("Websocket closed (remote): {} ({})", info.code, info.reason);
    }
}
// Websocket message callback (runs on the websocket's worker thread).
// Queues the raw payload and pokes the Glib dispatcher so OnDispatch
// processes it on the main loop.
void DiscordVoiceClient::OnWebsocketMessage(const std::string &data) {
    // RAII guard instead of manual lock()/unlock(): the mutex is released
    // even if push() throws (e.g. on allocation failure).
    std::lock_guard<std::mutex> lock(m_dispatch_mutex);
    m_dispatch_queue.push(data);
    m_dispatcher.emit();
}
// Body of m_heartbeat_thread: every m_heartbeat_msec (from the Hello
// payload) sends a heartbeat whose nonce is the current unix time in ms.
// Exits when m_heartbeat_waiter is killed.
void DiscordVoiceClient::HeartbeatThread() {
    while (true) {
        // wait_for returns false once the waiter is killed -> leave the loop
        if (!m_heartbeat_waiter.wait_for(std::chrono::milliseconds(m_heartbeat_msec))) break;
        const auto ms = static_cast<uint64_t>(std::chrono::duration_cast<std::chrono::milliseconds>(
                                                  std::chrono::system_clock::now().time_since_epoch())
                                                  .count());
        m_log->trace("Heartbeat: {}", ms);
        VoiceHeartbeatMessage msg;
        msg.Nonce = ms;
        m_ws.Send(msg);
    }
}
void DiscordVoiceClient::KeepaliveThread() {
while (true) {
if (!m_heartbeat_waiter.wait_for(std::chrono::seconds(10))) break;
if (IsConnected()) {
const static uint8_t KEEPALIVE[] = { 0x13, 0x37 };
m_udp.Send(KEEPALIVE, sizeof(KEEPALIVE));
}
}
}
// Updates the connection state and notifies observers via
// signal_state_update.
void DiscordVoiceClient::SetState(State state) {
    m_log->debug("Changing state to {}", GetStateName(state));
    m_state = state;
    m_signal_state_update.emit(state);
}
// Handles an incoming voice datagram: 12-byte RTP header (SSRC in bytes
// 8..11) followed by an xsalsa20_poly1305 secretbox. The nonce is the RTP
// header zero-padded to 24 bytes. Decrypted Opus data is fed to the audio
// manager keyed by SSRC; packets that fail to decrypt are dropped.
void DiscordVoiceClient::OnUDPData(std::vector<uint8_t> data) {
    constexpr size_t RTP_HEADER_SIZE = 12;
    // Guard against runt packets: the original indexed data[8..11] and
    // computed data.size() - 12 unconditionally, which is UB/underflow for
    // anything shorter than a header plus the Poly1305 tag.
    if (data.size() < RTP_HEADER_SIZE + crypto_box_MACBYTES) return;
    uint8_t *payload = data.data() + RTP_HEADER_SIZE;
    const uint32_t ssrc = (data[8] << 24) |
                          (data[9] << 16) |
                          (data[10] << 8) |
                          (data[11] << 0);
    // Was a function-local static: shared across calls (and any future
    // threads) for no benefit. A zero-initialized local is equally cheap.
    std::array<uint8_t, 24> nonce = {};
    std::memcpy(nonce.data(), data.data(), RTP_HEADER_SIZE);
    if (crypto_secretbox_open_easy(payload, payload, data.size() - RTP_HEADER_SIZE, nonce.data(), m_secret_key.data())) {
        // decryption failure; dropped silently (logging here is very noisy)
        // spdlog::get("voice")->trace("UDP payload decryption failure");
    } else {
        Abaddon::Get().GetAudio().FeedMeOpus(ssrc, { payload, payload + data.size() - RTP_HEADER_SIZE - crypto_box_MACBYTES });
    }
}
// Runs on the main loop in response to m_dispatcher (see
// OnWebsocketMessage). Pops one queued gateway payload and processes it.
void DiscordVoiceClient::OnDispatch() {
    std::string msg;
    {
        // RAII lock scoped so it is released before OnGatewayMessage runs,
        // matching the original manual unlock; handlers never hold the queue
        // mutex, and the lock is released even if an exception is thrown.
        std::lock_guard<std::mutex> lock(m_dispatch_mutex);
        if (m_dispatch_queue.empty()) return;
        msg = std::move(m_dispatch_queue.front());
        m_dispatch_queue.pop();
    }
    OnGatewayMessage(msg);
}
// Accessor for the connected signal. Previously written with the
// type_signal_disconnected alias — the two aliases are the same
// sigc::signal<void()> so it compiled, but use type_signal_connected to
// match the header's declaration.
DiscordVoiceClient::type_signal_connected DiscordVoiceClient::signal_connected() {
    return m_signal_connected;
}
// Signal accessors (sigc signals are internally reference-counted, so
// returning by value hands out a connectable handle to the same signal).
DiscordVoiceClient::type_signal_disconnected DiscordVoiceClient::signal_disconnected() {
    return m_signal_disconnected;
}
DiscordVoiceClient::type_signal_speaking DiscordVoiceClient::signal_speaking() {
    return m_signal_speaking;
}
DiscordVoiceClient::type_signal_state_update DiscordVoiceClient::signal_state_update() {
    return m_signal_state_update;
}
// --- JSON (de)serialization for voice gateway payloads ---
// JS_D appears to read a required field and JS_ON an optional one (project
// JSON helper macros — confirm semantics in the project's json helpers).

// Envelope: { "op": <opcode>, "d": <payload> }. The payload is kept as raw
// json and converted by the specific handler.
void from_json(const nlohmann::json &j, VoiceGatewayMessage &m) {
    JS_D("op", m.Opcode);
    m.Data = j.at("d");
}

void from_json(const nlohmann::json &j, VoiceHelloData &m) {
    JS_D("heartbeat_interval", m.HeartbeatInterval);
}

// Heartbeat's "d" is the bare nonce, not an object.
void to_json(nlohmann::json &j, const VoiceHeartbeatMessage &m) {
    j["op"] = VoiceGatewayOp::Heartbeat;
    j["d"] = m.Nonce;
}

void to_json(nlohmann::json &j, const VoiceIdentifyMessage &m) {
    j["op"] = VoiceGatewayOp::Identify;
    j["d"]["server_id"] = m.ServerID;
    j["d"]["user_id"] = m.UserID;
    j["d"]["session_id"] = m.SessionID;
    j["d"]["token"] = m.Token;
    j["d"]["video"] = m.Video;
    // hardcoded single video stream description (values mirror the official
    // client; not configurable from VoiceIdentifyMessage yet)
    j["d"]["streams"][0]["type"] = "video";
    j["d"]["streams"][0]["rid"] = "100";
    j["d"]["streams"][0]["quality"] = 100;
}

void from_json(const nlohmann::json &j, VoiceReadyData::VoiceStream &m) {
    JS_D("active", m.IsActive);
    JS_D("quality", m.Quality);
    JS_D("rid", m.RID);
    JS_D("rtx_ssrc", m.RTXSSRC);
    JS_D("ssrc", m.SSRC);
    JS_D("type", m.Type);
}

void from_json(const nlohmann::json &j, VoiceReadyData &m) {
    JS_ON("experiments", m.Experiments);
    JS_D("ip", m.IP);
    JS_D("modes", m.Modes);
    JS_D("port", m.Port);
    JS_D("ssrc", m.SSRC);
    JS_ON("streams", m.Streams);
}

void to_json(nlohmann::json &j, const VoiceSelectProtocolMessage &m) {
    j["op"] = VoiceGatewayOp::SelectProtocol;
    j["d"]["address"] = m.Address;
    j["d"]["port"] = m.Port;
    j["d"]["protocol"] = m.Protocol;
    j["d"]["mode"] = m.Mode;
    // address/port/mode are duplicated under "data" — presumably for
    // compatibility with differing gateway expectations; confirm before
    // removing either copy
    j["d"]["data"]["address"] = m.Address;
    j["d"]["data"]["port"] = m.Port;
    j["d"]["data"]["mode"] = m.Mode;
}

void from_json(const nlohmann::json &j, VoiceSessionDescriptionData &m) {
    JS_D("mode", m.Mode);
    JS_D("secret_key", m.SecretKey);
}

void to_json(nlohmann::json &j, const VoiceSpeakingMessage &m) {
    j["op"] = VoiceGatewayOp::Speaking;
    j["d"]["speaking"] = m.Speaking;
    j["d"]["delay"] = m.Delay;
    j["d"]["ssrc"] = m.SSRC;
}

void from_json(const nlohmann::json &j, VoiceSpeakingData &m) {
    JS_D("user_id", m.UserID);
    JS_D("ssrc", m.SSRC);
    JS_D("speaking", m.Speaking);
}
#endif

288
src/discord/voiceclient.hpp Normal file
View File

@@ -0,0 +1,288 @@
#pragma once
#ifdef WITH_VOICE
// clang-format off
#include "snowflake.hpp"
#include "waiter.hpp"
#include "websocket.hpp"
#include <mutex>
#include <optional>
#include <queue>
#include <string>
#include <glibmm/dispatcher.h>
#include <sigc++/sigc++.h>
#include <spdlog/logger.h>
#include <unordered_map>
// clang-format on
// Close codes the voice gateway may use when terminating the websocket.
// NOTE(review): Discord's voice docs list 4000 as "Unknown error", not a
// normal closure — confirm the intended meaning of `Normal` here.
enum class VoiceGatewayCloseCode : uint16_t {
    Normal = 4000,
    UnknownOpcode = 4001,
    InvalidPayload = 4002,
    NotAuthenticated = 4003,
    AuthenticationFailed = 4004,
    AlreadyAuthenticated = 4005,
    SessionInvalid = 4006,
    SessionTimedOut = 4009,
    ServerNotFound = 4011,
    UnknownProtocol = 4012,
    Disconnected = 4014, // e.g. kicked from the channel or server
    ServerCrashed = 4015,
    UnknownEncryption = 4016,
};
// Voice gateway opcodes ("op" field of every gateway message).
enum class VoiceGatewayOp : int {
    Identify = 0,
    SelectProtocol = 1,
    Ready = 2,
    Heartbeat = 3,
    SessionDescription = 4,
    Speaking = 5,
    HeartbeatAck = 6,
    Resume = 7,
    Hello = 8,
    Resumed = 9,
    ClientDisconnect = 13,
};
// Envelope for every voice gateway message; Data holds the raw "d" payload
// for the opcode-specific handler to convert.
struct VoiceGatewayMessage {
    VoiceGatewayOp Opcode;
    nlohmann::json Data;

    friend void from_json(const nlohmann::json &j, VoiceGatewayMessage &m);
};

// Opcode 8 (Hello) payload.
struct VoiceHelloData {
    int HeartbeatInterval; // milliseconds

    friend void from_json(const nlohmann::json &j, VoiceHelloData &m);
};

// Opcode 3 (Heartbeat); the nonce is echoed back in the ack.
struct VoiceHeartbeatMessage {
    uint64_t Nonce;

    friend void to_json(nlohmann::json &j, const VoiceHeartbeatMessage &m);
};

// Opcode 0 (Identify): credentials received from the main gateway's voice
// state/server updates.
struct VoiceIdentifyMessage {
    Snowflake ServerID;
    Snowflake UserID;
    std::string SessionID;
    std::string Token;
    bool Video;
    // todo streams i guess?

    friend void to_json(nlohmann::json &j, const VoiceIdentifyMessage &m);
};

// Opcode 2 (Ready) payload: where to send voice UDP traffic and which
// encryption modes the server supports.
struct VoiceReadyData {
    struct VoiceStream {
        bool IsActive;
        int Quality;
        std::string RID;
        int RTXSSRC;
        int SSRC;
        std::string Type;

        friend void from_json(const nlohmann::json &j, VoiceStream &m);
    };

    std::vector<std::string> Experiments;
    std::string IP;
    std::vector<std::string> Modes; // supported encryption modes
    uint16_t Port;
    uint32_t SSRC; // our RTP SSRC
    std::vector<VoiceStream> Streams;

    friend void from_json(const nlohmann::json &j, VoiceReadyData &m);
};

// Opcode 1 (Select Protocol): our externally-visible address (from IP
// discovery) plus the chosen transport and encryption mode.
struct VoiceSelectProtocolMessage {
    std::string Address;
    uint16_t Port;
    std::string Mode;
    std::string Protocol;

    friend void to_json(nlohmann::json &j, const VoiceSelectProtocolMessage &m);
};

// Opcode 4 (Session Description) payload: the secretbox key for voice data.
struct VoiceSessionDescriptionData {
    // std::string AudioCodec;
    // std::string VideoCodec;
    // std::string MediaSessionID;
    std::string Mode;
    std::array<uint8_t, 32> SecretKey;

    friend void from_json(const nlohmann::json &j, VoiceSessionDescriptionData &m);
};

// Bit flags for the "speaking" field (combinable).
enum class VoiceSpeakingType {
    Microphone = 1 << 0,
    Soundshare = 1 << 1,
    Priority = 1 << 2,
};

// Opcode 5 (Speaking), outgoing.
struct VoiceSpeakingMessage {
    VoiceSpeakingType Speaking;
    int Delay;
    uint32_t SSRC;

    friend void to_json(nlohmann::json &j, const VoiceSpeakingMessage &m);
};

// Opcode 5 (Speaking), incoming: associates a user with their SSRC.
struct VoiceSpeakingData {
    Snowflake UserID;
    uint32_t SSRC;
    VoiceSpeakingType Speaking;

    friend void from_json(const nlohmann::json &j, VoiceSpeakingData &m);
};
// UDP socket wrapper for the voice data connection. Supports sending raw
// datagrams, sending encrypted voice packets (see SendEncrypted), and a
// reader thread (Run) that emits received datagrams via signal_data.
// NOTE(review): implementation not visible here; per-method comments below
// are inferred from usage in DiscordVoiceClient — verify against the .cpp.
class UDPSocket {
public:
    UDPSocket();
    ~UDPSocket();

    // Sets the destination address/port for subsequent sends.
    void Connect(std::string_view ip, uint16_t port);
    // Starts the reader thread that feeds signal_data.
    void Run();
    // Key material/SSRC used when building encrypted packets.
    void SetSecretKey(std::array<uint8_t, 32> key);
    void SetSSRC(uint32_t ssrc);
    void SendEncrypted(const uint8_t *data, size_t len);
    void SendEncrypted(const std::vector<uint8_t> &data);
    // Sends a raw, unencrypted datagram (used for discovery and keepalive).
    void Send(const uint8_t *data, size_t len);
    // Blocking receive of one datagram (used synchronously during discovery).
    std::vector<uint8_t> Receive();
    void Stop();

private:
    void ReadThread();

#ifdef _WIN32
    SOCKET m_socket;
#else
    int m_socket;
#endif
    sockaddr_in m_server;

    std::atomic<bool> m_running = false;

    std::thread m_thread;

    std::array<uint8_t, 32> m_secret_key;
    uint32_t m_ssrc;

    // outgoing RTP header state
    uint16_t m_sequence = 0;
    uint32_t m_timestamp = 0;

public:
    // Emitted (from the reader thread) with each received datagram.
    using type_signal_data = sigc::signal<void, std::vector<uint8_t>>;
    type_signal_data signal_data();

private:
    type_signal_data m_signal_data;
};
// Client for Discord's voice gateway + voice UDP connection. The owner
// feeds it session/server/user/token (from the main gateway's voice state
// and voice server updates) then calls Start(); progress is reported via
// signal_state_update and decoded-audio delivery goes through the global
// audio manager (see OnUDPData in the .cpp).
class DiscordVoiceClient {
public:
    DiscordVoiceClient();
    ~DiscordVoiceClient();

    void Start();
    void Stop();

    // Credentials/targets; set these before Start().
    void SetSessionID(std::string_view session_id);
    void SetEndpoint(std::string_view endpoint);
    void SetToken(std::string_view token);
    void SetServerID(Snowflake id);
    void SetUserID(Snowflake id);

    // SSRC learned from Speaking payloads; empty if the user hasn't spoken.
    [[nodiscard]] std::optional<uint32_t> GetSSRCOfUser(Snowflake id) const;

    // Is a websocket and udp connection fully established
    [[nodiscard]] bool IsConnected() const noexcept;
    [[nodiscard]] bool IsConnecting() const noexcept;

    enum class State {
        ConnectingToWebsocket,
        EstablishingConnection,
        Connected,
        DisconnectedByClient,
        DisconnectedByServer,
    };

private:
    static const char *GetStateName(State state);

    // Gateway message dispatch and per-opcode handlers.
    void OnGatewayMessage(const std::string &msg);
    void HandleGatewayHello(const VoiceGatewayMessage &m);
    void HandleGatewayReady(const VoiceGatewayMessage &m);
    void HandleGatewaySessionDescription(const VoiceGatewayMessage &m);
    void HandleGatewaySpeaking(const VoiceGatewayMessage &m);

    void Identify();
    // IP discovery over UDP, then SelectProtocol with the result.
    void Discovery();
    void SelectProtocol(const char *ip, uint16_t port);

    void OnWebsocketOpen();
    void OnWebsocketClose(const ix::WebSocketCloseInfo &info);
    void OnWebsocketMessage(const std::string &str);

    void HeartbeatThread();
    void KeepaliveThread();

    void SetState(State state);

    void OnUDPData(std::vector<uint8_t> data);

    std::string m_session_id;
    std::string m_endpoint;
    std::string m_token;
    Snowflake m_server_id;
    Snowflake m_channel_id; // NOTE(review): not used in the visible .cpp code
    Snowflake m_user_id;

    // user id -> SSRC, filled by Speaking payloads
    std::unordered_map<Snowflake, uint32_t> m_ssrc_map;

    std::array<uint8_t, 32> m_secret_key;

    // voice server address and our SSRC, from the Ready payload
    std::string m_ip;
    uint16_t m_port;
    uint32_t m_ssrc;

    int m_heartbeat_msec;
    Waiter m_heartbeat_waiter;
    std::thread m_heartbeat_thread;

    Waiter m_keepalive_waiter;
    std::thread m_keepalive_thread;

    Websocket m_ws;
    UDPSocket m_udp;

    // marshals websocket payloads onto the main loop (see OnDispatch)
    Glib::Dispatcher m_dispatcher;
    std::queue<std::string> m_dispatch_queue;
    std::mutex m_dispatch_mutex;
    void OnDispatch();

    std::array<uint8_t, 1275> m_opus_buffer; // NOTE(review): unused in visible code

    std::shared_ptr<spdlog::logger> m_log;

    std::atomic<State> m_state;

    using type_signal_connected = sigc::signal<void()>;
    using type_signal_disconnected = sigc::signal<void()>;
    using type_signal_speaking = sigc::signal<void(VoiceSpeakingData)>;
    using type_signal_state_update = sigc::signal<void(State)>;
    type_signal_connected m_signal_connected;
    type_signal_disconnected m_signal_disconnected;
    type_signal_speaking m_signal_speaking;
    type_signal_state_update m_signal_state_update;

public:
    type_signal_connected signal_connected();
    type_signal_disconnected signal_disconnected();
    type_signal_speaking signal_speaking();
    type_signal_state_update signal_state_update();
};
#endif

29
src/discord/waiter.hpp Normal file
View File

@@ -0,0 +1,29 @@
#pragma once
#include <chrono>
#include <condition_variable>
#include <mutex>
// Interruptible sleep for worker threads: wait_for() blocks for up to `time`
// and reports whether the thread should keep running. kill() wakes all
// waiters and makes subsequent waits return false immediately; revive()
// re-arms the waiter for reuse.
class Waiter {
public:
    // Returns true if the full duration elapsed (keep looping), false if
    // kill() was called before or during the wait (thread should exit).
    template<class R, class P>
    bool wait_for(std::chrono::duration<R, P> const &time) const {
        std::unique_lock<std::mutex> lock(m);
        // wait_for(pred) returns the predicate's final value: true iff
        // `terminate` was set, hence the negation
        return !cv.wait_for(lock, time, [&] { return terminate; });
    }

    void kill() {
        std::unique_lock<std::mutex> lock(m);
        terminate = true;
        cv.notify_all();
    }

    void revive() {
        std::unique_lock<std::mutex> lock(m);
        terminate = false;
    }

private:
    // mutable so const wait_for() can lock/wait
    mutable std::condition_variable cv;
    mutable std::mutex m;
    bool terminate = false;
};

View File

@@ -1,14 +1,33 @@
#include "websocket.hpp"
#include <spdlog/sinks/stdout_color_sinks.h>
#include <utility>
Websocket::Websocket() = default;
Websocket::Websocket(const std::string &id)
: m_close_info { 1000, "Normal", false } {
if (m_log = spdlog::get(id); !m_log) {
m_log = spdlog::stdout_color_mt(id);
}
m_open_dispatcher.connect([this]() {
m_signal_open.emit();
});
m_close_dispatcher.connect([this]() {
Stop();
m_signal_close.emit(m_close_info);
});
}
void Websocket::StartConnection(const std::string &url) {
m_websocket.disableAutomaticReconnection();
m_websocket.setUrl(url);
m_websocket.setOnMessageCallback([this](auto &&msg) { OnMessage(std::forward<decltype(msg)>(msg)); });
m_websocket.setExtraHeaders(ix::WebSocketHttpHeaders { { "User-Agent", m_agent } }); // idk if this actually works
m_websocket.start();
m_log->debug("Starting connection to {}", url);
m_websocket = std::make_unique<ix::WebSocket>();
m_websocket->disableAutomaticReconnection();
m_websocket->setUrl(url);
m_websocket->setOnMessageCallback([this](auto &&msg) { OnMessage(std::forward<decltype(msg)>(msg)); });
m_websocket->setExtraHeaders(ix::WebSocketHttpHeaders { { "User-Agent", m_agent } }); // idk if this actually works
m_websocket->start();
}
void Websocket::SetUserAgent(std::string agent) {
@@ -24,17 +43,19 @@ void Websocket::SetPrintMessages(bool show) noexcept {
}
void Websocket::Stop() {
m_log->debug("Stopping with default close code");
Stop(ix::WebSocketCloseConstants::kNormalClosureCode);
}
void Websocket::Stop(uint16_t code) {
m_websocket.stop(code);
m_log->debug("Stopping with close code {}", code);
m_websocket-> stop(code);
}
void Websocket::Send(const std::string &str) {
if (m_print_messages)
printf("sending %s\n", str.c_str());
m_websocket.sendText(str);
m_log->trace("Send: {}", str);
m_websocket->sendText(str);
}
void Websocket::Send(const nlohmann::json &j) {
@@ -44,10 +65,13 @@ void Websocket::Send(const nlohmann::json &j) {
void Websocket::OnMessage(const ix::WebSocketMessagePtr &msg) {
switch (msg->type) {
case ix::WebSocketMessageType::Open: {
m_signal_open.emit();
m_log->debug("Received open frame, dispatching");
m_open_dispatcher.emit();
} break;
case ix::WebSocketMessageType::Close: {
m_signal_close.emit(msg->closeInfo.code);
m_log->debug("Received close frame, dispatching. {} ({}){}", msg->closeInfo.code, msg->closeInfo.reason, msg->closeInfo.remote ? " Remote" : "");
m_close_info = msg->closeInfo;
m_close_dispatcher.emit();
} break;
case ix::WebSocketMessageType::Message: {
m_signal_message.emit(msg->str);

View File

@@ -3,12 +3,14 @@
#include <ixwebsocket/IXWebSocket.h>
#include <string>
#include <functional>
#include <glibmm.h>
#include <nlohmann/json.hpp>
#include <sigc++/sigc++.h>
#include <spdlog/spdlog.h>
class Websocket {
public:
Websocket();
Websocket(const std::string &id);
void StartConnection(const std::string &url);
void SetUserAgent(std::string agent);
@@ -24,12 +26,12 @@ public:
private:
void OnMessage(const ix::WebSocketMessagePtr &msg);
ix::WebSocket m_websocket;
std::unique_ptr<ix::WebSocket> m_websocket;
std::string m_agent;
public:
using type_signal_open = sigc::signal<void>;
using type_signal_close = sigc::signal<void, uint16_t>;
using type_signal_close = sigc::signal<void, ix::WebSocketCloseInfo>;
using type_signal_message = sigc::signal<void, std::string>;
type_signal_open signal_open();
@@ -42,4 +44,10 @@ private:
type_signal_message m_signal_message;
bool m_print_messages = true;
Glib::Dispatcher m_open_dispatcher;
Glib::Dispatcher m_close_dispatcher;
ix::WebSocketCloseInfo m_close_info;
std::shared_ptr<spdlog::logger> m_log;
};

View File

@@ -6,6 +6,7 @@ MainWindow::MainWindow()
, m_content_box(Gtk::ORIENTATION_HORIZONTAL)
, m_chan_content_paned(Gtk::ORIENTATION_HORIZONTAL)
, m_content_members_paned(Gtk::ORIENTATION_HORIZONTAL)
, m_left_pane(Gtk::ORIENTATION_VERTICAL)
, m_accels(Gtk::AccelGroup::create()) {
set_default_size(1200, 800);
get_style_context()->add_class("app-window");
@@ -51,12 +52,18 @@ MainWindow::MainWindow()
m_content_stack.set_visible_child("chat");
m_content_stack.show();
m_chan_content_paned.pack1(m_channel_list);
m_voice_info.show();
m_left_pane.add(m_channel_list);
m_left_pane.add(m_voice_info);
m_left_pane.show();
m_chan_content_paned.pack1(m_left_pane);
m_chan_content_paned.pack2(m_content_members_paned);
m_chan_content_paned.child_property_shrink(m_content_members_paned) = true;
m_chan_content_paned.child_property_resize(m_content_members_paned) = true;
m_chan_content_paned.child_property_shrink(m_channel_list) = true;
m_chan_content_paned.child_property_resize(m_channel_list) = true;
m_chan_content_paned.child_property_shrink(m_left_pane) = true;
m_chan_content_paned.child_property_resize(m_left_pane) = true;
m_chan_content_paned.set_position(200);
m_chan_content_paned.show();
m_content_box.add(m_chan_content_paned);

View File

@@ -3,6 +3,7 @@
#include "components/chatwindow.hpp"
#include "components/memberlist.hpp"
#include "components/friendslist.hpp"
#include "components/voiceinfobox.hpp"
#include <gtkmm.h>
class MainWindow : public Gtk::Window {
@@ -53,6 +54,9 @@ private:
ChatWindow m_chat;
MemberList m_members;
FriendsList m_friends;
VoiceInfoBox m_voice_info;
Gtk::Box m_left_pane;
Gtk::Stack m_content_stack;

View File

@@ -0,0 +1,125 @@
#ifdef WITH_VOICE
// clang-format off
#include "voicesettingswindow.hpp"
#include "abaddon.hpp"
#include "audio/manager.hpp"
#include <spdlog/spdlog.h>
// clang-format on
// Builds the "more settings" window for the Opus encoder: coding mode,
// signal hint, and bitrate. Widgets are initialized from the audio
// manager's current values and write back immediately on change.
// The window deletes itself when hidden (see signal_hide below), so it is
// expected to be heap-allocated by the caller.
VoiceSettingsWindow::VoiceSettingsWindow()
    : m_main(Gtk::ORIENTATION_VERTICAL) {
    get_style_context()->add_class("app-window");
    set_default_size(300, 300);

    m_encoding_mode.append("Voice");
    m_encoding_mode.append("Music");
    m_encoding_mode.append("Restricted");
    m_encoding_mode.set_tooltip_text(
        "Sets the coding mode for the Opus encoder\n"
        "Voice - Optimize for voice quality\n"
        "Music - Optimize for non-voice signals incl. music\n"
        "Restricted - Optimize for non-voice, low latency. Not recommended");

    // select the entry matching the encoder's current application
    const auto mode = Abaddon::Get().GetAudio().GetEncodingApplication();
    if (mode == OPUS_APPLICATION_VOIP) {
        m_encoding_mode.set_active(0);
    } else if (mode == OPUS_APPLICATION_AUDIO) {
        m_encoding_mode.set_active(1);
    } else if (mode == OPUS_APPLICATION_RESTRICTED_LOWDELAY) {
        m_encoding_mode.set_active(2);
    }

    m_encoding_mode.signal_changed().connect([this]() {
        const auto mode = m_encoding_mode.get_active_text();
        auto &audio = Abaddon::Get().GetAudio();
        spdlog::get("audio")->debug("Chose encoding mode: {}", mode.c_str());
        if (mode == "Voice") {
            audio.SetEncodingApplication(OPUS_APPLICATION_VOIP);
        } else if (mode == "Music") {
            spdlog::get("audio")->debug("music/audio");
            audio.SetEncodingApplication(OPUS_APPLICATION_AUDIO);
        } else if (mode == "Restricted") {
            audio.SetEncodingApplication(OPUS_APPLICATION_RESTRICTED_LOWDELAY);
        }
    });

    m_signal.append("Auto");
    m_signal.append("Voice");
    m_signal.append("Music");
    m_signal.set_tooltip_text(
        "Signal hint. Tells Opus what the current signal is\n"
        "Auto - Let Opus figure it out\n"
        "Voice - Tell Opus it's a voice signal\n"
        "Music - Tell Opus it's a music signal");

    // select the entry matching the encoder's current signal hint
    const auto signal = Abaddon::Get().GetAudio().GetSignalHint();
    if (signal == OPUS_AUTO) {
        m_signal.set_active(0);
    } else if (signal == OPUS_SIGNAL_VOICE) {
        m_signal.set_active(1);
    } else if (signal == OPUS_SIGNAL_MUSIC) {
        m_signal.set_active(2);
    }

    m_signal.signal_changed().connect([this]() {
        const auto signal = m_signal.get_active_text();
        auto &audio = Abaddon::Get().GetAudio();
        spdlog::get("audio")->debug("Chose signal hint: {}", signal.c_str());
        if (signal == "Auto") {
            audio.SetSignalHint(OPUS_AUTO);
        } else if (signal == "Voice") {
            audio.SetSignalHint(OPUS_SIGNAL_VOICE);
        } else if (signal == "Music") {
            audio.SetSignalHint(OPUS_SIGNAL_MUSIC);
        }
    });

    // exponential scale for bitrate because higher bitrates dont sound much different
    constexpr static auto MAX_BITRATE = 128000.0;
    constexpr static auto MIN_BITRATE = 2400.0;
    // slider position (0..100) -> bitrate, cubic curve
    const auto bitrate_scale = [this](double value) -> double {
        value /= 100.0;
        return (MAX_BITRATE - MIN_BITRATE) * value * value * value + MIN_BITRATE;
    };
    // inverse: bitrate -> slider position
    const auto bitrate_scale_r = [this](double value) -> double {
        return 100.0 * std::cbrt((value - MIN_BITRATE) / (MAX_BITRATE - MIN_BITRATE));
    };

    m_bitrate.set_range(0.0, 100.0);
    m_bitrate.set_value_pos(Gtk::POS_TOP);
    m_bitrate.set_value(bitrate_scale_r(Abaddon::Get().GetAudio().GetBitrate()));
    // show the scaled bitrate rather than the raw 0..100 position; the top
    // of the range maps to OPUS_BITRATE_MAX, displayed as "MAX"
    m_bitrate.signal_format_value().connect([this, bitrate_scale](double value) {
        const auto scaled = bitrate_scale(value);
        if (value <= 99.9) {
            return Glib::ustring(std::to_string(static_cast<int>(scaled)));
        } else {
            return Glib::ustring("MAX");
        }
    });
    m_bitrate.signal_value_changed().connect([this, bitrate_scale]() {
        const auto value = m_bitrate.get_value();
        const auto scaled = bitrate_scale(value);
        if (value <= 99.9) {
            Abaddon::Get().GetAudio().SetBitrate(static_cast<int>(scaled));
        } else {
            Abaddon::Get().GetAudio().SetBitrate(OPUS_BITRATE_MAX);
        }
    });

    m_main.add(m_encoding_mode);
    m_main.add(m_signal);
    m_main.add(m_bitrate);
    add(m_main);
    show_all_children();

    // no need to bring in ManageHeapWindow, no user menu
    // self-destructs on hide; assumes heap allocation by the opener
    signal_hide().connect([this]() {
        delete this;
    });
}
#endif

View File

@@ -0,0 +1,25 @@
#pragma once
#ifdef WITH_VOICE
// clang-format off
#include <gtkmm/box.h>
#include <gtkmm/comboboxtext.h>
#include <gtkmm/scale.h>
#include <gtkmm/window.h>
// clang-format on
// Window exposing extra Opus encoder settings (coding mode, signal hint,
// bitrate). Deletes itself on hide, so heap-allocate it (see the .cpp).
// NOTE(review): widget members are currently in the public section.
class VoiceSettingsWindow : public Gtk::Window {
public:
    VoiceSettingsWindow();

    Gtk::Box m_main;
    Gtk::ComboBoxText m_encoding_mode;
    Gtk::ComboBoxText m_signal;
    Gtk::Scale m_bitrate;

private:
};
#endif

254
src/windows/voicewindow.cpp Normal file
View File

@@ -0,0 +1,254 @@
#ifdef WITH_VOICE
// clang-format off
#include "voicewindow.hpp"
#include "components/lazyimage.hpp"
#include "abaddon.hpp"
#include "audio/manager.hpp"
#include "voicesettingswindow.hpp"
// clang-format on
// One row in the voice window's user list: avatar + name, a mute checkbox,
// a per-user volume slider (0..200, default 100), and a volume meter.
// Mute/volume changes are re-emitted via signal_mute_cs/signal_volume for
// VoiceWindow to forward with the user's id attached.
class VoiceWindowUserListEntry : public Gtk::ListBoxRow {
public:
    VoiceWindowUserListEntry(Snowflake id)
        : m_main(Gtk::ORIENTATION_VERTICAL)
        , m_horz(Gtk::ORIENTATION_HORIZONTAL)
        , m_avatar(32, 32)
        , m_mute("Mute") {
        m_name.set_halign(Gtk::ALIGN_START);
        m_name.set_hexpand(true);
        m_mute.set_halign(Gtk::ALIGN_END);

        m_volume.set_range(0.0, 200.0);
        m_volume.set_value_pos(Gtk::POS_LEFT);
        m_volume.set_value(100.0);
        m_volume.signal_value_changed().connect([this]() {
            m_signal_volume.emit(m_volume.get_value());
        });

        m_horz.add(m_avatar);
        m_horz.add(m_name);
        m_horz.add(m_mute);
        m_main.add(m_horz);
        m_main.add(m_volume);
        m_main.add(m_meter);
        add(m_main);
        show_all_children();

        // fill in name/avatar from the user cache if available
        auto &discord = Abaddon::Get().GetDiscordClient();
        const auto user = discord.GetUser(id);
        if (user.has_value()) {
            m_name.set_text(user->Username);
            m_avatar.SetURL(user->GetAvatarURL("png", "32"));
        } else {
            m_name.set_text("Unknown user");
        }

        m_mute.signal_toggled().connect([this]() {
            m_signal_mute_cs.emit(m_mute.get_active());
        });
    }

    // Sets the displayed volume meter level; frac is expected in 0..1
    // (fed from the audio manager by VoiceWindow::UpdateVoiceMeters).
    void SetVolumeMeter(double frac) {
        m_meter.SetVolume(frac);
    }

private:
    Gtk::Box m_main;
    Gtk::Box m_horz;
    LazyImage m_avatar;
    Gtk::Label m_name;
    Gtk::CheckButton m_mute;
    Gtk::Scale m_volume;
    VolumeMeter m_meter;

public:
    // "cs" = clientside mute (local only, not sent to Discord)
    using type_signal_mute_cs = sigc::signal<void(bool)>;
    using type_signal_volume = sigc::signal<void(double)>;

    type_signal_mute_cs signal_mute_cs() {
        return m_signal_mute_cs;
    }

    type_signal_volume signal_volume() {
        return m_signal_volume;
    }

private:
    type_signal_mute_cs m_signal_mute_cs;
    type_signal_volume m_signal_volume;
};
// Builds the voice channel window: mute/deafen controls, capture meter with
// gate/gain sliders, playback/capture device pickers, the connected-user
// list, and a View menu opening VoiceSettingsWindow. Rows are kept in sync
// with the channel via the discord client's connect/disconnect signals, and
// meters are refreshed on a 40 ms timer.
VoiceWindow::VoiceWindow(Snowflake channel_id)
    : m_main(Gtk::ORIENTATION_VERTICAL)
    , m_controls(Gtk::ORIENTATION_HORIZONTAL)
    , m_mute("Mute")
    , m_deafen("Deafen")
    , m_channel_id(channel_id)
    , m_menu_view("View")
    , m_menu_view_settings("More _Settings", true) {
    get_style_context()->add_class("app-window");
    set_default_size(300, 300);

    auto &discord = Abaddon::Get().GetDiscordClient();

    // seed the list with everyone already in the channel, then track changes
    SetUsers(discord.GetUsersInVoiceChannel(m_channel_id));

    discord.signal_voice_user_disconnect().connect(sigc::mem_fun(*this, &VoiceWindow::OnUserDisconnect));
    discord.signal_voice_user_connect().connect(sigc::mem_fun(*this, &VoiceWindow::OnUserConnect));

    m_mute.signal_toggled().connect(sigc::mem_fun(*this, &VoiceWindow::OnMuteChanged));
    m_deafen.signal_toggled().connect(sigc::mem_fun(*this, &VoiceWindow::OnDeafenChanged));

    m_scroll.set_policy(Gtk::POLICY_NEVER, Gtk::POLICY_AUTOMATIC);
    m_scroll.set_hexpand(true);
    m_scroll.set_vexpand(true);

    // the gate threshold is drawn as a tick on the capture meter
    m_capture_volume.SetShowTick(true);

    m_capture_gate.set_range(0.0, 100.0);
    m_capture_gate.set_value_pos(Gtk::POS_LEFT);
    m_capture_gate.set_value(0.0);
    m_capture_gate.signal_value_changed().connect([this]() {
        // todo this should probably emit 0-1 i dont think the mgr should be responsible for scaling down
        const double val = m_capture_gate.get_value();
        m_signal_gate.emit(val);
        m_capture_volume.SetTick(val / 100.0);
    });

    // gain emitted as a 0..2 multiplier (slider shows 0..200%)
    m_capture_gain.set_range(0.0, 200.0);
    m_capture_gain.set_value_pos(Gtk::POS_LEFT);
    m_capture_gain.set_value(100.0);
    m_capture_gain.signal_value_changed().connect([this]() {
        const double val = m_capture_gain.get_value();
        m_signal_gain.emit(val / 100.0);
    });

    // device pickers are backed by the audio manager's device models;
    // column 0 is the display name
    auto *playback_renderer = Gtk::make_managed<Gtk::CellRendererText>();
    m_playback_combo.set_valign(Gtk::ALIGN_END);
    m_playback_combo.set_hexpand(true);
    m_playback_combo.set_halign(Gtk::ALIGN_FILL);
    m_playback_combo.set_model(Abaddon::Get().GetAudio().GetDevices().GetPlaybackDeviceModel());
    m_playback_combo.set_active(Abaddon::Get().GetAudio().GetDevices().GetActivePlaybackDevice());
    m_playback_combo.pack_start(*playback_renderer);
    m_playback_combo.add_attribute(*playback_renderer, "text", 0);
    m_playback_combo.signal_changed().connect([this]() {
        Abaddon::Get().GetAudio().SetPlaybackDevice(m_playback_combo.get_active());
    });

    auto *capture_renderer = Gtk::make_managed<Gtk::CellRendererText>();
    m_capture_combo.set_valign(Gtk::ALIGN_END);
    m_capture_combo.set_hexpand(true);
    m_capture_combo.set_halign(Gtk::ALIGN_FILL);
    m_capture_combo.set_model(Abaddon::Get().GetAudio().GetDevices().GetCaptureDeviceModel());
    m_capture_combo.set_active(Abaddon::Get().GetAudio().GetDevices().GetActiveCaptureDevice());
    m_capture_combo.pack_start(*capture_renderer);
    m_capture_combo.add_attribute(*capture_renderer, "text", 0);
    m_capture_combo.signal_changed().connect([this]() {
        Abaddon::Get().GetAudio().SetCaptureDevice(m_capture_combo.get_active());
    });

    m_menu_bar.append(m_menu_view);
    m_menu_view.set_submenu(m_menu_view_sub);
    m_menu_view_sub.append(m_menu_view_settings);
    m_menu_view_settings.signal_activate().connect([this]() {
        // VoiceSettingsWindow deletes itself on hide
        auto *window = new VoiceSettingsWindow;
        window->show();
    });

    m_scroll.add(m_user_list);

    m_controls.add(m_mute);
    m_controls.add(m_deafen);
    m_main.add(m_menu_bar);
    m_main.add(m_controls);
    m_main.add(m_capture_volume);
    m_main.add(m_capture_gate);
    m_main.add(m_capture_gain);
    m_main.add(m_scroll);
    m_main.add(m_playback_combo);
    m_main.add(m_capture_combo);
    add(m_main);
    show_all_children();

    // ~25 fps meter refresh
    Glib::signal_timeout().connect(sigc::mem_fun(*this, &VoiceWindow::UpdateVoiceMeters), 40);
}
// Populates the user list with one row per connected user.
void VoiceWindow::SetUsers(const std::unordered_set<Snowflake> &user_ids) {
    for (const auto user_id : user_ids) {
        m_user_list.add(*CreateRow(user_id));
    }
}
// Creates (Gtk-managed) a list entry for a user, registers it in m_rows for
// meter updates/removal, and forwards its mute/volume signals with the
// user's id attached.
Gtk::ListBoxRow *VoiceWindow::CreateRow(Snowflake id) {
    auto *row = Gtk::make_managed<VoiceWindowUserListEntry>(id);
    m_rows[id] = row;
    row->signal_mute_cs().connect([this, id](bool is_muted) {
        m_signal_mute_user_cs.emit(id, is_muted);
    });
    row->signal_volume().connect([this, id](double volume) {
        m_signal_user_volume_changed.emit(id, volume);
    });
    row->show_all();
    return row;
}
// Checkbox handlers: re-emit the new state for the owner to act on.
void VoiceWindow::OnMuteChanged() {
    m_signal_mute.emit(m_mute.get_active());
}

void VoiceWindow::OnDeafenChanged() {
    m_signal_deafen.emit(m_deafen.get_active());
}
// Timer callback (every 40 ms): refreshes the capture meter and each user's
// volume meter from the audio manager. Returns true to keep the Glib
// timeout installed.
bool VoiceWindow::UpdateVoiceMeters() {
    m_capture_volume.SetVolume(Abaddon::Get().GetAudio().GetCaptureVolumeLevel());
    // const-ref binding: `auto [id, row]` copied the whole pair every
    // iteration (clang-tidy performance-for-range-copy)
    for (const auto &[id, row] : m_rows) {
        // meters can only be driven for users whose SSRC is known
        const auto ssrc = Abaddon::Get().GetDiscordClient().GetSSRCOfUser(id);
        if (ssrc.has_value()) {
            row->SetVolumeMeter(Abaddon::Get().GetAudio().GetSSRCVolumeLevel(*ssrc));
        }
    }
    return true;
}
// Adds a row when a user joins this window's channel; no-op for other
// channels or if the user already has a row.
void VoiceWindow::OnUserConnect(Snowflake user_id, Snowflake to_channel_id) {
    if (m_channel_id != to_channel_id) return;
    if (m_rows.find(user_id) == m_rows.end()) {
        m_user_list.add(*CreateRow(user_id));
    }
}
// Removes and destroys the row for a user leaving this window's channel.
void VoiceWindow::OnUserDisconnect(Snowflake user_id, Snowflake from_channel_id) {
    if (m_channel_id != from_channel_id) return;
    const auto it = m_rows.find(user_id);
    if (it != m_rows.end()) {
        delete it->second;
        m_rows.erase(it);
    }
}
// Signal accessors. signal_gain was previously defined with the
// type_signal_gate return alias; the aliases are the same
// sigc::signal<void(double)> so it compiled, but use type_signal_gain to
// match the header's declaration.
VoiceWindow::type_signal_mute VoiceWindow::signal_mute() {
    return m_signal_mute;
}

VoiceWindow::type_signal_deafen VoiceWindow::signal_deafen() {
    return m_signal_deafen;
}

VoiceWindow::type_signal_gate VoiceWindow::signal_gate() {
    return m_signal_gate;
}

VoiceWindow::type_signal_gain VoiceWindow::signal_gain() {
    return m_signal_gain;
}

VoiceWindow::type_signal_mute_user_cs VoiceWindow::signal_mute_user_cs() {
    return m_signal_mute_user_cs;
}

VoiceWindow::type_signal_user_volume_changed VoiceWindow::signal_user_volume_changed() {
    return m_signal_user_volume_changed;
}
#endif

View File

@@ -0,0 +1,85 @@
#pragma once
#ifdef WITH_VOICE
// clang-format off
#include "components/volumemeter.hpp"
#include "discord/snowflake.hpp"
#include <gtkmm/box.h>
#include <gtkmm/checkbutton.h>
#include <gtkmm/combobox.h>
#include <gtkmm/listbox.h>
#include <gtkmm/menubar.h>
#include <gtkmm/progressbar.h>
#include <gtkmm/scale.h>
#include <gtkmm/scrolledwindow.h>
#include <gtkmm/window.h>
#include <unordered_set>
// clang-format on
class VoiceWindowUserListEntry;
// Window shown while connected to a voice channel: mute/deafen, capture
// meter with gate/gain sliders, device pickers, and a per-user list with
// individual mute/volume. All user interaction is surfaced via the signals
// below; the window itself performs no Discord/audio state changes except
// device selection.
class VoiceWindow : public Gtk::Window {
public:
    // NOTE(review): single-argument ctor is not explicit — implicit
    // Snowflake -> VoiceWindow conversion is possible.
    VoiceWindow(Snowflake channel_id);

private:
    void SetUsers(const std::unordered_set<Snowflake> &user_ids);
    // creates a managed row, tracks it in m_rows, wires its signals
    Gtk::ListBoxRow *CreateRow(Snowflake id);

    // voice state tracking from the discord client
    void OnUserConnect(Snowflake user_id, Snowflake to_channel_id);
    void OnUserDisconnect(Snowflake user_id, Snowflake from_channel_id);

    void OnMuteChanged();
    void OnDeafenChanged();

    // timer callback refreshing capture + per-user meters
    bool UpdateVoiceMeters();

    Gtk::Box m_main;
    Gtk::Box m_controls;

    Gtk::CheckButton m_mute;
    Gtk::CheckButton m_deafen;

    Gtk::ScrolledWindow m_scroll;
    Gtk::ListBox m_user_list;

    VolumeMeter m_capture_volume;
    Gtk::Scale m_capture_gate;
    Gtk::Scale m_capture_gain;

    Gtk::ComboBox m_playback_combo;
    Gtk::ComboBox m_capture_combo;

    Snowflake m_channel_id;

    // user id -> row widget (rows are Gtk-managed; see OnUserDisconnect)
    std::unordered_map<Snowflake, VoiceWindowUserListEntry *> m_rows;

    Gtk::MenuBar m_menu_bar;
    Gtk::MenuItem m_menu_view;
    Gtk::Menu m_menu_view_sub;
    Gtk::MenuItem m_menu_view_settings;

public:
    // "cs" = clientside (local-only mute)
    using type_signal_mute = sigc::signal<void(bool)>;
    using type_signal_deafen = sigc::signal<void(bool)>;
    using type_signal_gate = sigc::signal<void(double)>;
    using type_signal_gain = sigc::signal<void(double)>;
    using type_signal_mute_user_cs = sigc::signal<void(Snowflake, bool)>;
    using type_signal_user_volume_changed = sigc::signal<void(Snowflake, double)>;

    type_signal_mute signal_mute();
    type_signal_deafen signal_deafen();
    type_signal_gate signal_gate();
    type_signal_gain signal_gain();
    type_signal_mute_user_cs signal_mute_user_cs();
    type_signal_user_volume_changed signal_user_volume_changed();

private:
    type_signal_mute m_signal_mute;
    type_signal_deafen m_signal_deafen;
    type_signal_gate m_signal_gate;
    type_signal_gain m_signal_gain;
    type_signal_mute_user_cs m_signal_mute_user_cs;
    type_signal_user_volume_changed m_signal_user_volume_changed;
};
#endif

1
subprojects/miniaudio Submodule

Submodule subprojects/miniaudio added at 4dfe7c4c31