A mass of updates (Better client communication and speaker selection)

canary
WolverinDEV 2018-10-28 18:25:43 +01:00
parent a24ee83716
commit bc4d274705
26 changed files with 750 additions and 225 deletions

View File

@ -7,6 +7,7 @@
*/
$UI_BASE_PATH = "ui-files/";
$UI_RAW_BASE_PATH = "ui-files/raw/";
$CLIENT_BASE_PATH = "files/";
if(!isset($_SERVER['REQUEST_METHOD'])) {
@ -46,10 +47,7 @@
include $name;
return;
}
$file = fopen($name, "r") or die(json_encode([
"success" => false,
"error" => "missing file (" . $name . ")"
]));
$file = fopen($name, "r") or error_exit("missing file \"" . $name . "\".");
echo (fread($file, filesize($name)));
fclose($file);
@ -64,7 +62,7 @@
}
function handle_develop_web_request() {
global $UI_BASE_PATH;
global $UI_RAW_BASE_PATH;
if(isset($_GET) && isset($_GET["type"])) {
if($_GET["type"] === "files") {
@ -73,7 +71,7 @@
/* header("mode: develop"); */
echo ("type\thash\tpath\tname\n");
foreach (list_dir($UI_BASE_PATH) as $file) {
foreach (list_dir($UI_RAW_BASE_PATH) as $file) {
$type_idx = strrpos($file, ".");
$type = substr($file, $type_idx + 1);
if($type == "php") $type = "html";
@ -85,13 +83,13 @@
$name_idx = strrpos($name, ".");
$name = substr($name, 0, $name_idx);
echo $type . "\t" . sha1_file($UI_BASE_PATH . $file) . "\t" . $path . "\t" . $name . "." . $type . "\n";
echo $type . "\t" . sha1_file($UI_RAW_BASE_PATH . $file) . "\t" . $path . "\t" . $name . "." . $type . "\n";
}
die;
} else if($_GET["type"] === "file") {
header("Content-Type: text/plain");
$path = realpath($UI_BASE_PATH . $_GET["path"]);
$path = realpath($UI_RAW_BASE_PATH . $_GET["path"]);
$name = $_GET["name"];
if($path === False || strpos($path, realpath(".")) === False || strpos($name, "/") !== False) error_exit("Invalid file");
@ -132,10 +130,64 @@
header("Content-Transfer-Encoding: Binary");
header("Content-Length:".filesize($path . $platform->update));
header("Content-Disposition: attachment; filename=update.tar.gz");
header("info-version: 1");
readfile($path . $platform->update);
die();
}
error_exit("Missing platform, arch or file");
} else if ($_GET["type"] == "ui-info") {
global $UI_BASE_PATH;
$version_info = file_get_contents($UI_BASE_PATH . "info.json");
if($version_info === false) $version_info = array();
else $version_info = json_decode($version_info, true);
$info = array();
$info["success"] = true;
$info["versions"] = array();
foreach($version_info as $channel => $data) {
if(!isset($data["latest"])) continue;
$channel_info = [
"timestamp" => $data["latest"]["timestamp"],
"version" => $data["latest"]["version"],
"git-ref" => $data["latest"]["git-ref"],
"channel" => $channel
];
array_push($info["versions"], $channel_info);
}
die(json_encode($info));
} else if ($_GET["type"] == "ui-download") {
global $UI_BASE_PATH;
if(!isset($_GET["git-ref"]) || !isset($_GET["channel"]) || !isset($_GET["version"]))
error_exit("missing required parameters");
$version_info = file_get_contents($UI_BASE_PATH . "info.json");
if($version_info === false) $version_info = array();
else $version_info = json_decode($version_info, true);
if(!isset($version_info[$_GET["channel"]]))
error_exit("missing channel");
foreach ($version_info[$_GET["channel"]]["history"] as $entry) {
if($entry["version"] == $_GET["version"] && $entry["git-ref"] == $_GET["git-ref"]) {
header("Cache-Control: public"); // needed for internet explorer
header("Content-Type: application/binary");
header("Content-Transfer-Encoding: Binary");
header("Content-Disposition: attachment; filename=ui.tar.gz");
header("info-version: 1");
$read = readfile($entry["file"]);
header("Content-Length:" . $read);
if($read === false) error_exit("internal error: Failed to read file!");
die();
}
}
error_exit("missing version");
}
} else if($_POST["type"] == "deploy-build") {
global $CLIENT_BASE_PATH;
@ -158,7 +210,7 @@
if($_FILES["installer"]["error"] !== UPLOAD_ERR_OK) error_exit("Upload for installer failed!");
$json_version = json_decode($_POST["version"], true);
$version = $json_version["major"] . "." . $json_version["minor"] . "." . $json_version["patch"] . ($json_version["build"] > 0 ? $json_version["build"] : "");
$version = $json_version["major"] . "." . $json_version["minor"] . "." . $json_version["patch"] . ($json_version["build"] > 0 ? "-" . $json_version["build"] : "");
$path = $CLIENT_BASE_PATH . DIRECTORY_SEPARATOR . $_POST["channel"] . DIRECTORY_SEPARATOR . $version . DIRECTORY_SEPARATOR;
exec("mkdir -p " . $path);
//mkdir($path, 777, true);
@ -216,6 +268,55 @@
move_uploaded_file($_FILES["installer"]["tmp_name"],$path . $filename_install);
move_uploaded_file($_FILES["update"]["tmp_name"],$path . $filename_update);
die(json_encode([
"success" => true
]));
} else if($_POST["type"] == "deploy-ui-build") {
global $UI_BASE_PATH;
if(!isset($_POST["secret"]) || !isset($_POST["channel"]) || !isset($_POST["version"]) || !isset($_POST["git_ref"]))
error_exit("Missing required information!");
$path = $UI_BASE_PATH . DIRECTORY_SEPARATOR;
$channeled_path = $UI_BASE_PATH . DIRECTORY_SEPARATOR . $_POST["channel"];
$filename = "TeaClientUI-" . $_POST["version"] . "_" . $_POST["git_ref"] . ".tar.gz";
exec("mkdir -p " . $path);
exec("mkdir -p " . $channeled_path);
{
$require_secret = file_get_contents(".deploy_secret");
if($require_secret === false || strlen($require_secret) == 0) error_exit("Server missing secret!");
error_log($_POST["secret"]);
error_log(trim($require_secret));
if(!is_string($_POST["secret"])) error_exit("Invalid secret!");
if(strcmp(trim($require_secret), trim($_POST["secret"])) !== 0)
error_exit("Secret does not match!");
}
{
$info = file_get_contents($path . "info.json");
if($info === false) $info = array();
else $info = json_decode($info, true);
$channel_info = &$info[$_POST["channel"]];
if(!$channel_info) $channel_info = array();
$entry = [
"timestamp" => time(),
"file" => $channeled_path . DIRECTORY_SEPARATOR . $filename,
"version" => $_POST["version"],
"git-ref" => $_POST["git_ref"]
];
$channel_info["latest"] = $entry;
if(!$channel_info["history"]) $channel_info["history"] = array();
array_push($channel_info["history"], $entry);
file_put_contents($path . "info.json", json_encode($info));
}
move_uploaded_file($_FILES["file"]["tmp_name"],$channeled_path . DIRECTORY_SEPARATOR . $filename);
die(json_encode([
"success" => true
]));
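As a rough illustration of the new endpoints, here is a minimal TypeScript sketch of a caller for ui-info and ui-download. The query parameters and response fields mirror the handler above; the base URL, the function names and the error handling are assumptions for illustration, not part of this commit.

interface UiVersion {
    timestamp: number;
    version: string;
    "git-ref": string;
    channel: string;
}

/* Sketch only: query the ui-info endpoint added above. Field names follow api.php,
   the api_url parameter is an assumption. */
async function fetch_ui_versions(api_url: string): Promise<UiVersion[]> {
    const response = await fetch(api_url + "?type=ui-info");
    const data = await response.json();
    if (!data["success"]) throw data["error"] || "request failed";
    return data["versions"] as UiVersion[];
}

/* Sketch only: download one specific UI build via ui-download (returns the ui.tar.gz payload). */
async function download_ui_build(api_url: string, version: UiVersion): Promise<ArrayBuffer> {
    const query = "?type=ui-download"
        + "&channel=" + encodeURIComponent(version.channel)
        + "&version=" + encodeURIComponent(version.version)
        + "&git-ref=" + encodeURIComponent(version["git-ref"]);
    const response = await fetch(api_url + query);
    if (!response.ok) throw "download failed (" + response.status + ")";
    return response.arrayBuffer();
}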

View File

@ -1,4 +0,0 @@
/* native functions declaration */
import * as updater from "updater/updater";
declare function displayCriticalError(message: string);

client/app-definitions/native_api.d.ts vendored Normal file
View File

@ -0,0 +1,26 @@
declare namespace native {
function client_version(): Promise<string>;
}
declare namespace forum {
interface UserData {
session_id: string;
username: string;
application_data: string;
application_data_sign: string;
}
}
declare namespace audio.player {
interface Device {
device_id: string;
name: string;
}
function initialized(): boolean;
function context(): AudioContext;
function destination(): AudioNode;
function on_ready(cb: () => any): void;
function initialize(): boolean;
function available_devices(): Promise<Device[]>;
function set_device(device_id?: string): Promise<void>;
function current_device(device_id?: string): Device;
}
declare function getUserMediaFunction(): (settings: any, success: any, fail: any) => void;
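A small usage sketch of the audio.player declarations above, assuming the namespace has been provided by the host (the web implementation further down, or the native client): wait for the audio context to become ready, list the playback devices and switch to one. The device-picking logic is purely illustrative; the settings modal below drives this from a select element instead.

/* Sketch only: exercises the declared audio.player API. Which device gets picked
   here is illustrative and not taken from this commit. */
audio.player.on_ready(async () => {
    const devices = await audio.player.available_devices();
    const current = audio.player.current_device();
    for (const device of devices)
        console.log("Playback device %s (%s)%s", device.name, device.device_id,
            device.device_id === current.device_id ? " [active]" : "");

    /* switch playback to the first device that is not the active one, if any */
    const target = devices.find(device => device.device_id !== current.device_id);
    if (target)
        await audio.player.set_device(target.device_id);
});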

View File

@ -1,9 +0,0 @@
export interface UserData {
session_id: string;
username: string;
application_data: string;
application_data_sign: string;
}
export declare function open_login(enforce?: boolean): Promise<UserData>;
export declare function current_data(): UserData | undefined;
export declare function logout(): void;

View File

@ -1,9 +1,9 @@
import {UserData} from "../app-definitions/teaforo/manager";
/// <reference path="../app-definitions/native_api.d.ts" />
const ipc = require("electron").ipcRenderer;
let callback_listener: (() => any)[] = [];
ipc.on('teaforo-update', (event, data: UserData) => {
ipc.on('teaforo-update', (event, data: forum.UserData) => {
console.log("Got data update: %o", data);
forumIdentity = data ? new TeaForumIdentity(data.application_data, data.application_data_sign) : undefined;
try {

View File

@ -97,6 +97,15 @@
],
/* web specs */
[
"web-only" => true,
"type" => "js",
"search-pattern" => "/.*\.js$/",
"build-target" => "dev",
"path" => "js/",
"local-path" => "./web/js/"
],
[
"web-only" => true,
"type" => "css",
@ -250,7 +259,7 @@
$environment = "web/dev-environment";
} else if ($_SERVER["argv"][2] == "client") {
$flagset = 0b10;
$environment = "client-api/environment/ui-files";
$environment = "client-api/environment/ui-files/raw";
} else {
error_log("Invalid type!");
goto help;
@ -262,7 +271,7 @@
$environment = "web/rel-environment";
} else if ($_SERVER["argv"][2] == "client") {
$flagset = 0b10;
$environment = "client-api/environment/ui-files";
$environment = "client-api/environment/ui-files/raw";
} else {
error_log("Invalid type!");
goto help;
@ -318,7 +327,7 @@
if(!is_dir("versions/stable"))
exec($command = "mkdir -p versions/beta", $output, $state); if($state) goto handle_error;
exec($command = "ln -s ../api.php ./", $output, $state);
exec($command = "ln -s ../api.php ./", $output, $state); $state = 0; //Dont handle an error here!
if($state) goto handle_error;
}

View File

@ -50,6 +50,7 @@
<link rel="stylesheet" href="css/modals.css" type="text/css">
<link rel="stylesheet" href="css/modal-banlist.css" type="text/css">
<link rel="stylesheet" href="css/modal-bancreate.css" type="text/css">
<link rel="stylesheet" href="css/modal-settings.css" type="text/css">
<link rel="stylesheet" href="css/loader.css" type="text/css">
<link rel="stylesheet" href="css/music/info_plate.css" type="text/css">
<link rel="stylesheet" href="css/frame/SelectInfo.css" type="text/css">

View File

@ -1,6 +1,6 @@
{
"name": "client",
"version": "1.0.0",
"version": "1.0.1",
"description": "Welcome here! This repository is created with two reasons:\n 1. People can bring their own ideas and follow their implementation\n 2. People can see TeaSpeak Web client progress and avoid creating repetitive issues all the time.",
"main": "main.js",
"directories": {},

scripts/deploy_ui_files.sh Executable file
View File

@ -0,0 +1,99 @@
#!/usr/bin/env bash
TMP_FILE_NAME="TeaSpeakUI.tar.gz"
TMP_DIR_NAME="tmp"
BASEDIR=$(dirname "$0")
cd "$BASEDIR/../"
if [ "$#" -ne 2 ]; then
echo "Illegal number of parameters"
exit 1
fi
if [ ! -d client-api/environment/ui-files/ ]; then
echo "Missing UI Files"
exit 1
fi
if [ "${teaclient_deploy_secret}" == "" ]; then
echo "Missing deploy secret!"
exit 1
fi
if [ -e "${TMP_FILE_NAME}" ]; then
echo "Temp file already exists!"
echo "Deleting it!"
rm ${TMP_FILE_NAME}
if [ $? -ne 0 ]; then
echo "Failed to delete file"
exit 1
fi
fi
GIT_HASH=$(git rev-parse --verify --short HEAD)
APPLICATION_VERSION=$(cat package.json | python -c "import sys, json; print(json.load(sys.stdin)['version'])")
echo "Git hash ${GIT_HASH} on version ${APPLICATION_VERSION} on channel $2"
#Packaging the app
cd client-api/environment/ui-files/
if [ -e ${TMP_DIR_NAME} ]; then
rm -r ${TMP_DIR_NAME}
if [ $? -ne 0 ]; then
echo "Failed to remove temporary directory!"
exit 1
fi
fi
cp -rL raw ${TMP_DIR_NAME}
for file in $(find ${TMP_DIR_NAME} -name '*.php'); do
echo "Evaluating php file $file"
RESULT=$(php "${file}" 2> /dev/null)
CODE=$?
if [ ${CODE} -ne 0 ]; then
echo "Failed to evaluate php file $file!"
echo "Return code $CODE"
exit 1
fi
echo "${RESULT}" > "${file::-4}.html"
done
cd ${TMP_DIR_NAME}
tar chvzf ${TMP_FILE_NAME} *
if [ $? -ne 0 ]; then
echo "Failed to pack file"
exit 1
fi
mv ${TMP_FILE_NAME} ../../../../
cd ../
rm -r ${TMP_DIR_NAME}
cd ../../../
RESP=$(curl \
-k \
-X POST \
-F "type=deploy-ui-build" \
-F "channel=$2" \
-F "version=$APPLICATION_VERSION" \
-F "git_ref=$GIT_HASH" \
-F "secret=${teaclient_deploy_secret}" \
-F "file=@`pwd`/TeaSpeakUI.tar.gz" \
$1
)
echo "$RESP"
SUCCESS=$(echo ${RESP} | python -c "import sys, json; print(json.load(sys.stdin)['success'])")
if [ ! "${SUCCESS}" == "True" ]; then
ERROR=$(echo ${RESP} | python -c "import sys, json; print(json.load(sys.stdin)['error'])" 2>/dev/null)
if [ $? -ne 0 ]; then
ERROR=$(echo ${RESP} | python -c "import sys, json; print(json.load(sys.stdin)['msg'])" 2>/dev/null)
fi
echo "Failed to deploy build!"
echo "${ERROR}"
rm ${TMP_FILE_NAME}
exit 1
fi
echo "Build deployed!"

View File

@ -0,0 +1,100 @@
.modal .settings_audio {
display: flex;
flex-direction: column;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
margin: 3px;
> div {
margin: 2px;
}
a {
align-self: center;
}
.group_box {
display: flex;
flex-direction: column;
}
.settings-device {
display: flex;
flex-direction: column;
width: 100%;
a {
flex-grow: 0;
}
.settings-device-error {
display: none;
width: 100%;
margin-bottom: 3px;
padding: 2px;
align-self: center;
text-align: center;
vertical-align: center;
border: darkred 2px solid;
border-radius: 4px;
background: #be00006b;
}
.settings-device-select {
display: flex;
flex-direction: row;
justify-content: stretch;
> div {
flex-grow: 1;
flex-shrink: 1;
}
select {
flex-grow: 1;
margin-left: 5px;
width: 100%;
}
}
}
.settings-vad-container {
display: flex;
flex-direction: row;
margin-top: 5px;
> div {
width: 50%;
}
fieldset {
input {
vertical-align: text-bottom;
}
}
.settings-vad {
display: flex;
flex-direction: column;
}
.settings-vad-impl {
display: flex;
justify-content: space-around;
padding: 5px;
> div {
align-self: center;
}
.settings-vad-impl-entry {
display: none;
}
}
}
}

shared/external/defaults.d.ts vendored Normal file
View File

@ -0,0 +1,5 @@
interface Window {
displayCriticalError: typeof displayCriticalError;
}
declare function displayCriticalError(message: string);

View File

@ -29,12 +29,12 @@ abstract class BasicCodec implements Codec {
channelCount: number = 1;
samplesPerUnit: number = 960;
constructor(codecSampleRate: number) {
protected constructor(codecSampleRate: number) {
this.channelCount = 1;
this.samplesPerUnit = 960;
this._audioContext = new (window.webkitOfflineAudioContext || window.OfflineAudioContext)(AudioController.globalContext.destination.channelCount, 1024,AudioController.globalContext.sampleRate );
this._audioContext = new (window.webkitOfflineAudioContext || window.OfflineAudioContext)(audio.player.destination().channelCount, 1024, audio.player.context().sampleRate);
this._codecSampleRate = codecSampleRate;
this._decodeResampler = new AudioResampler(AudioController.globalContext.sampleRate);
this._decodeResampler = new AudioResampler(audio.player.context().sampleRate);
this._encodeResampler = new AudioResampler(codecSampleRate);
}

View File

@ -34,7 +34,6 @@ class CodecWrapper extends BasicCodec {
this._workerListener.push({
token: token,
resolve: data => {
console.log("Init result: %o", data);
this._initialized = data["success"] == true;
if(data["success"] == true)
resolve();
@ -144,7 +143,7 @@ class CodecWrapper extends BasicCodec {
private sendWorkerMessage(message: any, transfare?: any[]) {
message["timestamp"] = Date.now();
this._worker.postMessage(message, transfare);
this._worker.postMessage(message, transfare as any);
}
private onWorkerMessage(message: any) {

View File

@ -329,11 +329,17 @@ class HandshakeHandler {
}).then(() => this.handshake_finished()); //TODO handle error
}
private async handshake_finished(version?: string) {
if(window.require && !version) {
version = "?.?.?"; //FIXME findout version!
private handshake_finished(version?: string) {
if(native_client && window["native"] && native.client_version && !version) {
native.client_version()
.then( this.handshake_finished.bind(this))
.catch(error => {
console.error("Failed to get version:");
console.error(error);
this.handshake_finished("?.?.?");
});
return;
}
let data = {
//TODO variables!
client_nickname: this.name ? this.name : this.identity.name(),
@ -344,7 +350,7 @@ class HandshakeHandler {
client_browser_engine: navigator.product
};
if(window.require) {
if(version) {
data.client_version = "TeaClient ";
data.client_version += " " + version;

View File

@ -52,21 +52,21 @@ namespace app {
}
}
function loadScripts(paths: (string | string[])[]) : {path: string, promise: Promise<Boolean>}[] {
function load_scripts(paths: (string | string[])[]) : {path: string, promise: Promise<Boolean>}[] {
let result = [];
for(let path of paths)
result.push({path: path, promise: loadScript(path)});
result.push({path: path, promise: load_script(path)});
return result;
}
function loadScript(path: string | string[]) : Promise<Boolean> {
function load_script(path: string | string[]) : Promise<Boolean> {
if(Array.isArray(path)) { //Having fallbacks
return new Promise<Boolean>((resolve, reject) => {
loadScript(path[0]).then(resolve).catch(error => {
load_script(path[0]).then(resolve).catch(error => {
if(path.length >= 2) {
loadScript(path.slice(1)).then(resolve).catch(() => reject("could not load file " + formatPath(path)));
load_script(path.slice(1)).then(resolve).catch(() => reject("could not load file " + formatPath(path)));
} else {
reject("could not load file (event fallback's)");
reject("could not load file");
}
});
});
@ -99,7 +99,7 @@ function formatPath(path: string | string[]) {
function loadRelease() {
app.type = app.Type.RELEASE;
console.log("Load for release!");
awaitLoad(loadScripts([
awaitLoad(load_scripts([
//Load general API's
["wasm/TeaWeb-Identity.js"],
["js/client.min.js", "js/client.js"]
@ -110,11 +110,19 @@ function loadRelease() {
console.error("Could not load " + error.path);
});
}
/** Only possible for developers! **/
function loadDebug() {
app.type = app.Type.DEBUG;
console.log("Load for debug!");
let custom_scripts: string[] | string[][] = [];
if(!window.require) {
console.log("Adding browser audio player");
custom_scripts.push(["js/audio/AudioPlayer.js"]);
}
load_wait_scripts([
["wasm/TeaWeb-Identity.js"],
@ -171,7 +179,9 @@ function loadDebug() {
"js/FileManager.js",
"js/client.js",
"js/chat.js",
"js/Identity.js"
"js/Identity.js",
...custom_scripts
]).then(() => load_wait_scripts([
"js/codec/CodecWrapper.js"
])).then(() => load_wait_scripts([
@ -208,7 +218,7 @@ function awaitLoad(promises: {path: string, promise: Promise<Boolean>}[]) : Prom
}
function load_wait_scripts(paths: (string | string[])[]) : Promise<void> {
return awaitLoad(loadScripts(paths));
return awaitLoad(load_scripts(paths));
}
@ -293,7 +303,7 @@ function loadSide() {
["https://webrtc.github.io/adapter/adapter-latest.js"]
])).then(() => {
//Load the teaweb scripts
loadScript("js/proto.js").then(loadDebug).catch(loadRelease);
load_script("js/proto.js").then(loadDebug).catch(loadRelease);
//Load the teaweb templates
loadTemplates();
});
@ -345,7 +355,7 @@ if(typeof Module === "undefined")
app.initialize();
app.loadedListener.push(fadeoutLoader);
if(!window.displayCriticalError) {
if(!window.displayCriticalError) { /* Declare this function here only because its required before load */
window.displayCriticalError = function(message: string) {
if(typeof(createErrorModal) !== 'undefined') {
createErrorModal("A critical error occurred while loading the page!", message, {closeable: false}).open();

View File

@ -138,7 +138,7 @@ function main() {
$("#music-test").replaceWith(tag);
//Modals.spawnSettingsModal();
Modals.spawnSettingsModal();
/*
Modals.spawnYesNo("Are your sure?", "Do you really want to exit?", flag => {
console.log("Response: " + flag);
@ -154,13 +154,17 @@ function main() {
app.loadedListener.push(() => {
try {
main();
if(!AudioController.initialized()) {
if(!audio.player.initialized()) {
log.info(LogCategory.VOICE, "Initialize audio controller later!");
$(document).one('click', event => AudioController.initializeFromGesture());
if(!audio.player.initializeFromGesture) {
console.error("Missing audio.player.initializeFromGesture");
} else
$(document).one('click', event => audio.player.initializeFromGesture());
}
} catch (ex) {
console.error(ex.stack);
if(ex instanceof ReferenceError || ex instanceof TypeError)
ex = ex.message + ":<br>" + ex.stack;
ex = ex.name + ": " + ex.message;
displayCriticalError("Failed to invoke main function:<br>" + ex);
}
});

View File

@ -4,6 +4,8 @@
/// <reference path="../../voice/AudioController.ts" />
namespace Modals {
import set = Reflect.set;
export function spawnSettingsModal() {
let modal;
modal = createModal({
@ -35,87 +37,190 @@ namespace Modals {
function initialiseSettingListeners(modal: Modal, tag: JQuery) {
//Voice
initialiseVoiceListeners(modal, tag.find(".settings_voice"));
initialiseVoiceListeners(modal, tag.find(".settings_audio"));
}
function initialiseVoiceListeners(modal: Modal, tag: JQuery) {
let currentVAD = settings.global("vad_type");
let currentVAD = settings.global("vad_type", "ppt");
tag.find("input[type=radio][name=\"vad_type\"]").change(function (this: HTMLButtonElement) {
tag.find(".vad_settings .vad_type").text($(this).attr("display"));
tag.find(".vad_settings .vad_type_settings").hide();
tag.find(".vad_settings .vad_type_" + this.value).show();
settings.changeGlobal("vad_type", this.value);
globalClient.voiceConnection.voiceRecorder.reinitialiseVAD();
{ //Initialized voice activation detection
const vad_tag = tag.find(".settings-vad-container");
switch (this.value) {
case "ppt":
let keyCode: number = parseInt(settings.global("vad_ppt_key", JQuery.Key.T.toString()));
tag.find(".vat_ppt_key").text(String.fromCharCode(keyCode));
break;
case "vad":
let slider = tag.find(".vad_vad_slider");
let vad: VoiceActivityDetectorVAD = globalClient.voiceConnection.voiceRecorder.getVADHandler() as VoiceActivityDetectorVAD;
slider.val(vad.percentageThreshold);
slider.trigger("change");
globalClient.voiceConnection.voiceRecorder.update(true);
vad.percentage_listener = per => {
tag.find(".vad_vad_bar_filler")
.css("width", per + "%");
};
break;
vad_tag.find('input[type=radio]').on('change', event => {
const select = event.currentTarget as HTMLSelectElement;
{
vad_tag.find(".settings-vad-impl-entry").hide();
vad_tag.find(".setting-vad-" + select.value).show();
}
{
settings.changeGlobal("vad_type", select.value);
globalClient.voiceConnection.voiceRecorder.reinitialiseVAD();
}
switch (select.value) {
case "ppt":
let keyCode: number = parseInt(settings.global("vad_ppt_key", JQuery.Key.T.toString()));
vad_tag.find(".vat_ppt_key").text(String.fromCharCode(keyCode));
break;
case "vad":
let slider = vad_tag.find(".vad_vad_slider");
let vad: VoiceActivityDetectorVAD = globalClient.voiceConnection.voiceRecorder.getVADHandler() as VoiceActivityDetectorVAD;
slider.val(vad.percentageThreshold);
slider.trigger("change");
globalClient.voiceConnection.voiceRecorder.update(true);
vad.percentage_listener = per => {
vad_tag.find(".vad_vad_bar_filler")
.css("width", per + "%");
};
break;
}
});
{ //Initialized push to talk
vad_tag.find(".vat_ppt_key").click(function () {
let modal = createModal({
body: "",
header: () => {
let head = $.spawn("div");
head.text("Type the key you wish");
head.css("background-color", "blue");
return head;
},
footer: ""
});
$(document).one("keypress", function (e) {
console.log("Got key " + e.keyCode);
modal.close();
settings.changeGlobal("vad_ppt_key", e.keyCode.toString());
globalClient.voiceConnection.voiceRecorder.reinitialiseVAD();
vad_tag.find(".vat_ppt_key").text(String.fromCharCode(e.keyCode));
});
modal.open();
});
}
});
if(!currentVAD)
currentVAD = "ppt";
let elm = tag.find("input[type=radio][name=\"vad_type\"][value=\"" + currentVAD + "\"]");
elm.attr("checked", "true");
{ //Initialized voice activation detection
let slider = vad_tag.find(".vad_vad_slider");
slider.on("input change", () => {
settings.changeGlobal("vad_threshold", slider.val().toString());
let vad = globalClient.voiceConnection.voiceRecorder.getVADHandler();
if(vad instanceof VoiceActivityDetectorVAD)
vad.percentageThreshold = slider.val() as number;
vad_tag.find(".vad_vad_slider_value").text(slider.val().toString());
});
modal.properties.registerCloseListener(() => {
let vad = globalClient.voiceConnection.voiceRecorder.getVADHandler();
if(vad instanceof VoiceActivityDetectorVAD)
vad.percentage_listener = undefined;
});
}
tag.find(".vat_ppt_key").click(function () {
let modal = createModal({
body: "",
header: () => {
let head = $.spawn("div");
head.text("Type the key you wish");
head.css("background-color", "blue");
return head;
},
footer: ""
let target_tag = vad_tag.find('input[type=radio][name="vad_type"][value="' + currentVAD + '"]');
if(target_tag.length == 0) {
console.warn("Failed to find tag for " + currentVAD + ". Using latest tag!");
target_tag = vad_tag.find('input[type=radio][name="vad_type"]').last();
}
target_tag.prop("checked", true);
setTimeout(() => target_tag.trigger('change'), 0);
}
{ //Initialize microphone
const setting_tag = tag.find(".settings-microphone");
const tag_select = setting_tag.find(".audio-select-microphone");
console.log(setting_tag);
console.log(setting_tag.find(".settings-device-error"));
console.log(setting_tag.find(".settings-device-error").html());
{ //List devices
$.spawn("option")
.attr("device-id", "")
.attr("device-group", "")
.text("No device")
.appendTo(tag_select);
navigator.mediaDevices.enumerateDevices().then(devices => {
const active_device = globalClient.voiceConnection.voiceRecorder.device_id();
for(const device of devices) {
console.debug("Got device %s (%s): %s", device.deviceId, device.kind, device.label);
if(device.kind !== 'audioinput') continue;
$.spawn("option")
.attr("device-id", device.deviceId)
.attr("device-group", device.groupId)
.text(device.label)
.prop("selected", device.deviceId == active_device)
.appendTo(tag_select);
}
}).catch(error => {
console.error("Could not enumerate over devices!");
console.error(error);
setting_tag.find(".settings-device-error")
.text("Could not get device list!")
.css("display", "block");
});
if(tag_select.find("option:selected").length == 0)
tag_select.find("option").prop("selected", true);
}
{
tag_select.on('change', event => {
let selected_tag = tag_select.find("option:selected");
let deviceId = selected_tag.attr("device-id");
let groupId = selected_tag.attr("device-group");
console.log("Selected microphone device: id: %o group: %o", deviceId, groupId);
globalClient.voiceConnection.voiceRecorder.change_device(deviceId, groupId);
});
}
}
{ //Initialize speaker
const setting_tag = tag.find(".settings-speaker");
const tag_select = setting_tag.find(".audio-select-speaker");
const active_device = audio.player.current_device();
audio.player.available_devices().then(devices => {
for(const device of devices) {
$.spawn("option")
.attr("device-id", device.device_id)
.text(device.name)
.prop("selected", device.device_id == active_device.device_id)
.appendTo(tag_select);
}
}).catch(error => {
console.error("Could not enumerate over devices!");
console.error(error);
setting_tag.find(".settings-device-error")
.text("Could not get device list!")
.css("display", "block");
});
$(document).one("keypress", function (e) {
console.log("Got key " + e.keyCode);
modal.close();
settings.changeGlobal("vad_ppt_key", e.keyCode.toString());
globalClient.voiceConnection.voiceRecorder.reinitialiseVAD();
tag.find(".vat_ppt_key").text(String.fromCharCode(e.keyCode));
});
modal.open();
});
//VAD VAD
let slider = tag.find(".vad_vad_slider");
slider.on("input change", () => {
settings.changeGlobal("vad_threshold", slider.val().toString());
let vad = globalClient.voiceConnection.voiceRecorder.getVADHandler();
if(vad instanceof VoiceActivityDetectorVAD)
vad.percentageThreshold = slider.val() as number;
tag.find(".vad_vad_slider_value").text(slider.val().toString());
});
modal.properties.registerCloseListener(() => {
let vad = globalClient.voiceConnection.voiceRecorder.getVADHandler();
if(vad instanceof VoiceActivityDetectorVAD)
vad.percentage_listener = undefined;
if(tag_select.find("option:selected").length == 0)
tag_select.find("option").prop("selected", true);
});
//Trigger radio button select for VAD setting setup
elm.trigger("change");
{
const error_tag = setting_tag.find(".settings-device-error");
tag_select.on('change', event => {
let selected_tag = tag_select.find("option:selected");
let deviceId = selected_tag.attr("device-id");
console.log("Selected speaker device: id: %o", deviceId);
audio.player.set_device(deviceId).then(() => error_tag.css("display", "none")).catch(error => {
console.error(error);
error_tag
.text("Failed to change device!")
.css("display", "block");
});
});
}
}
//Initialise microphones
/*
let select_microphone = tag.find(".voice_microphone_select");
let select_error = tag.find(".voice_microphone_select_error");
@ -148,6 +253,7 @@ namespace Modals {
console.log("Selected microphone device: id: %o group: %o", deviceId, groupId);
globalClient.voiceConnection.voiceRecorder.change_device(deviceId, groupId);
});
*/
//Initialise speakers
}

View File

@ -1,3 +1,5 @@
/// <reference path="../../exports/audio/AudioPlayer.d.ts" />
enum PlayerState {
PREBUFFERING,
PLAYING,
@ -6,73 +8,15 @@ enum PlayerState {
STOPPED
}
interface Navigator {
mozGetUserMedia(constraints: MediaStreamConstraints, successCallback: NavigatorUserMediaSuccessCallback, errorCallback: NavigatorUserMediaErrorCallback): void;
webkitGetUserMedia(constraints: MediaStreamConstraints, successCallback: NavigatorUserMediaSuccessCallback, errorCallback: NavigatorUserMediaErrorCallback): void;
}
class AudioController {
private static getUserMediaFunction() {
if(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)
return (settings, success, fail) => { navigator.mediaDevices.getUserMedia(settings).then(success).catch(fail); };
return navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
}
public static userMedia = AudioController.getUserMediaFunction();
private static _globalContext: AudioContext;
private static _globalContextPromise: Promise<void>;
private static _audioInstances: AudioController[] = [];
private static _initialized_listener: (() => any)[] = [];
private static _globalReplayScheduler: NodeJS.Timer;
private static _timeIndex: number = 0;
private static _audioDestinationStream: MediaStream;
static get globalContext() : AudioContext {
if(this._globalContext && this._globalContext.state != "suspended") return this._globalContext;
if(!this._globalContext)
this._globalContext = new (window.webkitAudioContext || window.AudioContext)();
if(this._globalContext.state == "suspended") {
if(!this._globalContextPromise) {
(this._globalContextPromise = this._globalContext.resume()).then(() => {
this.fire_initialized();
}).catch(error => {
displayCriticalError("Failed to initialize global audio context! (" + error + ")");
});
}
this._globalContext.resume(); //We already have our listener
return undefined;
}
if(this._globalContext.state == "running") {
this.fire_initialized();
return this._globalContext;
}
return undefined;
}
private static fire_initialized() {
while(this._initialized_listener.length > 0)
this._initialized_listener.pop_front()();
}
static initialized() : boolean {
return (this.globalContext || {state: ""}).state === "running";
}
static on_initialized(callback: () => any) {
if(this.globalContext)
callback();
else
this._initialized_listener.push(callback);
}
static initializeFromGesture() {
AudioController.globalContext;
}
static initializeAudioController() {
AudioController.globalContext; //Just test here
if(!audio.player.initialize())
console.warn("Failed to initialize audio controller!");
//this._globalReplayScheduler = setInterval(() => { AudioController.invokeNextReplay(); }, 20); //Fix me
}
@ -142,7 +86,7 @@ class AudioController {
onSilence: () => void;
constructor() {
AudioController.on_initialized(() => this.speakerContext = AudioController.globalContext);
audio.player.on_ready(() => this.speakerContext = audio.player.context());
this.onSpeaking = function () { };
this.onSilence = function () { };
@ -204,6 +148,10 @@ class AudioController {
private playQueue() {
let buffer: AudioBuffer;
while(buffer = this.audioCache.pop_front()) {
if(this.playingAudioCache.length >= this._latencyBufferLength * 1.5 + 3) {
console.log("Dropping buffer because playing queue grows to much");
continue; /* drop the data (we're behind) */
}
if(this._timeIndex < this.speakerContext.currentTime) this._timeIndex = this.speakerContext.currentTime;
let player = this.speakerContext.createBufferSource();
@ -212,7 +160,7 @@ class AudioController {
player.onended = () => this.removeNode(player);
this.playingAudioCache.push(player);
player.connect(AudioController.globalContext.destination);
player.connect(audio.player.destination());
player.start(this._timeIndex);
this._timeIndex += buffer.duration;
}
@ -274,4 +222,10 @@ class AudioController {
this._codecCache.push(new CodecClientCache());
return this._codecCache[codec];
}
}
function getUserMediaFunction() {
if(navigator.mediaDevices && navigator.mediaDevices.getUserMedia)
return (settings, success, fail) => { navigator.mediaDevices.getUserMedia(settings).then(success).catch(fail); };
return navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
}
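A short sketch of calling the now free-standing getUserMediaFunction() helper with the callback signature it returns; the constraint values below are placeholders, VoiceRecorder further down passes its own device and group ids.

/* Sketch only: request a microphone stream through the relocated helper.
   "default"/"default" are placeholder ids, not values taken from this commit. */
const request_media = getUserMediaFunction();
if (!request_media) {
    console.warn("No getUserMedia implementation available in this browser");
} else {
    request_media(
        { audio: { deviceId: "default", groupId: "default" } },
        (stream: MediaStream) => console.log("Got microphone stream: %o", stream),
        (error: any) => console.error("Failed to open microphone: %o", error)
    );
}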

View File

@ -2,7 +2,7 @@ class AudioResampler {
targetSampleRate: number;
private _use_promise: boolean;
constructor(targetSampleRate: number = 44100){
constructor(targetSampleRate: number){
this.targetSampleRate = targetSampleRate;
if(this.targetSampleRate < 3000 || this.targetSampleRate > 384000) throw "The target sample rate is outside the range [3000, 384000].";
}
@ -10,11 +10,11 @@ class AudioResampler {
resample(buffer: AudioBuffer) : Promise<AudioBuffer> {
if(!buffer) {
console.warn("Received empty buffer as input! Returning empty output!");
return new Promise<AudioBuffer>(resolve => resolve(undefined));
return Promise.resolve(buffer);
}
//console.log("Encode from %i to %i", buffer.sampleRate, this.targetSampleRate);
if(buffer.sampleRate == this.targetSampleRate)
return new Promise<AudioBuffer>(resolve => resolve(buffer));
return Promise.resolve(buffer);
let context;
context = new (window.webkitOfflineAudioContext || window.OfflineAudioContext)(buffer.numberOfChannels, Math.ceil(buffer.length * this.targetSampleRate / buffer.sampleRate), this.targetSampleRate);

View File

@ -146,7 +146,7 @@ class VoiceConnection {
this.voiceRecorder.on_start = this.handleVoiceStarted.bind(this);
this.voiceRecorder.reinitialiseVAD();
AudioController.on_initialized(() => {
audio.player.on_ready(() => {
log.info(LogCategory.VOICE, "Initializing voice handler after AudioController has been initialized!");
this.codec_pool[4].initialize(2);
this.codec_pool[5].initialize(2);
@ -193,7 +193,7 @@ class VoiceConnection {
stream.disconnect();
if(!this.local_audio_stream)
this.local_audio_stream = AudioController.globalContext.createMediaStreamDestination();
this.local_audio_stream = audio.player.context().createMediaStreamDestination();
stream.connect(this.local_audio_stream);
}

View File

@ -65,8 +65,8 @@ class VoiceRecorder {
this._deviceId = settings.global("microphone_device_id", "default");
this._deviceGroup = settings.global("microphone_device_group", "default");
AudioController.on_initialized(() => {
this.audioContext = AudioController.globalContext;
audio.player.on_ready(() => {
this.audioContext = audio.player.context();
this.processor = this.audioContext.createScriptProcessor(VoiceRecorder.BUFFER_SIZE, VoiceRecorder.CHANNELS, VoiceRecorder.CHANNELS);
const empty_buffer = this.audioContext.createBuffer(VoiceRecorder.CHANNELS, VoiceRecorder.BUFFER_SIZE, 48000);
@ -102,7 +102,7 @@ class VoiceRecorder {
}
available() : boolean {
return !!AudioController.userMedia;
return !!getUserMediaFunction();
}
recording() : boolean {
@ -184,7 +184,8 @@ class VoiceRecorder {
console.log("[VoiceRecorder] Start recording! (Device: %o | Group: %o)", device, groupId);
this._recording = true;
AudioController.userMedia({
//FIXME Implement that here for the client as well
getUserMediaFunction()({
audio: {
deviceId: device,
groupId: groupId
@ -257,7 +258,7 @@ class VoiceActivityDetectorVAD extends VoiceActivityDetector {
percentage_listener: (per: number) => void = ($) => {};
initialise() {
this.analyzer = AudioController.globalContext.createAnalyser();
this.analyzer = audio.player.context().createAnalyser();
this.analyzer.smoothingTimeConstant = 1; //TODO test
this.buffer = new Uint8Array(this.analyzer.fftSize);
return super.initialise();

View File

@ -95,7 +95,6 @@ class OpusWorker implements CodecWorker {
if (result < 0) {
return "invalid result on decode (" + result + ")";
}
console.log("Result: %o | Channel count %o", result, this.channelCount);
return Module.HEAPF32.slice(this.decodeBuffer.byteOffset / 4, (this.decodeBuffer.byteOffset / 4) + (result * this.channelCount));
}

View File

@ -564,47 +564,65 @@
<x-content>Not set up yet!</x-content>
</x-entry>
<x-entry>
<x-tag>Voice</x-tag>
<x-tag>Audio</x-tag>
<x-content>
<div class="settings_voice align_column">
<div style="justify-content: right">
<a style="margin-left: 20px">Microphone:</a>
<select class="voice_microphone_select"></select>
<hr>
</div>
<div style="display: flex; flex-direction: row; width: 100%; justify-content: space-evenly"><div style="vertical-align: center; margin: 20px; min-width: 175px">
<a>Voice Activity Detection</a>
<div>
<fieldset class="GroupBox">
<div><input type="radio" name="vad_type" value="pt" display="Always active"> Always active</div>
<div><input type="radio" name="vad_type" value="vad" display="Voice activity detection"> Voice activity detection</div>
<div><input type="radio" name="vad_type" value="ppt" display="Push to talk"> Push to talk</div>
</fieldset>
</div>
</div>
<div style="border-left:1px solid #000;height: auto;"></div>
<div style="flex-direction: column; align-content: stretch; vertical-align: center; margin: 20px;">
<div class="vad_settings">
<div style="font-size: 14px; text-align: center"><a class="vad_type">Type[Unknown]</a> settings</div>
<div class="vad_type_settings vad_type_pt">There are no setting entries for an <b>always</b> online voice detection.</div>
<div class="vad_type_settings vad_type_ppt">
<a>Push to talk key:</a>
<button class="vat_ppt_key">Uninitialised</button>
<div class="settings_audio">
<div class="group_box">
<div class="header">Microphone</div>
<div class="content settings-microphone">
<div class="settings-device settings-device-microphone">
<div class="settings-device-error"></div>
<div class="settings-device-select">
<a>Device:</a>
<div><select class="audio-select-microphone"></select></div>
</div>
<div class="vad_type_settings vad_type_vad">
<div>Voice activity threshold (<a class="vad_vad_slider_value">20</a>%)</div>
<div class="vad_vad_threshold_selector">
<div class="vad_vad_bar">
<div style="width: 100%; height: 100%; position: absolute">
<div class="vad_vad_bar_filler"></div>
</div>
<div class="settings-vad-container">
<div class="group_box">
<div class="header">Voice Activity Detection</div>
<div class="content">
<fieldset>
<div><input type="radio" name="vad_type" value="pt" display="Always active"> Always active</div>
<div><input type="radio" name="vad_type" value="vad" display="Voice activity detection"> Voice activity detection</div>
<div><input type="radio" name="vad_type" value="ppt" display="Push to talk"> Push to talk</div>
</fieldset>
</div>
</div>
<div class="settings-vad-impl">
<div class="settings-vad-impl-entry setting-vad-pt">
There are no setting entries for an <b>always active</b> voice detection.
</div>
<div class="settings-vad-impl-entry setting-vad-ppt">
<a>Push to talk key:</a>
<button class="vat_ppt_key">Uninitialised</button>
</div>
<div class="settings-vad-impl-entry setting-vad-vad">
<div>Voice activity threshold (<a class="vad_vad_slider_value">20</a>%)</div>
<div class="vad_vad_threshold_selector">
<div class="vad_vad_bar">
<div style="width: 100%; height: 100%; position: absolute">
<div class="vad_vad_bar_filler"></div>
</div>
<input type="range" min="0" max="100" value="50" class="vad_vad_slider">
</div>
<input type="range" min="0" max="100" value="50" class="vad_vad_slider">
</div>
</div>
</div>
</div>
</div>
</div>
<div class="group_box">
<div class="header">Speaker</div>
<div class="content settings-speaker">
<div class="settings-device settings-device-speaker">
<div class="settings-device-error"></div>
<div class="settings-device-select">
<a>Device:</a>
<div><select class="audio-select-speaker"></select></div>
</div>
</div>
</div>
</div>
</div>
</x-content>
</x-entry>

View File

@ -1,7 +1,7 @@
{
"compilerOptions": {
"target": "es6",
"module": "none",
"module": "commonjs",
"sourceMap": true
},
"exclude": [

View File

@ -0,0 +1,84 @@
interface Navigator {
mozGetUserMedia(constraints: MediaStreamConstraints, successCallback: NavigatorUserMediaSuccessCallback, errorCallback: NavigatorUserMediaErrorCallback): void;
webkitGetUserMedia(constraints: MediaStreamConstraints, successCallback: NavigatorUserMediaSuccessCallback, errorCallback: NavigatorUserMediaErrorCallback): void;
}
namespace audio.player {
let _globalContext: AudioContext;
let _globalContextPromise: Promise<void>;
let _initialized_listener: (() => any)[] = [];
export interface Device {
device_id: string;
name: string;
}
export function initialize() : boolean {
context();
return true;
}
export function initialized() : boolean {
return !!_globalContext && _globalContext.state === 'running';
}
function fire_initialized() {
console.log("Fire initialized: %o", _initialized_listener);
while(_initialized_listener.length > 0)
_initialized_listener.pop_front()();
}
export function context() : AudioContext {
if(_globalContext && _globalContext.state != "suspended") return _globalContext;
if(!_globalContext)
_globalContext = new (window.webkitAudioContext || window.AudioContext)();
if(_globalContext.state == "suspended") {
if(!_globalContextPromise) {
(_globalContextPromise = _globalContext.resume()).then(() => {
fire_initialized();
}).catch(error => {
displayCriticalError("Failed to initialize global audio context! (" + error + ")");
});
}
_globalContext.resume(); //We already have our listener
return undefined;
}
if(_globalContext.state == "running") {
fire_initialized();
return _globalContext;
}
return undefined;
}
export function destination() : AudioNode {
return context().destination;
}
export function on_ready(cb: () => any) {
if(initialized())
cb();
else
_initialized_listener.push(cb);
}
export const WEB_DEVICE: Device = {device_id: "", name: "default playback"};
export function available_devices() : Promise<Device[]> {
return Promise.resolve([WEB_DEVICE])
}
export function set_device(device_id: string) : Promise<void> {
return Promise.resolve();
}
export function current_device() : Device {
return WEB_DEVICE;
}
export function initializeFromGesture() {
context();
}
}

web/tsdeclaration.json Normal file
View File

@ -0,0 +1,16 @@
{
"compilerOptions": {
"listFiles": true,
"module": "system",
"target": "es6",
"declaration": true,
"emitDeclarationOnly": true,
"allowJs": false,
"checkJs": false,
"outFile": "declarations/web_api"
},
"include": [
"js/**/*.ts"
]
}