Improved voice recorder API (Required for the client native audio API)

commit 3b10c01541, parent 620b9e6203
12 changed files with 988 additions and 579 deletions
@ -555,7 +555,7 @@ class ConnectionHandler {
this.client_status.input_hardware = true; /* IDK if we have input hardware or not, but it doesn't matter at all so */
} else {
const audio_source = vconnection.voice_recorder();
- const recording_supported = typeof(audio_source) !== "undefined" && audio_source.is_recording_supported() && (!targetChannel || vconnection.encoding_supported(targetChannel.properties.channel_codec));
+ const recording_supported = typeof(audio_source) !== "undefined" && audio_source.record_supported && (!targetChannel || vconnection.encoding_supported(targetChannel.properties.channel_codec));
const playback_supported = !targetChannel || vconnection.decoding_supported(targetChannel.properties.channel_codec);

property_update["client_input_hardware"] = recording_supported;
@ -609,8 +609,13 @@ class ConnectionHandler {
this.client_status.sound_record_supported = support_record;
this.client_status.sound_playback_supported = support_playback;

- if(vconnection && vconnection.voice_recorder() && vconnection.voice_recorder().is_recording_supported())
- vconnection.voice_recorder().set_recording(!this.client_status.input_muted && !this.client_status.output_muted);
+ if(vconnection && vconnection.voice_recorder() && vconnection.voice_recorder().record_supported) {
+ const active = !this.client_status.input_muted && !this.client_status.output_muted;
+ if(active)
+ vconnection.voice_recorder().input.start();
+ else
+ vconnection.voice_recorder().input.stop();
+ }

if(control_bar.current_connection_handler() === this)
control_bar.apply_server_voice_state();
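In short, this commit swaps the old VoiceRecorder surface for the new RecorderProfile/AbstractInput API. The following mapping of the calls touched in the hunks above and below is an editor's sketch for orientation only, not part of the commit:

// Old VoiceRecorder API                          New RecorderProfile API
// recorder.is_recording_supported()          ->  recorder.record_supported
// recorder.set_recording(active)             ->  recorder.input.start() / recorder.input.stop()
// recorder.change_device(id, group)          ->  recorder.set_device(device)  /* device from audio.recorder.devices() */
// recorder.reinitialiseVAD() / getVADHandler() -> recorder.set_vad_type(...) plus the audio.recorder.filter API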
@ -653,12 +658,10 @@ class ConnectionHandler {
this.invoke_resized_on_activate = false;
}

- acquire_recorder(voice_recoder: VoiceRecorder, update_control_bar: boolean) {
+ acquire_recorder(voice_recoder: RecorderProfile, update_control_bar: boolean) {
const vconnection = this.serverConnection.voice_connection();
if(vconnection)
vconnection.acquire_voice_recorder(voice_recoder);
- if(voice_recoder)
- voice_recoder.clean_recording_supported();
this.update_voice_status(undefined);
}
}
@ -81,8 +81,8 @@ namespace connection {
abstract available_clients() : VoiceClient[];
abstract unregister_client(client: VoiceClient) : Promise<void>;

- abstract voice_recorder() : VoiceRecorder;
- abstract acquire_voice_recorder(recorder: VoiceRecorder | undefined);
+ abstract voice_recorder() : RecorderProfile;
+ abstract acquire_voice_recorder(recorder: RecorderProfile | undefined);
}
}

@ -592,10 +592,13 @@ const loader_javascript = {

//Load audio
"js/voice/VoiceHandler.js",
- "js/voice/VoiceRecorder.js",
"js/voice/AudioResampler.js",
"js/voice/VoiceClient.js",
+
+ "js/voice/RecorderBase.js",
+ "js/voice/JavascriptRecorder.js",
+ "js/voice/RecorderProfile.js",

//Load codec
"js/codec/Codec.js",
"js/codec/BasicCodec.js",

@ -13,7 +13,7 @@ let settings: Settings;
const js_render = window.jsrender || $;
const native_client = window.require !== undefined;

- function getUserMediaFunction() {
+ function getUserMediaFunction() : (constraints: MediaStreamConstraints, success: (stream: MediaStream) => any, fail: (error: any) => any) => any {
if((navigator as any).mediaDevices && (navigator as any).mediaDevices.getUserMedia)
return (settings, success, fail) => { (navigator as any).mediaDevices.getUserMedia(settings).then(success).catch(fail); };
return (navigator as any).getUserMedia || (navigator as any).webkitGetUserMedia || (navigator as any).mozGetUserMedia;
@ -129,6 +129,10 @@ async function initialize_app() {
else
console.warn("Client does not support audio.player.set_master_volume()... Maybe the client is too old?");

+ await audio.recorder.refresh_devices();
+ default_recorder = new RecorderProfile("default");
+ await default_recorder.initialize();
+
sound.initialize().then(() => {
console.log(tr("Sounds initialized"));
});
@ -235,14 +239,12 @@ function Base64DecodeUrl(str: string, pad?: boolean){

function main() {
//http://localhost:63343/Web-Client/index.php?_ijt=omcpmt8b9hnjlfguh8ajgrgolr&default_connect_url=true&default_connect_type=teamspeak&default_connect_url=localhost%3A9987&disableUnloadDialog=1&loader_ignore_age=1
- voice_recoder = new VoiceRecorder();
- voice_recoder.reinitialiseVAD();

server_connections = new ServerConnectionManager($("#connection-handlers"));
control_bar.initialise(); /* before connection handler to allow property apply */

const initial_handler = server_connections.spawn_server_connection_handler();
- initial_handler.acquire_recorder(voice_recoder, false);
+ initial_handler.acquire_recorder(default_recorder, false);
control_bar.set_connection_handler(initial_handler);
/** Setup the XF forum identity **/
profiles.identities.setup_forum();

@ -209,6 +209,12 @@ class Settings extends StaticSettings {
}
};

+ static readonly FN_PROFILE_RECORD: (name: string) => SettingsKey<any> = name => {
+ return {
+ key: 'profile_record' + name
+ }
+ };
+
static readonly KEYS = (() => {
const result = [];

@ -328,7 +328,7 @@ class ControlBar {
if(this.connection_handler) {
this.connection_handler.client_status.input_muted = this._button_microphone !== "enabled";
if(!this.connection_handler.client_status.input_hardware)
- this.connection_handler.acquire_recorder(voice_recoder, true); /* acquire_recorder already updates the voice status */
+ this.connection_handler.acquire_recorder(default_recorder, true); /* acquire_recorder already updates the voice status */
else
this.connection_handler.update_voice_status(undefined);

@ -259,8 +259,15 @@ namespace Modals {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
let vad_mapping = {
|
||||
"threshold": "vad",
|
||||
"push_to_talk": "ppt",
|
||||
"active": "pt"
|
||||
};
|
||||
|
||||
function initialiseVoiceListeners(modal: Modal, tag: JQuery) {
|
||||
let currentVAD = settings.global("vad_type", "vad");
|
||||
let currentVAD = vad_mapping[default_recorder.get_vad_type()] || "vad";
|
||||
|
||||
const display_error = (message: string) => {
|
||||
const alert = tag.find(".settings-device-error").first();
|
||||
|
@ -282,30 +289,28 @@ namespace Modals {
|
|||
vad_tag.find(".settings-vad-impl-entry").hide();
|
||||
vad_tag.find(".setting-vad-" + select.value).show();
|
||||
}
|
||||
{
|
||||
settings.changeGlobal("vad_type", select.value);
|
||||
voice_recoder.reinitialiseVAD();
|
||||
}
|
||||
|
||||
switch (select.value) {
|
||||
case "ppt":
|
||||
let ppt_settings: PPTKeySettings = settings.global('vad_ppt_settings', undefined);
|
||||
ppt_settings = ppt_settings ? JSON.parse(ppt_settings as any as string) : {};
|
||||
default_recorder.set_vad_type("push_to_talk");
|
||||
|
||||
vad_tag.find(".vat_ppt_key").text(ppt.key_description(ppt_settings));
|
||||
vad_tag.find(".ppt-delay input").val(ppt_settings.delay === undefined ? 300 : ppt_settings.delay);
|
||||
vad_tag.find(".vat_ppt_key").text(ppt.key_description(default_recorder.get_vad_ppt_key()));
|
||||
vad_tag.find(".ppt-delay input").val(default_recorder.get_vad_ppt_delay());
|
||||
|
||||
break;
|
||||
case "vad":
|
||||
default_recorder.set_vad_type("threshold");
|
||||
|
||||
let slider = vad_tag.find(".vad_vad_slider");
|
||||
let vad: VoiceActivityDetectorVAD = voice_recoder.getVADHandler() as VoiceActivityDetectorVAD;
|
||||
slider.val(vad.percentageThreshold);
|
||||
slider.val(default_recorder.get_vad_threshold());
|
||||
slider.trigger("change");
|
||||
voice_recoder.set_recording(true);
|
||||
vad.percentage_listener = per => {
|
||||
vad_tag.find(".vad_vad_bar_filler")
|
||||
.css("width", (100 - per) + "%");
|
||||
};
|
||||
|
||||
const filter = default_recorder.input.get_filter(audio.recorder.filter.Type.THRESHOLD) as audio.recorder.filter.ThresholdFilter;
|
||||
filter.callback_level = level => vad_tag.find(".vad_vad_bar_filler").css("width", (100 - level) + "%");
|
||||
break;
|
||||
|
||||
case "pt":
|
||||
default_recorder.set_vad_type("active");
|
||||
break;
|
||||
}
|
||||
});
|
||||
|
@ -328,12 +333,7 @@ namespace Modals {
|
|||
settings.changeGlobal('vad_ppt_key', undefined); //TODO remove that because its legacy shit
|
||||
console.log(tr("Got key %o"), event);
|
||||
|
||||
let ppt_settings: PPTKeySettings = settings.global('vad_ppt_settings', undefined);
|
||||
ppt_settings = ppt_settings ? JSON.parse(ppt_settings as any as string) : {};
|
||||
Object.assign(ppt_settings, event);
|
||||
settings.changeGlobal('vad_ppt_settings', ppt_settings);
|
||||
|
||||
voice_recoder.reinitialiseVAD();
|
||||
default_recorder.set_vad_ppt_key(event);
|
||||
|
||||
ppt.unregister_key_listener(listener);
|
||||
modal.close();
|
||||
|
@ -345,12 +345,7 @@ namespace Modals {
|
|||
});
|
||||
|
||||
vad_tag.find(".ppt-delay input").on('change', event => {
|
||||
let ppt_settings: PPTKeySettings = settings.global('vad_ppt_settings', undefined);
|
||||
ppt_settings = ppt_settings ? JSON.parse(ppt_settings as any as string) : {};
|
||||
ppt_settings.delay = (<HTMLInputElement>event.target).valueAsNumber;
|
||||
settings.changeGlobal('vad_ppt_settings', ppt_settings);
|
||||
|
||||
voice_recoder.reinitialiseVAD();
|
||||
default_recorder.set_vad_ppt_delay((<HTMLInputElement>event.target).valueAsNumber);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -358,16 +353,12 @@ namespace Modals {
|
|||
let slider = vad_tag.find(".vad_vad_slider");
|
||||
slider.on("input change", () => {
|
||||
settings.changeGlobal("vad_threshold", slider.val().toString());
|
||||
let vad = voice_recoder.getVADHandler();
|
||||
if (vad instanceof VoiceActivityDetectorVAD)
|
||||
vad.percentageThreshold = slider.val() as number;
|
||||
default_recorder.set_vad_threshold(slider.val() as number);
|
||||
vad_tag.find(".vad_vad_slider_value").text(slider.val().toString());
|
||||
});
|
||||
modal.properties.registerCloseListener(() => {
|
||||
let vad = voice_recoder.getVADHandler();
|
||||
if (vad instanceof VoiceActivityDetectorVAD)
|
||||
vad.percentage_listener = undefined;
|
||||
|
||||
const filter = default_recorder.input.get_filter(audio.recorder.filter.Type.THRESHOLD) as audio.recorder.filter.ThresholdFilter;
|
||||
filter.callback_level = undefined;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -391,30 +382,19 @@ namespace Modals {
|
|||
|
||||
$.spawn("option")
|
||||
.attr("device-id", "")
|
||||
.attr("device-group", "")
|
||||
.text(tr("No device"))
|
||||
.appendTo(tag_select);
|
||||
|
||||
navigator.mediaDevices.enumerateDevices().then(devices => {
|
||||
const active_device = voice_recoder.device_id();
|
||||
|
||||
for (const device of devices) {
|
||||
console.debug(tr("Got device %s (%s): %s (%o)"), device.deviceId, device.kind, device.label);
|
||||
if (device.kind !== 'audioinput') continue;
|
||||
const active_device = default_recorder.current_device();
|
||||
audio.recorder.devices().forEach(device => {
|
||||
console.debug(tr("Got device %o"), device);
|
||||
|
||||
$.spawn("option")
|
||||
.attr("device-id", device.deviceId)
|
||||
.attr("device-group", device.groupId)
|
||||
.text(device.label)
|
||||
.prop("selected", device.deviceId == active_device)
|
||||
.attr("device-id", device.unique_id)
|
||||
.text(device.name)
|
||||
.prop("selected", active_device && device.unique_id == active_device.unique_id)
|
||||
.appendTo(tag_select);
|
||||
}
|
||||
}).catch(error => {
|
||||
console.error(tr("Could not enumerate over devices!"));
|
||||
console.error(error);
|
||||
display_error(tr("Could not get microphone device list!"));
|
||||
});
|
||||
|
||||
if (tag_select.find("option:selected").length == 0)
|
||||
tag_select.find("option").prop("selected", true);
|
||||
|
||||
|
@ -424,9 +404,12 @@ namespace Modals {
|
|||
tag_select.on('change', event => {
|
||||
let selected_tag = tag_select.find("option:selected");
|
||||
let deviceId = selected_tag.attr("device-id");
|
||||
let groupId = selected_tag.attr("device-group");
|
||||
console.log(tr("Selected microphone device: id: %o group: %o"), deviceId, groupId);
|
||||
voice_recoder.change_device(deviceId, groupId);
|
||||
console.log(tr("Selected microphone device: id: %o"), deviceId);
|
||||
const device = audio.recorder.devices().find(e => e.unique_id === deviceId);
|
||||
if(!device)
|
||||
console.warn(tr("Failed to find device!"));
|
||||
|
||||
default_recorder.set_device(device);
|
||||
});
|
||||
}
|
||||
|
||||
|
|
535 shared/js/voice/JavascriptRecorder.ts Normal file
|
@ -0,0 +1,535 @@
|
|||
namespace audio {
|
||||
export namespace recorder {
|
||||
/* TODO: Recognise if we got device permission and update list */
|
||||
let _queried_devices: JavascriptInputDevice[];
|
||||
|
||||
interface JavascriptInputDevice extends InputDevice {
|
||||
device_id: string;
|
||||
group_id: string;
|
||||
}
|
||||
|
||||
async function query_devices() {
|
||||
const general_supported = !!getUserMediaFunction();
|
||||
|
||||
try {
|
||||
const context = player.context();
|
||||
const devices = await navigator.mediaDevices.enumerateDevices();
|
||||
|
||||
_queried_devices = devices.filter(e => e.kind === "audioinput").map((e: MediaDeviceInfo): JavascriptInputDevice => {
|
||||
return {
|
||||
channels: context ? context.destination.channelCount : 2,
|
||||
sample_rate: context ? context.sampleRate : 44100,
|
||||
|
||||
default_input: e.deviceId == "default",
|
||||
name: e.label || "device-id{" + e.deviceId+ "}",
|
||||
|
||||
supported: general_supported,
|
||||
|
||||
device_id: e.deviceId,
|
||||
group_id: e.groupId,
|
||||
|
||||
unique_id: e.groupId + "-" + e.deviceId
|
||||
}
|
||||
});
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to query microphone devices (%o)"), error);
|
||||
_queried_devices = [];
|
||||
}
|
||||
}
|
||||
|
||||
export function devices() : InputDevice[] {
|
||||
if(typeof(_queried_devices) === "undefined")
|
||||
query_devices();
|
||||
|
||||
return _queried_devices || [];
|
||||
}
|
||||
|
||||
|
||||
export function device_refresh_available() : boolean { return true; }
|
||||
export function refresh_devices() : Promise<void> { return query_devices(); }
|
||||
|
||||
export function create_input() : AbstractInput { return new JavascriptInput(); }
|
||||
|
||||
query_devices(); /* general query */
|
||||
|
||||
export namespace filter {
|
||||
export abstract class JAbstractFilter<NodeType extends AudioNode> implements Filter {
|
||||
source_node: AudioNode;
|
||||
audio_node: NodeType;
|
||||
|
||||
context: AudioContext;
|
||||
enabled: boolean = false;
|
||||
|
||||
active: boolean = false; /* if true the filter filters! */
|
||||
callback_active_change: (new_state: boolean) => any;
|
||||
|
||||
abstract initialize(context: AudioContext, source_node: AudioNode);
|
||||
abstract finalize();
|
||||
|
||||
is_enabled(): boolean {
|
||||
return this.enabled;
|
||||
}
|
||||
}
|
||||
|
||||
export class JThresholdFilter extends JAbstractFilter<GainNode> implements ThresholdFilter {
|
||||
private static update_task_interval = 20; /* 20ms */
|
||||
|
||||
type: Type.THRESHOLD = Type.THRESHOLD;
|
||||
private _threshold = 50;
|
||||
|
||||
private _update_task: any;
|
||||
private _analyser: AnalyserNode;
|
||||
private _analyse_buffer: Uint8Array;
|
||||
|
||||
private _silence_count = 0;
|
||||
private _margin_frames = 5;
|
||||
|
||||
finalize() {
|
||||
clearInterval(this._update_task);
|
||||
this._update_task = 0;
|
||||
|
||||
if(this.source_node) {
|
||||
try { this.source_node.disconnect(this._analyser) } catch (error) {}
|
||||
try { this.source_node.disconnect(this.audio_node) } catch (error) {}
|
||||
}
|
||||
|
||||
this._analyser = undefined;
|
||||
this.source_node = undefined;
|
||||
this.audio_node = undefined;
|
||||
this.context = undefined;
|
||||
}
|
||||
|
||||
initialize(context: AudioContext, source_node: AudioNode) {
|
||||
this.context = context;
|
||||
this.source_node = source_node;
|
||||
|
||||
this.audio_node = context.createGain();
|
||||
this._analyser = context.createAnalyser();
|
||||
|
||||
const optimal_ftt_size = Math.ceil((source_node.context || context).sampleRate * (JThresholdFilter.update_task_interval / 1000));
|
||||
const base2_ftt = Math.pow(2, Math.ceil(Math.log2(optimal_ftt_size)));
|
||||
this._analyser.fftSize = base2_ftt;
|
||||
|
||||
if(!this._analyse_buffer || this._analyse_buffer.length < this._analyser.fftSize)
|
||||
this._analyse_buffer = new Uint8Array(this._analyser.fftSize);
|
||||
|
||||
this.active = false;
|
||||
this.audio_node.gain.value = 1;
|
||||
this._update_task = setInterval(() => this._analyse(), JThresholdFilter.update_task_interval);
|
||||
|
||||
this.source_node.connect(this.audio_node);
|
||||
this.source_node.connect(this._analyser);
|
||||
}
|
||||
|
||||
get_margin_frames(): number { return this._margin_frames; }
|
||||
set_margin_frames(value: number) {
|
||||
this._margin_frames = value;
|
||||
}
|
||||
|
||||
get_threshold(): number {
|
||||
return this._threshold;
|
||||
}
|
||||
|
||||
set_threshold(value: number): Promise<void> {
|
||||
this._threshold = value;
|
||||
return Promise.resolve();
|
||||
}
|
||||
|
||||
private _analyse() {
|
||||
let level;
|
||||
{
|
||||
let total = 0, float, rms;
|
||||
this._analyser.getByteTimeDomainData(this._analyse_buffer);
|
||||
|
||||
for(let index = 0; index < this._analyser.fftSize; index++) {
|
||||
float = ( this._analyse_buffer[index++] / 0x7f ) - 1;
|
||||
total += (float * float);
|
||||
}
|
||||
rms = Math.sqrt(total / this._analyser.fftSize);
|
||||
let db = 20 * ( Math.log(rms) / Math.log(10) );
|
||||
// sanity check
|
||||
|
||||
db = Math.max(-192, Math.min(db, 0));
|
||||
level = 100 + ( db * 1.92 );
|
||||
}
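/* Editor's note (not part of the commit): the block above maps the RMS of the time-domain
 * samples onto a 0..100 "level" scale:
 *     db    = 20 * log10(rms)        (0 dB at full scale, clamped to [-192, 0])
 *     level = 100 + db * 1.92        (0 dB -> 100, roughly -52 dB -> 0)
 * Example: rms = 0.01 gives db = -40 and level = 100 - 76.8 = 23.2, which is then
 * compared against the configured _threshold below. */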
|
||||
|
||||
let state = false;
|
||||
if(level > this._threshold) {
|
||||
this._silence_count = 0;
|
||||
state = true;
|
||||
} else {
|
||||
state = this._silence_count++ < this._margin_frames;
|
||||
}
|
||||
if(state) {
|
||||
this.audio_node.gain.value = 1;
|
||||
if(this.active) {
|
||||
this.active = false;
|
||||
this.callback_active_change(false);
|
||||
}
|
||||
} else {
|
||||
this.audio_node.gain.value = 0;
|
||||
if(!this.active) {
|
||||
this.active = true;
|
||||
this.callback_active_change(true);
|
||||
}
|
||||
}
|
||||
|
||||
if(this.callback_level)
|
||||
this.callback_level(level);
|
||||
}
|
||||
}
|
||||
|
||||
export class JStateFilter extends JAbstractFilter<GainNode> implements StateFilter {
|
||||
type: Type.STATE = Type.STATE;
|
||||
|
||||
finalize() {
|
||||
if(this.source_node) {
|
||||
try { this.source_node.disconnect(this.audio_node) } catch (error) {}
|
||||
}
|
||||
|
||||
this.source_node = undefined;
|
||||
this.audio_node = undefined;
|
||||
this.context = undefined;
|
||||
}
|
||||
|
||||
initialize(context: AudioContext, source_node: AudioNode) {
|
||||
this.context = context;
|
||||
this.source_node = source_node;
|
||||
|
||||
this.audio_node = context.createGain();
|
||||
this.audio_node.gain.value = this.active ? 0 : 1;
|
||||
|
||||
this.source_node.connect(this.audio_node);
|
||||
}
|
||||
|
||||
is_active(): boolean {
|
||||
return this.active;
|
||||
}
|
||||
|
||||
set_state(state: boolean): Promise<void> {
|
||||
if(this.active === state)
|
||||
return Promise.resolve();
|
||||
|
||||
this.active = state;
|
||||
if(this.audio_node)
|
||||
this.audio_node.gain.value = state ? 0 : 1;
|
||||
this.callback_active_change(state);
|
||||
return Promise.resolve();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class JavascriptInput extends AbstractInput {
|
||||
private _state: InputState = InputState.PAUSED;
|
||||
private _current_device: JavascriptInputDevice | undefined;
|
||||
private _current_consumer: InputConsumer;
|
||||
|
||||
private _current_stream: MediaStream;
|
||||
private _current_audio_stream: MediaStreamAudioSourceNode;
|
||||
|
||||
private _audio_context: AudioContext;
|
||||
private _source_node: AudioNode; /* last node which could be connected to the target; target might be the _consumer_node */
|
||||
private _consumer_callback_node: ScriptProcessorNode;
|
||||
private _mute_node: GainNode;
|
||||
|
||||
private _filters: filter.Filter[] = [];
|
||||
private _filter_active: boolean = false;
|
||||
|
||||
constructor() {
|
||||
super();
|
||||
|
||||
player.on_ready(() => this._audio_initialized());
|
||||
}
|
||||
|
||||
private _audio_initialized() {
|
||||
this._audio_context = player.context();
|
||||
if(!this._audio_context)
|
||||
return;
|
||||
|
||||
this._mute_node = this._audio_context.createGain();
|
||||
this._mute_node.gain.value = 0;
|
||||
this._mute_node.connect(this._audio_context.destination);
|
||||
|
||||
this._consumer_callback_node = this._audio_context.createScriptProcessor(1024 * 4);
|
||||
this._consumer_callback_node.addEventListener('audioprocess', event => this._audio_callback(event));
|
||||
this._consumer_callback_node.connect(this._mute_node);
|
||||
|
||||
if(this._state === InputState.INITIALIZING)
|
||||
this.start();
|
||||
}
|
||||
|
||||
private _initialize_filters() {
|
||||
const filters = this._filters as any as filter.JAbstractFilter<AudioNode>[];
|
||||
for(const filter of filters) {
|
||||
if(filter.is_enabled())
|
||||
filter.finalize();
|
||||
}
|
||||
|
||||
if(this._audio_context && this._current_audio_stream) {
|
||||
const active_filter = filters.filter(e => e.is_enabled());
|
||||
let stream: AudioNode = this._current_audio_stream;
|
||||
for(const f of active_filter) {
|
||||
f.initialize(this._audio_context, stream);
|
||||
stream = f.audio_node;
|
||||
}
|
||||
this._switch_source_node(stream);
|
||||
}
|
||||
}
|
||||
|
||||
private _audio_callback(event: AudioProcessingEvent) {
|
||||
if(!this._current_consumer || this._current_consumer.type !== InputConsumerType.CALLBACK)
|
||||
return;
|
||||
|
||||
const callback = this._current_consumer as CallbackInputConsumer;
|
||||
if(callback.callback_audio)
|
||||
callback.callback_audio(event.inputBuffer);
|
||||
if(callback.callback_buffer) {
|
||||
console.warn(tr("AudioInput has callback buffer, but this isn't supported yet!"));
|
||||
}
|
||||
}
|
||||
|
||||
current_state() : InputState { return this._state; };
|
||||
|
||||
async start() {
|
||||
this._state = InputState.INITIALIZING;
|
||||
if(!this._current_device) {
|
||||
return;
|
||||
}
|
||||
|
||||
if(!this._audio_context) {
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
const media_function = getUserMediaFunction();
|
||||
if(!media_function)
|
||||
throw tr("recording isn't supported");
|
||||
|
||||
try {
|
||||
this._current_stream = await new Promise<MediaStream>((resolve, reject) => {
|
||||
media_function({
|
||||
audio: {
|
||||
deviceId: this._current_device.device_id,
|
||||
groupId: this._current_device.group_id,
|
||||
|
||||
echoCancellation: true /* enable by default */
|
||||
},
|
||||
video: false
|
||||
}, stream => resolve(stream), error => reject(error));
|
||||
});
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to initialize recording stream (%o)"), error);
|
||||
throw tr("record stream initialisation failed");
|
||||
}
|
||||
|
||||
this._current_audio_stream = this._audio_context.createMediaStreamSource(this._current_stream);
|
||||
this._initialize_filters();
|
||||
this._state = InputState.RECORDING;
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to start recorder (%o)"), error);
|
||||
this._state = InputState.PAUSED;
|
||||
throw error;
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
||||
async stop() {
|
||||
this._state = InputState.PAUSED;
|
||||
if(this._current_audio_stream)
|
||||
this._current_audio_stream.disconnect();
|
||||
|
||||
if(this._current_stream) {
|
||||
if(this._current_stream.stop)
|
||||
this._current_stream.stop();
|
||||
else
|
||||
this._current_stream.getTracks().forEach(value => {
|
||||
value.stop();
|
||||
});
|
||||
}
|
||||
|
||||
this._current_stream = undefined;
|
||||
this._current_audio_stream = undefined;
|
||||
this._initialize_filters();
|
||||
return undefined;
|
||||
}
|
||||
|
||||
|
||||
current_device(): InputDevice | undefined {
|
||||
return this._current_device;
|
||||
}
|
||||
|
||||
async set_device(device: InputDevice | undefined) {
|
||||
if(this._current_device === device)
|
||||
return;
|
||||
|
||||
|
||||
const saved_state = this._state;
|
||||
try {
|
||||
await this.stop();
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to stop previous record session (%o)"), error);
|
||||
}
|
||||
|
||||
this._current_device = device as any; /* TODO: Test for device_id and device_group */
|
||||
if(!device) {
|
||||
this._state = InputState.PAUSED;
|
||||
return;
|
||||
}
|
||||
|
||||
if(saved_state == InputState.DRY || saved_state == InputState.INITIALIZING || saved_state == InputState.RECORDING) {
|
||||
try {
|
||||
await this.start()
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to start new recording stream (%o)"), error);
|
||||
throw "failed to start record";
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
get_filter(type: filter.Type): filter.Filter | undefined {
|
||||
for(const filter of this._filters)
|
||||
if(filter.type == type)
|
||||
return filter;
|
||||
|
||||
let new_filter: filter.JAbstractFilter<AudioNode>;
|
||||
switch (type) {
|
||||
case filter.Type.STATE:
|
||||
new_filter = new filter.JStateFilter();
|
||||
break;
|
||||
case filter.Type.VOICE_LEVEL:
|
||||
throw "voice filter isn't supported!";
|
||||
case filter.Type.THRESHOLD:
|
||||
new_filter = new filter.JThresholdFilter();
|
||||
break;
|
||||
default:
|
||||
throw "invalid filter type, or type isn't implemented! (" + type + ")";
|
||||
}
|
||||
|
||||
new_filter.callback_active_change = () => this._recalculate_filter_status();
|
||||
this._filters.push(new_filter as any);
|
||||
this.enable_filter(type);
|
||||
return new_filter as any;
|
||||
}
|
||||
|
||||
private find_filter(type: filter.Type) : filter.JAbstractFilter<AudioNode> | undefined {
|
||||
for(const filter of this._filters)
|
||||
if(filter.type == type)
|
||||
return filter as any;
|
||||
return undefined;
|
||||
}
|
||||
|
||||
private previous_filter(type: filter.Type) : filter.JAbstractFilter<AudioNode> | undefined {
|
||||
for(let index = 1; index < this._filters.length; index++)
|
||||
if(this._filters[index].type === type)
|
||||
return this._filters.slice(0, index).reverse().find(e => e.is_enabled()) as any;
|
||||
return undefined;
|
||||
}
|
||||
|
||||
private next_filter(type: filter.Type) : filter.JAbstractFilter<AudioNode> | undefined {
|
||||
for(let index = 0; index < this._filters.length - 1; index++)
|
||||
if(this._filters[index].type === type)
|
||||
return this._filters.slice(index + 1).find(e => e.is_enabled()) as any;
|
||||
return undefined;
|
||||
}
|
||||
|
||||
clear_filter() {
|
||||
for(const filter of this._filters) {
|
||||
if(!filter.is_enabled())
|
||||
continue;
|
||||
filter.finalize();
|
||||
filter.enabled = false;
|
||||
}
|
||||
|
||||
this._initialize_filters();
|
||||
this._recalculate_filter_status();
|
||||
}
|
||||
|
||||
disable_filter(type: filter.Type) {
|
||||
const filter = this.find_filter(type);
|
||||
if(!filter) return;
|
||||
|
||||
/* test if the filter is active */
|
||||
if(!filter.is_enabled())
|
||||
return;
|
||||
|
||||
filter.enabled = false;
|
||||
filter.finalize();
|
||||
this._initialize_filters();
|
||||
this._recalculate_filter_status();
|
||||
}
|
||||
|
||||
enable_filter(type: filter.Type) {
|
||||
const filter = this.get_filter(type) as any as filter.JAbstractFilter<AudioNode>;
|
||||
if(filter.is_enabled())
|
||||
return;
|
||||
|
||||
filter.enabled = true;
|
||||
this._initialize_filters();
|
||||
this._recalculate_filter_status();
|
||||
}
|
||||
|
||||
private _recalculate_filter_status() {
|
||||
let filtered = this._filters.filter(e => e.is_enabled()).filter(e => (e as any as filter.JAbstractFilter<AudioNode>).active).length > 0;
|
||||
if(filtered === this._filter_active)
|
||||
return;
|
||||
|
||||
this._filter_active = filtered;
|
||||
if(filtered) {
|
||||
if(this.callback_end)
|
||||
this.callback_end();
|
||||
} else {
|
||||
if(this.callback_begin)
|
||||
this.callback_begin();
|
||||
}
|
||||
}
|
||||
|
||||
current_consumer(): InputConsumer | undefined {
|
||||
return this._current_consumer;
|
||||
}
|
||||
|
||||
async set_consumer(consumer: InputConsumer) {
|
||||
if(this._current_consumer) {
|
||||
if(this._current_consumer.type == InputConsumerType.NODE) {
|
||||
if(this._source_node)
|
||||
(this._current_consumer as NodeInputConsumer).callback_disconnect(this._source_node)
|
||||
} else if(this._current_consumer.type === InputConsumerType.CALLBACK) {
|
||||
if(this._source_node)
|
||||
this._source_node.disconnect(this._consumer_callback_node);
|
||||
}
|
||||
}
|
||||
|
||||
if(consumer) {
|
||||
if(consumer.type == InputConsumerType.CALLBACK) {
|
||||
if(this._source_node)
|
||||
this._source_node.connect(this._consumer_callback_node);
|
||||
} else if(consumer.type == InputConsumerType.NODE) {
|
||||
if(this._source_node)
|
||||
(consumer as NodeInputConsumer).callback_node(this._source_node);
|
||||
} else {
|
||||
throw "native callback consumers are not supported!";
|
||||
}
|
||||
}
|
||||
this._current_consumer = consumer;
|
||||
}
|
||||
|
||||
private _switch_source_node(new_node: AudioNode) {
|
||||
if(this._current_consumer) {
|
||||
if(this._current_consumer.type == InputConsumerType.NODE) {
|
||||
const node_consumer = this._current_consumer as NodeInputConsumer;
|
||||
if(this._source_node)
|
||||
node_consumer.callback_disconnect(this._source_node);
|
||||
if(new_node)
|
||||
node_consumer.callback_node(new_node);
|
||||
} else if(this._current_consumer.type == InputConsumerType.CALLBACK) {
|
||||
this._source_node.disconnect(this._consumer_callback_node);
|
||||
if(new_node)
|
||||
new_node.connect(this._consumer_callback_node);
|
||||
}
|
||||
}
|
||||
this._source_node = new_node;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
118 shared/js/voice/RecorderBase.ts Normal file
|
@ -0,0 +1,118 @@
|
|||
namespace audio {
|
||||
export namespace recorder {
|
||||
export interface InputDevice {
|
||||
unique_id: string;
|
||||
name: string;
|
||||
default_input: boolean;
|
||||
|
||||
supported: boolean;
|
||||
|
||||
sample_rate: number;
|
||||
channels: number;
|
||||
}
|
||||
|
||||
export declare function devices() : InputDevice[];
|
||||
|
||||
export declare function device_refresh_available() : boolean;
|
||||
export declare function refresh_devices() : Promise<void>;
|
||||
|
||||
export declare function create_input() : AbstractInput;
|
||||
|
||||
export enum InputConsumerType {
|
||||
CALLBACK,
|
||||
NODE,
|
||||
NATIVE
|
||||
}
|
||||
|
||||
export interface InputConsumer {
|
||||
type: InputConsumerType;
|
||||
}
|
||||
|
||||
export interface CallbackInputConsumer extends InputConsumer {
|
||||
type: InputConsumerType.CALLBACK;
|
||||
|
||||
callback_audio?: (buffer: AudioBuffer) => any;
|
||||
callback_buffer?: (buffer: Float32Array, samples: number, channels: number) => any;
|
||||
}
|
||||
|
||||
export interface NodeInputConsumer extends InputConsumer {
|
||||
type: InputConsumerType.NODE;
|
||||
|
||||
callback_node: (source_node: AudioNode) => any;
|
||||
callback_disconnect: (source_node: AudioNode) => any;
|
||||
}
|
||||
|
||||
|
||||
export namespace filter {
|
||||
export enum Type {
|
||||
THRESHOLD,
|
||||
VOICE_LEVEL,
|
||||
STATE
|
||||
}
|
||||
|
||||
export interface Filter {
|
||||
type: Type;
|
||||
|
||||
is_enabled() : boolean;
|
||||
}
|
||||
|
||||
export interface MarginedFilter {
|
||||
get_margin_frames() : number;
|
||||
set_margin_frames(value: number);
|
||||
}
|
||||
|
||||
export interface ThresholdFilter extends Filter, MarginedFilter {
|
||||
type: Type.THRESHOLD;
|
||||
|
||||
get_threshold() : number;
|
||||
set_threshold(value: number) : Promise<void>;
|
||||
|
||||
callback_level?: (value: number) => any;
|
||||
}
|
||||
|
||||
export interface VoiceLevelFilter extends Filter, MarginedFilter {
|
||||
type: Type.VOICE_LEVEL;
|
||||
|
||||
get_level() : number;
|
||||
}
|
||||
|
||||
export interface StateFilter extends Filter {
|
||||
type: Type.STATE;
|
||||
|
||||
set_state(state: boolean) : Promise<void>;
|
||||
is_active() : boolean; /* if true the filter allows data to pass */
|
||||
}
|
||||
}
|
||||
|
||||
export enum InputState {
|
||||
PAUSED,
|
||||
INITIALIZING,
|
||||
RECORDING,
|
||||
DRY
|
||||
}
|
||||
|
||||
export abstract class AbstractInput {
|
||||
abstract current_state() : InputState;
|
||||
|
||||
abstract start() : Promise<void>;
|
||||
abstract stop() : Promise<void>;
|
||||
|
||||
abstract current_device() : InputDevice | undefined;
|
||||
abstract set_device(device: InputDevice | undefined) : Promise<void>;
|
||||
|
||||
abstract current_consumer() : InputConsumer | undefined;
|
||||
abstract set_consumer(consumer: InputConsumer) : Promise<void>;
|
||||
|
||||
callback_state_change: () => any;
|
||||
callback_begin: () => any;
|
||||
callback_end: () => any;
|
||||
|
||||
abstract get_filter(type: filter.Type) : filter.Filter | undefined;
|
||||
|
||||
abstract clear_filter();
|
||||
abstract disable_filter(type: filter.Type);
|
||||
abstract enable_filter(type: filter.Type);
|
||||
|
||||
}
|
||||
}
|
||||
}
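The declarations in RecorderBase.ts above define the backend-agnostic recording API. The following usage sketch is an editor's illustration only, not part of the commit; it uses the names declared above, assumes the JavascriptRecorder backend has been loaded and audio.player is ready, and is meant to run inside an async context:

// Create an input from the platform backend and pick the default device.
const input = audio.recorder.create_input();
const device = audio.recorder.devices().find(e => e.default_input);
await input.set_device(device);

// Gate the input with the threshold ("VAD") filter and watch the level.
const threshold = input.get_filter(audio.recorder.filter.Type.THRESHOLD) as audio.recorder.filter.ThresholdFilter;
await threshold.set_threshold(50);
threshold.callback_level = level => console.log("input level: %d", level);
input.enable_filter(audio.recorder.filter.Type.THRESHOLD);

// Receive the filtered audio via the callback consumer and start recording.
await input.set_consumer({
    type: audio.recorder.InputConsumerType.CALLBACK,
    callback_audio: buffer => console.log("got %d sample frames", buffer.length)
} as audio.recorder.CallbackInputConsumer);
await input.start();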
|
235 shared/js/voice/RecorderProfile.ts Normal file
|
@ -0,0 +1,235 @@
|
|||
|
||||
type VadType = "threshold" | "push_to_talk" | "active";
|
||||
interface RecorderProfileConfig {
|
||||
version: number;
|
||||
|
||||
/* devices unique id */
|
||||
device_id: string | undefined;
|
||||
|
||||
vad_type: VadType;
|
||||
vad_threshold: {
|
||||
threshold: number;
|
||||
}
|
||||
vad_push_to_talk: {
|
||||
delay: number;
|
||||
key_code: string;
|
||||
|
||||
key_ctrl: boolean;
|
||||
key_windows: boolean;
|
||||
key_shift: boolean;
|
||||
key_alt: boolean;
|
||||
}
|
||||
}
|
||||
|
||||
let default_recorder: RecorderProfile; /* needs to be initialized */
|
||||
class RecorderProfile {
|
||||
readonly name;
|
||||
readonly volatile; /* not saving profile */
|
||||
|
||||
config: RecorderProfileConfig;
|
||||
input: audio.recorder.AbstractInput;
|
||||
|
||||
current_handler: ConnectionHandler;
|
||||
|
||||
callback_support_change: () => any;
|
||||
callback_start: () => any;
|
||||
callback_stop: () => any;
|
||||
|
||||
callback_unmount: () => any; /* called if somebody else takes ownership */
|
||||
|
||||
record_supported: boolean;
|
||||
|
||||
private _ppt_hook: ppt.KeyHook;
|
||||
private _ppt_timeout: NodeJS.Timer;
|
||||
private _ppt_hook_registered: boolean;
|
||||
|
||||
constructor(name: string, volatile?: boolean) {
|
||||
this.name = name;
|
||||
this.volatile = typeof(volatile) === "boolean" ? volatile : false;
|
||||
|
||||
this.initialize_input();
|
||||
|
||||
this._ppt_hook = {
|
||||
callback_release: () => {
|
||||
if(this._ppt_timeout)
|
||||
clearTimeout(this._ppt_timeout);
|
||||
|
||||
this._ppt_timeout = setTimeout(() => {
|
||||
const filter = this.input.get_filter(audio.recorder.filter.Type.STATE) as audio.recorder.filter.StateFilter;
|
||||
if(filter)
|
||||
filter.set_state(true);
|
||||
}, this.config.vad_push_to_talk.delay);
|
||||
},
|
||||
callback_press: () => {
|
||||
if(this._ppt_timeout)
|
||||
clearTimeout(this._ppt_timeout);
|
||||
|
||||
const filter = this.input.get_filter(audio.recorder.filter.Type.STATE) as audio.recorder.filter.StateFilter;
|
||||
if(filter)
|
||||
filter.set_state(false);
|
||||
},
|
||||
|
||||
cancel: false
|
||||
} as ppt.KeyHook;
|
||||
this._ppt_hook_registered = false;
|
||||
this.record_supported = true;
|
||||
}
|
||||
|
||||
async initialize() {
|
||||
await this.load();
|
||||
await this.reinitialize_filter();
|
||||
await this.input.start();
|
||||
}
|
||||
|
||||
private initialize_input() {
|
||||
this.input = audio.recorder.create_input();
|
||||
this.input.callback_begin = () => {
|
||||
console.log("Voice start");
|
||||
if(this.callback_start)
|
||||
this.callback_start();
|
||||
};
|
||||
|
||||
this.input.callback_end = () => {
|
||||
console.log("Voice end");
|
||||
if(this.callback_stop)
|
||||
this.callback_stop();
|
||||
};
|
||||
|
||||
this.input.callback_state_change = () => {
|
||||
this.record_supported = this.input.current_state() === audio.recorder.InputState.RECORDING || this.input.current_state() === audio.recorder.InputState.DRY;
|
||||
if(this.callback_support_change)
|
||||
this.callback_support_change();
|
||||
}
|
||||
}
|
||||
|
||||
private async load() {
|
||||
this.config = settings.static_global(Settings.FN_PROFILE_RECORD(this.name), {}) as RecorderProfileConfig;
|
||||
if(typeof(this.config.version) === "undefined") {
|
||||
/* default config */
|
||||
this.config = {
|
||||
version: 1,
|
||||
device_id: undefined,
|
||||
|
||||
vad_threshold: {
|
||||
threshold: 50
|
||||
},
|
||||
vad_type: "threshold",
|
||||
vad_push_to_talk: {
|
||||
delay: 300,
|
||||
key_alt: false,
|
||||
key_ctrl: false,
|
||||
key_shift: false,
|
||||
key_windows: false,
|
||||
key_code: 't'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
const all_devices = audio.recorder.devices();
|
||||
const devices = all_devices.filter(e => e.default_input || e.unique_id === this.config.device_id);
|
||||
const device = devices.find(e => e.unique_id === this.config.device_id) || devices[0];
|
||||
|
||||
console.log(tr("Loaded record profile device %s | %o (%o)"), this.config.device_id, device, all_devices);
|
||||
try {
|
||||
await this.input.set_device(device);
|
||||
} catch(error) {
|
||||
console.error(tr("Failed to set input device (%o)"), error);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private save(enforce?: boolean) {
|
||||
if(enforce || !this.volatile) {
|
||||
settings.changeGlobal(Settings.FN_PROFILE_RECORD(this.name), this.config);
|
||||
}
|
||||
}
|
||||
|
||||
private async reinitialize_filter() {
|
||||
this.input.clear_filter();
|
||||
if(this._ppt_hook_registered) {
|
||||
ppt.unregister_key_hook(this._ppt_hook);
|
||||
this._ppt_hook_registered = false;
|
||||
}
|
||||
|
||||
if(this.config.vad_type === "threshold") {
|
||||
const filter = this.input.get_filter(audio.recorder.filter.Type.THRESHOLD) as audio.recorder.filter.ThresholdFilter;
|
||||
await filter.set_threshold(this.config.vad_threshold.threshold);
|
||||
|
||||
this.input.enable_filter(audio.recorder.filter.Type.THRESHOLD);
|
||||
} else if(this.config.vad_type === "push_to_talk") {
|
||||
const filter = this.input.get_filter(audio.recorder.filter.Type.STATE) as audio.recorder.filter.StateFilter;
|
||||
await filter.set_state(true);
|
||||
|
||||
for(const key of ["key_alt", "key_ctrl", "key_shift", "key_windows", "key_code"])
|
||||
this._ppt_hook[key] = this.config.vad_push_to_talk[key];
|
||||
ppt.register_key_hook(this._ppt_hook);
|
||||
this._ppt_hook_registered = true;
|
||||
|
||||
this.input.enable_filter(audio.recorder.filter.Type.STATE);
|
||||
} else if(this.config.vad_type === "active") {}
|
||||
}
|
||||
|
||||
async unmount() {
|
||||
if(this.callback_unmount)
|
||||
this.callback_unmount();
|
||||
if(this.input) {
|
||||
try {
|
||||
await this.input.set_consumer(undefined);
|
||||
} catch(error) {
|
||||
console.warn(tr("Failed to unmount input consumer for profile (%o)"), error);
|
||||
}
|
||||
}
|
||||
|
||||
this.callback_start = undefined;
|
||||
this.callback_stop = undefined;
|
||||
this.current_handler = undefined;
|
||||
}
|
||||
|
||||
get_vad_type() { return this.config.vad_type; }
|
||||
set_vad_type(type: VadType) {
|
||||
if(this.config.vad_type === type)
|
||||
return;
|
||||
|
||||
this.config.vad_type = type;
|
||||
this.reinitialize_filter();
|
||||
this.save();
|
||||
}
|
||||
|
||||
get_vad_threshold() { return this.config.vad_threshold.threshold; }
|
||||
set_vad_threshold(value: number) {
|
||||
if(this.config.vad_threshold.threshold === value)
|
||||
return;
|
||||
|
||||
this.config.vad_threshold.threshold = value;
|
||||
this.reinitialize_filter();
|
||||
this.save();
|
||||
}
|
||||
|
||||
get_vad_ppt_key() : ppt.KeyDescriptor { return this.config.vad_push_to_talk; }
|
||||
set_vad_ppt_key(key: ppt.KeyDescriptor) {
|
||||
for(const _key of ["key_alt", "key_ctrl", "key_shift", "key_windows", "key_code"])
|
||||
this.config.vad_push_to_talk[_key] = key[_key];
|
||||
|
||||
this.reinitialize_filter();
|
||||
this.save();
|
||||
}
|
||||
|
||||
get_vad_ppt_delay() { return this.config.vad_push_to_talk.delay; }
|
||||
set_vad_ppt_delay(value: number) {
|
||||
if(this.config.vad_push_to_talk.delay === value)
|
||||
return;
|
||||
|
||||
this.config.vad_push_to_talk.delay = value;
|
||||
this.reinitialize_filter();
|
||||
this.save();
|
||||
}
|
||||
|
||||
|
||||
current_device() : audio.recorder.InputDevice | undefined { return this.input.current_device(); }
|
||||
set_device(device: audio.recorder.InputDevice | undefined) : Promise<void> {
|
||||
this.config.device_id = device ? device.unique_id : undefined;
|
||||
this.save();
|
||||
return this.input.set_device(device);
|
||||
}
|
||||
}
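RecorderProfile above wraps an AbstractInput together with persisted VAD settings. A minimal sketch of how a profile could be created and switched between detection modes follows; it is an editor's illustration, not part of the commit, and assumes settings and ppt are initialized as in main.ts:

const profile = new RecorderProfile("default");
await profile.initialize();                 // loads the config, applies the filters, starts the input

profile.callback_start = () => console.log("voice started");
profile.callback_stop  = () => console.log("voice stopped");

// Switch to push-to-talk and adjust the release delay; both changes are persisted via save().
profile.set_vad_type("push_to_talk");
profile.set_vad_ppt_delay(500);

// Or use the threshold detector instead.
profile.set_vad_type("threshold");
profile.set_vad_threshold(40);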
|
|
@ -1,6 +1,5 @@
|
|||
/// <reference path="../ConnectionHandler.ts" />
|
||||
/// <reference path="../codec/Codec.ts" />
|
||||
/// <reference path="VoiceRecorder.ts" />
|
||||
/// <reference path="VoiceClient.ts" />
|
||||
|
||||
namespace audio {
|
||||
|
@ -136,7 +135,7 @@ namespace audio {
|
|||
private chunkVPacketId: number = 0;
|
||||
private send_task: NodeJS.Timer;
|
||||
|
||||
private _audio_source: VoiceRecorder;
|
||||
private _audio_source: RecorderProfile;
|
||||
private _audio_clients: audio.js.VoiceClientController[] = [];
|
||||
|
||||
constructor(connection: connection.ServerConnection) {
|
||||
|
@ -191,47 +190,53 @@ namespace audio {
|
|||
if(!this.javascript_encoding_supported()) return;
|
||||
}
|
||||
|
||||
acquire_voice_recorder(recorder: VoiceRecorder | undefined, enforce?: boolean) {
|
||||
acquire_voice_recorder(recorder: RecorderProfile | undefined, enforce?: boolean) {
|
||||
if(this._audio_source === recorder && !enforce)
|
||||
return;
|
||||
|
||||
if(this._audio_source) {
|
||||
this._audio_source.own_recoder(undefined); /* release ownership */
|
||||
}
|
||||
if(recorder)
|
||||
recorder.unmount(); /* FIXME: Await promise? */
|
||||
if(this._audio_source)
|
||||
this._audio_source.unmount();
|
||||
|
||||
this.handleVoiceEnded();
|
||||
this._audio_source = recorder;
|
||||
|
||||
if(recorder) {
|
||||
recorder.own_recoder(this);
|
||||
recorder.current_handler = this.connection.client;
|
||||
|
||||
recorder.on_end = this.handleVoiceEnded.bind(this);
|
||||
recorder.on_start = this.handleVoiceStarted.bind(this);
|
||||
recorder.on_yield = this.on_recoder_yield.bind(this);
|
||||
recorder.on_support_state_change = () => {
|
||||
recorder.callback_unmount = this.on_recoder_yield.bind(this);
|
||||
recorder.callback_start = this.handleVoiceStarted.bind(this);
|
||||
recorder.callback_stop = this.handleVoiceEnded.bind(this);
|
||||
|
||||
recorder.callback_support_change = () => {
|
||||
this.connection.client.update_voice_status(undefined);
|
||||
};
|
||||
|
||||
if(this._type == VoiceEncodeType.NATIVE_ENCODE) {
|
||||
recorder.on_initialized(() => {
|
||||
audio.player.on_ready(() => {
|
||||
if(this._audio_source !== recorder)
|
||||
return;
|
||||
|
||||
if(!this.local_audio_stream)
|
||||
this.setup_native(); /* requires initialized audio */
|
||||
|
||||
recorder.input.set_consumer({
|
||||
type: audio.recorder.InputConsumerType.NODE,
|
||||
callback_node: node => {
|
||||
if(!this.local_audio_stream)
|
||||
return;
|
||||
|
||||
/* an output stream is only available if the recorder is ready */
|
||||
const stream = recorder.get_output_stream();
|
||||
stream.disconnect();
|
||||
stream.connect(this.local_audio_stream);
|
||||
});
|
||||
});
|
||||
node.connect(this.local_audio_stream);
|
||||
},
|
||||
callback_disconnect: node => {
|
||||
if(!this.local_audio_stream)
|
||||
return;
|
||||
|
||||
node.disconnect(this.local_audio_stream);
|
||||
}
|
||||
} as audio.recorder.NodeInputConsumer);
|
||||
} else {
|
||||
recorder.on_data = this.handleVoiceData.bind(this);
|
||||
recorder.input.set_consumer({
|
||||
type: audio.recorder.InputConsumerType.CALLBACK,
|
||||
callback_audio: buffer => this.handleVoiceData(buffer, false)
|
||||
} as audio.recorder.CallbackInputConsumer);
|
||||
}
|
||||
} else {
|
||||
this.connection.client.update_voice_status(undefined);
|
||||
|
@ -531,6 +536,7 @@ namespace audio {
|
|||
}
|
||||
|
||||
private on_recoder_yield() {
|
||||
console.log("Lost recorder!");
|
||||
this._audio_source = undefined;
|
||||
this.acquire_voice_recorder(undefined, true);
|
||||
}
|
||||
|
@ -539,7 +545,7 @@ namespace audio {
|
|||
return typeof(this.dataChannel) !== "undefined" && this.dataChannel.readyState === "open";
|
||||
}
|
||||
|
||||
voice_recorder(): VoiceRecorder {
|
||||
voice_recorder(): RecorderProfile {
|
||||
return this._audio_source;
|
||||
}
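With the changes above, the voice connection attaches itself to the profile through the generic consumer interface instead of recorder-specific callbacks. A sketch of the two paths used in this file follows (editor's illustration, not part of the commit; local_audio_stream and handleVoiceData stand in for the members used in the hunks above):

// Native encoding: route the recorder's output node into the local audio stream.
recorder.input.set_consumer({
    type: audio.recorder.InputConsumerType.NODE,
    callback_node: node => node.connect(local_audio_stream),
    callback_disconnect: node => node.disconnect(local_audio_stream)
} as audio.recorder.NodeInputConsumer);

// JavaScript encoding: receive raw AudioBuffers and feed them to the encoder.
recorder.input.set_consumer({
    type: audio.recorder.InputConsumerType.CALLBACK,
    callback_audio: buffer => handleVoiceData(buffer, false)
} as audio.recorder.CallbackInputConsumer);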
|
||||
|
||||
|
|
|
@ -1,482 +0,0 @@
|
|||
/// <reference path="VoiceHandler.ts" />
|
||||
/// <reference path="../ui/elements/modal.ts" />
|
||||
|
||||
abstract class VoiceActivityDetector {
|
||||
protected handle: VoiceRecorder;
|
||||
|
||||
abstract shouldRecord(buffer: AudioBuffer) : boolean;
|
||||
initialise() {}
|
||||
finalize() {}
|
||||
|
||||
initialiseNewStream(old: MediaStreamAudioSourceNode, _new: MediaStreamAudioSourceNode) : void {}
|
||||
|
||||
changeHandle(handle: VoiceRecorder, triggerNewStream: boolean) {
|
||||
const oldStream = !this.handle ? undefined : this.handle.getMicrophoneStream();
|
||||
this.handle = handle;
|
||||
if(triggerNewStream) this.initialiseNewStream(oldStream, !handle ? undefined : handle.getMicrophoneStream());
|
||||
}
|
||||
}
|
||||
|
||||
//A small class extension
|
||||
interface MediaStreamConstraints {
|
||||
deviceId?: string;
|
||||
groupId?: string;
|
||||
}
|
||||
|
||||
if(!AudioBuffer.prototype.copyToChannel) { //Webkit does not implement this function
|
||||
AudioBuffer.prototype.copyToChannel = function (source: Float32Array, channelNumber: number, startInChannel?: number) {
|
||||
if(!startInChannel) startInChannel = 0;
|
||||
|
||||
let destination = this.getChannelData(channelNumber);
|
||||
for(let index = 0; index < source.length; index++)
|
||||
if(destination.length < index + startInChannel)
|
||||
destination[index + startInChannel] = source[index];
|
||||
}
|
||||
}
|
||||
|
||||
let voice_recoder: VoiceRecorder;
|
||||
class VoiceRecorder {
|
||||
private static readonly CHANNEL = 0;
|
||||
private static readonly CHANNELS = 2;
|
||||
private static readonly BUFFER_SIZE = 1024 * 4;
|
||||
|
||||
on_support_state_change: () => any;
|
||||
on_data: (data: AudioBuffer, head: boolean) => void = undefined;
|
||||
on_end: () => any;
|
||||
on_start: () => any;
|
||||
on_yield: () => any; /* called when the owner loses ownership */
|
||||
|
||||
owner: connection.voice.AbstractVoiceConnection | undefined;
|
||||
|
||||
private on_ready_callbacks: (() => any)[] = [];
|
||||
|
||||
private _recording: boolean = false;
|
||||
private _recording_supported: boolean = true; /* recording is supported until anything else had been set */
|
||||
private _tag_favicon: JQuery;
|
||||
|
||||
private microphoneStream: MediaStreamAudioSourceNode = undefined;
|
||||
private mediaStream: MediaStream = undefined;
|
||||
|
||||
private audioContext: AudioContext;
|
||||
private processor: ScriptProcessorNode;
|
||||
private _mute_node: GainNode;
|
||||
|
||||
get_output_stream() : ScriptProcessorNode { return this.processor; }
|
||||
|
||||
private vadHandler: VoiceActivityDetector;
|
||||
private _chunkCount: number = 0;
|
||||
|
||||
private _deviceId: string;
|
||||
private _deviceGroup: string;
|
||||
|
||||
private current_handler: ConnectionHandler;
|
||||
|
||||
constructor() {
|
||||
this._deviceId = settings.global("microphone_device_id", "default");
|
||||
this._deviceGroup = settings.global("microphone_device_group", "default");
|
||||
|
||||
audio.player.on_ready(() => {
|
||||
this.audioContext = audio.player.context();
|
||||
this.processor = this.audioContext.createScriptProcessor(VoiceRecorder.BUFFER_SIZE, VoiceRecorder.CHANNELS, VoiceRecorder.CHANNELS);
|
||||
this._mute_node = this.audioContext.createGain();
|
||||
this._mute_node.gain.value = 0;
|
||||
this._mute_node.connect(this.audioContext.destination);
|
||||
|
||||
const empty_buffer = this.audioContext.createBuffer(VoiceRecorder.CHANNELS, VoiceRecorder.BUFFER_SIZE, 48000);
|
||||
this.processor.addEventListener('audioprocess', ev => {
|
||||
if(this.microphoneStream && this.vadHandler.shouldRecord(ev.inputBuffer)) {
|
||||
if(this._chunkCount == 0)
|
||||
this.on_voice_start();
|
||||
|
||||
if(this.on_data)
|
||||
this.on_data(ev.inputBuffer, this._chunkCount == 0);
|
||||
else {
|
||||
for(let channel = 0; channel < ev.inputBuffer.numberOfChannels; channel++)
|
||||
ev.outputBuffer.copyToChannel(ev.inputBuffer.getChannelData(channel), channel);
|
||||
}
|
||||
this._chunkCount++;
|
||||
} else {
|
||||
if(this._chunkCount != 0 )
|
||||
this.on_voice_end();
|
||||
this._chunkCount = 0;
|
||||
|
||||
for(let channel = 0; channel < ev.inputBuffer.numberOfChannels; channel++)
|
||||
ev.outputBuffer.copyToChannel(empty_buffer.getChannelData(channel), channel);
|
||||
}
|
||||
});
|
||||
this.processor.connect(this._mute_node);
|
||||
|
||||
if(this.vadHandler)
|
||||
this.vadHandler.initialise();
|
||||
this.on_microphone(this.mediaStream);
|
||||
|
||||
for(const callback of this.on_ready_callbacks)
|
||||
callback();
|
||||
this.on_ready_callbacks = [];
|
||||
});
|
||||
|
||||
this.setVADHandler(new PassThroughVAD());
|
||||
this._tag_favicon = $("head link[rel='icon']");
|
||||
}
|
||||
|
||||
own_recoder(connection: connection.voice.AbstractVoiceConnection | undefined) {
|
||||
if(connection === this.owner)
|
||||
return;
|
||||
if(this.on_yield)
|
||||
this.on_yield();
|
||||
|
||||
this.owner = connection;
|
||||
|
||||
this.on_end = undefined;
|
||||
this.on_start = undefined;
|
||||
this.on_data = undefined;
|
||||
this.on_yield = undefined;
|
||||
this.on_support_state_change = undefined;
|
||||
this.on_ready_callbacks = [];
|
||||
|
||||
this._chunkCount = 0;
|
||||
|
||||
if(this.processor) /* processor stream might be null because of the late audio initialisation */
|
||||
this.processor.connect(this._mute_node);
|
||||
}
|
||||
|
||||
input_available() : boolean {
|
||||
return !!getUserMediaFunction();
|
||||
}
|
||||
|
||||
getMediaStream() : MediaStream {
|
||||
return this.mediaStream;
|
||||
}
|
||||
|
||||
getMicrophoneStream() : MediaStreamAudioSourceNode {
|
||||
return this.microphoneStream;
|
||||
}
|
||||
|
||||
reinitialiseVAD() {
|
||||
let type = settings.global("vad_type", "vad");
|
||||
if(type == "ppt") {
|
||||
if(settings.global('vad_ppt_key', undefined)) {
|
||||
//TODO remove that because its legacy shit
|
||||
createErrorModal(tr("VAD changed!"), tr("VAD key detection changed.<br>Please reset your PPT key!")).open();
|
||||
}
|
||||
let ppt_settings: PPTKeySettings = settings.global('vad_ppt_settings', undefined);
|
||||
ppt_settings = ppt_settings ? JSON.parse(ppt_settings as any as string) : {};
|
||||
|
||||
if(ppt_settings.version === undefined)
|
||||
ppt_settings.version = 1;
|
||||
|
||||
if(ppt_settings.key_code === undefined)
|
||||
ppt_settings.key_code = "KeyT";
|
||||
|
||||
if(ppt_settings.key_ctrl === undefined)
|
||||
ppt_settings.key_ctrl = false;
|
||||
|
||||
if(ppt_settings.key_shift === undefined)
|
||||
ppt_settings.key_shift = false;
|
||||
|
||||
if(ppt_settings.key_alt === undefined)
|
||||
ppt_settings.key_alt = false;
|
||||
|
||||
if(ppt_settings.key_windows === undefined)
|
||||
ppt_settings.key_windows = false;
|
||||
|
||||
if(ppt_settings.delay === undefined)
|
||||
ppt_settings.delay = 300;
|
||||
|
||||
if(!(this.getVADHandler() instanceof PushToTalkVAD))
|
||||
this.setVADHandler(new PushToTalkVAD(ppt_settings));
|
||||
else (this.getVADHandler() as PushToTalkVAD).settings = ppt_settings;
|
||||
} else if(type == "pt") {
|
||||
if(!(this.getVADHandler() instanceof PassThroughVAD))
|
||||
this.setVADHandler(new PassThroughVAD());
|
||||
} else if(type == "vad") {
|
||||
if(!(this.getVADHandler() instanceof VoiceActivityDetectorVAD))
|
||||
this.setVADHandler(new VoiceActivityDetectorVAD());
|
||||
(this.getVADHandler() as VoiceActivityDetectorVAD).percentageThreshold = settings.global("vad_threshold", 50);
|
||||
} else {
|
||||
console.warn(tr("Invalid VAD (Voice activation detector) handler! (%o)"), type);
|
||||
}
|
||||
}
|
||||
|
||||
setVADHandler(handler: VoiceActivityDetector) {
|
||||
if(this.vadHandler) {
|
||||
this.vadHandler.changeHandle(null, true);
|
||||
this.vadHandler.finalize();
|
||||
}
|
||||
|
||||
this.vadHandler = handler;
|
||||
this.vadHandler.changeHandle(this, false);
|
||||
if(this.audioContext) {
|
||||
this.vadHandler.initialise();
|
||||
if(this.microphoneStream)
|
||||
this.vadHandler.initialiseNewStream(undefined, this.microphoneStream);
|
||||
}
|
||||
}
|
||||
|
||||
getVADHandler() : VoiceActivityDetector {
|
||||
return this.vadHandler;
|
||||
}
|
||||
|
||||
set_recording(flag_enabled: boolean) {
|
||||
if(this._recording == flag_enabled)
|
||||
return;
|
||||
|
||||
if(flag_enabled)
|
||||
this.start_recording(this._deviceId, this._deviceGroup);
|
||||
else
|
||||
this.stop_recording();
|
||||
}
|
||||
|
||||
clean_recording_supported() { this._recording_supported = true; }
|
||||
|
||||
is_recording_supported() { return this._recording_supported; }
|
||||
|
||||
is_recording() { return this._recording; }
|
||||
|
||||
device_group_id() : string { return this._deviceGroup; }
|
||||
device_id() : string { return this._deviceId; }
|
||||
|
||||
change_device(device: string, group: string) {
|
||||
if(this._deviceId == device && this._deviceGroup == group) return;
|
||||
this._deviceId = device;
|
||||
this._deviceGroup = group;
|
||||
|
||||
settings.changeGlobal("microphone_device_id", device);
|
||||
settings.changeGlobal("microphone_device_group", group);
|
||||
if(this._recording) {
|
||||
this.stop_recording();
|
||||
this.start_recording(device, group);
|
||||
}
|
||||
}
|
||||
|
||||
start_recording(device: string, groupId: string){
|
||||
this._deviceId = device;
|
||||
this._deviceGroup = groupId;
|
||||
|
||||
console.log(tr("[VoiceRecorder] Start recording! (Device: %o | Group: %o)"), device, groupId);
|
||||
this._recording = true;
|
||||
|
||||
//FIXME Implement that here for the client as well
|
||||
getUserMediaFunction()({
|
||||
audio: {
|
||||
deviceId: device,
|
||||
groupId: groupId,
|
||||
echoCancellation: true,
|
||||
echoCancellationType: 'browser'
|
||||
}
|
||||
}, this.on_microphone.bind(this), error => {
|
||||
this._recording = false;
|
||||
if(this._recording_supported) {
|
||||
this._recording_supported = false;
|
||||
if(this.on_support_state_change)
|
||||
this.on_support_state_change();
|
||||
}
|
||||
|
||||
createErrorModal(tr("Could not resolve microphone!"), tr("Could not resolve microphone!<br>Message: ") + error).open();
|
||||
console.error(tr("Could not get microphone!"));
|
||||
console.error(error);
|
||||
});
|
||||
}
|
||||
|
||||
stop_recording(stop_media_stream: boolean = true){
|
||||
console.log(tr("Stop recording!"));
|
||||
this._recording = false;
|
||||
|
||||
if(this.microphoneStream) this.microphoneStream.disconnect();
|
||||
this.microphoneStream = undefined;
|
||||
|
||||
if(stop_media_stream && this.mediaStream) {
|
||||
if(this.mediaStream.stop)
|
||||
this.mediaStream.stop();
|
||||
else
|
||||
this.mediaStream.getTracks().forEach(value => {
|
||||
value.stop();
|
||||
});
|
||||
this.mediaStream = undefined;
|
||||
}
|
||||
}
|
||||
|
||||
on_initialized(callback: () => any) {
|
||||
if(this.processor)
|
||||
callback();
|
||||
else
|
||||
this.on_ready_callbacks.push(callback);
|
||||
}
|
||||
|
||||
private on_microphone(stream: MediaStream) {
|
||||
const old_microphone_stream = this.microphoneStream;
|
||||
if(old_microphone_stream)
|
||||
this.stop_recording(this.mediaStream != stream); //Disconnect old stream
|
||||
|
||||
this.mediaStream = stream;
|
||||
if(!this.mediaStream)
|
||||
return;
|
||||
|
||||
if(!this.audioContext) {
|
||||
console.log(tr("[VoiceRecorder] Got microphone stream, but havn't a audio context. Waiting until its initialized"));
|
||||
return;
|
||||
}
|
||||
|
||||
this.microphoneStream = this.audioContext.createMediaStreamSource(stream);
|
||||
this.microphoneStream.connect(this.processor);
|
||||
if(this.vadHandler)
|
||||
this.vadHandler.initialiseNewStream(old_microphone_stream, this.microphoneStream);
|
||||
|
||||
if(!this._recording_supported) {
|
||||
this._recording_supported = true;
|
||||
if(this.on_support_state_change)
|
||||
this.on_support_state_change();
|
||||
}
|
||||
}
|
||||
|
||||
private on_voice_start() {
|
||||
this._tag_favicon.attr('href', "img/favicon/speaking.png");
|
||||
if(this.on_start)
|
||||
this.on_start();
|
||||
|
||||
}
|
||||
private on_voice_end() {
|
||||
this._tag_favicon.attr('href', "img/favicon/teacup.png");
|
||||
if(this.on_end)
|
||||
this.on_end();
|
||||
}
|
||||
}
|
||||
class MuteVAD extends VoiceActivityDetector {
|
||||
shouldRecord(buffer: AudioBuffer): boolean {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
class PassThroughVAD extends VoiceActivityDetector {
|
||||
shouldRecord(buffer: AudioBuffer): boolean {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
class VoiceActivityDetectorVAD extends VoiceActivityDetector {
|
||||
analyzer: AnalyserNode;
|
||||
buffer: Uint8Array;
|
||||
|
||||
continuesCount: number = 0;
|
||||
maxContinuesCount: number = 12;
|
||||
|
||||
percentageThreshold: number = 50;
|
||||
|
||||
percentage_listener: (per: number) => void = ($) => {};
|
||||
|
||||
initialise() {
|
||||
this.analyzer = audio.player.context().createAnalyser();
|
||||
this.analyzer.smoothingTimeConstant = 1; //TODO test
|
||||
this.buffer = new Uint8Array(this.analyzer.fftSize);
|
||||
return super.initialise();
|
||||
}
|
||||
|
||||
initialiseNewStream(old: MediaStreamAudioSourceNode, _new: MediaStreamAudioSourceNode): void {
|
||||
if(this.analyzer)
|
||||
this.analyzer.disconnect();
|
||||
if(_new)
|
||||
_new.connect(this.analyzer);
|
||||
}
|
||||
|
||||
shouldRecord(buffer: AudioBuffer): boolean {
|
||||
let usage = this.calculateUsage();
|
||||
if($.isFunction(this.percentage_listener))
|
||||
this.percentage_listener(usage);
|
||||
if(usage >= this.percentageThreshold) {
|
||||
this.continuesCount = 0;
|
||||
} else this.continuesCount++;
|
||||
return this.continuesCount < this.maxContinuesCount;
|
||||
}
|
||||
|
||||
calculateUsage() : number {
|
||||
let total = 0
|
||||
,float
|
||||
,rms;
|
||||
this.analyzer.getByteTimeDomainData(this.buffer);
|
||||
for(let index = 0; index < this.analyzer.fftSize; index++) {
|
||||
float = ( this.buffer[index++] / 0x7f ) - 1;
|
||||
total += (float * float);
|
||||
}
|
||||
rms = Math.sqrt(total / this.analyzer.fftSize);
|
||||
let db = 20 * ( Math.log(rms) / Math.log(10) );
|
||||
// sanity check
|
||||
db = Math.max(-192, Math.min(db, 0));
|
||||
let percentage = 100 + ( db * 1.92 );
|
||||
return percentage;
|
||||
}
|
||||
}
|
||||
|
||||
interface PPTKeySettings extends ppt.KeyDescriptor {
|
||||
version?: number;
|
||||
delay: number;
|
||||
}
|
||||
|
||||
class PushToTalkVAD extends VoiceActivityDetector {
|
||||
private _settings: PPTKeySettings;
|
||||
private _key_hook: ppt.KeyHook;
|
||||
private _timeout: NodeJS.Timer;
|
||||
|
||||
private _pushed: boolean = false;
|
||||
|
||||
constructor(settings: PPTKeySettings) {
|
||||
super();
|
||||
this._settings = settings;
|
||||
this._key_hook = {
|
||||
callback_release: () => {
|
||||
if(this._timeout)
|
||||
clearTimeout(this._timeout);
|
||||
|
||||
if(this._settings.delay > 0)
|
||||
this._timeout = setTimeout(() => this._pushed = false, this._settings.delay);
|
||||
else
|
||||
this._pushed = false;
|
||||
},
|
||||
callback_press: () => {
|
||||
if(this._timeout)
|
||||
clearTimeout(this._timeout);
|
||||
|
||||
this._pushed = true;
|
||||
},
|
||||
|
||||
cancel: false
|
||||
} as ppt.KeyHook;
|
||||
|
||||
this.initialize_hook();
|
||||
}
|
||||
|
||||
private initialize_hook() {
|
||||
this._key_hook.key_code = this._settings.key_code;
|
||||
this._key_hook.key_alt = this._settings.key_alt;
|
||||
this._key_hook.key_ctrl = this._settings.key_ctrl;
|
||||
this._key_hook.key_shift = this._settings.key_shift;
|
||||
this._key_hook.key_windows = this._settings.key_windows;
|
||||
}
|
||||
|
||||
initialise() {
|
||||
ppt.register_key_hook(this._key_hook);
|
||||
return super.initialise();
|
||||
}
|
||||
|
||||
finalize() {
|
||||
ppt.unregister_key_hook(this._key_hook);
|
||||
return super.finalize();
|
||||
}
|
||||
|
||||
set pushed(flag: boolean) {
|
||||
this._pushed = flag;
|
||||
}
|
||||
|
||||
set settings(settings: PPTKeySettings) {
|
||||
ppt.unregister_key_hook(this._key_hook);
|
||||
|
||||
this._settings = settings;
|
||||
this.initialize_hook();
|
||||
this._pushed = false;
|
||||
|
||||
ppt.register_key_hook(this._key_hook);
|
||||
}
|
||||
|
||||
shouldRecord(buffer: AudioBuffer): boolean {
|
||||
return this._pushed;
|
||||
}
|
||||
}
|