Initial commit

This commit is contained in:
Mindboost
2025-07-01 10:53:26 +00:00
commit 38050e5c69
416 changed files with 48708 additions and 0 deletions

lib/AudioFunctions.ts Normal file

@@ -0,0 +1,385 @@
import { createDevice, type Device } from '@rnbo/js'
import { Howl, Howler } from 'howler'
import { ensureAudio } from '~/stores/audio'
import { ANC } from '~/stores/interfaces/ANC'
import { HeadsetType } from '~/stores/interfaces/HeadsetType'
import { useUserStore } from '~/stores/user'
function waitForCanPlay (audio: HTMLMediaElement): Promise<void> {
  return new Promise((resolve) => {
    if (audio.readyState >= 3) {
      resolve()
    } else {
      const onCanPlay = () => {
        audio.removeEventListener('canplay', onCanPlay)
        resolve()
      }
      audio.addEventListener('canplay', onCanPlay)
    }
  })
}
async function onFallback (reason: string, ctx?: AudioContext, src?: string): Promise<AudioNode | null> {
  useNuxtApp().$logger.warn(`[Audio-Fallback] ⚠️ ${reason}`)
  if (reason.includes('captureStream not available')) {
    // → Safari or an older browser
    useNuxtApp().$logger.info('Using fallback via decodeAudioData()...')
    // Trigger our own fallback logic, e.g.:
    if (ctx && src) return await createAudioSource(ctx, src)
  } else if (reason.includes('no AudioTrack')) {
    useNuxtApp().$logger.warn('Audio stream has no track, possibly not loaded yet?')
    // Optional retry logic here?
  } else if (reason.includes('has an error')) {
    alert('The audio could not be loaded. Please check the source or your network connection.')
  } else if (reason.includes('MediaStreamSource')) {
    useNuxtApp().$logger.error('Web Audio could not connect to the stream.')
    if (ctx && src) return await createAudioSource(ctx, src)
  } else {
    useNuxtApp().$logger.error(`[Audio-Fallback] Unknown error: ${reason}`)
  }
  return null
}
async function setupAudioSource (
  audioElement: HTMLMediaElement,
  audioContext: AudioContext
): Promise<MediaElementAudioSourceNode | AudioBufferSourceNode | MediaStreamAudioSourceNode | null> {
  await ensureAudio()
  useNuxtApp().$logger.log('HTML5 element before conversion = ', { audioElement })
  try {
    // 1. Is a source present?
    if (!audioElement.src && !audioElement.srcObject) {
      onFallback('Audio element has no valid source')
      return null
    }
    // 2. Check for errors
    if (audioElement.error) {
      onFallback('Audio element has an error (e.g. 404, CORS, etc.)', audioContext, audioElement.src)
      return null
    }
    // 3. Try to load and play (muted)
    try {
      await waitForCanPlay(audioElement)
      audioElement.volume = 0.0
      await audioElement.play()
    } catch (err) {
      useNuxtApp().$logger.info('Audio could not be played (autoplay blocked?):', err)
      await ensureAudio()
      audioElement.volume = 0.0
      await audioElement.play()
    }
    // 4. Ready?
    if (audioElement.readyState < 2) {
      setTimeout(() => {
        setupAudioSource(audioElement, audioContext)
      }, 500)
      onFallback('Audio element is not ready (readyState < 2)')
      return null
    }
    // 5. Use the MediaStream
    const stream = getMediaStreamFromElement(audioElement)
    if (!stream) {
      onFallback('captureStream not available (incl. mozCaptureStream)')
      if (audioElement.src) useNuxtApp().$logger.log('Audio src is a string')
      if (audioElement.srcObject) useNuxtApp().$logger.log('Audio src is a srcObject')
      return createAudioSource(audioContext, audioElement.src)
    }
    if (stream.getAudioTracks().length === 0) {
      onFallback('captureStream delivered no AudioTrack')
      return createAudioSource(audioContext, audioElement.src)
    }
    // 6. Create the node
    const node = audioContext.createMediaStreamSource(stream)
    return node
  } catch (error) {
    onFallback(`Error while setting up the MediaStreamSource: ${error}`)
    return null
  }
}
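/*
 * Usage sketch (untested; the '#player' element id and the AudioContext
 * setup are assumptions, not part of this module):
 *
 * const ctx = new AudioContext()
 * const el = document.querySelector('#player') as HTMLAudioElement
 * const sourceNode = await setupAudioSource(el, ctx)
 * if (sourceNode) sourceNode.connect(ctx.destination)
 */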
function getMediaStreamFromElement (mediaElement: HTMLMediaElement): MediaStream | null {
  const element = mediaElement as HTMLMediaElement & {
    captureStream?: () => MediaStream
    mozCaptureStream?: () => MediaStream
  }
  if (typeof element.captureStream === 'function') {
    return element.captureStream()
  } else if (typeof element.mozCaptureStream === 'function') {
    useNuxtApp().$logger.warn('[AudioBridge] ⚠️ captureStream() not available, trying mozCaptureStream() (Firefox fallback)')
    return element.mozCaptureStream()
  }
  useNuxtApp().$logger.warn('[AudioBridge] ❌ Neither captureStream() nor mozCaptureStream() is available.')
  return null
}
async function createStreamingAudioSource (ctx: AudioContext, fileName: string): Promise<Howl> {
  if (Howler.ctx != null) {
    Howler.ctx.close()
  }
  if (fileName == null) {
    useNuxtApp().$logger.error('Could not find file with name ' + fileName)
  }
  // Howler.ctx = ctx
  // Load the audio file; note that html5: true would stream via an HTML5
  // Audio element, while html5: false buffers it through Web Audio.
  const sound: Howl = new Howl({
    src: [fileName],
    html5: false,
    loop: true
  })
  if (sound.state() === 'unloaded') {
    useNuxtApp().$logger.warn('Could not find file with name, please check the following resource -> ' + fileName)
  }
  useNuxtApp().$logger.log({ sound })
  return sound
}
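/*
 * Usage sketch (untested; 'ctx' is an existing AudioContext and the
 * path is a placeholder):
 *
 * const sound = await createStreamingAudioSource(ctx, '/audio/noise.mp3')
 * sound.volume(0.5)
 * sound.play()
 */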
function getAttenuationFactor () {
  const user = useUserStore().user as any
  const anc = user.settings.anc_type
  const ht = user.settings.headphone_type
  // Linear factors, approximately 10^(dB/20):
  if (anc === ANC.Yes && ht === HeadsetType.OverEar) {
    return 0.0562 // ≈ -25 dB
  }
  if (anc === ANC.No && ht === HeadsetType.OverEar) {
    return 0.5623 // ≈ -5 dB
  }
  if (anc === ANC.Yes && ht === HeadsetType.InEar) {
    return 0.1778 // ≈ -15 dB
  }
  if (anc === ANC.No && ht === HeadsetType.InEar) {
    return 0.0316 // ≈ -30 dB
  }
  return 0.5623 // default: ≈ -5 dB
}
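/*
 * Usage sketch (untested; 'gainNode' and 'baseVolume' are assumed to exist
 * in the calling code): scale a masking-noise gain by the user's headset
 * attenuation so the perceived level stays comparable across setups:
 *
 * gainNode.gain.value = baseVolume * getAttenuationFactor()
 */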
function throttle<T extends (...args: any[]) => void> (
  func: T,
  limit: number
): T {
  let inThrottle: boolean
  return function (this: any, ...args: any[]) {
    if (!inThrottle) {
      func.apply(this, args)
      inThrottle = true
      setTimeout(() => (inThrottle = false), limit)
    }
  } as T
}
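/*
 * Usage sketch (untested; 'gainNode' is assumed): run a slider handler
 * at most once every 200 ms:
 *
 * const onVolumeInput = throttle((db: number) => {
 *   gainNode.gain.value = calculateNormalizedVolume(db)
 * }, 200)
 */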
async function createAudioSource (
  ctx: AudioContext,
  path: string
): Promise<AudioBufferSourceNode> {
  // Works but is slow: fetches and decodes the whole file up front
  const response: Response = await fetch(path)
  const source: AudioBufferSourceNode = ctx.createBufferSource()
  // Check whether the (partial) content was fetched successfully
  if (response.status === 206 || response.status === 200) {
    // Retrieve the payload as an ArrayBuffer
    const buffer: ArrayBuffer = await response.arrayBuffer()
    // Decode the audio data
    const decodedAudio: AudioBuffer = await ctx.decodeAudioData(buffer)
    // checkClipping(decodedAudio)
    source.buffer = decodedAudio
    source.loop = true
    return source
  } else {
    throw new Error(
      `Failed to fetch audio chunk: HTTP status ${response.status}`
    )
  }
  /*
  const decodedAudio: AudioBuffer = await fetchAndDecodeAudioChunk(ctx, path, startByte, endByte)
  const source: AudioBufferSourceNode = ctx.createBufferSource()
  source.buffer = decodedAudio
  return source
  */
}
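/*
 * Usage sketch (untested; the path is a placeholder):
 *
 * const src = await createAudioSource(ctx, '/audio/pink-noise.wav')
 * src.connect(ctx.destination)
 * src.start()
 */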
function createRNBODevice (ctx: AudioContext, patcher: any): Promise<Device> {
  return createDevice({
    context: ctx,
    patcher
  })
}
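/*
 * Usage sketch (untested; the patcher URL is a placeholder). An RNBO
 * device exposes its AudioNode as 'device.node':
 *
 * const patcher = await (await fetch('/patchers/export.json')).json()
 * const device = await createRNBODevice(ctx, patcher)
 * device.node.connect(ctx.destination)
 */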
function connectAudioNodes (
  from: AudioNode,
  to: AudioNode,
  output?: number,
  input?: number
): void {
  try {
    if (output !== undefined && input !== undefined) {
      // Connect specific output/input channels
      from.connect(to, output, input)
    } else if (output !== undefined) {
      // Connect a specific output channel
      from.connect(to, output)
    } else {
      // Connect without specific channels
      from.connect(to)
    }
  } catch (error) {
    useNuxtApp().$logger.warn('Error while connecting nodes: ' + error)
  }
}
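/*
 * Usage sketch (untested; 'splitter' and 'merger' are assumed nodes):
 * route output channel 1 of the splitter into input 0 of the merger:
 *
 * connectAudioNodes(splitter, merger, 1, 0)
 */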
function checkClipping (buffer: AudioBuffer): boolean {
  for (
    let channelNumber = 0;
    channelNumber < buffer.numberOfChannels;
    channelNumber++
  ) {
    const element: Float32Array = buffer.getChannelData(channelNumber)
    // Iterate through the buffer and check whether any |value| reaches 1
    for (let i = 0; i < buffer.length; i++) {
      if (Math.abs(element[i]) >= 1.0) {
        return true
      }
    }
  }
  return false
}
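/*
 * Usage sketch (untested; 'arrayBuffer' is assumed to hold encoded audio):
 *
 * const decoded = await ctx.decodeAudioData(arrayBuffer)
 * if (checkClipping(decoded)) useNuxtApp().$logger.warn('Sample clips!')
 */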
function calculateNormalizedVolume (db: number): number {
  const minDB = -18
  const maxDB = 6
  // Clamp to the supported range, then convert dB to a linear gain
  const clamped = Math.min(Math.max(db, minDB), maxDB)
  return Math.pow(10, clamped / 20)
}
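/*
 * Example values: -6 dB → 10^(-6/20) ≈ 0.501, 0 dB → 1.0, +6 dB → ≈ 1.995.
 */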
function stereoToMono (stereoNode: AudioNode): AudioNode {
  if (stereoNode.channelCount !== 2) {
    useNuxtApp().$logger.warn('Not a stereo node. Mismatch of channels, expected 2 channels')
    return stereoNode
  }
  const context = stereoNode.context
  const splitter = context.createChannelSplitter(2)
  const gainL = context.createGain()
  const gainR = context.createGain()
  gainL.gain.value = 0.5
  gainR.gain.value = 0.5
  // ChannelMerger, but only the first input/output channel is used
  const merger = context.createChannelMerger(1)
  // Wiring
  stereoNode.connect(splitter)
  splitter.connect(gainL, 0) // Left channel
  splitter.connect(gainR, 1) // Right channel
  gainL.connect(merger, 0, 0)
  gainR.connect(merger, 0, 0)
  useNuxtApp().$logger.info('Stereo node converted to mono')
  useNuxtApp().$logger.log({ merger })
  return merger
}
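/*
 * Usage sketch (untested; 'stereoSource' is an assumed two-channel node):
 *
 * const mono = stereoToMono(stereoSource)
 * mono.connect(ctx.destination)
 */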
/**
 * Analyses whether a microphone was active (e.g. speech, noise) within a given time window.
 *
 * @param micNode - A MediaStreamAudioSourceNode (e.g. from getUserMedia)
 * @param durationMs - The duration of the analysis in milliseconds
 * @param threshold - Optional: sensitivity (0.01 is a good default)
 * @returns Promise<boolean> - true if activity was detected, otherwise false
 NOT TESTED
async function analyseMicActivity (
  micNode: MediaStreamAudioSourceNode,
  durationMs: number,
  threshold: number = 0.01
): Promise<boolean> {
  const audioContext = micNode.context
  const analyser = audioContext.createAnalyser()
  analyser.fftSize = 2048
  const bufferLength = analyser.fftSize
  const dataArray = new Float32Array(bufferLength)
  micNode.connect(analyser)
  return new Promise((resolve) => {
    let activeDetected = false
    const startTime = performance.now()
    const analyze = () => {
      analyser.getFloatTimeDomainData(dataArray)
      // Compute the RMS level
      let sum = 0
      for (let i = 0; i < bufferLength; i++) {
        sum += dataArray[i] * dataArray[i]
      }
      const rms = Math.sqrt(sum / bufferLength)
      if (rms > threshold) {
        activeDetected = true
      }
      if (performance.now() - startTime < durationMs) {
        requestAnimationFrame(analyze)
      } else {
        micNode.disconnect(analyser)
        resolve(activeDetected)
      }
    }
    analyze()
  })
}
*/
/**
* EXPORTS
*/
export {
  createAudioSource,
  createRNBODevice,
  createStreamingAudioSource,
  connectAudioNodes,
  checkClipping,
  getAttenuationFactor,
  throttle,
  calculateNormalizedVolume,
  stereoToMono,
  setupAudioSource,
  getMediaStreamFromElement
}

lib/BufferLoader.ts Normal file

@@ -0,0 +1,57 @@
import { useBufferStore } from '~/stores/buffer'
export class BufferLoader {
  context: AudioContext
  urlList: string[]
  onload: (bufferList: AudioBuffer[], urlList: string[]) => void
  bufferList: AudioBuffer[]
  loadCount: number

  constructor (context: AudioContext, urlList: string[], callback: (bufferList: AudioBuffer[], urlList: string[]) => void) {
    this.context = context
    this.urlList = urlList
    this.onload = callback
    this.bufferList = new Array(urlList.length)
    this.loadCount = 0
  }

  loadBuffer (url: string, index: number): void {
    // Load the buffer asynchronously
    const request = new XMLHttpRequest()
    request.open('GET', url, true)
    request.responseType = 'arraybuffer'
    request.onload = () => {
      const fileName = useBufferStore().extractFilename(url)
      // Persist a copy first: decodeAudioData() detaches the ArrayBuffer,
      // so saving it afterwards would store an empty buffer
      useBufferStore().saveBufferInIndexedDb(fileName, request.response.slice(0))
      // Asynchronously decode the audio file data in request.response
      this.context.decodeAudioData(
        request.response,
        (buffer: AudioBuffer) => {
          if (!buffer) {
            alert('error decoding file data: ' + url)
            return
          }
          this.bufferList[index] = buffer
          if (++this.loadCount === this.urlList.length) { this.onload(this.bufferList, this.urlList) }
        },
        (_error) => {
          // useNuxtApp().$logger.error('decodeAudioData error', error)
        }
      )
    }
    request.onerror = () => {
      alert('BufferLoader: XHR error')
    }
    request.send()
  }

  load (): void {
    this.urlList.forEach((url, index) => {
      this.loadBuffer(url, index)
    })
  }
}
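/*
 * Usage sketch (untested; URLs are placeholders): decode two files and
 * receive them in one callback once both are ready:
 *
 * const loader = new BufferLoader(ctx, ['/audio/a.mp3', '/audio/b.mp3'],
 *   (buffers, urls) => { console.log('decoded', urls.length, 'files') })
 * loader.load()
 */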

lib/db.ts Normal file

@@ -0,0 +1,21 @@
import { Dexie, type Table } from 'dexie'
export interface AudioSource {
  id?: number;
  name: string;
  buffer: ArrayBuffer;
}

export default class Database extends Dexie {
  // 'audiosources' is added by Dexie when declaring the stores()
  // We just tell the typing system this is the case
  audiosources!: Table<AudioSource>

  constructor () {
    super('audiosources')
    this.version(1).stores({
      audiosources: '++id, &name' // Primary key and indexed props
    })
  }
}
export const db = new Database()
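/*
 * Usage sketch (untested): store and look up a decoded file by its unique
 * 'name' index:
 *
 * await db.audiosources.put({ name: 'noise.mp3', buffer })
 * const entry = await db.audiosources.get({ name: 'noise.mp3' })
 */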