class OctaveBandProcessor extends AudioWorkletProcessor {
  // NOTE: Node constructors such as BiquadFilterNode, IIRFilterNode and
  // createAnalyser() belong to the main-thread AudioContext and are not
  // available inside an AudioWorkletGlobalScope, where `audioContext` is
  // undefined. They are kept here to preserve the original structure; a
  // working worklet would implement the filtering per sample (see the
  // bandpass sketch after this class).
  constructor() {
    super()
    // Center frequencies of the 9 octave bands, in Hz
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000]
    this.filters = []
    this.lastUpdateTimestamp = 0
    this.updateInterval = 0.125 // Update percentiles every 0.125 seconds
    this.filteredMicSignal = []

    // Create an A-weighting filter for the microphone channel
    this.createAWeightingFilter()

    // Create one bandpass filter per center frequency.
    // Q = 1.41 gives roughly one-octave bandwidth.
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: "bandpass",
        frequency: frequency,
        Q: 1.41,
      })
      this.filters.push(filter)
    })

    // Set up analyzers for calculating percentiles
    this.setupAnalyzers()
  }

  setupAnalyzers() {
    this.analyzers = []
    this.centerFrequencies.forEach(() => {
      const bandAnalyzers = []
      for (let i = 0; i < 5; i++) {
        // Unique identifiers from 0 to 4; 0 is the microphone channel
        const analyzer = audioContext.createAnalyser()
        analyzer.fftSize = 2048
        // Only the microphone channel (identifier 0) is routed through
        // the A-weighting filter
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer)
        }
        bandAnalyzers.push(analyzer)
      }
      this.analyzers.push(bandAnalyzers)
    })
  }

  process(inputs, outputs) {
    const numOctaves = this.filters.length
    for (let i = 0; i < numOctaves; i++) {
      const outputChannel = outputs[i][0]
      const mic = inputs[0]
      const maskingSig = inputs[1]
      const harmonic = inputs[2]

      // Apply the band filter to each input. BiquadFilterNode has no
      // synchronous process() method; these calls stand in for the
      // per-sample filtering a real worklet must do itself.
      this.filteredMicSignal[i] = this.filters[i].process(mic)
      const filteredMaskSignal = this.filters[i].process(maskingSig)
      const filteredHarmoSignal = this.filters[i].process(harmonic)

      // Apply A-weighting only to the microphone signal (channel 0)
      const aWeightedSignal = this.aWeightingFilter.process(
        this.filteredMicSignal[i],
      )
      this.calculateRMSLevel(aWeightedSignal, i)
      outputChannel.set(aWeightedSignal)

      // For the other channels, pass the band-filtered signal through
      // without A-weighting (note: successive set() calls overwrite the
      // same buffer; separate outputs are needed to keep all three)
      outputChannel.set(filteredMaskSignal)
      outputChannel.set(filteredHarmoSignal)

      // Check whether it is time to update the percentiles;
      // currentTime is a global of the AudioWorkletGlobalScope
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i)
        this.lastUpdateTimestamp = currentTime
      }
    }
    return true
  }

  calculateRMSLevel(signal, channelIndex) {
    // signal is a Float32Array holding one render quantum
    const sum = signal.reduce((acc, val) => acc + val * val, 0)
    const rmsLevel = Math.sqrt(sum / signal.length)
    const dBLevel = 20 * Math.log10(rmsLevel) // Convert to dB
    return dBLevel
  }

  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex]
      const levelData = new Float32Array(analyzer.frequencyBinCount)
      analyzer.getFloatFrequencyData(levelData)

      // Calculate percentiles for each octave band and each channel
      const percentile10 = this.calculatePercentile(levelData, 10)
      const percentile90 = this.calculatePercentile(levelData, 90)
      const percentileDiff = percentile10 - percentile90
      // TODO: store percentileDiff per channel and per octave band for
      // future comparisons
    }
  }

  calculatePercentile(data, percentile) {
    const sortedData = data.slice().sort((a, b) => a - b)
    const index = Math.floor((percentile / 100) * sortedData.length)
    return sortedData[index]
  }

  createAWeightingFilter() {
    // These values look like per-band dB offsets rather than
    // time-domain filter coefficients; IIRFilterNode expects the
    // feedforward/feedback coefficients of a rational transfer
    // function, so a real A-weighting filter needs coefficients
    // designed for the current sample rate.
    const aWeightingCoefficients = [
      0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554,
    ]
    // Create a custom IIR (Infinite Impulse Response) filter node with
    // the A-weighting coefficients
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    })
  }

  // combineAndCalculate() {
  //   let LAF10_90_total = 0 // Initialize the total LAF10%-90%
  //   for (let i = 0; i < this.centerFrequencies.length; i++) {
  //     const micAnalyzer = this.analyzers[i][0] // Analyzer for microphone audio (identifier 0)
  //     const audioFile1Analyzer = this.analyzers[i][3] // Analyzer for audioFile1 (identifier 3)
  //     const audioFile2Analyzer = this.analyzers[i][4] // Analyzer for audioFile2 (identifier 4)
  //
  //     // Calculate percentiles for the microphone audio
  //     const micPercentile10 = this.calculatePercentile(micAnalyzer, 10)
  //     const micPercentile90 = this.calculatePercentile(micAnalyzer, 90)
  //
  //     // Calculate percentiles for audioFile1
  //     const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10)
  //     const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90)
  //
  //     // Calculate percentiles for audioFile2
  //     const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10)
  //     const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90)
  //
  //     // Calculate LAF10%-90% for each source separately
  //     const micLAF10_90 = micPercentile10 - micPercentile90
  //     const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90
  //     const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90
  //
  //     // Combined LAF10%-90% across microphone, audioFile1 and audioFile2
  //     const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90
  //
  //     // Add the combined LAF10%-90% to the total
  //     LAF10_90_total += combinedLAF10_90
  //   }
  //   // return LAF10_90_total;
  // }
}

registerProcessor("octave", OctaveBandProcessor)
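
// The class above calls a synchronous filter.process(), which no Web
// Audio node provides. Below is a minimal sketch of the per-sample
// bandpass filtering that could run inside the worklet instead, using
// the standard RBJ "Audio EQ Cookbook" bandpass design. The class name
// OctaveBandpass is an illustrative assumption, not part of the
// original code.
class OctaveBandpass {
  constructor(sampleRate, centerFrequency, q = 1.41) {
    const w0 = (2 * Math.PI * centerFrequency) / sampleRate
    const alpha = Math.sin(w0) / (2 * q)
    const a0 = 1 + alpha
    // Normalized coefficients of a constant-0-dB-peak-gain bandpass
    this.b0 = alpha / a0
    this.b2 = -alpha / a0 // b1 is 0 for this design
    this.a1 = (-2 * Math.cos(w0)) / a0
    this.a2 = (1 - alpha) / a0
    // Direct Form I delay line
    this.x1 = 0
    this.x2 = 0
    this.y1 = 0
    this.y2 = 0
  }

  // Filters one render quantum: reads `input` and writes `output`,
  // both Float32Arrays of equal length
  process(input, output) {
    for (let n = 0; n < input.length; n++) {
      const x0 = input[n]
      const y0 =
        this.b0 * x0 +
        this.b2 * this.x2 -
        this.a1 * this.y1 -
        this.a2 * this.y2
      this.x2 = this.x1
      this.x1 = x0
      this.y2 = this.y1
      this.y1 = y0
      output[n] = y0
    }
  }
}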
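
// Minimal main-thread sketch of how this processor might be loaded and
// wired up, kept as a comment because this file runs in the worklet
// scope. The module path "octave-processor.js" and the three source
// nodes are assumptions; the original code does not show this side.
//
// const audioCtx = new AudioContext()
// await audioCtx.audioWorklet.addModule("octave-processor.js")
// const octaveNode = new AudioWorkletNode(audioCtx, "octave", {
//   numberOfInputs: 3, // 0: microphone, 1: masking signal, 2: harmonic
// })
// micSource.connect(octaveNode, 0, 0)
// maskingSource.connect(octaveNode, 0, 1)
// harmonicSource.connect(octaveNode, 0, 2)
// octaveNode.connect(audioCtx.destination)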