Compare commits
2 Commits
f33e61f2d3
...
653b9220f8
Author | SHA1 | Date |
---|---|---|
Robert Rapp | 653b9220f8 | |
Robert Rapp | fd3ccb8fa2 |
43
README.md
43
README.md
|
@ -1,3 +1,44 @@
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
pro Messpunkt
|
||||||
|
125ms
|
||||||
|
LAF90
|
||||||
|
LAF10
|
||||||
|
TargetGain
|
||||||
|
CurrentGain
|
||||||
|
MicrophoneSignal clear
|
||||||
|
MicrophoneSignal attenuated (A-weighted)
|
||||||
|
|
||||||
|
{ time: 125, values: [35, 50, 50, 40] }
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Nuxt 3 Minimal Starter
|
# Nuxt 3 Minimal Starter
|
||||||
|
|
||||||
Look at the [Nuxt 3 documentation](https://nuxt.com/docs/getting-started/introduction) to learn more.
|
Look at the [Nuxt 3 documentation](https://nuxt.com/docs/getting-started/introduction) to learn more.
|
||||||
|
@ -56,3 +97,5 @@ npm run preview
|
||||||
```
|
```
|
||||||
|
|
||||||
Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information.
|
Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information.
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,88 @@
|
||||||
|
<template>
  <div>
    <button @click="startMicrophone">Start Microphone</button>
    <button @click="stopMicrophone">Stop Microphone</button>
    <div v-if="errorMessage">{{ errorMessage }}</div>
  </div>
</template>

<script>
// Microphone capture component: owns an AudioContext, the microphone
// MediaStream, and an (optional) AudioWorkletNode for octave-band processing.
export default {
  data() {
    return {
      audioContext: null, // AudioContext created on start
      microphoneStream: null, // MediaStream returned by getUserMedia
      audioProcessorNode: null, // AudioWorkletNode (instantiation currently commented out)
      errorMessage: null, // user-visible error text
    };
  },
  methods: {
    // Starts capture: creates the context, requests microphone access, and
    // wires up audio processing. Errors are surfaced via errorMessage; nothing
    // is thrown to the template.
    async startMicrophone() {
      try {
        this.initializeAudioContext();
        await this.requestMicrophoneAccess();
        await this.setupAudioProcessing();
        // Fixed: this log previously sat in a `finally` block and therefore
        // claimed "Microphone started" even when startup had just failed.
        console.log('Microphone started');
      } catch (error) {
        console.error('Error starting microphone:', error.message);
        this.errorMessage = error.message;
      }
    },
    // Stops capture and releases all audio resources.
    stopMicrophone() {
      this.cleanup();
    },
    initializeAudioContext() {
      this.audioContext = new window.AudioContext();
    },
    // Requests microphone access; rejects with a user-friendly message on denial.
    async requestMicrophoneAccess() {
      try {
        this.microphoneStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      } catch (error) {
        console.error('Error accessing microphone:', error.message);
        this.errorMessage = 'Microphone access denied. Please grant permission in your browser settings.';
        throw new Error('Microphone access denied.');
      }

      if (!this.microphoneStream || !(this.microphoneStream instanceof MediaStream)) {
        throw new Error('Microphone stream is not available.');
      }
    },
    // Creates the microphone source node and loads the worklet module.
    async setupAudioProcessing() {
      try {
        const microphoneSource = this.audioContext.createMediaStreamSource(this.microphoneStream);
        await this.audioContext.audioWorklet.addModule('/scripts/octave2.js');

        //this.audioProcessorNode = new AudioWorkletNode(this.audioContext, 'octave');
        //microphoneSource.connect(this.audioProcessorNode);
        //this.audioProcessorNode.connect(this.audioContext.destination);
      } catch (error) {
        console.error('Error setting up audio processing:', error.message);
        this.errorMessage = 'Error setting up audio processing. Please check your microphone and try again.';
        throw new Error('Audio processing setup failed.');
      }
    },
    // Tears down the worklet node, the microphone stream, and the context.
    cleanup() {
      if (this.audioProcessorNode) {
        this.audioProcessorNode.port.postMessage({ command: 'stop' });
        this.audioProcessorNode.disconnect();
      }
      // Fixed: stop the MediaStream tracks so the browser actually releases
      // the microphone (previously the device stayed captured — the recording
      // indicator remained on after "Stop Microphone").
      if (this.microphoneStream) {
        this.microphoneStream.getTracks().forEach((track) => track.stop());
      }
      if (this.audioContext && this.audioContext.state !== 'closed') {
        this.audioContext.close();
      }
      // Fixed: always reset state, even if the context was never created.
      this.resetVariables();
    },
    resetVariables() {
      this.audioContext = null;
      this.microphoneStream = null;
      this.audioProcessorNode = null;
      this.errorMessage = null;
    },
  },
  // Fixed: `beforeDestroy` is a Vue 2 hook and is never invoked under Vue 3
  // (this is a Nuxt 3 project per the README), so unmounting leaked the
  // microphone. `beforeUnmount` is the Vue 3 equivalent.
  beforeUnmount() {
    this.cleanup();
  },
};
</script>
|
||||||
|
|
|
@ -1,97 +1,100 @@
|
||||||
class OctaveBandProcessor extends AudioWorkletProcessor {
|
class OctaveBandProcessor extends AudioWorkletProcessor {
|
||||||
constructor() {
|
constructor() {
|
||||||
super();
|
super()
|
||||||
// Define center frequencies for 9 octave bands
|
// Define center frequencies for 9 octave bands
|
||||||
this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
|
this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000]
|
||||||
this.filters = [];
|
this.filters = []
|
||||||
this.lastUpdateTimestamp = 0;
|
this.lastUpdateTimestamp = 0
|
||||||
this.updateInterval = 0.125; // Update every 0.125 seconds
|
this.updateInterval = 0.125 // Update every 0.125 seconds
|
||||||
|
|
||||||
// Create an A-weighting filter for specific frequencies
|
// Create an A-weighting filter for specific frequencies
|
||||||
this.createAWeightingFilter();
|
this.createAWeightingFilter()
|
||||||
|
|
||||||
// Create bandpass filters for each center frequency
|
// Create bandpass filters for each center frequency
|
||||||
this.centerFrequencies.forEach(frequency => {
|
this.centerFrequencies.forEach((frequency) => {
|
||||||
const filter = new BiquadFilterNode(audioContext, {
|
const filter = new BiquadFilterNode(
|
||||||
type: 'bandpass',
|
audioContext,
|
||||||
|
{
|
||||||
|
type: "bandpass",
|
||||||
frequency: frequency,
|
frequency: frequency,
|
||||||
Q: 1.41, // Set the desired Q value
|
Q: 1.41, // Set the desired Q value
|
||||||
},
|
},
|
||||||
this.filters.push(filter))
|
this.filters.push(filter),
|
||||||
});
|
)
|
||||||
|
})
|
||||||
|
|
||||||
// Set up analyzers for calculating percentiles
|
// Set up analyzers for calculating percentiles
|
||||||
this.setupAnalyzers();
|
this.setupAnalyzers()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
setupAnalyzers() {
|
setupAnalyzers() {
|
||||||
this.analyzers = [];
|
this.analyzers = []
|
||||||
this.centerFrequencies.forEach(frequency => {
|
this.centerFrequencies.forEach((frequency) => {
|
||||||
this.analyzers.push([]);
|
this.analyzers.push([])
|
||||||
for (let i = 0; i < 5; i++) { // Unique identifiers from 0 to 4
|
for (let i = 0; i < 5; i++) {
|
||||||
const analyzer = audioContext.createAnalyser();
|
// Unique identifiers from 0 to 4
|
||||||
analyzer.fftSize = 2048;
|
const analyzer = audioContext.createAnalyser()
|
||||||
|
analyzer.fftSize = 2048
|
||||||
|
|
||||||
// Check if the identifier is 0 (microphone audio) before connecting to the A-weighting filter
|
// Check if the identifier is 0 (microphone audio) before connecting to the A-weighting filter
|
||||||
if (i === 0) {
|
if (i === 0) {
|
||||||
this.aWeightingFilter.connect(analyzer);
|
this.aWeightingFilter.connect(analyzer)
|
||||||
}
|
}
|
||||||
|
|
||||||
this.analyzers[this.analyzers.length - 1].push(analyzer);
|
this.analyzers[this.analyzers.length - 1].push(analyzer)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
process(inputs, outputs) {
|
process(inputs, outputs) {
|
||||||
const numOutputChannels = outputs.length;
|
const numOutputChannels = outputs.length
|
||||||
for (let i = 0; i < numOutputChannels; i++) {
|
for (let i = 0; i < numOutputChannels; i++) {
|
||||||
const outputChannel = outputs[i][0];
|
const outputChannel = outputs[i][0]
|
||||||
const inputChannel = inputs[i][0];
|
const inputChannel = inputs[i][0]
|
||||||
|
|
||||||
// Apply the filter to the input channel
|
// Apply the filter to the input channel
|
||||||
const filteredSignal = this.filters[i].process(inputChannel);
|
const filteredSignal = this.filters[i].process(inputChannel)
|
||||||
|
|
||||||
// Apply A-weighting only to the microphone signal (channel 0)
|
// Apply A-weighting only to the microphone signal (channel 0)
|
||||||
if (i === 0) {
|
if (i === 0) {
|
||||||
const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
|
const aWeightedSignal = this.aWeightingFilter.process(filteredSignal)
|
||||||
outputChannel.set(aWeightedSignal);
|
outputChannel.set(aWeightedSignal)
|
||||||
} else {
|
} else {
|
||||||
// For other channels, pass the signal without A-weighting
|
// For other channels, pass the signal without A-weighting
|
||||||
outputChannel.set(filteredSignal);
|
outputChannel.set(filteredSignal)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if it's time to update percentiles
|
// Check if it's time to update percentiles
|
||||||
const currentTime = this.currentTime;
|
const currentTime = this.currentTime
|
||||||
if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
|
if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
|
||||||
this.updatePercentiles(i);
|
this.updatePercentiles(i)
|
||||||
this.lastUpdateTimestamp = currentTime;
|
this.lastUpdateTimestamp = currentTime
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
calculateRMSLevel(signal, channelIndex) {
|
calculateRMSLevel(signal, channelIndex) {
|
||||||
const data = new Float32Array(signal.length);
|
const data = new Float32Array(signal.length)
|
||||||
signal.copyFromChannel(data, 0);
|
signal.copyFromChannel(data, 0)
|
||||||
const sum = data.reduce((acc, val) => acc + val * val, 0);
|
const sum = data.reduce((acc, val) => acc + val * val, 0)
|
||||||
const rmsLevel = Math.sqrt(sum / data.length);
|
const rmsLevel = Math.sqrt(sum / data.length)
|
||||||
const dBLevel = 20 * Math.log10(rmsLevel); // Convert to dB
|
const dBLevel = 20 * Math.log10(rmsLevel) // Convert to dB
|
||||||
return dBLevel;
|
return dBLevel
|
||||||
}
|
}
|
||||||
|
|
||||||
updatePercentiles(channelIndex) {
|
updatePercentiles(channelIndex) {
|
||||||
for (let i = 0; i < this.centerFrequencies.length; i++) {
|
for (let i = 0; i < this.centerFrequencies.length; i++) {
|
||||||
const analyzer = this.analyzers[i][channelIndex];
|
const analyzer = this.analyzers[i][channelIndex]
|
||||||
const levelData = new Float32Array(analyzer.frequencyBinCount);
|
const levelData = new Float32Array(analyzer.frequencyBinCount)
|
||||||
analyzer.getFloatFrequencyData(levelData);
|
analyzer.getFloatFrequencyData(levelData)
|
||||||
|
|
||||||
// Calculate percentiles for each octave band and each channel
|
// Calculate percentiles for each octave band and each channel
|
||||||
const percentile10 = this.calculatePercentile(levelData, 10);
|
const percentile10 = this.calculatePercentile(levelData, 10)
|
||||||
const percentile90 = this.calculatePercentile(levelData, 90);
|
const percentile90 = this.calculatePercentile(levelData, 90)
|
||||||
|
|
||||||
const percentileDiff = percentile10 - percentile90;
|
const percentileDiff = percentile10 - percentile90
|
||||||
|
|
||||||
// Store the percentile difference for each channel and each octave band
|
// Store the percentile difference for each channel and each octave band
|
||||||
// You can use suitable data structures to store these values for future comparisons
|
// You can use suitable data structures to store these values for future comparisons
|
||||||
|
@ -99,61 +102,71 @@ class OctaveBandProcessor extends AudioWorkletProcessor {
|
||||||
}
|
}
|
||||||
|
|
||||||
calculatePercentile(data, percentile) {
|
calculatePercentile(data, percentile) {
|
||||||
const sortedData = data.slice().sort((a, b) => a - b);
|
const sortedData = data.slice().sort((a, b) => a - b)
|
||||||
const index = Math.floor((percentile / 100) * sortedData.length);
|
const index = Math.floor((percentile / 100) * sortedData.length)
|
||||||
return sortedData[index];
|
return sortedData[index]
|
||||||
}
|
}
|
||||||
|
|
||||||
createAWeightingFilter() {
|
createAWeightingFilter() {
|
||||||
// Use the provided A-weighting filter coefficients
|
// Use the provided A-weighting filter coefficients
|
||||||
const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
|
const aWeightingCoefficients = [
|
||||||
|
0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554,
|
||||||
|
] //David
|
||||||
|
|
||||||
// Create a custom IIR filter node with the A-weighting coefficients
|
// Create a custom IIR filter node with the A-weighting coefficients
|
||||||
this.aWeightingFilter = new IIRFilterNode(audioContext, {
|
this.aWeightingFilter = new IIRFilterNode(audioContext, {
|
||||||
feedforward: aWeightingCoefficients,
|
feedforward: aWeightingCoefficients,
|
||||||
feedback: [1],
|
feedback: [1],
|
||||||
});
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
combineAndCalculate() {
|
combineAndCalculate() {
|
||||||
let LAF10_90_total = 0; // Initialize the total LAF10%-90%
|
let LAF10_90_total = 0 // Initialize the total LAF10%-90%
|
||||||
|
|
||||||
for (let i = 0; i < this.centerFrequencies.length; i++) {
|
for (let i = 0; i < this.centerFrequencies.length; i++) {
|
||||||
const micAnalyzer = this.analyzers[i][0]; // Analyzer for microphone audio (identifier 0)
|
const micAnalyzer = this.analyzers[i][0] // Analyzer for microphone audio (identifier 0)
|
||||||
const audioFile1Analyzer = this.analyzers[i][3]; // Analyzer for audioFile1 (identifier 3)
|
const audioFile1Analyzer = this.analyzers[i][3] // Analyzer for audioFile1 (identifier 3)
|
||||||
const audioFile2Analyzer = this.analyzers[i][4]; // Analyzer for audioFile2 (identifier 4)
|
const audioFile2Analyzer = this.analyzers[i][4] // Analyzer for audioFile2 (identifier 4)
|
||||||
|
|
||||||
// Calculate percentiles for the microphone audio
|
// Calculate percentiles for the microphone audio
|
||||||
const micPercentile10 = this.calculatePercentile(micAnalyzer, 10);
|
const micPercentile10 = this.calculatePercentile(micAnalyzer, 10)
|
||||||
const micPercentile90 = this.calculatePercentile(micAnalyzer, 90);
|
const micPercentile90 = this.calculatePercentile(micAnalyzer, 90)
|
||||||
|
|
||||||
// Calculate percentiles for audioFile1
|
// Calculate percentiles for audioFile1
|
||||||
const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10);
|
const audioFile1Percentile10 = this.calculatePercentile(
|
||||||
const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90);
|
audioFile1Analyzer,
|
||||||
|
10,
|
||||||
|
)
|
||||||
|
const audioFile1Percentile90 = this.calculatePercentile(
|
||||||
|
audioFile1Analyzer,
|
||||||
|
90,
|
||||||
|
)
|
||||||
|
|
||||||
// Calculate percentiles for audioFile2
|
// Calculate percentiles for audioFile2
|
||||||
const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10);
|
const audioFile2Percentile10 = this.calculatePercentile(
|
||||||
const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90);
|
audioFile2Analyzer,
|
||||||
|
10,
|
||||||
|
)
|
||||||
|
const audioFile2Percentile90 = this.calculatePercentile(
|
||||||
|
audioFile2Analyzer,
|
||||||
|
90,
|
||||||
|
)
|
||||||
|
|
||||||
// Calculate LAF10%-90% for microphone audio, audioFile1, and audioFile2 separately
|
// Calculate LAF10%-90% for microphone audio, audioFile1, and audioFile2 separately
|
||||||
const micLAF10_90 = micPercentile10 - micPercentile90;
|
const micLAF10_90 = micPercentile10 - micPercentile90
|
||||||
const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90;
|
const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90
|
||||||
const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90;
|
const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90
|
||||||
|
|
||||||
// Calculate combined LAF10%-90% for microphone audio, audioFile1, and audioFile2
|
// Calculate combined LAF10%-90% for microphone audio, audioFile1, and audioFile2
|
||||||
const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
|
const combinedLAF10_90 =
|
||||||
|
micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90
|
||||||
|
|
||||||
// Add the combined LAF10%-90% to the total
|
// Add the combined LAF10%-90% to the total
|
||||||
LAF10_90_total += combinedLAF10_90;
|
LAF10_90_total += combinedLAF10_90
|
||||||
}
|
}
|
||||||
|
|
||||||
// return LAF10_90_total;
|
// return LAF10_90_total;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
registerProcessor("octave", OctaveBandProcessor)
|
||||||
|
|
||||||
registerProcessor('octave', OctaveBandProcessor);
|
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,153 @@
|
||||||
|
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies (Hz) for 9 octave bands.
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile updates

    // A-weighting filter, applied to the microphone channel only.
    this.createAWeightingFilter();

    // NOTE(review): BiquadFilterNode and createAnalyser require a
    // BaseAudioContext, and no `audioContext` binding exists inside
    // AudioWorkletGlobalScope — this graph construction can only work if the
    // node graph is built on the main thread instead. Confirm the intended
    // architecture.
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41, // desired filter quality factor
      });
      this.filters.push(filter);
    });

    // Set up analyzers for calculating percentiles.
    this.setupAnalyzers();
  }

  createAWeightingFilter() {
    // Use the provided A-weighting filter coefficients.
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David

    // Create a custom IIR filter node with the A-weighting coefficients.
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  setupAnalyzers() {
    // analyzers[band][id]: five analyzers per octave band, ids 0..4
    // (0 = microphone, 3 = audioFile1, 4 = audioFile2 — see combineAndCalculate).
    this.analyzers = [];
    this.centerFrequencies.forEach(() => {
      const bandAnalyzers = [];
      for (let i = 0; i < 5; i++) {
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;

        // Only the microphone path (id 0) is routed through the A-weighting filter.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }

        bandAnalyzers.push(analyzer);
      }
      this.analyzers.push(bandAnalyzers);
    });
  }

  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // NOTE(review): BiquadFilterNode has no .process() method — filters can
      // only be applied through the audio graph, not called sample-wise.
      // Confirm whether a hand-rolled biquad is intended here.
      const filteredSignal = this.filters[i].process(inputChannel);

      // Apply A-weighting only to the microphone signal (channel 0).
      if (i === 0) {
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // For other channels, pass the signal without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // Periodically refresh percentile statistics.
      // Fixed: `currentTime` is a global of AudioWorkletGlobalScope; the
      // original read `this.currentTime`, which is undefined, so the
      // comparison was always false and percentiles never updated.
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }

    // Keep the processor alive.
    return true;
  }

  calculateRMSLevel(signal, channelIndex) {
    // assumes `signal` is AudioBuffer-like (has .length and copyFromChannel) — TODO confirm
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sum = data.reduce((acc, val) => acc + val * val, 0);
    const rmsLevel = Math.sqrt(sum / data.length);
    const dBLevel = 20 * Math.log10(rmsLevel); // convert to dB
    return dBLevel;
  }

  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      // Calculate percentiles for each octave band and each channel.
      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);

      const percentileDiff = percentile10 - percentile90;

      // Store the percentile difference for each channel and each octave band.
      // You can use suitable data structures to store these values for future comparisons.
    }
  }

  calculatePercentile(data, percentile) {
    // Nearest-rank percentile over a copy of the data (sort mutates).
    const sortedData = data.slice().sort((a, b) => a - b);
    // Fixed: clamp the index so percentile = 100 (or a rounding edge) cannot
    // read past the end of the array and return undefined.
    const index = Math.min(
      sortedData.length - 1,
      Math.floor((percentile / 100) * sortedData.length),
    );
    return sortedData[index];
  }

  combineAndCalculate() {
    let LAF10_90_total = 0; // total LAF10%-90% across all octave bands

    for (let i = 0; i < this.centerFrequencies.length; i++) {
      // NOTE(review): these are AnalyserNode objects, but calculatePercentile
      // expects a Float32Array of levels — getFloatFrequencyData should be
      // called first, as updatePercentiles does. Confirm intent.
      const micAnalyzer = this.analyzers[i][0]; // Analyzer for microphone audio (identifier 0)
      const audioFile1Analyzer = this.analyzers[i][3]; // Analyzer for audioFile1 (identifier 3)
      const audioFile2Analyzer = this.analyzers[i][4]; // Analyzer for audioFile2 (identifier 4)

      // Calculate percentiles for the microphone audio.
      const micPercentile10 = this.calculatePercentile(micAnalyzer, 10);
      const micPercentile90 = this.calculatePercentile(micAnalyzer, 90);

      // Calculate percentiles for audioFile1.
      const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10);
      const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90);

      // Calculate percentiles for audioFile2.
      const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10);
      const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90);

      // LAF10%-90% for each source separately.
      const micLAF10_90 = micPercentile10 - micPercentile90;
      const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90;
      const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90;

      // Combined LAF10%-90% across the three sources for this band.
      const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;

      LAF10_90_total += combinedLAF10_90;
    }

    return LAF10_90_total;
  }
}

registerProcessor('octave', OctaveBandProcessor);
|
Loading…
Reference in New Issue