Compare commits

...

2 Commits

Author SHA1 Message Date
Robert Rapp 653b9220f8 ChannelStreamer and Octave.js2 2023-12-16 22:46:22 +01:00
Robert Rapp fd3ccb8fa2 Test with octave.js 2023-12-16 22:45:56 +01:00
4 changed files with 425 additions and 128 deletions


@@ -1,3 +1,44 @@
Per measurement point:
125 ms
LAF90
LAF10
TargetGain
CurrentGain
Microphone signal, clean
Microphone signal, attenuated (A-weighted)
[time: 125, values: [35, 50, 50, 40]]
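
A minimal sketch of how one such measurement point could be represented; the field names beyond `time` and `values` are assumptions, not part of the current code:

```js
// Hypothetical shape of one measurement point, emitted every 125 ms.
const measurementPoint = {
  time: 125,                // measurement interval in ms
  LAF10: 50,                // level exceeded 10% of the time (dB)
  LAF90: 35,                // level exceeded 90% of the time (dB)
  targetGain: 1.0,          // assumed: gain the streamer should reach
  currentGain: 0.8,         // assumed: gain currently applied
  values: [35, 50, 50, 40], // per-band levels (dB)
};
```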
# Nuxt 3 Minimal Starter

Look at the [Nuxt 3 documentation](https://nuxt.com/docs/getting-started/introduction) to learn more.
@@ -56,3 +97,5 @@ npm run preview
```

Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information.

pages/ChannelStreamer.vue Normal file

@@ -0,0 +1,88 @@
<template>
  <div>
    <button @click="startMicrophone">Start Microphone</button>
    <button @click="stopMicrophone">Stop Microphone</button>
    <div v-if="errorMessage">{{ errorMessage }}</div>
  </div>
</template>

<script>
export default {
  data() {
    return {
      audioContext: null,
      microphoneStream: null,
      audioProcessorNode: null,
      errorMessage: null,
    };
  },
  methods: {
    async startMicrophone() {
      try {
        this.initializeAudioContext();
        await this.requestMicrophoneAccess();
        await this.setupAudioProcessing();
        // Log success only after all setup steps have completed.
        console.log('Microphone started');
      } catch (error) {
        console.error('Error starting microphone:', error.message);
        this.errorMessage = error.message;
      }
    },
    stopMicrophone() {
      this.cleanup();
    },
    initializeAudioContext() {
      this.audioContext = new window.AudioContext();
    },
    async requestMicrophoneAccess() {
      try {
        this.microphoneStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      } catch (error) {
        console.error('Error accessing microphone:', error.message);
        this.errorMessage = 'Microphone access denied. Please grant permission in your browser settings.';
        throw new Error('Microphone access denied.');
      }
      if (!this.microphoneStream || !(this.microphoneStream instanceof MediaStream)) {
        throw new Error('Microphone stream is not available.');
      }
    },
    async setupAudioProcessing() {
      try {
        const microphoneSource = this.audioContext.createMediaStreamSource(this.microphoneStream);
        await this.audioContext.audioWorklet.addModule('/scripts/octave2.js');
        //this.audioProcessorNode = new AudioWorkletNode(this.audioContext, 'octave');
        //microphoneSource.connect(this.audioProcessorNode);
        //this.audioProcessorNode.connect(this.audioContext.destination);
      } catch (error) {
        console.error('Error setting up audio processing:', error.message);
        this.errorMessage = 'Error setting up audio processing. Please check your microphone and try again.';
        throw new Error('Audio processing setup failed.');
      }
    },
    cleanup() {
      if (this.microphoneStream) {
        // Stop all tracks so the browser releases the microphone.
        this.microphoneStream.getTracks().forEach((track) => track.stop());
      }
      if (this.audioContext) {
        if (this.audioProcessorNode) {
          this.audioProcessorNode.disconnect();
          this.audioProcessorNode.port.postMessage({ command: 'stop' });
        }
        this.audioContext.close();
      }
      this.resetVariables();
    },
    resetVariables() {
      this.audioContext = null;
      this.microphoneStream = null;
      this.audioProcessorNode = null;
      this.errorMessage = null;
    },
  },
  // Vue 3 renamed beforeDestroy to beforeUnmount; Nuxt 3 uses Vue 3.
  beforeUnmount() {
    this.cleanup();
  },
};
</script>
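
Once the processor can be instantiated, the commented-out wiring in `setupAudioProcessing` would look roughly like the sketch below; listening on the node's port is one plausible way to receive level data back from the worklet (the message shape is an assumption, nothing in the current processor posts messages yet):

```js
// Hypothetical continuation of setupAudioProcessing() after addModule() resolves.
this.audioProcessorNode = new AudioWorkletNode(this.audioContext, 'octave');
microphoneSource.connect(this.audioProcessorNode);
this.audioProcessorNode.connect(this.audioContext.destination);
// The processor could report levels via this.port.postMessage(...) in process();
// the payload logged here is illustrative only.
this.audioProcessorNode.port.onmessage = (event) => {
  console.log('Band levels from worklet:', event.data);
};
```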

public/scripts/octave.js

@@ -1,159 +1,172 @@
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super()
    // Define center frequencies for 9 octave bands
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000]
    this.filters = []
    this.lastUpdateTimestamp = 0
    this.updateInterval = 0.125 // Update every 0.125 seconds

    // Create an A-weighting filter for specific frequencies
    this.createAWeightingFilter()

    // Create bandpass filters for each center frequency
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: "bandpass",
        frequency: frequency,
        Q: 1.41, // Set the desired Q value
      })
      this.filters.push(filter)
    })

    // Set up analyzers for calculating percentiles
    this.setupAnalyzers()
  }

  setupAnalyzers() {
    this.analyzers = []
    this.centerFrequencies.forEach((frequency) => {
      this.analyzers.push([])
      for (let i = 0; i < 5; i++) {
        // Unique identifiers from 0 to 4
        const analyzer = audioContext.createAnalyser()
        analyzer.fftSize = 2048
        // Check if the identifier is 0 (microphone audio) before connecting to the A-weighting filter
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer)
        }
        this.analyzers[this.analyzers.length - 1].push(analyzer)
      }
    })
  }

  process(inputs, outputs) {
    const numOutputChannels = outputs.length
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0]
      const inputChannel = inputs[i][0]
      // Apply the filter to the input channel
      const filteredSignal = this.filters[i].process(inputChannel)

      // Apply A-weighting only to the microphone signal (channel 0)
      if (i === 0) {
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal)
        outputChannel.set(aWeightedSignal)
      } else {
        // For other channels, pass the signal without A-weighting
        outputChannel.set(filteredSignal)
      }

      // Check if it's time to update percentiles
      // (currentTime is a global provided by AudioWorkletGlobalScope)
      const now = currentTime
      if (now - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i)
        this.lastUpdateTimestamp = now
      }
    }
    return true
  }

  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length)
    signal.copyFromChannel(data, 0)
    const sum = data.reduce((acc, val) => acc + val * val, 0)
    const rmsLevel = Math.sqrt(sum / data.length)
    const dBLevel = 20 * Math.log10(rmsLevel) // Convert to dB
    return dBLevel
  }

  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex]
      const levelData = new Float32Array(analyzer.frequencyBinCount)
      analyzer.getFloatFrequencyData(levelData)

      // Calculate percentiles for each octave band and each channel
      const percentile10 = this.calculatePercentile(levelData, 10)
      const percentile90 = this.calculatePercentile(levelData, 90)
      const percentileDiff = percentile10 - percentile90

      // Store the percentile difference for each channel and each octave band
      // You can use suitable data structures to store these values for future comparisons
    }
  }

  calculatePercentile(data, percentile) {
    const sortedData = data.slice().sort((a, b) => a - b)
    const index = Math.floor((percentile / 100) * sortedData.length)
    return sortedData[index]
  }

  createAWeightingFilter() {
    // Use the provided A-weighting filter coefficients
    const aWeightingCoefficients = [
      0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554,
    ] //David

    // Create a custom IIR filter node with the A-weighting coefficients
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    })
  }

  combineAndCalculate() {
    let LAF10_90_total = 0 // Initialize the total LAF10%-90%

    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const micAnalyzer = this.analyzers[i][0] // Analyzer for microphone audio (identifier 0)
      const audioFile1Analyzer = this.analyzers[i][3] // Analyzer for audioFile1 (identifier 3)
      const audioFile2Analyzer = this.analyzers[i][4] // Analyzer for audioFile2 (identifier 4)

      // Calculate percentiles for the microphone audio
      const micPercentile10 = this.calculatePercentile(micAnalyzer, 10)
      const micPercentile90 = this.calculatePercentile(micAnalyzer, 90)

      // Calculate percentiles for audioFile1
      const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10)
      const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90)

      // Calculate percentiles for audioFile2
      const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10)
      const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90)

      // Calculate LAF10%-90% for microphone audio, audioFile1, and audioFile2 separately
      const micLAF10_90 = micPercentile10 - micPercentile90
      const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90
      const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90

      // Calculate combined LAF10%-90% for microphone audio, audioFile1, and audioFile2
      const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90

      // Add the combined LAF10%-90% to the total
      LAF10_90_total += combinedLAF10_90
    }
    // return LAF10_90_total;
  }
}
registerProcessor("octave", OctaveBandProcessor)

public/scripts/octave2.js Normal file

@@ -0,0 +1,153 @@
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Note: BiquadFilterNode, IIRFilterNode and an "audioContext" global are
    // main-thread Web Audio APIs; they are not defined inside
    // AudioWorkletGlobalScope, so this processor will throw when instantiated
    // as written. This matches the node creation being commented out in
    // ChannelStreamer.vue.
    // Define center frequencies for 9 octave bands
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // Update every 0.125 seconds
    // Create an A-weighting filter for specific frequencies
    this.createAWeightingFilter();
    // Create bandpass filters for each center frequency
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41, // Set the desired Q value
      });
      this.filters.push(filter);
    });
    // Set up analyzers for calculating percentiles
    this.setupAnalyzers();
  }
  createAWeightingFilter() {
    // Use the provided A-weighting filter coefficients
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    // Create a custom IIR filter node with the A-weighting coefficients
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach((frequency) => {
      this.analyzers.push([]);
      for (let i = 0; i < 5; i++) {
        // Unique identifiers from 0 to 4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;
        // Check if the identifier is 0 (microphone audio) before connecting to the A-weighting filter
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }
        this.analyzers[this.analyzers.length - 1].push(analyzer);
      }
    });
  }
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];
      // Apply the filter to the input channel
      // (note: BiquadFilterNode has no process() method; inside a worklet the
      // filtering would have to be computed manually, e.g. via biquad
      // difference equations)
      const filteredSignal = this.filters[i].process(inputChannel);
      // Apply A-weighting only to the microphone signal (channel 0)
      if (i === 0) {
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // For other channels, pass the signal without A-weighting
        outputChannel.set(filteredSignal);
      }
      // Check if it's time to update percentiles
      // (currentTime is a global provided by AudioWorkletGlobalScope)
      const now = currentTime;
      if (now - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = now;
      }
    }
    return true;
  }
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sum = data.reduce((acc, val) => acc + val * val, 0);
    const rmsLevel = Math.sqrt(sum / data.length);
    const dBLevel = 20 * Math.log10(rmsLevel); // Convert to dB
    return dBLevel;
  }
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);
      // Calculate percentiles for each octave band and each channel
      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);
      const percentileDiff = percentile10 - percentile90;
      // Store the percentile difference for each channel and each octave band
      // You can use suitable data structures to store these values for future comparisons
    }
  }
  calculatePercentile(data, percentile) {
    const sortedData = data.slice().sort((a, b) => a - b);
    const index = Math.floor((percentile / 100) * sortedData.length);
    return sortedData[index];
  }
  combineAndCalculate() {
    let LAF10_90_total = 0; // Initialize the total LAF10%-90%
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const micAnalyzer = this.analyzers[i][0]; // Analyzer for microphone audio (identifier 0)
      const audioFile1Analyzer = this.analyzers[i][3]; // Analyzer for audioFile1 (identifier 3)
      const audioFile2Analyzer = this.analyzers[i][4]; // Analyzer for audioFile2 (identifier 4)
      // Note: calculatePercentile expects an array of levels; each analyzer's
      // frequency data would first have to be read out via
      // getFloatFrequencyData, as done in updatePercentiles above.
      // Calculate percentiles for the microphone audio
      const micPercentile10 = this.calculatePercentile(micAnalyzer, 10);
      const micPercentile90 = this.calculatePercentile(micAnalyzer, 90);
      // Calculate percentiles for audioFile1
      const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10);
      const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90);
      // Calculate percentiles for audioFile2
      const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10);
      const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90);
      // Calculate LAF10%-90% for microphone audio, audioFile1, and audioFile2 separately
      const micLAF10_90 = micPercentile10 - micPercentile90;
      const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90;
      const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90;
      // Calculate combined LAF10%-90% for microphone audio, audioFile1, and audioFile2
      const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
      // Add the combined LAF10%-90% to the total
      LAF10_90_total += combinedLAF10_90;
    }
    return LAF10_90_total;
  }
}
registerProcessor('octave', OctaveBandProcessor);
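
For reference, the nearest-rank percentile logic used by `calculatePercentile` can be exercised standalone. In acoustics, LAF10 (the level exceeded 10% of the time) corresponds to the 90th percentile of the sorted levels, and LAF90 to the 10th. A small sketch with made-up dB values:

```js
// Nearest-rank percentile over a copy of the data, mirroring calculatePercentile().
function percentile(data, p) {
  const sorted = Array.from(data).sort((a, b) => a - b);
  const index = Math.min(sorted.length - 1, Math.floor((p / 100) * sorted.length));
  return sorted[index];
}

const levels = [-42.1, -38.5, -51.0, -47.3, -40.2, -45.8]; // made-up dB values
const laf10 = percentile(levels, 90); // level exceeded 10% of the time
const laf90 = percentile(levels, 10); // level exceeded 90% of the time
console.log('LAF10-LAF90 spread:', (laf10 - laf90).toFixed(1), 'dB'); // 12.5 dB
```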