first commit, auf höchster Konzentrationsstufe und breitestem Inspirationshorizont
|
@ -1,67 +0,0 @@
|
||||||
class BandpassProcessor extends AudioWorkletProcessor {
  // Declare the automation parameters. FIX: without this descriptor,
  // `parameters.frequency` and `parameters.Q` in process() are undefined —
  // the parameters object only contains declared parameters.
  static get parameterDescriptors() {
    return [
      { name: 'frequency', defaultValue: 1000, minValue: 20, maxValue: 20000 },
      { name: 'Q', defaultValue: 1, minValue: 0.0001, maxValue: 100 },
    ];
  }

  constructor() {
    super();
    // FIX: the original read `this.minAmplitude` in convertToDB() without
    // ever defining it, producing NaN. 1e-12 maps silence to -240 dB.
    this.minAmplitude = 1e-12;
    // Per-channel biquad memory: [x1, x2, y1, y2], created lazily.
    this.filterState = [];
  }

  /**
   * Render one quantum: bandpass-filter the input with an RBJ biquad driven
   * by the `frequency` and `Q` parameters, then post RMS, per-sample dB
   * values and the 10th/90th dB percentiles to the main thread.
   *
   * @param {Float32Array[][]} inputs - inputs[0] holds the input channels.
   * @param {Float32Array[][]} outputs - outputs[0] receives filtered audio.
   * @param {Object<string, Float32Array>} parameters - automation values.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs, parameters) {
    const input = inputs[0];
    const output = outputs[0];
    // Parameter arrays hold 1 value (constant) or 128 (automated); the
    // first value is used for the whole quantum.
    const frequency = parameters.frequency[0];
    const Q = parameters.Q[0];

    // RBJ audio-EQ-cookbook bandpass (constant 0 dB peak gain), normalized
    // by a0. `sampleRate` is a global of AudioWorkletGlobalScope.
    const w0 = (2 * Math.PI * frequency) / sampleRate;
    const alpha = Math.sin(w0) / (2 * Q);
    const a0 = 1 + alpha;
    const b0 = alpha / a0;
    const b2 = -alpha / a0;
    const a1 = (-2 * Math.cos(w0)) / a0;
    const a2 = (1 - alpha) / a0;

    for (let channel = 0; channel < input.length; channel++) {
      const inputChannel = input[channel];
      const outputChannel = output[channel];

      if (!this.filterState[channel]) {
        this.filterState[channel] = [0, 0, 0, 0];
      }
      const s = this.filterState[channel];

      // FIX: the original loop body was empty, so the output was always
      // silence; this applies the bandpass the comments described.
      for (let i = 0; i < inputChannel.length; i++) {
        const x = inputChannel[i];
        // Direct form I difference equation (b1 is 0 for this topology).
        const y = b0 * x + b2 * s[1] - a1 * s[2] - a2 * s[3];
        s[1] = s[0];
        s[0] = x;
        s[3] = s[2];
        s[2] = y;
        outputChannel[i] = y;
      }
    }

    // RMS of the filtered audio across all channels.
    const rms = this.calculateRMS(output);

    // Every output sample converted to dB.
    const dbValues = this.convertToDB(output);

    // 10th and 90th percentile of the dB distribution.
    const percentile10 = this.calculatePercentile(dbValues, 10);
    const percentile90 = this.calculatePercentile(dbValues, 90);

    // Ship the analysis results to the main thread.
    this.port.postMessage({ rms, dbValues, percentile10, percentile90 });

    return true;
  }

  /**
   * Root-mean-square amplitude over all channels.
   * @param {Float32Array[]} data - array of per-channel sample buffers.
   * @returns {number} linear RMS (0 for empty input).
   */
  calculateRMS(data) {
    if (data.length === 0 || data[0].length === 0) {
      return 0; // guard: empty render quantum
    }
    let sumOfSquares = 0;
    let count = 0;
    for (let channel = 0; channel < data.length; channel++) {
      const channelData = data[channel];
      for (let i = 0; i < channelData.length; i++) {
        sumOfSquares += channelData[i] * channelData[i];
      }
      count += channelData.length;
    }
    return Math.sqrt(sumOfSquares / count);
  }

  /**
   * Nearest-rank percentile of a numeric array (non-mutating).
   * @param {number[]|Float32Array} data
   * @param {number} percentile - 0..100
   * @returns {number}
   */
  calculatePercentile(data, percentile) {
    const sortedData = Array.prototype.slice.call(data).sort((a, b) => a - b);
    // Clamp so percentile = 100 cannot index one past the end.
    const index = Math.min(
      sortedData.length - 1,
      Math.floor((percentile / 100) * sortedData.length),
    );
    return sortedData[index];
  }

  /**
   * Convert every sample of every channel to decibels (20·log10|x|).
   * @param {Float32Array[]} data - array of per-channel sample buffers.
   * @returns {number[]} flat array of dB values across all channels.
   */
  convertToDB(data) {
    const dbValues = [];
    for (let channel = 0; channel < data.length; channel++) {
      const channelData = data[channel];
      for (let i = 0; i < channelData.length; i++) {
        const amplitude = Math.abs(channelData[i]);
        // minAmplitude keeps log10 finite for silent samples.
        dbValues.push(20 * Math.log10(amplitude + this.minAmplitude));
      }
    }
    return dbValues;
  }
}
|
|
||||||
|
|
||||||
// Register the processor with the AudioWorkletGlobalScope under the name
// 'bandpass-processor'. NOTE(review): registerProcessor() returns undefined,
// so this default export is always undefined — presumably unused; confirm
// no importer relies on it.
export default registerProcessor('bandpass-processor', BandpassProcessor);
|
|
Before Width: | Height: | Size: 562 B After Width: | Height: | Size: 562 B |
Before Width: | Height: | Size: 864 B After Width: | Height: | Size: 864 B |
Before Width: | Height: | Size: 1.1 KiB After Width: | Height: | Size: 1.1 KiB |
Before Width: | Height: | Size: 2.3 KiB After Width: | Height: | Size: 2.3 KiB |
Before Width: | Height: | Size: 342 B After Width: | Height: | Size: 342 B |
Before Width: | Height: | Size: 475 B After Width: | Height: | Size: 475 B |
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 28 KiB |
Before Width: | Height: | Size: 28 KiB After Width: | Height: | Size: 28 KiB |
Before Width: | Height: | Size: 18 KiB After Width: | Height: | Size: 18 KiB |
Before Width: | Height: | Size: 12 KiB After Width: | Height: | Size: 12 KiB |
Before Width: | Height: | Size: 5.2 KiB After Width: | Height: | Size: 5.2 KiB |
Before Width: | Height: | Size: 1.6 KiB After Width: | Height: | Size: 1.6 KiB |
Before Width: | Height: | Size: 457 B After Width: | Height: | Size: 457 B |
Before Width: | Height: | Size: 21 KiB After Width: | Height: | Size: 21 KiB |
Before Width: | Height: | Size: 768 B After Width: | Height: | Size: 768 B |
Before Width: | Height: | Size: 333 B After Width: | Height: | Size: 333 B |
Before Width: | Height: | Size: 333 B After Width: | Height: | Size: 333 B |
Before Width: | Height: | Size: 503 B After Width: | Height: | Size: 503 B |
Before Width: | Height: | Size: 496 B After Width: | Height: | Size: 496 B |
Before Width: | Height: | Size: 530 B After Width: | Height: | Size: 530 B |
Before Width: | Height: | Size: 723 B After Width: | Height: | Size: 723 B |
Before Width: | Height: | Size: 518 B After Width: | Height: | Size: 518 B |
Before Width: | Height: | Size: 370 B After Width: | Height: | Size: 370 B |
Before Width: | Height: | Size: 668 B After Width: | Height: | Size: 668 B |
Before Width: | Height: | Size: 569 B After Width: | Height: | Size: 569 B |
Before Width: | Height: | Size: 342 B After Width: | Height: | Size: 342 B |
Before Width: | Height: | Size: 697 B After Width: | Height: | Size: 697 B |
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 50 KiB |
Before Width: | Height: | Size: 52 KiB After Width: | Height: | Size: 52 KiB |
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 53 KiB |
Before Width: | Height: | Size: 50 KiB After Width: | Height: | Size: 50 KiB |
Before Width: | Height: | Size: 53 KiB After Width: | Height: | Size: 53 KiB |
|
@ -0,0 +1,153 @@
|
||||||
|
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies for the 9 octave bands (Hz).
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile updates

    // NOTE(review): `audioContext`, BiquadFilterNode, IIRFilterNode and
    // AnalyserNode are main-thread Web Audio constructs and are NOT
    // available inside AudioWorkletGlobalScope — this constructor will
    // throw at runtime. The node graph should be built on the main thread
    // (or replaced with in-worklet DSP); confirm the intended architecture.

    // A-weighting filter applied to the microphone channel.
    this.createAWeightingFilter();

    // One bandpass filter per octave-band center frequency.
    this.centerFrequencies.forEach(frequency => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41, // ~one-octave bandwidth
      });
      this.filters.push(filter);
    });

    // Analyzers used for percentile statistics.
    this.setupAnalyzers();
  }

  /**
   * Build the A-weighting filter from the provided feedforward
   * coefficients (feedback [1] makes it an FIR).
   */
  createAWeightingFilter() {
    // Use the provided A-weighting filter coefficients.
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  /**
   * Create this.analyzers[band][sourceId] — one AnalyserNode per octave
   * band and source identifier (0 = microphone, 3/4 = audio files).
   */
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach(frequency => {
      this.analyzers.push([]);
      for (let i = 0; i < 5; i++) { // unique source identifiers 0..4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;

        // Only the microphone path (id 0) is routed through A-weighting.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }

        this.analyzers[this.analyzers.length - 1].push(analyzer);
      }
    });
  }

  /**
   * Per-quantum processing: band-filter each channel, A-weight channel 0,
   * and refresh percentile statistics every updateInterval seconds.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // NOTE(review): BiquadFilterNode has no process() method — this call
      // will throw. Filtering a Float32Array inside a worklet requires a
      // manual biquad implementation; confirm.
      const filteredSignal = this.filters[i].process(inputChannel);

      // Apply A-weighting only to the microphone signal (channel 0).
      if (i === 0) {
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // For other channels, pass the signal without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // FIX: the original read `this.currentTime`, which is always
      // undefined, so the percentile update never fired. `currentTime`
      // is a global of AudioWorkletGlobalScope.
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }

    return true;
  }

  /**
   * RMS level of channel 0 of an AudioBuffer-like signal, in dB.
   * Returns -Infinity for silence (log10 of 0).
   */
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sum = data.reduce((acc, val) => acc + val * val, 0);
    const rmsLevel = Math.sqrt(sum / data.length);
    const dBLevel = 20 * Math.log10(rmsLevel); // convert to dB
    return dBLevel;
  }

  /**
   * Pull frequency data for every octave band of one source channel and
   * compute its 10th/90th percentile spread.
   */
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      // Percentiles of the dB spectrum for this band/channel.
      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);

      const percentileDiff = percentile10 - percentile90;

      // TODO: persist percentileDiff per channel and band so future
      // quanta can be compared (currently computed and discarded).
    }
  }

  /**
   * Nearest-rank percentile of a numeric array (non-mutating).
   */
  calculatePercentile(data, percentile) {
    const sortedData = data.slice().sort((a, b) => a - b);
    const index = Math.floor((percentile / 100) * sortedData.length);
    return sortedData[index];
  }

  /**
   * Sum the combined LAF10%-90% spread of microphone, audioFile1 and
   * audioFile2 across all octave bands.
   * @returns {number} total LAF10%-90%.
   */
  combineAndCalculate() {
    let LAF10_90_total = 0; // running total of LAF10%-90%

    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const micAnalyzer = this.analyzers[i][0];        // microphone (id 0)
      const audioFile1Analyzer = this.analyzers[i][3]; // audioFile1 (id 3)
      const audioFile2Analyzer = this.analyzers[i][4]; // audioFile2 (id 4)

      // NOTE(review): calculatePercentile expects an array of levels, but
      // these are AnalyserNode objects — the frequency data should be read
      // out first (as in updatePercentiles). Confirm intended input.
      const micPercentile10 = this.calculatePercentile(micAnalyzer, 10);
      const micPercentile90 = this.calculatePercentile(micAnalyzer, 90);

      const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10);
      const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90);

      const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10);
      // FIX: the original read `this calculatePercentile(...)` — a missing
      // dot, which is a syntax error that broke the whole module.
      const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90);

      // LAF10%-90% per source.
      const micLAF10_90 = micPercentile10 - micPercentile90;
      const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90;
      const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90;

      // Combined spread across the three sources, accumulated per band.
      const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
      LAF10_90_total += combinedLAF10_90;
    }

    return LAF10_90_total;
  }
}
|
||||||
|
|
||||||
|
// Register this processor under the name 'octave' so the main thread can
// instantiate it via `new AudioWorkletNode(ctx, 'octave')`.
registerProcessor('octave', OctaveBandProcessor);
|
|
@ -0,0 +1,28 @@
|
||||||
|
-----BEGIN PRIVATE KEY-----
|
||||||
|
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxQKVsWo37+7Ut
|
||||||
|
gaYs8JN0Gxx8QrxA4+NqOZnwgu5dJnwEuWWJGiT/oTuBSiI+x4HeoFWlTFoFrc5v
|
||||||
|
XL7K1AUdS0AL2XhXha2Bsd6E5xyJKf8wkBFGB9ENBiNI7PvoOATsA71SxZHf4uNx
|
||||||
|
DF7Gg6cYC8MquhWWCn+bLPJzVnEe7fHss6tBcLufrAOXPFbCeUp/VVVPkF0bBwh+
|
||||||
|
jREdvhvArj03FVBlgsUK3EXH5RwJ2pZaKg6ZNdyb5fHURCOIYjv5UJxdsdKyvKbt
|
||||||
|
rY12i/ebB5P7pquIIykmo4FsP2aLvfUf2WDcEVaBum7DD3kUqjBE/54i/A3hVhVD
|
||||||
|
kfZJpOCrAgMBAAECggEABaNekfr7tDbfZ43NslCuulZhk4ICf62gDCyZmHNHdC3L
|
||||||
|
2RpWCR/n2GDtDyGxMVB0303KLeunCR/sRpr9tBXPtUPD3WpXZtVNpsyK1Bqz3isT
|
||||||
|
45wQaJjJxOYSlR0wrdaGVY4i5XpW7nciMIaXtOQbmn1HAev+rH982XvWdHhTpvlf
|
||||||
|
JtSzqBB20GJLUKhmDx/wyAJ8m2RXBHCnztRqDqqfJMfavbxIahwhXAmTANcMOErl
|
||||||
|
ugElPPei5oiH6Ecv+SwljptG1C7q++pYAsjkAuD2t8rTRNQWRhBYT3oumGBmEjh5
|
||||||
|
3jneSBdPGIdv1bnjsmuPl8sdPzwXc6y/EQ9Iiqk68QKBgQDuFGSolBzLMZNITLs/
|
||||||
|
rf3HWNs8AxGol7XLg8dRpPGuflGgB6pGOsYsfMQv2YXT3FndAeMCMR0VB3kuYo77
|
||||||
|
Q20gj4c2MDdbjPUGZi76wlLwYw36H46TsBiuoOZfk46FbZlaaFYydLHZPrWbnDzn
|
||||||
|
KgFXjYnojIFn86AO+u3Z2Mad0QKBgQC+mCmDZzORu5AowhIGAe42gEq1e9nIh9BB
|
||||||
|
iupo4I/+/EtSi4+m0KoDOUoR99bbs3md25rC6+6oLI9FAc4bp8pKd9443kr7SUy6
|
||||||
|
YYr70QFr9ubZOYMabjkUhXw/kzhqjfU6B1ce1JPS/3/mEdAxjHAppN+jqxp8+efT
|
||||||
|
lwryLV9JuwKBgD21pX2YnnoAiJd15BcWZzAzlOfSN9KGOEXfC1vbMBW1gjzDn9wC
|
||||||
|
QfmoAUYR0MFgXR6O7aNUzZ/0xvFB9KOmD3QgH8Do8IBXYe9drxGqKstKMYZChbc8
|
||||||
|
Lrmc1PmzCn8FMHmhj64WVz7jJTmHXrXgSmbCNhvDx4sFN9iKK/qKWLjxAoGBAJGr
|
||||||
|
5v7B4A5glvwv6GqjCxio0XEIahn0g920eRkTmbs/xaofdPoAvhCcttoo3RUqhad7
|
||||||
|
czvL66qp9A7AJHHKurhUCYrZi+Gn0ncZmoqA4l9MZIBejq+i0wm2RJKqyRHX0jg5
|
||||||
|
6AJuY1V/ZpfHwaI9PnT1yOBlJGek8eUsqncS6qOFAoGBAIqGzvI43w51hdT/tvS9
|
||||||
|
kUhoef++BuT7Rod6rK40l4ti3YfyABN5jFtPG/Bsz7nn7gb3YT/Q2RKzuEQFpLIK
|
||||||
|
JzMu6ElRSLyMTNbtSntq9Qw/hmpcSmzj0iao3kp5Arr9mkhua6ZbUUM0xDYMt4eN
|
||||||
|
EWsxmbuQkcHzvSBHV9w3NBak
|
||||||
|
-----END PRIVATE KEY-----
|
|
@ -0,0 +1,23 @@
|
||||||
|
-----BEGIN CERTIFICATE-----
|
||||||
|
MIID3zCCAscCFFB62ppfH5aXwlofGjPHdDOoRDZmMA0GCSqGSIb3DQEBCwUAMIGr
|
||||||
|
MQswCQYDVQQGEwJERTEbMBkGA1UECAwSQmFkZW4tV3VlcnR0ZW1iZXJnMRIwEAYD
|
||||||
|
VQQHDAlTdHV0dGdhcnQxEjAQBgNVBAoMCU1pbmRib29zdDEWMBQGA1UECwwNSVQg
|
||||||
|
RGVwYXJ0bWVudDEUMBIGA1UEAwwLUm9iZXJ0IFJhcHAxKTAnBgkqhkiG9w0BCQEW
|
||||||
|
GnJvYmVydC5yYXBwQG1pbmRib29zdC50ZWFtMB4XDTIzMTEwMzE1MzkzM1oXDTI0
|
||||||
|
MTEwMjE1MzkzM1owgasxCzAJBgNVBAYTAkRFMRswGQYDVQQIDBJCYWRlbi1XdWVy
|
||||||
|
dHRlbWJlcmcxEjAQBgNVBAcMCVN0dXR0Z2FydDESMBAGA1UECgwJTWluZGJvb3N0
|
||||||
|
MRYwFAYDVQQLDA1JVCBEZXBhcnRtZW50MRQwEgYDVQQDDAtSb2JlcnQgUmFwcDEp
|
||||||
|
MCcGCSqGSIb3DQEJARYacm9iZXJ0LnJhcHBAbWluZGJvb3N0LnRlYW0wggEiMA0G
|
||||||
|
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCxQKVsWo37+7UtgaYs8JN0Gxx8QrxA
|
||||||
|
4+NqOZnwgu5dJnwEuWWJGiT/oTuBSiI+x4HeoFWlTFoFrc5vXL7K1AUdS0AL2XhX
|
||||||
|
ha2Bsd6E5xyJKf8wkBFGB9ENBiNI7PvoOATsA71SxZHf4uNxDF7Gg6cYC8MquhWW
|
||||||
|
Cn+bLPJzVnEe7fHss6tBcLufrAOXPFbCeUp/VVVPkF0bBwh+jREdvhvArj03FVBl
|
||||||
|
gsUK3EXH5RwJ2pZaKg6ZNdyb5fHURCOIYjv5UJxdsdKyvKbtrY12i/ebB5P7pquI
|
||||||
|
Iykmo4FsP2aLvfUf2WDcEVaBum7DD3kUqjBE/54i/A3hVhVDkfZJpOCrAgMBAAEw
|
||||||
|
DQYJKoZIhvcNAQELBQADggEBAJ17zv0dhYSAFD1RBMBh6bVvsyzPFLe7P79dmo/I
|
||||||
|
DbZRi91rIfZMZa6vbnwcYh6tyi9bGtm/tWaCglnJ6ENjSgAupZUGWQ4TTGS4C0sj
|
||||||
|
rRjygWtLYchQVOKGeTbMqG/75GDciwKXhmNNflGWkBSbqdBPubBm+9lr3C2HArYB
|
||||||
|
+FvdIfWxLcpgZANZ4h4lThin40kUJ/3cD8d7RCqk+KhWZ09tMyMojBlNaRv0ejvS
|
||||||
|
I7Yb/Rvnf5gWK3UCmHlcSrX0zO0ulSL2Cgi9EswLnHNiFRYhDoCfSRfY1NlBUhF6
|
||||||
|
xpCSStKIS3CzZpDR4F1U32VqOQx3bfl5gb/cnMAu0FEQtgM=
|
||||||
|
-----END CERTIFICATE-----
|
|
@ -1,9 +1,27 @@
|
||||||
// https://nuxt.com/docs/api/configuration/nuxt-config
|
// https://nuxt.com/docs/api/configuration/nuxt-config
|
||||||
// @ts-ignore
|
// @ts-ignore
|
||||||
|
|
||||||
|
import path from 'path'
|
||||||
|
import fs from 'fs'
|
||||||
|
// import { defineNuxtConfig } from 'nuxt3'
|
||||||
|
|
||||||
export default defineNuxtConfig({
|
export default defineNuxtConfig({
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
devServer: {
|
||||||
|
|
||||||
|
https: {
|
||||||
|
|
||||||
|
// key: fs.readFileSync(path.resolve(__dirname, '.nuxt/cert/privater-schluessel.key')),
|
||||||
|
// cert: fs.readFileSync(path.resolve(__dirname, '.nuxt/cert/zertifikat.crt'))
|
||||||
|
key:'cert/privater-schluessel.key',
|
||||||
|
cert:'cert/zertifikat.crt'
|
||||||
|
|
||||||
|
}
|
||||||
|
},
|
||||||
|
|
||||||
ssr:false,
|
ssr:false,
|
||||||
plugins:[
|
plugins:[
|
||||||
{src: '~/plugins/AudioVisual.client', mode: 'client'},
|
{src: '~/plugins/AudioVisual.client', mode: 'client'},
|
||||||
|
@ -496,4 +514,5 @@ export default defineNuxtConfig({
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|
||||||
|
|
||||||
})
|
})
|
||||||
|
|
|
@ -11,7 +11,7 @@
|
||||||
"@nuxt/vite-builder": "^3.0.0",
|
"@nuxt/vite-builder": "^3.0.0",
|
||||||
"@nuxtjs/i18n": "^8.0.0-beta.10",
|
"@nuxtjs/i18n": "^8.0.0-beta.10",
|
||||||
"@nuxtjs/tailwindcss": "^6.1.3",
|
"@nuxtjs/tailwindcss": "^6.1.3",
|
||||||
"nuxt": "3.0.0",
|
"nuxt": "^3.8.0",
|
||||||
"nuxt-headlessui": "^1.0.4",
|
"nuxt-headlessui": "^1.0.4",
|
||||||
"vue-stripe-js": "^1.0.1"
|
"vue-stripe-js": "^1.0.1"
|
||||||
},
|
},
|
||||||
|
@ -23,8 +23,10 @@
|
||||||
"@stripe/stripe-js": "^1.46.0",
|
"@stripe/stripe-js": "^1.46.0",
|
||||||
"axios": "^1.3.2",
|
"axios": "^1.3.2",
|
||||||
"bootstrap": "^5.2.3",
|
"bootstrap": "^5.2.3",
|
||||||
|
"chart.js": "^4.4.0",
|
||||||
"pinia": "^2.0.28",
|
"pinia": "^2.0.28",
|
||||||
"pinia-plugin-persistedstate": "^3.0.2",
|
"pinia-plugin-persistedstate": "^3.0.2",
|
||||||
|
"standardized-audio-context": "^25.3.58",
|
||||||
"toastr": "^2.1.4",
|
"toastr": "^2.1.4",
|
||||||
"vite": "^3.2.5",
|
"vite": "^3.2.5",
|
||||||
"vue": "^3.2.26",
|
"vue": "^3.2.26",
|
||||||
|
|
|
@ -0,0 +1,70 @@
|
||||||
|
<template>
  <div>
    <button @click="startMicrophone">Start Microphone</button>
    <button @click="stopMicrophone">Stop Microphone</button>
    <div v-if="errorMessage">{{ errorMessage }}</div>
  </div>
</template>

<script>
export default {
  data() {
    return {
      audioContext: null,       // AudioContext driving the worklet graph
      microphoneStream: null,   // MediaStream returned by getUserMedia
      audioProcessorNode: null, // AudioWorkletNode running 'octave'
      errorMessage: null,       // user-visible error text
    };
  },
  methods: {
    /**
     * Request microphone access, load the 'octave' worklet module and wire
     * microphone -> worklet -> destination. Any failure (permission denied,
     * module load error, ...) is surfaced via errorMessage.
     */
    async startMicrophone() {
      try {
        this.audioContext = new window.AudioContext();

        // Request access to the microphone.
        this.microphoneStream = await navigator.mediaDevices.getUserMedia({ audio: true });

        // Check if the microphone stream is available.
        if (!this.microphoneStream) {
          console.error("Microphone stream is not available.");
          this.errorMessage = "Microphone stream is not available.";
          return;
        }

        // Ensure that the microphoneStream is a MediaStream object.
        if (!(this.microphoneStream instanceof MediaStream)) {
          console.error("Invalid microphone stream.");
          this.errorMessage = "Invalid microphone stream.";
          return;
        }

        // Create a MediaStreamAudioSourceNode from the microphone stream.
        const microphoneSource = this.audioContext.createMediaStreamSource(this.microphoneStream);

        // NOTE(review): '@/plugins/octave.js' is a build-time alias; at
        // runtime addModule() needs a servable URL — confirm the bundler
        // rewrites this path.
        await this.audioContext.audioWorklet.addModule('@/plugins/octave.js');
        this.audioProcessorNode = new AudioWorkletNode(this.audioContext, 'octave');
        microphoneSource.connect(this.audioProcessorNode);
        this.audioProcessorNode.connect(this.audioContext.destination);
      } catch (err) {
        // FIX: the original awaited getUserMedia()/addModule() without a
        // try/catch, so a rejection became an unhandled promise rejection
        // and errorMessage was never set.
        console.error("Failed to start microphone:", err);
        this.errorMessage = `Failed to start microphone: ${err.message}`;
      }
    },

    /**
     * Tear down the worklet graph, stop capture, close the AudioContext
     * and reset component state.
     */
    stopMicrophone() {
      if (this.audioContext) {
        // Disconnect and notify the AudioWorkletNode.
        if (this.audioProcessorNode) {
          this.audioProcessorNode.disconnect();
          this.audioProcessorNode.port.postMessage({ command: 'stop' });
        }

        // FIX: also stop the capture tracks — otherwise the microphone
        // stays live and the browser's recording indicator stays on.
        if (this.microphoneStream) {
          this.microphoneStream.getTracks().forEach((track) => track.stop());
        }

        // Close the AudioContext.
        this.audioContext.close();

        // Reset variables.
        this.audioContext = null;
        this.microphoneStream = null;
        this.audioProcessorNode = null;
        this.errorMessage = null;
      }
    },
  },
};
</script>
|
|
@ -20,20 +20,90 @@
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
<canvas id="audioSignalChart" width="400" height="200"></canvas>
|
||||||
</div>
|
</div>
|
||||||
</template>
|
</template>
|
||||||
|
|
||||||
<script>
|
<script>
|
||||||
|
// import BandpassProcessor from '~/plugin/octav.js'; // Adjust the path
|
||||||
|
// import Chart from 'chart.js';
|
||||||
|
|
||||||
export default {
|
export default {
|
||||||
name:'HomePage',
|
name: 'HomePage',
|
||||||
|
|
||||||
created() {
|
data() {
|
||||||
return{
|
return {
|
||||||
player : '',
|
bandpassProcessor: null,
|
||||||
canvas : '',
|
audioContext: null,
|
||||||
mySource : "./symphony.mp3",
|
oscillator: null,
|
||||||
}
|
};
|
||||||
}
|
},
|
||||||
|
|
||||||
|
mounted() {
|
||||||
|
// Initialize the BandpassProcessor
|
||||||
|
this.bandpassProcessor = new BandpassProcessor();
|
||||||
|
|
||||||
|
// Initialize the AudioContext
|
||||||
|
this.audioContext = new AudioContext();
|
||||||
|
|
||||||
|
// Create an oscillator
|
||||||
|
this.oscillator = this.audioContext.createOscillator();
|
||||||
|
this.oscillator.type = 'sine'; // You can change the waveform type
|
||||||
|
this.oscillator.frequency.setValueAtTime(440, this.audioContext.currentTime); // Set the initial frequency
|
||||||
|
|
||||||
|
// Connect the oscillator to the BandpassProcessor
|
||||||
|
this.oscillator.connect(this.bandpassProcessor);
|
||||||
|
|
||||||
|
// Start the oscillator
|
||||||
|
this.oscillator.start();
|
||||||
|
|
||||||
|
// Send parameters to the BandpassProcessor
|
||||||
|
this.bandpassProcessor.port.postMessage({
|
||||||
|
frequency: 1000, // Adjust the frequency value as needed
|
||||||
|
Q: 5, // Adjust the Q value as needed
|
||||||
|
});
|
||||||
|
|
||||||
|
// Listen for processed data from the BandpassProcessor
|
||||||
|
this.bandpassProcessor.port.onmessage = (event) => {
|
||||||
|
const { rms, dbValues, percentile10, percentile90 } = event.data;
|
||||||
|
// Use the processed data as needed
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create a line chart
|
||||||
|
const ctx = document.getElementById('audioSignalChart').getContext('2d');
|
||||||
|
this.audioSignalChart = new Chart(ctx, {
|
||||||
|
type: 'line',
|
||||||
|
data: {
|
||||||
|
labels: [], // You can add time labels or use an array index
|
||||||
|
datasets: [
|
||||||
|
{
|
||||||
|
label: 'Audio Signal',
|
||||||
|
data: [], // Populate this with your audio signal data
|
||||||
|
borderColor: 'rgb(75, 192, 192)',
|
||||||
|
borderWidth: 2,
|
||||||
|
fill: false,
|
||||||
|
},
|
||||||
|
],
|
||||||
|
},
|
||||||
|
options: {
|
||||||
|
responsive: true,
|
||||||
|
maintainAspectRatio: false,
|
||||||
|
scales: {
|
||||||
|
x: {
|
||||||
|
title: {
|
||||||
|
display: true,
|
||||||
|
text: 'Time',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
y: {
|
||||||
|
title: {
|
||||||
|
display: true,
|
||||||
|
text: 'Amplitude',
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
};
|
||||||
</script>
|
</script>
|
|
@ -0,0 +1,162 @@
|
||||||
|
// FIX: removed the stray top-level `new AudioWorkletProcessor()` — the base
// class cannot be constructed directly (illegal constructor) and the call
// threw before the class below was ever registered.

class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies for the 9 octave bands (Hz).
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile updates

    // NOTE(review): `audioContext`, BiquadFilterNode, IIRFilterNode and
    // AnalyserNode are main-thread Web Audio constructs and are NOT
    // available inside AudioWorkletGlobalScope — this constructor will
    // throw at runtime. Confirm the intended architecture.

    // A-weighting filter applied to the microphone channel.
    this.createAWeightingFilter();

    // One bandpass filter per octave-band center frequency.
    // FIX: the original passed `this.filters.push(filter)` as a second
    // constructor argument with mismatched parentheses (`},` followed by
    // `this.filters.push(filter))`), which was a syntax error.
    this.centerFrequencies.forEach(frequency => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41, // ~one-octave bandwidth
      });
      this.filters.push(filter);
    });

    // Analyzers used for percentile statistics.
    this.setupAnalyzers();
  }

  /**
   * Create this.analyzers[band][sourceId] — one AnalyserNode per octave
   * band and source identifier (0 = microphone, 3/4 = audio files).
   */
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach(frequency => {
      this.analyzers.push([]);
      for (let i = 0; i < 5; i++) { // unique source identifiers 0..4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;

        // Only the microphone path (id 0) is routed through A-weighting.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }

        this.analyzers[this.analyzers.length - 1].push(analyzer);
      }
    });
  }

  /**
   * Per-quantum processing: band-filter each channel, A-weight channel 0,
   * and refresh percentile statistics every updateInterval seconds.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // NOTE(review): BiquadFilterNode has no process() method — this call
      // will throw. In-worklet filtering needs a manual biquad; confirm.
      const filteredSignal = this.filters[i].process(inputChannel);

      // Apply A-weighting only to the microphone signal (channel 0).
      if (i === 0) {
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // For other channels, pass the signal without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // FIX: the original read `this.currentTime`, which is always
      // undefined, so the percentile update never fired. `currentTime`
      // is a global of AudioWorkletGlobalScope.
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }

    return true;
  }

  /**
   * RMS level of channel 0 of an AudioBuffer-like signal, in dB.
   * Returns -Infinity for silence (log10 of 0).
   */
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sum = data.reduce((acc, val) => acc + val * val, 0);
    const rmsLevel = Math.sqrt(sum / data.length);
    const dBLevel = 20 * Math.log10(rmsLevel); // convert to dB
    return dBLevel;
  }

  /**
   * Pull frequency data for every octave band of one source channel and
   * compute its 10th/90th percentile spread.
   */
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      // Percentiles of the dB spectrum for this band/channel.
      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);

      const percentileDiff = percentile10 - percentile90;

      // TODO: persist percentileDiff per channel and band so future
      // quanta can be compared (currently computed and discarded).
    }
  }

  /**
   * Nearest-rank percentile of a numeric array (non-mutating).
   */
  calculatePercentile(data, percentile) {
    const sortedData = data.slice().sort((a, b) => a - b);
    const index = Math.floor((percentile / 100) * sortedData.length);
    return sortedData[index];
  }

  /**
   * Build the A-weighting filter from the provided feedforward
   * coefficients (feedback [1] makes it an FIR).
   */
  createAWeightingFilter() {
    // Use the provided A-weighting filter coefficients.
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  /**
   * Sum the combined LAF10%-90% spread of microphone, audioFile1 and
   * audioFile2 across all octave bands.
   * @returns {number} total LAF10%-90%.
   */
  combineAndCalculate() {
    let LAF10_90_total = 0; // running total of LAF10%-90%

    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const micAnalyzer = this.analyzers[i][0];        // microphone (id 0)
      const audioFile1Analyzer = this.analyzers[i][3]; // audioFile1 (id 3)
      const audioFile2Analyzer = this.analyzers[i][4]; // audioFile2 (id 4)

      // NOTE(review): calculatePercentile expects an array of levels, but
      // these are AnalyserNode objects — the frequency data should be read
      // out first (as in updatePercentiles). Confirm intended input.
      const micPercentile10 = this.calculatePercentile(micAnalyzer, 10);
      const micPercentile90 = this.calculatePercentile(micAnalyzer, 90);

      const audioFile1Percentile10 = this.calculatePercentile(audioFile1Analyzer, 10);
      const audioFile1Percentile90 = this.calculatePercentile(audioFile1Analyzer, 90);

      const audioFile2Percentile10 = this.calculatePercentile(audioFile2Analyzer, 10);
      const audioFile2Percentile90 = this.calculatePercentile(audioFile2Analyzer, 90);

      // LAF10%-90% per source.
      const micLAF10_90 = micPercentile10 - micPercentile90;
      const audioFile1LAF10_90 = audioFile1Percentile10 - audioFile1Percentile90;
      const audioFile2LAF10_90 = audioFile2Percentile10 - audioFile2Percentile90;

      // Combined spread across the three sources, accumulated per band.
      const combinedLAF10_90 = micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
      LAF10_90_total += combinedLAF10_90;
    }

    // FIX: the return statement was commented out, so the function
    // silently returned undefined despite computing the total.
    return LAF10_90_total;
  }
}

registerProcessor('octave', OctaveBandProcessor);
|
||||||
|
|
|
@ -0,0 +1,162 @@
|
||||||
|
/**
 * Octave-band audio processor.
 *
 * Splits audio into 9 octave bands (63 Hz – 16 kHz), applies A-weighting to
 * the microphone channel (channel 0), and periodically computes 10th/90th
 * percentile level statistics per band.
 *
 * NOTE(review): `audioContext`, `BiquadFilterNode`, `IIRFilterNode` and
 * `createAnalyser()` are main-thread Web Audio constructs that do NOT exist
 * inside an AudioWorkletGlobalScope, and Biquad/IIR filter nodes have no
 * `.process()` method — this class cannot run as-is inside a worklet.
 * TODO confirm whether the filter graph is meant to live on the main thread,
 * or whether per-sample biquad math should be hand-rolled here.
 */
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies (Hz) of the 9 octave bands.
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile refreshes

    // A-weighting filter, applied to the microphone signal only.
    this.createAWeightingFilter();

    // One bandpass filter per octave band. Q = 1.41 (≈ √2) gives roughly
    // one-octave bandwidth.
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41,
      });
      this.filters.push(filter);
    });

    // Analyzer bank used for percentile statistics.
    this.setupAnalyzers();
  }

  /**
   * Create a 5-slot analyzer bank per octave band.
   * Slot 0 is microphone audio (fed through the A-weighting filter);
   * slots 1-4 are other sources (3 = audioFile1, 4 = audioFile2).
   */
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach(() => {
      const bandAnalyzers = [];
      for (let i = 0; i < 5; i++) { // source identifiers 0..4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;
        // Only the microphone source (0) receives A-weighted audio.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }
        bandAnalyzers.push(analyzer);
      }
      this.analyzers.push(bandAnalyzers);
    });
  }

  /**
   * Render-quantum callback: filter each channel, A-weight channel 0, and
   * refresh percentile statistics every `updateInterval` seconds.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // FIXME(review): BiquadFilterNode has no process() method; this will
      // throw at runtime. Kept from the original design sketch.
      const filteredSignal = this.filters[i].process(inputChannel);

      if (i === 0) {
        // Microphone channel: apply A-weighting before output.
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // Other channels pass through without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // `currentTime` is a global of AudioWorkletGlobalScope, not an
      // instance property (`this.currentTime` was undefined, so updates
      // never fired).
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }
    return true;
  }

  /**
   * RMS level of the first channel of `signal`, in dB.
   * @param {AudioBuffer} signal - buffer exposing copyFromChannel().
   * @param {number} channelIndex - currently unused; kept for interface compatibility.
   * @returns {number} 20*log10(rms); -Infinity for digital silence.
   */
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sumOfSquares = data.reduce((acc, v) => acc + v * v, 0);
    const rms = Math.sqrt(sumOfSquares / data.length);
    return 20 * Math.log10(rms);
  }

  /**
   * Refresh 10th/90th percentile spectrum statistics for one source channel
   * across every octave band.
   */
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);
      const percentileDiff = percentile10 - percentile90;
      // TODO(review): percentileDiff is computed but never stored; pick a
      // data structure if these values are needed for later comparisons.
    }
  }

  /**
   * Nearest-rank percentile of `data`.
   * @param {Float32Array|number[]} data - sample values.
   * @param {number} percentile - 0..100.
   * @returns {number|undefined} undefined when `data` is empty.
   */
  calculatePercentile(data, percentile) {
    const sortedData = Array.from(data).sort((a, b) => a - b);
    // Clamp so percentile = 100 cannot index one past the end.
    const index = Math.min(
      sortedData.length - 1,
      Math.floor((percentile / 100) * sortedData.length)
    );
    return sortedData[index];
  }

  /**
   * Build the A-weighting filter.
   * NOTE(review): these nine values look like per-band dB corrections, not
   * IIR feedforward coefficients — feeding them to IIRFilterNode will not
   * produce A-weighting. TODO confirm the intended coefficients.
   */
  createAWeightingFilter() {
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  /**
   * Sum of LAF10%-90% (10th minus 90th percentile level) across all octave
   * bands for the microphone (slot 0), audioFile1 (slot 3) and audioFile2
   * (slot 4).
   * @returns {number} combined LAF10%-90% total (was computed but never
   * returned in the original).
   */
  combineAndCalculate() {
    let LAF10_90_total = 0;
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      // calculatePercentile expects sample arrays, not AnalyserNode objects,
      // so read the current frequency-domain levels first.
      const readLevels = (analyzer) => {
        const levelData = new Float32Array(analyzer.frequencyBinCount);
        analyzer.getFloatFrequencyData(levelData);
        return levelData;
      };

      const micLevels = readLevels(this.analyzers[i][0]);
      const file1Levels = readLevels(this.analyzers[i][3]);
      const file2Levels = readLevels(this.analyzers[i][4]);

      const micLAF10_90 =
        this.calculatePercentile(micLevels, 10) - this.calculatePercentile(micLevels, 90);
      const audioFile1LAF10_90 =
        this.calculatePercentile(file1Levels, 10) - this.calculatePercentile(file1Levels, 90);
      const audioFile2LAF10_90 =
        this.calculatePercentile(file2Levels, 10) - this.calculatePercentile(file2Levels, 90);

      LAF10_90_total += micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
    }
    return LAF10_90_total;
  }
}

registerProcessor('octave', OctaveBandProcessor);
|
||||||
|
|
|
@ -0,0 +1,11 @@
|
||||||
|
import os


def get_octave_js_path():
    """Return the absolute path to plugins/octave.js next to this script.

    The path is normalized to forward slashes because it is embedded inside
    a single-quoted JavaScript string literal: on Windows, os.path.join()
    produces backslashes, which JavaScript would interpret as escapes.
    """
    current_directory = os.path.dirname(os.path.abspath(__file__))
    octave_js_path = os.path.join(current_directory, 'plugins', 'octave.js')
    return octave_js_path.replace(os.sep, '/')


if __name__ == "__main__":
    # NOTE(review): this overwrites any existing nuxt.config.js wholesale —
    # confirm that is intended before running against a real project.
    nuxt_config = f"module.exports = {{\n octaveJsPath: '{get_octave_js_path()}'\n}};"
    with open('nuxt.config.js', 'w') as file:
        file.write(nuxt_config)
|
|
@ -0,0 +1,153 @@
|
||||||
|
/**
 * Octave-band audio processor.
 *
 * Splits audio into 9 octave bands (63 Hz – 16 kHz), applies A-weighting to
 * the microphone channel (channel 0), and periodically computes 10th/90th
 * percentile level statistics per band.
 *
 * NOTE(review): `audioContext`, `BiquadFilterNode`, `IIRFilterNode` and
 * `createAnalyser()` are main-thread Web Audio constructs that do NOT exist
 * inside an AudioWorkletGlobalScope, and Biquad/IIR filter nodes have no
 * `.process()` method — this class cannot run as-is inside a worklet.
 * TODO confirm whether the filter graph is meant to live on the main thread.
 */
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies (Hz) of the 9 octave bands.
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile refreshes

    // A-weighting filter, applied to the microphone signal only.
    this.createAWeightingFilter();

    // One bandpass filter per octave band. Q = 1.41 (≈ √2) gives roughly
    // one-octave bandwidth.
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41,
      });
      this.filters.push(filter);
    });

    // Analyzer bank used for percentile statistics.
    this.setupAnalyzers();
  }

  /**
   * Build the A-weighting filter.
   * NOTE(review): these nine values look like per-band dB corrections, not
   * IIR feedforward coefficients — feeding them to IIRFilterNode will not
   * produce A-weighting. TODO confirm the intended coefficients.
   */
  createAWeightingFilter() {
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  /**
   * Create a 5-slot analyzer bank per octave band.
   * Slot 0 is microphone audio (fed through the A-weighting filter);
   * slots 1-4 are other sources (3 = audioFile1, 4 = audioFile2).
   */
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach(() => {
      const bandAnalyzers = [];
      for (let i = 0; i < 5; i++) { // source identifiers 0..4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;
        // Only the microphone source (0) receives A-weighted audio.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }
        bandAnalyzers.push(analyzer);
      }
      this.analyzers.push(bandAnalyzers);
    });
  }

  /**
   * Render-quantum callback: filter each channel, A-weight channel 0, and
   * refresh percentile statistics every `updateInterval` seconds.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // FIXME(review): BiquadFilterNode has no process() method; this will
      // throw at runtime. Kept from the original design sketch.
      const filteredSignal = this.filters[i].process(inputChannel);

      if (i === 0) {
        // Microphone channel: apply A-weighting before output.
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // Other channels pass through without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // `currentTime` is a global of AudioWorkletGlobalScope, not an
      // instance property (`this.currentTime` was undefined, so updates
      // never fired).
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }
    return true;
  }

  /**
   * RMS level of the first channel of `signal`, in dB.
   * @param {AudioBuffer} signal - buffer exposing copyFromChannel().
   * @param {number} channelIndex - currently unused; kept for interface compatibility.
   * @returns {number} 20*log10(rms); -Infinity for digital silence.
   */
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sumOfSquares = data.reduce((acc, v) => acc + v * v, 0);
    const rms = Math.sqrt(sumOfSquares / data.length);
    return 20 * Math.log10(rms);
  }

  /**
   * Refresh 10th/90th percentile spectrum statistics for one source channel
   * across every octave band.
   */
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);
      const percentileDiff = percentile10 - percentile90;
      // TODO(review): percentileDiff is computed but never stored; pick a
      // data structure if these values are needed for later comparisons.
    }
  }

  /**
   * Nearest-rank percentile of `data`.
   * @param {Float32Array|number[]} data - sample values.
   * @param {number} percentile - 0..100.
   * @returns {number|undefined} undefined when `data` is empty.
   */
  calculatePercentile(data, percentile) {
    const sortedData = Array.from(data).sort((a, b) => a - b);
    // Clamp so percentile = 100 cannot index one past the end.
    const index = Math.min(
      sortedData.length - 1,
      Math.floor((percentile / 100) * sortedData.length)
    );
    return sortedData[index];
  }

  /**
   * Sum of LAF10%-90% (10th minus 90th percentile level) across all octave
   * bands for the microphone (slot 0), audioFile1 (slot 3) and audioFile2
   * (slot 4). Fixes the original `this calculatePercentile` syntax error.
   * @returns {number} combined LAF10%-90% total.
   */
  combineAndCalculate() {
    let LAF10_90_total = 0;
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      // calculatePercentile expects sample arrays, not AnalyserNode objects,
      // so read the current frequency-domain levels first.
      const readLevels = (analyzer) => {
        const levelData = new Float32Array(analyzer.frequencyBinCount);
        analyzer.getFloatFrequencyData(levelData);
        return levelData;
      };

      const micLevels = readLevels(this.analyzers[i][0]);
      const file1Levels = readLevels(this.analyzers[i][3]);
      const file2Levels = readLevels(this.analyzers[i][4]);

      const micLAF10_90 =
        this.calculatePercentile(micLevels, 10) - this.calculatePercentile(micLevels, 90);
      const audioFile1LAF10_90 =
        this.calculatePercentile(file1Levels, 10) - this.calculatePercentile(file1Levels, 90);
      const audioFile2LAF10_90 =
        this.calculatePercentile(file2Levels, 10) - this.calculatePercentile(file2Levels, 90);

      LAF10_90_total += micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
    }
    return LAF10_90_total;
  }
}

registerProcessor('octave', OctaveBandProcessor);
|
|
@ -0,0 +1,154 @@
|
||||||
|
/**
 * Octave-band audio processor.
 *
 * Splits audio into 9 octave bands (63 Hz – 16 kHz), applies A-weighting to
 * the microphone channel (channel 0), and periodically computes 10th/90th
 * percentile level statistics per band.
 *
 * Fixes from the original: the constructor was never closed before
 * `setupAnalyzers()` was declared (broken brace nesting), and
 * `combineAndCalculate` contained `this calculatePercentile` (missing dot).
 *
 * NOTE(review): `audioContext`, `BiquadFilterNode`, `IIRFilterNode` and
 * `createAnalyser()` are main-thread Web Audio constructs that do NOT exist
 * inside an AudioWorkletGlobalScope, and Biquad/IIR filter nodes have no
 * `.process()` method — this class cannot run as-is inside a worklet.
 * TODO confirm whether the filter graph is meant to live on the main thread.
 */
class OctaveBandProcessor extends AudioWorkletProcessor {
  constructor() {
    super();
    // Center frequencies (Hz) of the 9 octave bands.
    this.centerFrequencies = [63, 125, 250, 500, 1000, 2000, 4000, 8000, 16000];
    this.filters = [];
    this.lastUpdateTimestamp = 0;
    this.updateInterval = 0.125; // seconds between percentile refreshes

    // A-weighting filter, applied to the microphone signal only.
    this.createAWeightingFilter();

    // One bandpass filter per octave band. Q = 1.41 (≈ √2) gives roughly
    // one-octave bandwidth.
    this.centerFrequencies.forEach((frequency) => {
      const filter = new BiquadFilterNode(audioContext, {
        type: 'bandpass',
        frequency: frequency,
        Q: 1.41,
      });
      this.filters.push(filter);
    });

    // Analyzer bank used for percentile statistics.
    this.setupAnalyzers();
  }

  /**
   * Create a 5-slot analyzer bank per octave band.
   * Slot 0 is microphone audio (fed through the A-weighting filter);
   * slots 1-4 are other sources (3 = audioFile1, 4 = audioFile2).
   */
  setupAnalyzers() {
    this.analyzers = [];
    this.centerFrequencies.forEach(() => {
      const bandAnalyzers = [];
      for (let i = 0; i < 5; i++) { // source identifiers 0..4
        const analyzer = audioContext.createAnalyser();
        analyzer.fftSize = 2048;
        // Only the microphone source (0) receives A-weighted audio.
        if (i === 0) {
          this.aWeightingFilter.connect(analyzer);
        }
        bandAnalyzers.push(analyzer);
      }
      this.analyzers.push(bandAnalyzers);
    });
  }

  /**
   * Build the A-weighting filter.
   * NOTE(review): these nine values look like per-band dB corrections, not
   * IIR feedforward coefficients — feeding them to IIRFilterNode will not
   * produce A-weighting. TODO confirm the intended coefficients.
   */
  createAWeightingFilter() {
    const aWeightingCoefficients = [0, -0.051, -0.142, -0.245, -0.383, -0.65, -1.293, -2.594, -6.554]; //David
    this.aWeightingFilter = new IIRFilterNode(audioContext, {
      feedforward: aWeightingCoefficients,
      feedback: [1],
    });
  }

  /**
   * Render-quantum callback: filter each channel, A-weight channel 0, and
   * refresh percentile statistics every `updateInterval` seconds.
   * @returns {boolean} true to keep the processor alive.
   */
  process(inputs, outputs) {
    const numOutputChannels = outputs.length;
    for (let i = 0; i < numOutputChannels; i++) {
      const outputChannel = outputs[i][0];
      const inputChannel = inputs[i][0];

      // FIXME(review): BiquadFilterNode has no process() method; this will
      // throw at runtime. Kept from the original design sketch.
      const filteredSignal = this.filters[i].process(inputChannel);

      if (i === 0) {
        // Microphone channel: apply A-weighting before output.
        const aWeightedSignal = this.aWeightingFilter.process(filteredSignal);
        outputChannel.set(aWeightedSignal);
      } else {
        // Other channels pass through without A-weighting.
        outputChannel.set(filteredSignal);
      }

      // `currentTime` is a global of AudioWorkletGlobalScope, not an
      // instance property (`this.currentTime` was undefined, so updates
      // never fired).
      if (currentTime - this.lastUpdateTimestamp >= this.updateInterval) {
        this.updatePercentiles(i);
        this.lastUpdateTimestamp = currentTime;
      }
    }
    return true;
  }

  /**
   * RMS level of the first channel of `signal`, in dB.
   * @param {AudioBuffer} signal - buffer exposing copyFromChannel().
   * @param {number} channelIndex - currently unused; kept for interface compatibility.
   * @returns {number} 20*log10(rms); -Infinity for digital silence.
   */
  calculateRMSLevel(signal, channelIndex) {
    const data = new Float32Array(signal.length);
    signal.copyFromChannel(data, 0);
    const sumOfSquares = data.reduce((acc, v) => acc + v * v, 0);
    const rms = Math.sqrt(sumOfSquares / data.length);
    return 20 * Math.log10(rms);
  }

  /**
   * Refresh 10th/90th percentile spectrum statistics for one source channel
   * across every octave band.
   */
  updatePercentiles(channelIndex) {
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      const analyzer = this.analyzers[i][channelIndex];
      const levelData = new Float32Array(analyzer.frequencyBinCount);
      analyzer.getFloatFrequencyData(levelData);

      const percentile10 = this.calculatePercentile(levelData, 10);
      const percentile90 = this.calculatePercentile(levelData, 90);
      const percentileDiff = percentile10 - percentile90;
      // TODO(review): percentileDiff is computed but never stored; pick a
      // data structure if these values are needed for later comparisons.
    }
  }

  /**
   * Nearest-rank percentile of `data`.
   * @param {Float32Array|number[]} data - sample values.
   * @param {number} percentile - 0..100.
   * @returns {number|undefined} undefined when `data` is empty.
   */
  calculatePercentile(data, percentile) {
    const sortedData = Array.from(data).sort((a, b) => a - b);
    // Clamp so percentile = 100 cannot index one past the end.
    const index = Math.min(
      sortedData.length - 1,
      Math.floor((percentile / 100) * sortedData.length)
    );
    return sortedData[index];
  }

  /**
   * Sum of LAF10%-90% (10th minus 90th percentile level) across all octave
   * bands for the microphone (slot 0), audioFile1 (slot 3) and audioFile2
   * (slot 4).
   * @returns {number} combined LAF10%-90% total.
   */
  combineAndCalculate() {
    let LAF10_90_total = 0;
    for (let i = 0; i < this.centerFrequencies.length; i++) {
      // calculatePercentile expects sample arrays, not AnalyserNode objects,
      // so read the current frequency-domain levels first.
      const readLevels = (analyzer) => {
        const levelData = new Float32Array(analyzer.frequencyBinCount);
        analyzer.getFloatFrequencyData(levelData);
        return levelData;
      };

      const micLevels = readLevels(this.analyzers[i][0]);
      const file1Levels = readLevels(this.analyzers[i][3]);
      const file2Levels = readLevels(this.analyzers[i][4]);

      const micLAF10_90 =
        this.calculatePercentile(micLevels, 10) - this.calculatePercentile(micLevels, 90);
      const audioFile1LAF10_90 =
        this.calculatePercentile(file1Levels, 10) - this.calculatePercentile(file1Levels, 90);
      const audioFile2LAF10_90 =
        this.calculatePercentile(file2Levels, 10) - this.calculatePercentile(file2Levels, 90);

      LAF10_90_total += micLAF10_90 + audioFile1LAF10_90 + audioFile2LAF10_90;
    }
    return LAF10_90_total;
  }
}

registerProcessor('octave', OctaveBandProcessor);
|
|
@ -1,4 +1,5 @@
|
||||||
{
  // https://nuxt.com/docs/guide/concepts/typescript
  "extends": "./.nuxt/tsconfig.json"
}