// mindboost-rnbo-test-project/src/main.ts

import RNBO, { createDevice, TimeNow } from '@rnbo/js'
import { fetchOrLoadBuffer } from './composables/BufferStore'
import { setupPlayButton } from './composables/Player';
import { getAudioContext } from './audio/context'
import { updateRNBOParameters } from './lib/device';
import patcherUrl from '../public/patches/6Kanal_adaptive_soundscape/All_In_One_ohneRegelschleife_Mindboost_Algo_clean_02_6in_2out.rnbopat.export.json?url'
import { attachDBValueListener } from './dbValueListener';
import { connectSliderToGain } from "./composables/GainStore";
import { attachValueUpdate } from './composables/RNBOParameterStore';
// RNBO
let patcherPromise: Promise<any>
let microphone: MediaStreamAudioSourceNode

async function getPatcher() {
  if (!patcherPromise) patcherPromise = fetch(patcherUrl).then(r => r.json())
  return patcherPromise
}
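
/**
 * Sets up the whole audio graph: audio context, microphone input, RNBO device,
 * looping buffer sources, gain staging, channel routing and the debug UI.
 */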
async function init() {
  // Step 1: audio context (kept suspended until user interaction)
  const ctx = getAudioContext()
  await ctx.suspend()
  await initMicrophone(ctx)
  // Step 2: fetch the patcher
  const patcher = await getPatcher()
  // Step 3: create the RNBO device
  const rnboDevice = await createDevice({
    context: ctx,
    patcher
  })
  // Initial values
  const events = {
    /**
     * Sends a numeric event input to the device.
     * @param {string} name exactly as named in the Max patch
     * @param {number} val
     * @param {number} [when=TimeNow] seconds relative to the audio clock
     */
    send(name: string, val: number, when = TimeNow) {
      const event = new RNBO.MessageEvent(when, name, val)
      rnboDevice.scheduleEvent(event)
      console.log(`Input event ${name} scheduled.`)
    },
  };
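
  // Wire the RNBO parameter store to the device via `events`,
  // then send the initial in7..in13 values to the patch.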
  attachValueUpdate(events)
  updateRNBOParameters(events, [
    { tag: 'in7', value: 5000 },
    { tag: 'in8', value: 125 },
    { tag: 'in9', value: 2.5 },
    { tag: 'in10', value: 2500 },
    { tag: 'in11', value: 3000 },
    { tag: 'in12', value: 0.5623 },
    { tag: 'in13', value: -12 },
  ])
  // PARAMETER
  console.log("All ins set")
  // Step 4: fetch all audio buffers
  const buffer1 = await fetchOrLoadBuffer("100003",
    'audio/MindMusik_15min_48KHz_Forest.mp3',
    ctx
  )
  const buffer2 = await fetchOrLoadBuffer("100004",
    'audio/masking/3bands/mid_band_256kbps.webm',
    ctx
  )
  const buffer3 = await fetchOrLoadBuffer("100005",
    'audio/MindMusik_15min_48KHz_Forest.mp3',
    ctx
  )
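
  // Wrap each buffer in a looping stereo source node.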
  const sourceNode1 = ctx.createBufferSource() // stereo
  const sourceNode2 = ctx.createBufferSource() // stereo
  const sourceNode3 = ctx.createBufferSource() // stereo
  sourceNode1.buffer = buffer1
  sourceNode2.buffer = buffer2
  sourceNode3.buffer = buffer3
  sourceNode1.loop = true
  sourceNode2.loop = true
  sourceNode3.loop = true
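
  // Gain staging: the noise path ramps up to full level within a second,
  // the soundscape path ramps to silence; both are bound to UI sliders below.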
  const noiseGain = ctx.createGain();
  const scapeGain = ctx.createGain();
  noiseGain.gain.linearRampToValueAtTime(1, ctx.currentTime + 1);
  scapeGain.gain.linearRampToValueAtTime(0, ctx.currentTime + 1);
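
  // Let the RNBO node take the full channel count of its inputs
  // and interpret them as speaker channels.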
  rnboDevice.node.channelCountMode = 'max'
  rnboDevice.node.channelInterpretation = 'speakers'
  const inputGainNode = ctx.createGain()
  connectSliderToGain("noiseGain", noiseGain, "noise")
  connectSliderToGain("scapeGain", scapeGain, "scape")
  // Route the buffer sources through their gain nodes
  sourceNode2.connect(noiseGain)
  sourceNode3.connect(scapeGain)
  const { left: micL, right: micR } = splitStereoIntoMonoChannels(microphone)
  const { left: sourceNode1L, right: sourceNode1R } = splitStereoIntoMonoChannels(sourceNode1)
  const { left: sourceNode2L, right: sourceNode2R } = splitStereoIntoMonoChannels(noiseGain)
  const { left: sourceNode3L, right: sourceNode3R } = splitStereoIntoMonoChannels(scapeGain)
  console.log({ sourceNode1L, sourceNode1R }, { sourceNode2L, sourceNode2R }, { sourceNode3L, sourceNode3R })
  // sourceNode1L.connect(ctx.destination, 0, 0)
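
  // Feed the six mono signals into the RNBO device inputs:
  // mic L/R -> 0/1, noise L/R -> 2/3, scape L/R -> 4/5.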
  micL.connect(rnboDevice.node, 0, 0)
  micR.connect(rnboDevice.node, 0, 1)
  sourceNode2L.connect(rnboDevice.node, 0, 2)
  sourceNode2R.connect(rnboDevice.node, 0, 3)
  sourceNode3L.connect(rnboDevice.node, 0, 4)
  sourceNode3R.connect(rnboDevice.node, 0, 5)
  rnboDevice.node.connect(ctx.destination)
  console.log("RNBO device created and connected to its input sources")
  console.log("Everything is set up, displaying the play button")
  setupPlayButton([sourceNode2, sourceNode3], inputGainNode, ctx)
  attachDBValueListener(rnboDevice)
  // UI
  const ui = document.createElement('div')
  ui.style.padding = '20px'
  // Append report info
  ui.appendChild(createNodeInspector('RNBO Device', rnboDevice.node))
  ui.appendChild(createNodeInspectorAndToggle('Mic L', micL, 0, events))
  ui.appendChild(createNodeInspectorAndToggle('Mic R', micR, 0, events))
  ui.appendChild(createNodeInspectorAndToggle('Scape Source 2', sourceNode2, 1, events))
  ui.appendChild(createNodeInspectorAndToggle('Scape Source 3', sourceNode3, 2, events))
  console.log("rnboDevice.numInputChannels = " + rnboDevice.numInputChannels)
  console.log("rnboDevice.numOutputChannels = " + rnboDevice.numOutputChannels)
  console.log("rnboDevice.node.numberOfInputs = " + rnboDevice.node.numberOfInputs)
  console.log("rnboDevice.node.numberOfOutputs = " + rnboDevice.node.numberOfOutputs)
  document.body.appendChild(ui)
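
  // Push the in7..in13 defaults a second time after the UI has been built.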
  updateRNBOParameters(events, [
    { tag: 'in7', value: 5000 },
    { tag: 'in8', value: 125 },
    { tag: 'in9', value: 2.5 },
    { tag: 'in10', value: 25 },
    { tag: 'in11', value: 30 },
    { tag: 'in12', value: 0.5623 },
    { tag: 'in13', value: -12 },
  ])
}
init() // hey ho lets go
// resume only after interaction inside the iframe (mobile safe)
window.addEventListener('click', () => getAudioContext().resume(), { once: true })
console.log("Await unlock context")
function createNodeInspector(label: string, node: AudioNode): HTMLElement {
  const container = document.createElement('div')
  container.style.marginTop = '10px'
  container.style.padding = '8px'
  container.style.border = '1px solid #ccc'
  container.style.borderRadius = '6px'
  container.style.fontFamily = 'monospace'
  container.style.background = '#f9f9f9'
  const title = document.createElement('strong')
  title.textContent = `🎧 ${label}`
  container.appendChild(title)
  const list = document.createElement('ul')
  list.style.listStyle = 'none'
  list.style.padding = '4px'
  list.style.margin = '4px 0'
  const addRow = (key: string, value: string | number) => {
    const li = document.createElement('li')
    li.textContent = `${key}: ${value}`
    list.appendChild(li)
  }
  addRow('channelCount', node.channelCount)
  addRow('channelCountMode', node.channelCountMode)
  addRow('channelInterpretation', node.channelInterpretation)
  addRow('numberOfInputs', node.numberOfInputs)
  addRow('numberOfOutputs', node.numberOfOutputs)
  container.appendChild(list)
  return container
}
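
/**
 * Same inspector panel as above, plus L/R mute-mode buttons (cycling 0/1/2) for
 * buffer source and gain nodes that send the matching in7..in12 event.
 */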
function createNodeInspectorAndToggle(label: string, node: AudioNode, sourceIndex: number, events: { send: (tag: string, value: number) => void }): HTMLElement {
  const container = document.createElement('div')
  container.style.marginTop = '10px'
  container.style.padding = '8px'
  container.style.border = '1px solid #ccc'
  container.style.borderRadius = '6px'
  container.style.fontFamily = 'monospace'
  container.style.background = '#f9f9f9'
  const title = document.createElement('strong')
  title.textContent = `🎧 ${label}`
  container.appendChild(title)
  const list = document.createElement('ul')
  list.style.listStyle = 'none'
  list.style.padding = '4px'
  list.style.margin = '4px 0'
  const addRow = (key: string, value: string | number) => {
    const li = document.createElement('li')
    li.textContent = `${key}: ${value}`
    list.appendChild(li)
  }
  addRow('channelCount', node.channelCount)
  addRow('channelCountMode', node.channelCountMode)
  addRow('channelInterpretation', node.channelInterpretation)
  addRow('numberOfInputs', node.numberOfInputs)
  addRow('numberOfOutputs', node.numberOfOutputs)
  container.appendChild(list)
  if (node instanceof AudioBufferSourceNode || node instanceof GainNode) {
    // Button UI for L/R channel control (mode cycles 0, 1, 2)
    const buttonContainer = document.createElement('div')
    buttonContainer.style.marginTop = '8px'
    ;['L', 'R'].forEach((side, i) => {
      const modeBtn = document.createElement('button')
      modeBtn.textContent = `Mute ${side}: 0`
      modeBtn.style.marginRight = '8px'
      let currentMode = 0
      const paramTag = `in${7 + sourceIndex * 2 + i}` // e.g. in7, in8, ..., in12
      modeBtn.addEventListener('click', () => {
        currentMode = (currentMode + 1) % 3
        modeBtn.textContent = `Mute ${side}: ${currentMode}`
        events.send(paramTag, currentMode)
        console.log(`🔁 Sent ${paramTag} = ${currentMode}`)
      })
      buttonContainer.appendChild(modeBtn)
    })
    container.appendChild(buttonContainer)
  }
  return container
}
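
/**
 * Splits a stereo node into two mono GainNodes (left/right) via a ChannelSplitter.
 */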
function splitStereoIntoMonoChannels(node: AudioNode): { left: GainNode, right: GainNode } {
  console.log("BEFORE SPLIT: channelCount -> " + node.channelCount)
  const ctx = getAudioContext()
  const splitter = ctx.createChannelSplitter(2)
  node.connect(splitter)
  const leftGain = ctx.createGain()
  leftGain.channelCount = 1
  leftGain.channelCountMode = 'explicit'
  leftGain.channelInterpretation = 'discrete'
  leftGain.gain.setValueAtTime(1, leftGain.context.currentTime)
  const rightGain = ctx.createGain()
  rightGain.channelCount = 1
  rightGain.channelCountMode = 'explicit'
  rightGain.channelInterpretation = 'discrete'
  rightGain.gain.setValueAtTime(1, rightGain.context.currentTime)
  splitter.connect(leftGain, 0) // channel 0 = left
  splitter.connect(rightGain, 1) // channel 1 = right
  console.log("AFTER SPLIT: channelCount -> ", { left: leftGain, right: rightGain })
  return { left: leftGain, right: rightGain }
}
/**
 * Requests microphone access and stores a MediaStreamAudioSourceNode in `microphone`.
 */
async function initMicrophone(ctx: AudioContext) {
  try {
    await navigator.mediaDevices.getUserMedia({ audio: true })
  } catch {
    window.alert("No access to the microphone.")
  }
  const availableDevices = await updateAvailableDevices()
  console.log("MICROPHONES ", { availableDevices })
  try {
    console.log("preferred device !!! ")
    const constraints: MediaStreamConstraints = {
      audio: {
        echoCancellation: false,
        noiseSuppression: false,
        autoGainControl: true
      },
      video: false
    }
    const stream = await navigator.mediaDevices.getUserMedia(constraints)
    const source = ctx.createMediaStreamSource(stream)
    createNodeInspector("MICROPHONE", source)
    microphone = source
    console.log("SOURCE MICROPHONE", { source })
  } catch (error) {
    console.error("Error connecting the microphone:", error)
  }
}
/**
 * Returns the list of available audio input devices.
 */
async function updateAvailableDevices(): Promise<MediaDeviceInfo[]> {
  try {
    const devices = await navigator.mediaDevices.enumerateDevices()
    const newDevices = devices.filter(
      device => device.kind === 'audioinput'
    )
    if (newDevices.length === 0) {
      console.warn(
        'No audio input devices found. This might be due to missing permissions or no connected devices.'
      )
    }
    return newDevices
  } catch (error) {
    console.error('Error enumerating devices:', error)
  }
  return []
}