Initial commit
225
stores/audio.ts
Normal file
@@ -0,0 +1,225 @@
import { defineStore } from 'pinia'
import type { Logger } from 'pino'

interface State {
  audioContext: AudioContext | null,
  masterGainNoise: GainNode | null,
  masterGainMusic: GainNode | null,
  audioSink: typeof Audio | null,
  showOverlay: boolean,
  monitoringInterval: NodeJS.Timeout | null,
  playing: boolean,
  scene: string,
  state: string | null,
  volume: number,
  noiseVolume: number
}

function createOverlay (audioContext: AudioContext) {
  const logger = useNuxtApp().$logger as Logger
  logger.info('Create Overlay')
  const overlay = document.createElement('div')
  overlay.className = 'overlay'

  // Create the button
  const button = document.createElement('button')
  button.className = 'btn btn-primary btn-lg'
  button.style.backgroundColor = '#e9c046'
  button.style.border = 'none'
  button.style.color = 'black'
  button.innerText = 'Ready'

  // Add an event listener that unlocks the AudioContext
  button.addEventListener('click', async () => {
    try {
      await audioContext.resume()
      logger.info('AudioContext resumed.')
      overlay.remove() // Remove the overlay after the interaction
    } catch (error) {
      useAudioStore().resetAudioContext()
      useAudioStore().initializeAudioContext()
      logger.error('Error resuming AudioContext:', error)
    }
  })

  // Add the button to the overlay
  overlay.appendChild(button)

  // Add the overlay to the DOM
  document.body.appendChild(overlay)
}

export const useAudioStore = defineStore('audio', {
  state: (): State => ({
    audioContext: null as AudioContext | null,
    masterGainNoise: null as GainNode | null,
    masterGainMusic: null as GainNode | null,
    audioSink: null as typeof Audio | null,
    showOverlay: false,
    monitoringInterval: null as NodeJS.Timeout | null,
    playing: false,
    scene: 'Lagoon',
    state: null as string | null,
    volume: parseFloat(localStorage.getItem('volume') || '1'),
    noiseVolume: parseFloat(localStorage.getItem('noiseVolume') || '1')
  }),
  actions: {
    getNewAudioElement (newTrack: string) {
      const logger = useNuxtApp().$logger as Logger
      logger.info(`Current track changed to: ${newTrack}`)
      return useMediaProvider().getAudioElementForTitle(newTrack)
      // Additional logic for changing tracks (stopping the current audio,
      // preloading the new track, etc.) can be added here, depending on how
      // playback is implemented.
    },
    async ensureAudioContextRunning () {
      if (!this.audioContext) {
        this.initializeAudioContext()
      }
      if (this.audioContext?.state === 'suspended') {
        try {
          await this.audioContext.resume()
        } catch (error) {
          const logger = useNuxtApp().$logger as Logger
          logger.error('Error resuming AudioContext:', error)
          this.showOverlay = true
        }
      }
    },
    initializeAudioContext () {
      const logger = useNuxtApp().$logger as Logger
      if (!this.audioContext) {
        this.audioContext = new AudioContext()
        this.prepareGains(this.audioContext)
        logger.info('AudioContext initialized and gains prepared.')
        // Attach monitoring to keep the AudioContext running at all times.
        this.startAudioContextMonitor()
        this.addStateMonitor()
      }
    },
    prepareGains (audioContext: AudioContext) {
      this.masterGainNoise = audioContext.createGain()
      this.masterGainMusic = audioContext.createGain()
      this.masterGainNoise.connect(audioContext.destination)
      this.masterGainMusic.connect(audioContext.destination)
    },
    addStateMonitor () {
      const logger = useNuxtApp().$logger as Logger
      if (!this.audioContext) {
        this.initializeAudioContext()
        return
      }
      // Watch for AudioContext state changes
      this.audioContext.onstatechange = () => {
        const currentState = this.audioContext?.state
        logger.info('AudioContext state changed:', currentState)
        if (currentState === 'suspended') {
          createOverlay(this.getContext())
        }
      }
      // Optional: watch for changes of the audio output sink
      if ('onsinkchange' in this.audioContext) {
        this.audioContext.onsinkchange = () => {
          logger.info('Audio sink configuration has changed.')
          // Add additional logic here if required
        }
      }
    },
    getMasterGainNoise (): GainNode {
      if (this.masterGainNoise) {
        return this.masterGainNoise
      } else {
        const context = this.getContext()
        this.masterGainNoise = context.createGain()
        this.masterGainNoise.connect(context.destination)
        return this.masterGainNoise
      }
    },
    getContext (): AudioContext {
      if (!this.audioContext) {
        this.audioContext = new AudioContext()
      }
      return this.audioContext
    },
    async resumeAudioContext () {
      const logger = useNuxtApp().$logger as Logger
      if (this.audioContext?.state === 'suspended') {
        try {
          await this.audioContext.resume()
          this.state = this.audioContext.state
          const state = this.state
          logger.info('AudioContext resumed.', { state })
          this.stopAudioContextMonitor()
        } catch (error) {
          logger.error('Error resuming AudioContext:', error)
          this.resetAudioContext()
          this.initializeAudioContext()
          await this.ensureAudioContextRunning()
        }
      }
    },
    togglePlaying (): void {
      this.playing = !this.playing
    },
    isPlaying (): boolean {
      return this.playing
    },
    setPlaying (stateChange: boolean): void {
      this.playing = stateChange
    },
    setVolume (newVolume: number) {
      this.volume = newVolume
      localStorage.setItem('volume', newVolume.toString())
    },
    setNoiseVolume (newVolume: number) {
      this.noiseVolume = newVolume
      localStorage.setItem('noiseVolume', newVolume.toString())
    },
    startAudioContextMonitor () {
      const logger = useNuxtApp().$logger as Logger
      // Start an interval only if none is active yet
      if (!this.monitoringInterval) {
        this.monitoringInterval = setInterval(() => {
          if (this.audioContext?.state === 'suspended') {
            logger.info('AudioContext is suspended. Attempting to resume...')
            this.audioContext.resume()
          }
        }, 500) // checks every 500 ms
        logger.info('AudioContext monitoring started.')
      }
    },
    stopAudioContextMonitor () {
      const logger = useNuxtApp().$logger as Logger
      // Stop the interval if it is running
      if (this.monitoringInterval) {
        clearInterval(this.monitoringInterval)
        this.monitoringInterval = null
        logger.info('AudioContext monitoring stopped.')
      }
    },
    resetAudioContext () {
      const logger = useNuxtApp().$logger as Logger
      if (this.audioContext) {
        this.audioContext.close()
        this.audioContext = null
        logger.info('AudioContext has been closed.')
      }
    }
  },
  getters: {
    getPlaying: state => state.playing,
    getVolume: state => state.volume,
    getNoiseVolume: state => state.noiseVolume
  }
})

// Provide a helper to ensure the audio context is running
export const ensureAudio = async () => {
  const audioStore = useAudioStore()
  await audioStore.ensureAudioContextRunning()
}
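For orientation, a minimal sketch of how a component might drive this store — hypothetical usage, not part of the commit; it assumes the store above is importable from ~/stores/audio.

import { useAudioStore, ensureAudio } from '~/stores/audio'

// Called from a click handler: browsers only allow resuming an AudioContext
// inside a user gesture, which is what ensureAudio() relies on.
async function onReadyClick () {
  await ensureAudio()
  const audio = useAudioStore()
  audio.setVolume(0.8)                        // persisted to localStorage by the store
  audio.setPlaying(true)

  // Route a test source through the shared noise master gain created by prepareGains().
  const osc = audio.getContext().createOscillator()
  osc.connect(audio.getMasterGainNoise())
  osc.start()
}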
16
stores/counter.js
Normal file
@@ -0,0 +1,16 @@
// stores/counter.js
import { defineStore } from 'pinia'

export const useCounterStore = defineStore('counter', {
  state: () => {
    return { count: 20 }
  },
  // could also be defined as
  // state: () => ({ count: 0 })
  actions: {
    increment (a) {
      // Note: despite its name, this action sets the counter to `a`
      // rather than adding `a` to the current value.
      this.count = a
    }
  },
  persist: true
})
111
stores/device.ts
Normal file
@@ -0,0 +1,111 @@
// stores/devices.ts
import { defineStore } from 'pinia'
import { type Device } from '@rnbo/js'
import { useAudioStore } from './audio'
import { createRNBODevice } from '~/lib/AudioFunctions'
import patcher from '~/assets/patches/1.3.1_versions/export/js/LAF-Controll-Values_Simple_Band1000.rnbopat.export.json'
import noisePatcher from '~/assets/patches/1.3.1_versions/nomusicPatch/export/js/ASM_Vers_4in2out_48kHz_NoMusik.rnbopat.export.json'
import passPatcher from '~/assets/patches/1.3.1_versions/passthrough/passthrough_Stereo.rnbopat.export.json'
import controlValuesPatcher from '~/assets/patches/1.3.1_versions/controlvalues/LAF-Controll-Values_Simple_Band1000.rnbopat.export.json'
import bandPatcher from '~/assets/patches/1.3.1_versions/singleBand/adaptive_masking_controller_NoMusic.rnbopat.export.json'

interface DeviceInfo {
  id: string;
  name: string;
  device: Device | null; // could also be an AudioWorkletNode, for example
  audioNode: AudioNode | null;
}

export const useDevicesStore = defineStore('devices', {
  state: () => ({
    devices: new Map<string, DeviceInfo>()
  }),

  actions: {
    async createDevice (name: string) {
      try {
        const context = useAudioStore().audioContext
        if (context === null) { return }
        const device = await createRNBODevice(context, patcher)
        this.devices.set(name, { id: name, name, device, audioNode: device.node })
        return device
      } catch (error) {
        // useNuxtApp().$logger.error('Failed to create device:', error)
      }
    },
    async createNoiseDevice (name: string) {
      if (this.devices.has(name)) { return this.devices.get(name)?.device }
      try {
        const context = useAudioStore().audioContext
        if (context === null) { return }
        const device = await createRNBODevice(context, noisePatcher)
        this.devices.set(name, { id: name, name, device, audioNode: device.node })
        return device
      } catch (error) {
        // useNuxtApp().$logger.error('Failed to create device:', error)
      }
    },
    async createPasstroughDevice (name: string) {
      if (this.devices.has(name)) { return this.devices.get(name)?.device }
      try {
        const context = useAudioStore().audioContext
        if (context === null) { return }
        const device = await createRNBODevice(context, passPatcher)
        this.devices.set(name, { id: name, name, device, audioNode: device.node })
        return device
      } catch (error) {
        // useNuxtApp().$logger.error('Failed to create device:', error)
      }
    },
    async createFullBandDevice (name: string) {
      if (this.devices.has(name)) { return this.devices.get(name)?.device }
      try {
        const context = useAudioStore().audioContext
        if (context === null) { return }
        const device = await createRNBODevice(context, bandPatcher)
        this.devices.set(name, { id: name, name, device, audioNode: device.node })
        return device
      } catch (error) {
        // useNuxtApp().$logger.error('Failed to create device:', error)
      }
    },
    async createControlValuesDevice (name: string, centerFrequency: number = 1000) {
      if (this.devices.has(name + centerFrequency)) { return this.devices.get(name + centerFrequency)?.device }
      try {
        const context = useAudioStore().audioContext
        if (context === null) { return }
        const device = await createRNBODevice(context, controlValuesPatcher)
        const param = device.parametersById.get('centerFrequency')
        param.value = centerFrequency
        this.devices.set(name + centerFrequency, { id: name + centerFrequency, name, device, audioNode: device.node })
        return device
      } catch (error) {
        useNuxtApp().$logger.error('Failed to create control values device:', error)
      }
    },
    removeDevice (id: string) {
      const deviceInfo = this.devices.get(id)
      if (deviceInfo) {
        if (deviceInfo.audioNode) {
          deviceInfo.audioNode.disconnect()
          deviceInfo.audioNode = null
        }
        this.devices.delete(id)
      }
    },
    getDevice (id: string) {
      return this.devices.get(id)
    },
    getDeviceAudioNode (id: string): AudioNode | null {
      const device = this.devices.get(id)
      return device ? device.audioNode : null
    }
  }
})
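A hypothetical wiring example (illustrative only, not part of the commit): creating the RNBO noise device and connecting its output node to the audio store's master noise gain.

import { useAudioStore } from '~/stores/audio'
import { useDevicesStore } from '~/stores/device'

async function setupNoiseChain () {
  const audio = useAudioStore()
  const devices = useDevicesStore()

  // The device factories return early when the AudioContext is missing,
  // so make sure it exists and is running first.
  await audio.ensureAudioContextRunning()
  await devices.createNoiseDevice('noise')

  const node = devices.getDeviceAudioNode('noise')
  if (node) {
    node.connect(audio.getMasterGainNoise())  // route the RNBO output into the shared noise gain
  }
}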
5
stores/interfaces/ANC.ts
Normal file
@@ -0,0 +1,5 @@
export enum ANC {
  No = 'No',
  Yes = 'Yes'
}
5
stores/interfaces/AudioBufferItem.ts
Normal file
@@ -0,0 +1,5 @@
export interface AudioBufferItem {
  url: string;
  buffer: AudioBuffer | null;
  name: string;
}
7
stores/interfaces/AudioNodeItem.ts
Normal file
@@ -0,0 +1,7 @@
export interface AudioNodeItem {
  type: string;
  node: AudioNode | null;
  started?: boolean;
  previousNodeName?: string;
  nextNodeName?: string;
}
4
stores/interfaces/HeadsetType.ts
Normal file
@@ -0,0 +1,4 @@
export enum HeadsetType {
  OverEar = 'OverEar',
  InEar = 'InEar'
}
7
stores/interfaces/Microphone.ts
Normal file
@@ -0,0 +1,7 @@
export interface Microphone {
  microphoneNode: AudioNode | null;
  microphoneStream: MediaStream | null;
  label: string;
  silenceDetected?: string;
  deviceId?: string;
}
6
stores/interfaces/SoundScape.ts
Normal file
@@ -0,0 +1,6 @@
export enum SoundScape {
  Lagoon = 'lagoon',
  Forest = 'forest',
  Meadow = 'meadow',
  Tropics = 'Tropics' // 'tropics' is not known by the backend; 'x' currently stands in for tropics — this is a backend bug
}
222
stores/interfaces/oldstores/audio_old.ts
Normal file
@@ -0,0 +1,222 @@
// stores/interfaces/oldstores/audio_old.ts (legacy audio store)
import { defineStore, type _ActionsTree, type StoreDefinition, type StateTree } from 'pinia'
import { type AudioNodeItem } from '../AudioNodeItem'
import { useUserStore } from '../../user'
import { connectAudioNodes } from '~/lib/AudioFunctions'
import type Player from '~/components/Player/Player'

export const useAudioStore: StoreDefinition = defineStore<'audio', StateTree, _ActionsTree>('audio', {
  state: () => {
    return {
      audioContext: new AudioContext() as AudioContext,
      nodes: new Map() as Map<string, AudioNodeItem>,
      playing: false,
      scene: 'Lagoon',
      acusticCheckResult: 0,
      scenePlayer: null as Player | null,
      noisePlayer: null as Player | null
    }
  },

  actions: {
    setPlayers ({ scenePlayer, noisePlayer }: any) {
      this.scenePlayer = scenePlayer
      this.noisePlayer = noisePlayer
    },
    //
    // Audio Context Actions
    //
    isPlaying (): boolean {
      return this.playing
    },

    async pauseContext (): Promise<void> {
      const audioContext = await this.getAudioContext()
      if (audioContext) {
        audioContext.suspend()
      }
    },
    async getAudioContext (): Promise<AudioContext> {
      if (Howler.ctx) {
        // Audio context already exists; resume it if it was suspended
        if (Howler.ctx.state === 'suspended') { Howler.ctx.resume() }
        return Howler.ctx
      } else {
        return await this.initializeAudioContext()
      }
    },
    getContext (): AudioContext {
      if (this.audioContext) { return this.audioContext } else {
        this.audioContext = new AudioContext()
        return this.audioContext
      }
    },
    async initializeAudioContext (): Promise<AudioContext> {
      return await this.audioContext
    },

    resetAudioContext () {
      if (Howler.ctx) {
        Howler.unload()
      } else if (this.audioContext instanceof AudioContext && this.audioContext.state !== 'closed') {
        try {
          Howler.stop()
          Howler.unload()
          this.audioContext = null
          this.setPlaying(false)
        } catch (_error) {
          // useNuxtApp().$logger.error('Error closing AudioContext:', _error)
        }
      }
    },

    setPlaying (stateChange: boolean): void {
      this.playing = stateChange
    },
    //
    // Working with Web Audio Nodes
    //
    removeNodeByType (name: string): void {
      if (this.nodes && this.nodes.has(name)) {
        this.nodes.delete(name)
      }
    },
    getNodeByType (name: string): AudioNodeItem | null {
      if (this.nodes instanceof Map) {
        return this.nodes.get(name) as AudioNodeItem
      }
      return null
    },
    getMicrophoneNode (): AudioNodeItem | null {
      return this.nodes.get('microphone') ?? null
    },
    async getBufferSourceNode (name: string): Promise<AudioNodeItem | null> {
      const node = this.nodes.get(name)
      if (node) {
        // Node already exists
        return node
      } else {
        await this.createBufferSource(name)
        const createdNode = this.getNodeByType(name)
        return createdNode
      }
    },
    async createMediaStreamNodeAsync (stream: MediaStream): Promise<MediaStreamAudioSourceNode> {
      const audioCtx: AudioContext = await this.getAudioContext()
      const streamNode = audioCtx.createMediaStreamSource(stream)
      this.addNode('microphone', streamNode)
      return streamNode
    },
    createMediaStreamNode (stream: MediaStream, audioContext: AudioContext): MediaStreamAudioSourceNode {
      const audioCtx: AudioContext = audioContext
      const streamNode = audioCtx.createMediaStreamSource(stream)
      this.addNode('microphone', streamNode)
      return streamNode
    },
    async createBufferSource (name: string): Promise<AudioNodeItem> {
      const bufferStore = this.bufferStore()
      const audioBuffer = await bufferStore.getAudioBufferByName(name)
      const audioCtx = await this.getAudioContext()
      const abn = audioCtx.createBufferSource()
      abn.buffer = audioBuffer
      const result = this.addNode(name, abn)
      return result
    },
    disconnectAllNodes (): void {
      if (this.nodes.size > 0) {
        this.nodes.forEach((n: AudioNodeItem) => n.node?.disconnect())
        this.nodes = new Map<string, AudioNodeItem>()
      }
    },
    startNode (node: AudioNodeItem) {
      if (node.node instanceof AudioBufferSourceNode) {
        node.node?.start(0)
      } else {
        // useNuxtApp().$logger.error('Node not defined in start node')
      }
    },
    addNode (type: string, node: AudioNode): AudioNodeItem {
      this.nodes.set(type, { type, node, started: false })
      return { type, node, started: false } as AudioNodeItem
    },
    connectNodes (nodeFrom: AudioNodeItem, nodeTo: AudioNodeItem, channelOutput?: number, channelInput?: number): void {
      if (nodeFrom && nodeTo && !channelInput && !channelOutput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode)
      }
      if (nodeFrom && nodeTo && channelOutput && !channelInput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode, channelOutput)
      }
      if (nodeFrom && nodeTo && channelOutput && channelInput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode, channelOutput, channelInput)
      }

      // Record the linkage so neighbouring nodes can be looked up later
      if (nodeTo.type) { nodeFrom.nextNodeName = nodeTo.type }
      nodeTo.previousNodeName = nodeFrom.type
    },
    startAllSources (): void {
      const app = useNuxtApp()
      // @ts-ignore
      const user = useUserStore(app.$pinia)
      const userObj = user.user as any
      this.nodes.forEach((node: AudioNodeItem, key: string) => {
        const userSoundscape = userObj.settings.soundscape === 'Tropics' ? 'tropics' : userObj.settings.soundscape || 'meadow'
        if (key === userSoundscape || key === 'noise') {
          if (node.node instanceof AudioBufferSourceNode && !node.started) {
            this.startBufferSourceNode(node.node)
          }
        }
      })
    },
    startBufferSourceNode (node: AudioBufferSourceNode) {
      this.nodes.forEach((value: AudioNodeItem) => {
        if (value.node === node) {
          const c = value.node as AudioBufferSourceNode
          c.start(0)
          value.started = true
        }
      })
    },
    replaceMusicNode (oldNode: AudioNodeItem, newNode: AudioNodeItem): AudioNodeItem {
      if (!this.nodes.has(oldNode.type) || !this.nodes.has(newNode.type)) {
        // this.$logger.warn('AudioNodes not present in nodes Map')
      }
      if (!(oldNode.node instanceof AudioBufferSourceNode) || !(newNode.node instanceof AudioBufferSourceNode)) {
        // createLogger().warn('AudioNodes not AudioBufferSourceNodes')
      }

      const nextNode = oldNode.nextNodeName ? this.nodes.get(oldNode.nextNodeName) : null
      if (!nextNode || !(nextNode.node instanceof ChannelSplitterNode)) {
        // createLogger().warn('AudioNodes not ChannelSplitterNodes')
      } else {
        oldNode.node?.disconnect()
        oldNode.started = false
        oldNode = {} as AudioNodeItem
        if (newNode && newNode.node) {
          if (nextNode.type) { newNode.nextNodeName = nextNode.type as string }
          newNode.node.connect(nextNode?.node as AudioNode)
          return newNode
        }
      }
      return newNode
    }
  }
})
287
stores/microphone.ts
Normal file
@@ -0,0 +1,287 @@
import { defineStore, type _ActionsTree, type StateTree, type StoreDefinition } from 'pinia'
import type { Logger } from 'pino'
import type { Microphone } from './interfaces/Microphone'
import { useAudioStore } from './audio'
import microphoneList from '~/assets/microphone_list.json'

let pendingMicrophonePromise: Promise<Microphone> | null = null

export const useMicStore: StoreDefinition = defineStore<
  'microphone',
  StateTree,
  _ActionsTree
>('microphone', {

  state: () => ({
    microphone: {} as Microphone | null,
    availableDevices: [] as MediaDeviceInfo[],
    microphoneActive: false
  }),
  actions: {
    async getMicrophone (context?: AudioContext): Promise<Microphone> {
      if (!context) {
        await useAudioStore().ensureAudioContextRunning()
        context = useAudioStore().getContext()
      }
      if (this.microphone &&
        this.microphone.microphoneNode &&
        this.microphone.microphoneStream
      ) {
        return this.microphone
      }
      // If a request is already in flight, wait for it instead of starting a new one
      if (pendingMicrophonePromise) {
        return pendingMicrophonePromise
      }
      pendingMicrophonePromise = (async () => {
        await this.updateAvailableDevices()
        const audioCtx = context || useAudioStore().getContext()

        this.microphone.microphoneStream = await this.getMediaStream()
        this.microphone.microphoneNode = audioCtx.createMediaStreamSource(this.microphone.microphoneStream)

        const track = this.microphone.microphoneStream.getAudioTracks()[0]
        this.microphone.label ||= track.label || 'Unknown Microphone'

        return this.microphone
      })()
      // Wait for the result, then clear the pending promise
      const result = await pendingMicrophonePromise
      pendingMicrophonePromise = null
      return result
    },
    /**
     * Updates the AudioContext of the microphone node. The given context's
     * readiness is checked before the microphone node is destroyed and
     * recreated within the new context.
     * @param context
     */
    async updateMicrophoneNodeContext (context: AudioContext) {
      if (context.state !== 'running') {
        const audioStore = useAudioStore()
        try {
          await audioStore.resumeAudioContext()
        } catch (error) {
          const logger = useNuxtApp().$logger as Logger
          logger.error('Resume audio context failed: ' + context.state)
        }
      }
      const mediaStream = this.microphone.microphoneStream
      const label = this.microphone.label
      this.microphone.microphoneNode.disconnect()
      this.microphone.microphoneNode = null
      const microphoneNode = context.createMediaStreamSource(mediaStream)
      this.microphone = {
        microphoneNode,
        microphoneStream: mediaStream,
        label
      }
    },
    /**
     * Specifically for the Howler.js audio API: creates a microphone audio
     * node within Howler's audio context, so that Howler audio nodes can be
     * connected directly or indirectly to the microphone.
     * @returns Promise<Microphone>
     */
    async getMicrophoneInHowlerContext (): Promise<Microphone> {
      const audioContext = Howler.ctx
      if (
        this.microphone &&
        this.microphone.microphoneNode &&
        this.microphone.microphoneStream
      ) {
        if (this.microphone.microphoneNode.context !== audioContext) {
          await this.updateMicrophoneNodeContext(audioContext)
        }
        return this.microphone as Microphone
      }
      // If a request is already in flight, wait for it instead of starting a new one
      if (pendingMicrophonePromise) {
        return pendingMicrophonePromise
      }
      pendingMicrophonePromise = (async () => {
        await this.updateAvailableDevices()
        const mediaStream = await this.getMediaStream()

        const audioCtx = Howler.ctx
        this.microphone.microphoneNode ||= audioCtx.createMediaStreamSource(mediaStream)
        const track = this.microphone.microphoneStream.getAudioTracks()[0]
        this.microphone.label = track.label || 'Unknown Microphone'

        return this.microphone
      })()
      // Wait for the result, then clear the pending promise
      const result = await pendingMicrophonePromise
      pendingMicrophonePromise = null
      return result
    },

    /**
     * Interacts with the browser user-media API to get the microphone.
     * It requests permission to access the microphone and then sets the
     * active state of microphone usage, which is visible in the browser tab.
     * To avoid skewed calculations, the stream is requested without any preprocessing.
     */
    async attachMicrophone () {
      if (this.microphone && !this.microphone.microphoneStream) {
        const stream = await navigator.mediaDevices.getUserMedia({
          audio: {
            echoCancellation: false,
            noiseSuppression: false,
            autoGainControl: false
          },
          video: false
        })
        this.microphone.microphoneStream = stream
        this.microphoneActive = true
      }
    },
    /**
     * Stops the usage of the microphone. It stops all tracks of the used
     * MediaStream and sets the internal state to null. After that the browser
     * should no longer report any microphone usage caused by this store.
     */
    detachMicrophone () {
      if (this.microphone && this.microphone.microphoneStream) {
        this.detachMediaStream()
        this.microphoneActive = false
      }
    },
    /**
     * Updates the list of possible microphone sources.
     */
    async updateAvailableDevices () {
      const logger = useNuxtApp().$logger as Logger
      try {
        const devices = await navigator.mediaDevices.enumerateDevices()
        this.availableDevices = devices.filter(
          device => device.kind === 'audioinput'
        )
        if (this.availableDevices.length === 0) {
          logger.warn(
            'No audio input devices found. This might be due to missing permissions or no connected devices.'
          )
        }
      } catch (error) {
        logger.error('Error enumerating devices:', error)
      }
    },
    /**
     * Requests the media stream without worrying about any preprocessing of
     * the stream or preferred devices the algorithm works best with.
     * @returns { Promise<MediaStream> }
     */
    async getMediaStream (): Promise<MediaStream> {
      if (this.microphone && this.microphone.microphoneStream) {
        return this.microphone.microphoneStream
      }
      const preferredDevice = this.getPreferredDevice()

      const constraints: MediaStreamConstraints = {
        audio: preferredDevice
          ? { deviceId: { exact: preferredDevice.deviceId } }
          : {
              echoCancellation: false,
              noiseSuppression: false,
              autoGainControl: false
            },
        video: false
      }
      if (this.microphone === null) { this.microphone = {} as Microphone }
      this.microphone.microphoneStream = await navigator.mediaDevices.getUserMedia(
        constraints
      )
      return this.microphone.microphoneStream
    },

    /**
     * Attempts to find a preferred microphone device from the available devices.
     *
     * This method iterates through a list of preferred microphone names and tries to match them
     * against the labels of available audio input devices. It uses a case-insensitive regex match,
     * allowing for partial matches. The method includes safety measures such as input sanitization
     * and length limits against potential regex-based attacks.
     *
     * If a match is found, it returns the corresponding MediaDeviceInfo object.
     * If no match is found, it returns undefined, indicating that the default device should be used.
     *
     * @returns {MediaDeviceInfo | undefined} The matched device info or undefined if no match is found.
     */
    getPreferredDevice (): MediaDeviceInfo | undefined {
      const logger = useNuxtApp().$logger as Logger
      for (const preferredMic of microphoneList) {
        if (preferredMic.length > 200) {
          // Limit the length of preferred mic names
          logger.warn(
            `Skipping overly long preferred microphone name: ${preferredMic.substring(0, 20)}...`
          )
          continue
        }

        // Sanitize the preferred microphone name
        const sanitizedMicName = preferredMic.replace(/[^\w\s-]/gi, '')
        // Create a case-insensitive regular expression from the sanitized name
        const preferredMicRegex = new RegExp(sanitizedMicName, 'i')
        const matchingDevice = this.availableDevices.find(
          (device: { label: string }) => preferredMicRegex.test(device.label)
        )

        if (matchingDevice) {
          // Found a preferred microphone
          return matchingDevice
        }
      }
      return undefined
    },
    /**
     * Detaches the media stream and frees all associated resources.
     */
    detachMediaStream () {
      this.microphone?.microphoneStream
        .getTracks()
        .forEach((track: MediaStreamTrack) => {
          track.stop()
        })
      this.microphone = {}
      pendingMicrophonePromise = null
    },
    /**
     * Helper to switch the microphone when a deviceId is provided. This also
     * recreates the microphone node.
     * @param deviceId
     * @param context
     */
    async switchMicrophone (deviceId: string, context?: AudioContext) {
      const logger = useNuxtApp().$logger as Logger
      try {
        const stream = await navigator.mediaDevices.getUserMedia({ audio: { deviceId: { exact: deviceId } } })
        // Update the store with the new stream
        const label = stream.getAudioTracks()[0]?.label || 'Unknown Microphone'
        this.microphone = { microphoneStream: stream, microphoneNode: null, deviceId, label }
        // Attach the stream to the existing (or given) AudioContext
        const audioContext = context || useAudioStore().getContext()
        const source = audioContext.createMediaStreamSource(stream)
        this.microphone.microphoneNode = source
      } catch (error) {
        logger.error('Error switching microphone:', error)
        throw error
      }
    }
  }
})
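A hypothetical consumer of the microphone store (illustrative only): requesting the microphone inside the shared AudioContext, feeding it into an AnalyserNode, and later releasing the hardware again.

import { useAudioStore } from '~/stores/audio'
import { useMicStore } from '~/stores/microphone'

async function startLevelMeter () {
  const audio = useAudioStore()
  const micStore = useMicStore()

  await audio.ensureAudioContextRunning()
  const mic = await micStore.getMicrophone(audio.getContext())

  const analyser = audio.getContext().createAnalyser()
  mic.microphoneNode?.connect(analyser)   // raw, unprocessed input (no echo cancellation etc.)
  return analyser
}

// Later: stop all tracks so the browser's recording indicator disappears.
// useMicStore().detachMicrophone()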
247
stores/player.ts
Normal file
@@ -0,0 +1,247 @@
// stores/player.ts
import { defineStore, type _ActionsTree, type StoreDefinition, type StateTree } from 'pinia'
import { type AudioNodeItem } from './interfaces/AudioNodeItem'
import { useUserStore } from './user'
import { connectAudioNodes } from '~/lib/AudioFunctions'
import type Player from '~/components/Player/Player'

export const usePlayerStore: StoreDefinition = defineStore<'player', StateTree, _ActionsTree>('player', {
  state: () => {
    return {
      audioContext: Howler.ctx as AudioContext,
      nodes: new Map() as Map<string, AudioNodeItem>,
      playing: false,
      scene: 'Lagoon',
      acusticCheckResult: 0,
      scenePlayer: null as Player | null,
      noisePlayer: null as Player | null
    }
  },

  actions: {
    setPlayers ({ scenePlayer, noisePlayer }: any) {
      this.scenePlayer = scenePlayer
      this.noisePlayer = noisePlayer
    },
    //
    // Audio Context Actions
    //
    isPlaying (): boolean {
      return this.playing
    },

    async pauseContext (): Promise<void> {
      const audioContext = await this.getAudioContext()
      if (audioContext) {
        audioContext.suspend()
      }
    },
    async getAudioContext (): Promise<AudioContext> {
      if (Howler.ctx) {
        if (Howler.ctx.state === 'suspended') { Howler.ctx.resume() }
        return Howler.ctx
      } else {
        return await this.initializeAudioContext()
      }
    },
    getContext (): AudioContext {
      if (this.audioContext) { return this.audioContext } else {
        this.audioContext = Howler.ctx
        return this.audioContext
      }
    },
    async initializeAudioContext (): Promise<AudioContext> {
      this.resetAudioContext()
      return await this.audioContext
    },

    resetAudioContext () {
      if (Howler.ctx) {
        Howler.unload()
      } else if (this.audioContext instanceof AudioContext && this.audioContext.state !== 'closed') {
        try {
          Howler.stop()
          Howler.unload()
          this.audioContext = null
          this.setPlaying(false)
        } catch (_error) {
          // useNuxtApp().$logger.error('Error closing AudioContext:', _error)
        }
      }
    },

    setPlaying (stateChange: boolean): void {
      this.playing = stateChange
    },
    //
    // Working with Web Audio Nodes
    //
    removeNodeByType (name: string): void {
      if (this.nodes && this.nodes.has(name)) {
        this.nodes.delete(name)
      }
    },
    getNodeByType (name: string): AudioNodeItem | null {
      if (this.nodes instanceof Map) {
        return this.nodes.get(name) as AudioNodeItem
      }
      return null
    },
    getMicrophoneNode (): AudioNodeItem | null {
      return this.nodes.get('microphone') ?? null
    },
    async getBufferSourceNode (name: string): Promise<AudioNodeItem | null> {
      const node = this.nodes.get(name)
      if (node) {
        return node
      } else {
        await this.createBufferSource(name)
        const createdNode = this.getNodeByType(name)
        return createdNode
      }
    },
    async createMediaStreamNodeAsync (stream: MediaStream): Promise<MediaStreamAudioSourceNode> {
      const audioCtx: AudioContext = await this.getAudioContext()
      const streamNode = audioCtx.createMediaStreamSource(stream)
      this.addNode('microphone', streamNode)
      return streamNode
    },
    createMediaStreamNode (stream: MediaStream, audioContext: AudioContext): MediaStreamAudioSourceNode {
      const audioCtx: AudioContext = audioContext
      const streamNode = audioCtx.createMediaStreamSource(stream)
      this.addNode('microphone', streamNode)
      return streamNode
    },
    async createBufferSource (name: string): Promise<AudioNodeItem> {
      const bufferStore = this.bufferStore()
      const audioBuffer = await bufferStore.getAudioBufferByName(name)
      const audioCtx = await this.getAudioContext()
      const abn = audioCtx.createBufferSource()
      abn.buffer = audioBuffer
      const result = this.addNode(name, abn)
      return result
    },
    disconnectAllNodes (): void {
      if (this.nodes.size > 0) {
        this.nodes.forEach((n: AudioNodeItem) => n.node?.disconnect())
        this.nodes = new Map<string, AudioNodeItem>()
      }
    },
    disconnectNode (node: AudioNodeItem): void {
      node.node?.disconnect()
    },
    startNode (node: AudioNodeItem) {
      if (node.node instanceof AudioBufferSourceNode) {
        node.node?.start(0)
        node.started = true
      } else {
        useNuxtApp().$logger.info(`Node ${node.type} is not an AudioBufferSourceNode and cannot be started`)
      }
    },
    addNode (type: string, node: AudioNode): AudioNodeItem {
      const existingNode = this.nodes.get(type)

      const isCriticalType = node instanceof AudioBufferSourceNode || node instanceof OscillatorNode
      const wasStarted = existingNode?.started ?? false

      if (existingNode) {
        if (isCriticalType && wasStarted) {
          useNuxtApp().$logger.warn(`[addNode] ⚠️ Node "${type}" is of type ${node.constructor.name} and has already been started. Starting it again would fail.`)
        }
      }

      const audioNodeItem: AudioNodeItem = {
        type,
        node,
        started: wasStarted,
        previousNodeName: existingNode?.previousNodeName,
        nextNodeName: existingNode?.nextNodeName
      }

      this.nodes.set(type, audioNodeItem)
      return audioNodeItem
    },
    connectNodes (nodeFrom: AudioNodeItem, nodeTo: AudioNodeItem, channelOutput?: number, channelInput?: number): void {
      if (nodeFrom && nodeTo && !channelInput && !channelOutput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode)
      }
      if (nodeFrom && nodeTo && channelOutput && !channelInput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode, channelOutput)
      }
      if (nodeFrom && nodeTo && channelOutput && channelInput) {
        connectAudioNodes(nodeFrom.node as AudioNode, nodeTo.node as AudioNode, channelOutput, channelInput)
      }

      // Record the linkage so neighbouring nodes can be looked up later
      if (nodeTo.type) { nodeFrom.nextNodeName = nodeTo.type }
      nodeTo.previousNodeName = nodeFrom.type
    },
    startAllSources (): void {
      const app = useNuxtApp()
      // @ts-ignore
      const user = useUserStore(app.$pinia)
      const userObj = user.user as any
      this.nodes.forEach((node: AudioNodeItem, key: string) => {
        const userSoundscape = userObj.settings.soundscape === 'Tropics' ? 'tropics' : userObj.settings.soundscape || 'meadow'
        if (key === userSoundscape || key === 'noise') {
          if (node.node instanceof AudioBufferSourceNode && !node.started) {
            this.startBufferSourceNode(node.node)
          }
        }
      })
    },
    startBufferSourceNode (node: AudioBufferSourceNode) {
      this.nodes.forEach((value: AudioNodeItem) => {
        if (value.node === node) {
          const c = value.node as AudioBufferSourceNode
          c.start(0)
          value.started = true
        }
      })
    },
    replaceMusicNode (oldNode: AudioNodeItem, newNode: AudioNodeItem): AudioNodeItem {
      if (!this.nodes.has(oldNode.type) || !this.nodes.has(newNode.type)) {
        // this.$logger.warn('AudioNodes not present in nodes Map')
      }
      if (!(oldNode.node instanceof AudioBufferSourceNode) || !(newNode.node instanceof AudioBufferSourceNode)) {
        // createLogger().warn('AudioNodes not AudioBufferSourceNodes')
      }

      const nextNode = oldNode.nextNodeName ? this.nodes.get(oldNode.nextNodeName) : null
      if (!nextNode || !(nextNode.node instanceof ChannelSplitterNode)) {
        // createLogger().warn('AudioNodes not ChannelSplitterNodes')
      } else {
        oldNode.node?.disconnect()
        oldNode.started = false
        oldNode = {} as AudioNodeItem
        if (newNode && newNode.node) {
          if (nextNode.type) { newNode.nextNodeName = nextNode.type as string }
          newNode.node.connect(nextNode?.node as AudioNode)
          return newNode
        }
      }
      return newNode
    }
  }
})
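A hypothetical graph-wiring sketch for the player store (illustrative only; it assumes Howler has already created its AudioContext, so getContext() returns a usable context, and the node names are made up):

import { usePlayerStore } from '~/stores/player'

function wireScene () {
  const player = usePlayerStore()
  const ctx = player.getContext()

  const source = player.addNode('meadow', ctx.createBufferSource())
  const splitter = player.addNode('splitter', ctx.createChannelSplitter(2))

  // connectNodes() records previousNodeName/nextNodeName alongside the Web Audio connection.
  player.connectNodes(source, splitter)
  player.startNode(source)   // marks the buffer source as started
}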
78
stores/timer.js
Normal file
@@ -0,0 +1,78 @@
import { defineStore } from 'pinia'

export const useTimerStore = defineStore('timer', {
  state: () => ({
    started: false,
    progressBarWidth: '100%',
    settings: {
      timer: {
        work: {
          text: 'Focus',
          time: 25,
          color: '#72A5ED'
        },
        short: {
          text: 'Short Break',
          time: 5,
          color: '#FBA9A9'
        },
        long: {
          text: 'Long Break',
          time: 15,
          color: '#D77574'
        }
      },
      autoStart: false,
      maxSessions: 4 // the number of sessions before a long break
    },
    timeRemaining: 25 * 60,
    currentSession: 'work',
    currentSessionNumber: 1,
    showNotification: true,
    playSessionEndSound: true,
    alarmAudio: 'bell_focus'
  }),
  getters: {
    isStarted: state => state.started,
    getTimeRemaining: state => state.timeRemaining,
    getSessionTime: state => state.settings.timer[state.currentSession].time,
    getCurrentSession: state => state.currentSession,
    isWork: state => state.currentSession === 'work',
    getCurrentSessionNumber: state => state.currentSessionNumber,
    getMaxSessions: state => state.settings.maxSessions,
    getCurrentColor: state => state.settings.timer[state.currentSession].color,
    getProgressBarWidth: state => state.progressBarWidth
  },
  actions: {
    toggleTimer () {
      this.started = !this.started
    },
    clearTimeRemaining () {
      this.timeRemaining = this.getSessionTime * 60
    },
    setTimeRemaining (time) {
      this.timeRemaining = time
    },
    nextSession () {
      // if the current session is not work (i.e. a break)
      if (this.currentSession !== 'work') {
        this.currentSessionNumber = this.currentSessionNumber % this.settings.maxSessions + 1
        this.currentSession = 'work'
      } else if (this.currentSessionNumber === this.settings.maxSessions) {
        this.currentSession = 'long'
      } else {
        this.currentSession = 'short'
      }
      this.timeRemaining = this.settings.timer[this.currentSession].time * 60
    },
    setMaxSessions (maxSessionsNumber) {
      this.settings.maxSessions = maxSessionsNumber
    },
    setTimerSettings (workTime, shortBreakTime, longBreakTime) {
      this.settings.timer.work.time = workTime
      this.settings.timer.short.time = shortBreakTime
      this.settings.timer.long.time = longBreakTime
      this.timeRemaining = workTime * 60
    }
  }
})
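A hypothetical one-second tick loop around the timer store (illustrative only), showing how nextSession() cycles work → short/long break → work:

import { useTimerStore } from '~/stores/timer'

const timer = useTimerStore()
timer.toggleTimer()   // start counting

setInterval(() => {
  if (!timer.isStarted) { return }
  timer.setTimeRemaining(timer.getTimeRemaining - 1)
  if (timer.getTimeRemaining <= 0) {
    timer.nextSession()   // also resets timeRemaining for the next session
  }
}, 1000)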
108
stores/user.js
Normal file
@@ -0,0 +1,108 @@
// stores/user.js
import { defineStore } from 'pinia'

export const useUserStore = defineStore('user', {
  state: () => {
    return {
      user: {},
      is_login: false,
      token: '',
      active_subscription: '',
      audioInputDevice: {},
      audioOutputDevice: {},
      nextAuthRequired: Date.now(), // allow offline access for one day
      offlineMode: false, // if the user is offline, don't try to connect to the backend
      soundMode: localStorage.getItem('soundMode') || 'soundscape'
    }
  },
  actions: {
    login (user, token) {
      this.user = user
      this.is_login = true
      this.token = token
      if (!this.offlineMode) { this.nextAuthRequired = Date.now() + 24 * 60 * 60 * 1000 }
    },
    logout () {
      this.user = {}
      this.is_login = false
    },
    updateUser (user) {
      this.user = user
    },
    saveInputdevice (device) {
      this.audioInputDevice = device
    },
    saveOutputDevice (device) {
      this.audioOutputDevice = device
    },
    changeOfflineMode (state) {
      this.offlineMode = state
    },
    updateHeadsetType (state) {
      if (this.user) {
        if (!this.user.settings) {
          this.user.settings = {}
        }
        this.user.settings.headphone_type = state
      }
    },
    updateANC (state) {
      if (this.user) {
        if (!this.user.settings) {
          this.user.settings = {}
        }
        this.user.settings.anc_type = state
      }
    },
    updateSoundMode (mode) {
      if (['soundscape', 'music'].includes(mode)) {
        this.soundMode = mode
        localStorage.setItem('soundMode', mode)
        window.dispatchEvent(new Event('localStorageUpdated'))
      }
    },
    markOnboardingCompleted () {
      if (this.user) {
        this.user.onboarding_completed = true
      }
    },
    toggleTimerVisibility (status) {
      if (this.user) {
        this.user.timer_activated = status
      }
    }
  },
  getters: {
    getUserScenery () {
      if (this.user.scenery) {
        return this.user.settings.soundscape
      } else {
        return 'meadow'
      }
    },
    isAuthenticated () {
      if (this.is_login === true) {
        this.changeOfflineMode(false)
        return true
      } else if (Date.now() < this.nextAuthRequired) {
        this.changeOfflineMode(true)
        return true
      }
      return false
    },
    isOnboardingCompleted: state => state.user?.onboarding_completed === true,
    isTimerActivated: state => state.user?.timer_activated === true
  },
  persist: {
    strategies: [
      {
        storage: localStorage,
        paths: ['token'] // only the token is persisted
      }
    ]
  }
})

window.addEventListener('localStorageUpdated', () => {
  const store = useUserStore()
  store.soundMode = localStorage.getItem('soundMode') || 'soundscape'
})
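A hypothetical login flow against the user store (illustrative only; the user object and token are made-up placeholders):

import { useUserStore } from '~/stores/user'

const userStore = useUserStore()
userStore.login({ name: 'Ada', settings: {} }, 'token-from-backend')   // placeholder payload

// isAuthenticated stays true for 24 h after the last online login and
// flips offlineMode on when only that cached window is still valid.
if (userStore.isAuthenticated) {
  // proceed into the app
}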