diff --git a/api-reports/2_12.txt b/api-reports/2_12.txt
index 9d5cd5409..17d292297 100644
--- a/api-reports/2_12.txt
+++ b/api-reports/2_12.txt
@@ -81,8 +81,8 @@ Algorithm[JT] val name: String
 AnalyserNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AnalyserNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AnalyserNode[JT] var channelCount: Int
-AnalyserNode[JT] var channelCountMode: Int
-AnalyserNode[JT] var channelInterpretation: String
+AnalyserNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AnalyserNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AnalyserNode[JT] def connect(audioNode: AudioNode): Unit
 AnalyserNode[JT] def connect(audioParam: AudioParam): Unit
 AnalyserNode[JT] val context: AudioContext
@@ -425,8 +425,8 @@ AudioBufferSourceNode[JT] def addEventListener[T <: Event](`type`: String, liste
 AudioBufferSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioBufferSourceNode[JT] var buffer: AudioBuffer
 AudioBufferSourceNode[JT] var channelCount: Int
-AudioBufferSourceNode[JT] var channelCountMode: Int
-AudioBufferSourceNode[JT] var channelInterpretation: String
+AudioBufferSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioBufferSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioBufferSourceNode[JT] def connect(audioNode: AudioNode): Unit
 AudioBufferSourceNode[JT] def connect(audioParam: AudioParam): Unit
 AudioBufferSourceNode[JT] val context: AudioContext
@@ -441,10 +441,23 @@ AudioBufferSourceNode[JT] var onended: js.Function1[Event, _]
 AudioBufferSourceNode[JT] val playbackRate: AudioParam
 AudioBufferSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioBufferSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-AudioBufferSourceNode[JT] def start(when: Double?, offset: Double?, duration: Double?): Unit
-AudioBufferSourceNode[JT] def stop(when: Double?): Unit
+AudioBufferSourceNode[JT] def start(): Unit
+AudioBufferSourceNode[JT] def start(when: Double): Unit
+AudioBufferSourceNode[JT] def start(when: Double, offset: Double): Unit
+AudioBufferSourceNode[JT] def start(when: Double, offset: Double, duration: Double): Unit
+AudioBufferSourceNode[JT] def stop(): Unit
+AudioBufferSourceNode[JT] def stop(when: Double): Unit
+AudioBufferSourceNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[AudioBufferSourceNodeOptions]?): AudioBufferSourceNode
+AudioBufferSourceNodeOptions[JT] var buffer: js.UndefOr[AudioBuffer]
+AudioBufferSourceNodeOptions[JT] var detune: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var loop: js.UndefOr[Boolean]
+AudioBufferSourceNodeOptions[JT] var loopEnd: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var loopStart: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var playbackRate: js.UndefOr[Double]
 AudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioContext[JC] val audioWorklet: AudioWorklet
+AudioContext[JC] def baseLatency: Double
 AudioContext[JC] def close(): js.Promise[Unit]
 AudioContext[JC] def createAnalyser(): AnalyserNode
 AudioContext[JC] def createBiquadFilter(): BiquadFilterNode
@@ -468,7 +481,9 @@ AudioContext[JC] def currentTime: Double
 AudioContext[JC] def decodeAudioData(audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _]?, errorCallback: js.Function0[_]?): js.Promise[AudioBuffer]
 AudioContext[JC] val destination: AudioDestinationNode
 AudioContext[JC] def dispatchEvent(evt: Event): Boolean
+AudioContext[JC] def getOutputTimestamp: AudioTimestamp
 AudioContext[JC] val listener: AudioListener
+AudioContext[JC] def outputLatency: Double
 AudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioContext[JC] def resume(): js.Promise[Unit]
@@ -478,8 +493,8 @@ AudioContext[JC] def suspend(): js.Promise[Unit]
 AudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioDestinationNode[JT] var channelCount: Int
-AudioDestinationNode[JT] var channelCountMode: Int
-AudioDestinationNode[JT] var channelInterpretation: String
+AudioDestinationNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioDestinationNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioDestinationNode[JT] def connect(audioNode: AudioNode): Unit
 AudioDestinationNode[JT] def connect(audioParam: AudioParam): Unit
 AudioDestinationNode[JT] val context: AudioContext
@@ -493,8 +508,8 @@ AudioDestinationNode[JT] def removeEventListener[T <: Event](`type`: String, lis
 AudioListener[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioListener[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioListener[JT] var channelCount: Int
-AudioListener[JT] var channelCountMode: Int
-AudioListener[JT] var channelInterpretation: String
+AudioListener[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioListener[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioListener[JT] def connect(audioNode: AudioNode): Unit
 AudioListener[JT] def connect(audioParam: AudioParam): Unit
 AudioListener[JT] val context: AudioContext
@@ -511,8 +526,8 @@ AudioListener[JT] var speedOfSound: Double
 AudioNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioNode[JT] var channelCount: Int
-AudioNode[JT] var channelCountMode: Int
-AudioNode[JT] var channelInterpretation: String
+AudioNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioNode[JT] def connect(audioNode: AudioNode): Unit
 AudioNode[JT] def connect(audioParam: AudioParam): Unit
 AudioNode[JT] val context: AudioContext
@@ -522,12 +537,19 @@ AudioNode[JT] val numberOfInputs: Int
 AudioNode[JT] val numberOfOutputs: Int
 AudioNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioNodeChannelCountMode[JT]
+AudioNodeChannelCountMode[SO] val `clamped-max`: AudioNodeChannelCountMode
+AudioNodeChannelCountMode[SO] val explicit: AudioNodeChannelCountMode
+AudioNodeChannelCountMode[SO] val max: AudioNodeChannelCountMode
+AudioNodeChannelInterpretation[JT]
+AudioNodeChannelInterpretation[SO] val discrete: AudioNodeChannelInterpretation
+AudioNodeChannelInterpretation[SO] val speakers: AudioNodeChannelInterpretation
 AudioParam[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioParam[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioParam[JT] def cancelScheduledValues(startTime: Double): Unit
 AudioParam[JT] var channelCount: Int
-AudioParam[JT] var channelCountMode: Int
-AudioParam[JT] var channelInterpretation: String
+AudioParam[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioParam[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioParam[JT] def connect(audioNode: AudioNode): Unit
 AudioParam[JT] def connect(audioParam: AudioParam): Unit
 AudioParam[JT] val context: AudioContext
@@ -536,6 +558,8 @@ AudioParam[JT] def disconnect(output: AudioNode?): Unit
 AudioParam[JT] def dispatchEvent(evt: Event): Boolean
 AudioParam[JT] def exponentialRampToValueAtTime(value: Double, endTime: Double): Unit
 AudioParam[JT] def linearRampToValueAtTime(value: Double, endTime: Double): Unit
+AudioParam[JT] val maxValue: Double
+AudioParam[JT] val minValue: Double
 AudioParam[JT] val numberOfInputs: Int
 AudioParam[JT] val numberOfOutputs: Int
 AudioParam[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
@@ -544,6 +568,43 @@ AudioParam[JT] def setTargetAtTime(target: Double, startTime: Double, timeConsta
 AudioParam[JT] def setValueAtTime(value: Double, startTime: Double): Unit
 AudioParam[JT] def setValueCurveAtTime(values: js.typedarray.Float32Array, startTime: Double, duration: Double): Unit
 AudioParam[JT] var value: Double
+AudioParamAutomationRate[JT]
+AudioParamAutomationRate[SO] val `a-rate`: AudioParamAutomationRate
+AudioParamAutomationRate[SO] val `k-rate`: AudioParamAutomationRate
+AudioParamDescriptor[JT] var automationRate: js.UndefOr[AudioParamAutomationRate]
+AudioParamDescriptor[JT] var defaultValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var maxValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var minValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var name: String
+AudioParamMap[JC] @JSBracketAccess def apply(index: K): V
+AudioParamMap[JC] def entries(): js.Iterator[js.Tuple2[K, V]]
+AudioParamMap[JC] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+AudioParamMap[JC] def has(key: K): Boolean
+AudioParamMap[JC] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+AudioParamMap[JC] def keys(): js.Iterator[K]
+AudioParamMap[JC] def size: Int
+AudioParamMap[JC] def values(): js.Iterator[V]
+AudioScheduledSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioScheduledSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioScheduledSourceNode[JC] var channelCount: Int
+AudioScheduledSourceNode[JC] var channelCountMode: AudioNodeChannelCountMode
+AudioScheduledSourceNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+AudioScheduledSourceNode[JC] def connect(audioNode: AudioNode): Unit
+AudioScheduledSourceNode[JC] def connect(audioParam: AudioParam): Unit
+AudioScheduledSourceNode[JC] val context: AudioContext
+AudioScheduledSourceNode[JC] def disconnect(output: AudioNode?): Unit
+AudioScheduledSourceNode[JC] def dispatchEvent(evt: Event): Boolean
+AudioScheduledSourceNode[JC] val numberOfInputs: Int
+AudioScheduledSourceNode[JC] val numberOfOutputs: Int
+AudioScheduledSourceNode[JC] var onended: js.Function1[Event, _]
+AudioScheduledSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioScheduledSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioScheduledSourceNode[JC] def start(): Unit
+AudioScheduledSourceNode[JC] def start(when: Double): Unit
+AudioScheduledSourceNode[JC] def stop(): Unit
+AudioScheduledSourceNode[JC] def stop(when: Double): Unit
+AudioTimestamp[JT] var contextTime: Double
+AudioTimestamp[JT] var performanceTime: Double
 AudioTrack[JT] var enabled: Boolean
 AudioTrack[JT] val id: String
 AudioTrack[JT] val kind: String
@@ -560,6 +621,68 @@ AudioTrackList[JT] var onchange: js.Function1[Event, Any]
 AudioTrackList[JT] var onremovetrack: js.Function1[TrackEvent, Any]
 AudioTrackList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioTrackList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorklet[JC] def addModule(moduleURL: String, options: WorkletOptions?): js.Promise[Unit]
+AudioWorkletGlobalScope[JC] def currentFrame: Int
+AudioWorkletGlobalScope[JC] def currentTime: Double
+AudioWorkletGlobalScope[JC] def registerProcessor(name: String, processorCtor: js.Dynamic): Unit
+AudioWorkletGlobalScope[JC] def sampleRate: Float
+AudioWorkletGlobalScope[JO] def globalThis: AudioWorkletGlobalScope
+AudioWorkletNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioWorkletNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorkletNode[JC] var channelCount: Int
+AudioWorkletNode[JC] var channelCountMode: AudioNodeChannelCountMode
+AudioWorkletNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+AudioWorkletNode[JC] def connect(audioNode: AudioNode): Unit
+AudioWorkletNode[JC] def connect(audioParam: AudioParam): Unit
+AudioWorkletNode[JC] val context: AudioContext
+AudioWorkletNode[JC] def disconnect(output: AudioNode?): Unit
+AudioWorkletNode[JC] def dispatchEvent(evt: Event): Boolean
+AudioWorkletNode[JC] val numberOfInputs: Int
+AudioWorkletNode[JC] val numberOfOutputs: Int
+AudioWorkletNode[JC] var onprocessorerror: js.Function1[Event, _]
+AudioWorkletNode[JC] val parameters: AudioParamMap
+AudioWorkletNode[JC] val port: MessagePort
+AudioWorkletNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioWorkletNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorkletNodeOptions[JT] var numberOfInputs: js.UndefOr[Int]
+AudioWorkletNodeOptions[JT] var numberOfOutputs: js.UndefOr[Int]
+AudioWorkletNodeOptions[JT] var outputChannelCount: js.UndefOr[js.Array[Int]]
+AudioWorkletNodeOptions[JT] var parameterData: js.UndefOr[js.Object]
+AudioWorkletNodeOptions[JT] var processorOptions: js.UndefOr[js.Any]
+AudioWorkletProcessor[JC] val port: MessagePort
+AudioWorkletProcessor[JC] def process(inputs: js.Array[js.Array[js.typedarray.Float32Array]], outputs: js.Array[js.Array[js.typedarray.Float32Array]], parameters: js.Object): Boolean
+AudioWorkletProcessorOptions[JT] var numberOfInputs: Int
+AudioWorkletProcessorOptions[JT] var numberOfOutputs: Int
+AudioWorkletProcessorOptions[JT] var outputChannelCount: js.Array[Int]
+AudioWorkletProcessorOptions[JT] var parameterData: js.UndefOr[js.Object]
+AudioWorkletProcessorOptions[JT] var processorOptions: js.UndefOr[js.Any]
+BaseAudioContext[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+BaseAudioContext[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+BaseAudioContext[JT] val audioWorklet: AudioWorklet
+BaseAudioContext[JT] def createAnalyser(): AnalyserNode
+BaseAudioContext[JT] def createBiquadFilter(): BiquadFilterNode
+BaseAudioContext[JT] def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer
+BaseAudioContext[JT] def createBufferSource(): AudioBufferSourceNode
+BaseAudioContext[JT] def createChannelMerger(numberOfInputs: Int?): ChannelMergerNode
+BaseAudioContext[JT] def createChannelSplitter(numberOfOutputs: Int?): ChannelSplitterNode
+BaseAudioContext[JT] def createConvolver(): ConvolverNode
+BaseAudioContext[JT] def createDelay(maxDelayTime: Int): DelayNode
+BaseAudioContext[JT] def createDynamicsCompressor(): DynamicsCompressorNode
+BaseAudioContext[JT] def createGain(): GainNode
+BaseAudioContext[JT] def createOscillator(): OscillatorNode
+BaseAudioContext[JT] def createPanner(): PannerNode
+BaseAudioContext[JT] def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave
+BaseAudioContext[JT] def createStereoPanner(): StereoPannerNode
+BaseAudioContext[JT] def createWaveShaper(): WaveShaperNode
+BaseAudioContext[JT] def currentTime: Double
+BaseAudioContext[JT] def decodeAudioData(audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _]?, errorCallback: js.Function0[_]?): js.Promise[AudioBuffer]
+BaseAudioContext[JT] val destination: AudioDestinationNode
+BaseAudioContext[JT] def dispatchEvent(evt: Event): Boolean
+BaseAudioContext[JT] val listener: AudioListener
+BaseAudioContext[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+BaseAudioContext[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+BaseAudioContext[JT] val sampleRate: Double
+BaseAudioContext[JT] def state: String
 BeforeUnloadEvent[JC] def bubbles: Boolean
 BeforeUnloadEvent[JC] def cancelBubble: Boolean
 BeforeUnloadEvent[JC] def cancelable: Boolean
@@ -579,8 +702,8 @@ BiquadFilterNode[JT] val Q: AudioParam
 BiquadFilterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 BiquadFilterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 BiquadFilterNode[JT] var channelCount: Int
-BiquadFilterNode[JT] var channelCountMode: Int
-BiquadFilterNode[JT] var channelInterpretation: String
+BiquadFilterNode[JT] var channelCountMode: AudioNodeChannelCountMode
+BiquadFilterNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 BiquadFilterNode[JT] def connect(audioNode: AudioNode): Unit
 BiquadFilterNode[JT] def connect(audioParam: AudioParam): Unit
 BiquadFilterNode[JT] val context: AudioContext
@@ -603,6 +726,26 @@ Blob[JC] def stream(): ReadableStream[Uint8Array]
 Blob[JC] def text(): js.Promise[String]
 Blob[JC] def `type`: String
 Blob[JO]
+BlobEvent[JC] def bubbles: Boolean
+BlobEvent[JC] def cancelBubble: Boolean
+BlobEvent[JC] def cancelable: Boolean
+BlobEvent[JC] def composed: Boolean
+BlobEvent[JC] def currentTarget: EventTarget
+BlobEvent[JC] def data: Blob
+BlobEvent[JC] def defaultPrevented: Boolean
+BlobEvent[JC] def eventPhase: Int
+BlobEvent[JC] def isTrusted: Boolean
+BlobEvent[JC] def preventDefault(): Unit
+BlobEvent[JC] def stopImmediatePropagation(): Unit
+BlobEvent[JC] def stopPropagation(): Unit
+BlobEvent[JC] def target: EventTarget
+BlobEvent[JC] def timeStamp: Double
+BlobEvent[JC] def `type`: String
+BlobEventInit[JT] var bubbles: js.UndefOr[Boolean]
+BlobEventInit[JT] var cancelable: js.UndefOr[Boolean]
+BlobEventInit[JT] var composed: js.UndefOr[Boolean]
+BlobEventInit[JT] var data: Blob
+BlobEventInit[JT] var scoped: js.UndefOr[Boolean]
 BlobPropertyBag[JT] var endings: js.UndefOr[EndingType]
 BlobPropertyBag[JT] var `type`: js.UndefOr[String]
 BlobPropertyBag[SO] def apply(`type`: js.UndefOr[String]?): BlobPropertyBag  (@deprecated in 2.0.0)
@@ -1138,8 +1281,8 @@ CanvasRenderingContext2D[JC] def translate(x: Double, y: Double): Unit
 ChannelMergerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ChannelMergerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ChannelMergerNode[JT] var channelCount: Int
-ChannelMergerNode[JT] var channelCountMode: Int
-ChannelMergerNode[JT] var channelInterpretation: String
+ChannelMergerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ChannelMergerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ChannelMergerNode[JT] def connect(audioNode: AudioNode): Unit
 ChannelMergerNode[JT] def connect(audioParam: AudioParam): Unit
 ChannelMergerNode[JT] val context: AudioContext
@@ -1152,8 +1295,8 @@ ChannelMergerNode[JT] def removeEventListener[T <: Event](`type`: String, listen
 ChannelSplitterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ChannelSplitterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ChannelSplitterNode[JT] var channelCount: Int
-ChannelSplitterNode[JT] var channelCountMode: Int
-ChannelSplitterNode[JT] var channelInterpretation: String
+ChannelSplitterNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ChannelSplitterNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ChannelSplitterNode[JT] def connect(audioNode: AudioNode): Unit
 ChannelSplitterNode[JT] def connect(audioParam: AudioParam): Unit
 ChannelSplitterNode[JT] val context: AudioContext
@@ -1383,14 +1526,35 @@ Console[JT] def time(label: String): Unit
 Console[JT] def timeEnd(label: String): Unit
 Console[JT] def trace(): Unit
 Console[JT] def warn(message: Any, optionalParams: Any*): Unit
+ConstantSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+ConstantSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+ConstantSourceNode[JC] var channelCount: Int
+ConstantSourceNode[JC] var channelCountMode: AudioNodeChannelCountMode
+ConstantSourceNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+ConstantSourceNode[JC] def connect(audioNode: AudioNode): Unit
+ConstantSourceNode[JC] def connect(audioParam: AudioParam): Unit
+ConstantSourceNode[JC] val context: AudioContext
+ConstantSourceNode[JC] def disconnect(output: AudioNode?): Unit
+ConstantSourceNode[JC] def dispatchEvent(evt: Event): Boolean
+ConstantSourceNode[JC] val numberOfInputs: Int
+ConstantSourceNode[JC] val numberOfOutputs: Int
+ConstantSourceNode[JC] val offset: AudioParam
+ConstantSourceNode[JC] var onended: js.Function1[Event, _]
+ConstantSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+ConstantSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+ConstantSourceNode[JC] def start(): Unit
+ConstantSourceNode[JC] def start(when: Double): Unit
+ConstantSourceNode[JC] def stop(): Unit
+ConstantSourceNode[JC] def stop(when: Double): Unit
+ConstantSourceNodeOptions[JT] var offset: js.UndefOr[Double]
 ConvertToBlobOptions[JT] var quality: js.UndefOr[Double]
 ConvertToBlobOptions[JT] var `type`: js.UndefOr[String]
 ConvolverNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ConvolverNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ConvolverNode[JT] var buffer: AudioBuffer
 ConvolverNode[JT] var channelCount: Int
-ConvolverNode[JT] var channelCountMode: Int
-ConvolverNode[JT] var channelInterpretation: String
+ConvolverNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ConvolverNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ConvolverNode[JT] def connect(audioNode: AudioNode): Unit
 ConvolverNode[JT] def connect(audioParam: AudioParam): Unit
 ConvolverNode[JT] val context: AudioContext
@@ -1598,8 +1762,8 @@ DedicatedWorkerGlobalScope[JT] def setTimeout(handler: js.Function0[Any], timeou
 DelayNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 DelayNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 DelayNode[JT] var channelCount: Int
-DelayNode[JT] var channelCountMode: Int
-DelayNode[JT] var channelInterpretation: String
+DelayNode[JT] var channelCountMode: AudioNodeChannelCountMode
+DelayNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 DelayNode[JT] def connect(audioNode: AudioNode): Unit
 DelayNode[JT] def connect(audioParam: AudioParam): Unit
 DelayNode[JT] val context: AudioContext
@@ -1924,8 +2088,8 @@ DynamicsCompressorNode[JT] def addEventListener[T <: Event](`type`: String, list
 DynamicsCompressorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 DynamicsCompressorNode[JT] val attack: AudioParam
 DynamicsCompressorNode[JT] var channelCount: Int
-DynamicsCompressorNode[JT] var channelCountMode: Int
-DynamicsCompressorNode[JT] var channelInterpretation: String
+DynamicsCompressorNode[JT] var channelCountMode: AudioNodeChannelCountMode
+DynamicsCompressorNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 DynamicsCompressorNode[JT] def connect(audioNode: AudioNode): Unit
 DynamicsCompressorNode[JT] def connect(audioParam: AudioParam): Unit
 DynamicsCompressorNode[JT] val context: AudioContext
@@ -2299,8 +2463,8 @@ FullscreenOptions[JT] var navigationUI: js.UndefOr[String]
 GainNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 GainNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 GainNode[JT] var channelCount: Int
-GainNode[JT] var channelCountMode: Int
-GainNode[JT] var channelInterpretation: String
+GainNode[JT] var channelCountMode: AudioNodeChannelCountMode
+GainNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 GainNode[JT] def connect(audioNode: AudioNode): Unit
 GainNode[JT] def connect(audioParam: AudioParam): Unit
 GainNode[JT] val context: AudioContext
@@ -2311,6 +2475,8 @@ GainNode[JT] val numberOfInputs: Int
 GainNode[JT] val numberOfOutputs: Int
 GainNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 GainNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+GainNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[GainNodeOptions]?): GainNode
+GainNodeOptions[JT] var gain: js.UndefOr[Double]
 Gamepad[JT] val axes: js.Array[Double]
 Gamepad[JT] val buttons: js.Array[GamepadButton]
 Gamepad[JT] val connected: Boolean
@@ -16075,17 +16241,20 @@ MediaDevices[JT] def removeEventListener[T <: Event](`type`: String, listener: j
 MediaElementAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaElementAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaElementAudioSourceNode[JT] var channelCount: Int
-MediaElementAudioSourceNode[JT] var channelCountMode: Int
-MediaElementAudioSourceNode[JT] var channelInterpretation: String
+MediaElementAudioSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaElementAudioSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaElementAudioSourceNode[JT] def connect(audioNode: AudioNode): Unit
 MediaElementAudioSourceNode[JT] def connect(audioParam: AudioParam): Unit
 MediaElementAudioSourceNode[JT] val context: AudioContext
 MediaElementAudioSourceNode[JT] def disconnect(output: AudioNode?): Unit
 MediaElementAudioSourceNode[JT] def dispatchEvent(evt: Event): Boolean
+MediaElementAudioSourceNode[JT] def mediaElement: HTMLMediaElement
 MediaElementAudioSourceNode[JT] val numberOfInputs: Int
 MediaElementAudioSourceNode[JT] val numberOfOutputs: Int
 MediaElementAudioSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaElementAudioSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaElementAudioSourceNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[MediaElementAudioSourceNodeOptions]?): MediaElementAudioSourceNode
+MediaElementAudioSourceNodeOptions[JT] var mediaElement: HTMLMediaElement
 MediaError[JC] def code: Int
 MediaError[JO] val MEDIA_ERR_ABORTED: Int
 MediaError[JO] val MEDIA_ERR_DECODE: Int
@@ -16108,6 +16277,20 @@ MediaQueryList[JT] def removeEventListener[T <: Event](`type`: String, listener:
 MediaQueryList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaQueryList[JT] def removeListener(listener: MediaQueryListListener): Unit  (@deprecated in 2.4.0)
 MediaQueryListListener[JT] def apply(mql: MediaQueryList): Unit  (@deprecated in 2.4.0)
+MediaRecorder[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+MediaRecorder[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaRecorder[JC] def dispatchEvent(evt: Event): Boolean
+MediaRecorder[JC] var ondataavailable: js.Function1[BlobEvent, Any]
+MediaRecorder[JC] var onerror: js.Function1[Event, Any]
+MediaRecorder[JC] var onstop: js.Function1[Event, Any]
+MediaRecorder[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+MediaRecorder[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaRecorder[JC] def resume(): Unit
+MediaRecorder[JC] def start(): Unit
+MediaRecorder[JC] def stop(): Unit
+MediaRecorderOptions[JT] var audioBitsPerSecond: js.UndefOr[Long]
+MediaRecorderOptions[JT] var bitsPerSecond: js.UndefOr[Long]
+MediaRecorderOptions[JT] var videoBitsPerSecond: js.UndefOr[Long]
 MediaSource[JC] def activeSourceBuffers: SourceBufferList
 MediaSource[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaSource[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
@@ -16149,8 +16332,8 @@ MediaStream[JC] def removeTrack(track: MediaStreamTrack): Unit
 MediaStreamAudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaStreamAudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaStreamAudioDestinationNode[JT] var channelCount: Int
-MediaStreamAudioDestinationNode[JT] var channelCountMode: Int
-MediaStreamAudioDestinationNode[JT] var channelInterpretation: String
+MediaStreamAudioDestinationNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaStreamAudioDestinationNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaStreamAudioDestinationNode[JT] def connect(audioNode: AudioNode): Unit
 MediaStreamAudioDestinationNode[JT] def connect(audioParam: AudioParam): Unit
 MediaStreamAudioDestinationNode[JT] val context: AudioContext
@@ -16164,8 +16347,8 @@ MediaStreamAudioDestinationNode[JT] var stream: MediaStream
 MediaStreamAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaStreamAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaStreamAudioSourceNode[JT] var channelCount: Int
-MediaStreamAudioSourceNode[JT] var channelCountMode: Int
-MediaStreamAudioSourceNode[JT] var channelInterpretation: String
+MediaStreamAudioSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaStreamAudioSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaStreamAudioSourceNode[JT] def connect(audioNode: AudioNode): Unit
 MediaStreamAudioSourceNode[JT] def connect(audioParam: AudioParam): Unit
 MediaStreamAudioSourceNode[JT] val context: AudioContext
@@ -16577,7 +16760,7 @@ OfflineAudioCompletionEvent[JT] def timeStamp: Double
 OfflineAudioCompletionEvent[JT] def `type`: String
 OfflineAudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OfflineAudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-OfflineAudioContext[JC] def close(): js.Promise[Unit]
+OfflineAudioContext[JC] val audioWorklet: AudioWorklet
 OfflineAudioContext[JC] def createAnalyser(): AnalyserNode
 OfflineAudioContext[JC] def createBiquadFilter(): BiquadFilterNode
 OfflineAudioContext[JC] def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer
@@ -16588,9 +16771,6 @@ OfflineAudioContext[JC] def createConvolver(): ConvolverNode
 OfflineAudioContext[JC] def createDelay(maxDelayTime: Int): DelayNode
 OfflineAudioContext[JC] def createDynamicsCompressor(): DynamicsCompressorNode
 OfflineAudioContext[JC] def createGain(): GainNode
-OfflineAudioContext[JC] def createMediaElementSource(myMediaElement: HTMLMediaElement): MediaElementAudioSourceNode
-OfflineAudioContext[JC] def createMediaStreamDestination(): MediaStreamAudioDestinationNode
-OfflineAudioContext[JC] def createMediaStreamSource(stream: MediaStream): MediaStreamAudioSourceNode
 OfflineAudioContext[JC] def createOscillator(): OscillatorNode
 OfflineAudioContext[JC] def createPanner(): PannerNode
 OfflineAudioContext[JC] def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave
@@ -16603,11 +16783,10 @@ OfflineAudioContext[JC] def dispatchEvent(evt: Event): Boolean
 OfflineAudioContext[JC] val listener: AudioListener
 OfflineAudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OfflineAudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-OfflineAudioContext[JC] def resume(): js.Promise[Unit]
 OfflineAudioContext[JC] val sampleRate: Double
 OfflineAudioContext[JC] def startRendering(): js.Promise[AudioBuffer]
 OfflineAudioContext[JC] def state: String
-OfflineAudioContext[JC] def suspend(): js.Promise[Unit]
+OfflineAudioContext[JC] def suspend(suspendTime: Double): js.Promise[Unit]
 OffscreenCanvas[JC] def convertToBlob(options: ConvertToBlobOptions?): js.Promise[Blob]
 OffscreenCanvas[JC] def getContext(contextType: String): js.Dynamic
 OffscreenCanvas[JC] def getContext(contextType: String, contextAttributes: TwoDContextAttributes): js.Dynamic
@@ -16616,8 +16795,8 @@ OffscreenCanvas[JC] def transferToImageBitmap(): ImageBitmap
 OscillatorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OscillatorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 OscillatorNode[JT] var channelCount: Int
-OscillatorNode[JT] var channelCountMode: Int
-OscillatorNode[JT] var channelInterpretation: String
+OscillatorNode[JT] var channelCountMode: AudioNodeChannelCountMode
+OscillatorNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 OscillatorNode[JT] def connect(audioNode: AudioNode): Unit
 OscillatorNode[JT] def connect(audioParam: AudioParam): Unit
 OscillatorNode[JT] val context: AudioContext
@@ -16631,9 +16810,22 @@ OscillatorNode[JT] var onended: js.Function1[Event, _]
 OscillatorNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OscillatorNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 OscillatorNode[JT] def setPeriodicWave(wave: PeriodicWave): Unit
-OscillatorNode[JT] def start(when: Double?): Unit
-OscillatorNode[JT] def stop(when: Double?): Unit
-OscillatorNode[JT] var `type`: String
+OscillatorNode[JT] def start(): Unit
+OscillatorNode[JT] def start(when: Double): Unit
+OscillatorNode[JT] def stop(): Unit
+OscillatorNode[JT] def stop(when: Double): Unit
+OscillatorNode[JT] var `type`: OscillatorNodeType
+OscillatorNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[OscillatorNodeOptions]?): OscillatorNode
+OscillatorNodeOptions[JT] var detune: js.UndefOr[Double]
+OscillatorNodeOptions[JT] var frequency: js.UndefOr[Double]
+OscillatorNodeOptions[JT] var periodicWave: js.UndefOr[PeriodicWave]
+OscillatorNodeOptions[JT] var `type`: js.UndefOr[OscillatorNodeType]
+OscillatorNodeType[JT]
+OscillatorNodeType[SO] val custom: OscillatorNodeType
+OscillatorNodeType[SO] val sawtooth: OscillatorNodeType
+OscillatorNodeType[SO] val sine: OscillatorNodeType
+OscillatorNodeType[SO] val square: OscillatorNodeType
+OscillatorNodeType[SO] val triangle: OscillatorNodeType
 PageTransitionEvent[JT] def bubbles: Boolean
 PageTransitionEvent[JT] def cancelBubble: Boolean
 PageTransitionEvent[JT] def cancelable: Boolean
@@ -16655,8 +16847,8 @@ PageVisibility[JT] var visibilitychange: js.Function1[Event, _]
 PannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 PannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 PannerNode[JT] var channelCount: Int
-PannerNode[JT] var channelCountMode: Int
-PannerNode[JT] var channelInterpretation: String
+PannerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+PannerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 PannerNode[JT] var coneInnerAngle: Double
 PannerNode[JT] var coneOuterAngle: Double
 PannerNode[JT] var coneOuterGain: Double
@@ -17181,7 +17373,14 @@ RTCSignalingState[SO] val stable: RTCSignalingState
 RTCStats[JT] val id: String
 RTCStats[JT] val timestamp: Double
 RTCStats[JT] val `type`: RTCStatsType
-RTCStatsReport[JT] def apply(id: String): RTCStats
+RTCStatsReport[JT] @JSBracketAccess def apply(index: K): V
+RTCStatsReport[JT] def entries(): js.Iterator[js.Tuple2[K, V]]
+RTCStatsReport[JT] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+RTCStatsReport[JT] def has(key: K): Boolean
+RTCStatsReport[JT] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+RTCStatsReport[JT] def keys(): js.Iterator[K]
+RTCStatsReport[JT] def size: Int
+RTCStatsReport[JT] def values(): js.Iterator[V]
 RTCStatsType[JT]
 RTCStatsType[SO] val `inbound-rtp` = "inbound-rtp".asInstanceOf[RTCStatsType]
 RTCStatsType[SO] val `outbound-rtp` = "outbound-rtp".asInstanceOf[RTCStatsType]
@@ -17219,6 +17418,14 @@ Range[JO] val END_TO_END: Int
 Range[JO] val END_TO_START: Int
 Range[JO] val START_TO_END: Int
 Range[JO] val START_TO_START: Int
+ReadOnlyMapLike[JT] @JSBracketAccess def apply(index: K): V
+ReadOnlyMapLike[JT] def entries(): js.Iterator[js.Tuple2[K, V]]
+ReadOnlyMapLike[JT] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+ReadOnlyMapLike[JT] def has(key: K): Boolean
+ReadOnlyMapLike[JT] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+ReadOnlyMapLike[JT] def keys(): js.Iterator[K]
+ReadOnlyMapLike[JT] def size: Int
+ReadOnlyMapLike[JT] def values(): js.Iterator[V]
 ReadableStream[JT] def cancel(reason: js.UndefOr[Any]?): js.Promise[Unit]
 ReadableStream[JT] def getReader(): ReadableStreamReader[T]
 ReadableStream[JT] def locked: Boolean
@@ -25823,8 +26030,8 @@ StaticRangeInit[JT] val startOffset: Int
 StereoPannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 StereoPannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 StereoPannerNode[JT] var channelCount: Int
-StereoPannerNode[JT] var channelCountMode: Int
-StereoPannerNode[JT] var channelInterpretation: String
+StereoPannerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+StereoPannerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 StereoPannerNode[JT] def connect(audioNode: AudioNode): Unit
 StereoPannerNode[JT] def connect(audioParam: AudioParam): Unit
 StereoPannerNode[JT] val context: AudioContext
@@ -26230,8 +26437,8 @@ VisibilityState[SO] val visible: VisibilityState
 WaveShaperNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 WaveShaperNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 WaveShaperNode[JT] var channelCount: Int
-WaveShaperNode[JT] var channelCountMode: Int
-WaveShaperNode[JT] var channelInterpretation: String
+WaveShaperNode[JT] var channelCountMode: AudioNodeChannelCountMode
+WaveShaperNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 WaveShaperNode[JT] def connect(audioNode: AudioNode): Unit
 WaveShaperNode[JT] def connect(audioParam: AudioParam): Unit
 WaveShaperNode[JT] val context: AudioContext
@@ -27061,6 +27268,9 @@ WorkerOptions[JT] var `type`: js.UndefOr[WorkerType]
 WorkerType[JT]
 WorkerType[SO] val classic: WorkerType
 WorkerType[SO] val module: WorkerType
+Worklet[JC] def addModule(moduleURL: String, options: WorkletOptions?): js.Promise[Unit]
+WorkletGlobalScope[JT]
+WorkletOptions[JT] var credentials: js.UndefOr[RequestCredentials]
 WriteableState[JT]
 WriteableState[SO] val closed: WriteableState
 WriteableState[SO] val closing: WriteableState
diff --git a/api-reports/2_13.txt b/api-reports/2_13.txt
index 9d5cd5409..17d292297 100644
--- a/api-reports/2_13.txt
+++ b/api-reports/2_13.txt
@@ -81,8 +81,8 @@ Algorithm[JT] val name: String
 AnalyserNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AnalyserNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AnalyserNode[JT] var channelCount: Int
-AnalyserNode[JT] var channelCountMode: Int
-AnalyserNode[JT] var channelInterpretation: String
+AnalyserNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AnalyserNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AnalyserNode[JT] def connect(audioNode: AudioNode): Unit
 AnalyserNode[JT] def connect(audioParam: AudioParam): Unit
 AnalyserNode[JT] val context: AudioContext
@@ -425,8 +425,8 @@ AudioBufferSourceNode[JT] def addEventListener[T <: Event](`type`: String, liste
 AudioBufferSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioBufferSourceNode[JT] var buffer: AudioBuffer
 AudioBufferSourceNode[JT] var channelCount: Int
-AudioBufferSourceNode[JT] var channelCountMode: Int
-AudioBufferSourceNode[JT] var channelInterpretation: String
+AudioBufferSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioBufferSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioBufferSourceNode[JT] def connect(audioNode: AudioNode): Unit
 AudioBufferSourceNode[JT] def connect(audioParam: AudioParam): Unit
 AudioBufferSourceNode[JT] val context: AudioContext
@@ -441,10 +441,23 @@ AudioBufferSourceNode[JT] var onended: js.Function1[Event, _]
 AudioBufferSourceNode[JT] val playbackRate: AudioParam
 AudioBufferSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioBufferSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-AudioBufferSourceNode[JT] def start(when: Double?, offset: Double?, duration: Double?): Unit
-AudioBufferSourceNode[JT] def stop(when: Double?): Unit
+AudioBufferSourceNode[JT] def start(): Unit
+AudioBufferSourceNode[JT] def start(when: Double): Unit
+AudioBufferSourceNode[JT] def start(when: Double, offset: Double): Unit
+AudioBufferSourceNode[JT] def start(when: Double, offset: Double, duration: Double): Unit
+AudioBufferSourceNode[JT] def stop(): Unit
+AudioBufferSourceNode[JT] def stop(when: Double): Unit
+AudioBufferSourceNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[AudioBufferSourceNodeOptions]?): AudioBufferSourceNode
+AudioBufferSourceNodeOptions[JT] var buffer: js.UndefOr[AudioBuffer]
+AudioBufferSourceNodeOptions[JT] var detune: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var loop: js.UndefOr[Boolean]
+AudioBufferSourceNodeOptions[JT] var loopEnd: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var loopStart: js.UndefOr[Double]
+AudioBufferSourceNodeOptions[JT] var playbackRate: js.UndefOr[Double]
 AudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioContext[JC] val audioWorklet: AudioWorklet
+AudioContext[JC] def baseLatency: Double
 AudioContext[JC] def close(): js.Promise[Unit]
 AudioContext[JC] def createAnalyser(): AnalyserNode
 AudioContext[JC] def createBiquadFilter(): BiquadFilterNode
@@ -468,7 +481,9 @@ AudioContext[JC] def currentTime: Double
 AudioContext[JC] def decodeAudioData(audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _]?, errorCallback: js.Function0[_]?): js.Promise[AudioBuffer]
 AudioContext[JC] val destination: AudioDestinationNode
 AudioContext[JC] def dispatchEvent(evt: Event): Boolean
+AudioContext[JC] def getOutputTimestamp: AudioTimestamp
 AudioContext[JC] val listener: AudioListener
+AudioContext[JC] def outputLatency: Double
 AudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioContext[JC] def resume(): js.Promise[Unit]
@@ -478,8 +493,8 @@ AudioContext[JC] def suspend(): js.Promise[Unit]
 AudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioDestinationNode[JT] var channelCount: Int
-AudioDestinationNode[JT] var channelCountMode: Int
-AudioDestinationNode[JT] var channelInterpretation: String
+AudioDestinationNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioDestinationNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioDestinationNode[JT] def connect(audioNode: AudioNode): Unit
 AudioDestinationNode[JT] def connect(audioParam: AudioParam): Unit
 AudioDestinationNode[JT] val context: AudioContext
@@ -493,8 +508,8 @@ AudioDestinationNode[JT] def removeEventListener[T <: Event](`type`: String, lis
 AudioListener[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioListener[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioListener[JT] var channelCount: Int
-AudioListener[JT] var channelCountMode: Int
-AudioListener[JT] var channelInterpretation: String
+AudioListener[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioListener[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioListener[JT] def connect(audioNode: AudioNode): Unit
 AudioListener[JT] def connect(audioParam: AudioParam): Unit
 AudioListener[JT] val context: AudioContext
@@ -511,8 +526,8 @@ AudioListener[JT] var speedOfSound: Double
 AudioNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioNode[JT] var channelCount: Int
-AudioNode[JT] var channelCountMode: Int
-AudioNode[JT] var channelInterpretation: String
+AudioNode[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioNode[JT] def connect(audioNode: AudioNode): Unit
 AudioNode[JT] def connect(audioParam: AudioParam): Unit
 AudioNode[JT] val context: AudioContext
@@ -522,12 +537,19 @@ AudioNode[JT] val numberOfInputs: Int
 AudioNode[JT] val numberOfOutputs: Int
 AudioNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioNodeChannelCountMode[JT]
+AudioNodeChannelCountMode[SO] val `clamped-max`: AudioNodeChannelCountMode
+AudioNodeChannelCountMode[SO] val explicit: AudioNodeChannelCountMode
+AudioNodeChannelCountMode[SO] val max: AudioNodeChannelCountMode
+AudioNodeChannelInterpretation[JT]
+AudioNodeChannelInterpretation[SO] val discrete: AudioNodeChannelInterpretation
+AudioNodeChannelInterpretation[SO] val speakers: AudioNodeChannelInterpretation
 AudioParam[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioParam[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 AudioParam[JT] def cancelScheduledValues(startTime: Double): Unit
 AudioParam[JT] var channelCount: Int
-AudioParam[JT] var channelCountMode: Int
-AudioParam[JT] var channelInterpretation: String
+AudioParam[JT] var channelCountMode: AudioNodeChannelCountMode
+AudioParam[JT] var channelInterpretation: AudioNodeChannelInterpretation
 AudioParam[JT] def connect(audioNode: AudioNode): Unit
 AudioParam[JT] def connect(audioParam: AudioParam): Unit
 AudioParam[JT] val context: AudioContext
@@ -536,6 +558,8 @@ AudioParam[JT] def disconnect(output: AudioNode?): Unit
 AudioParam[JT] def dispatchEvent(evt: Event): Boolean
 AudioParam[JT] def exponentialRampToValueAtTime(value: Double, endTime: Double): Unit
 AudioParam[JT] def linearRampToValueAtTime(value: Double, endTime: Double): Unit
+AudioParam[JT] val maxValue: Double
+AudioParam[JT] val minValue: Double
 AudioParam[JT] val numberOfInputs: Int
 AudioParam[JT] val numberOfOutputs: Int
 AudioParam[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
@@ -544,6 +568,43 @@ AudioParam[JT] def setTargetAtTime(target: Double, startTime: Double, timeConsta
 AudioParam[JT] def setValueAtTime(value: Double, startTime: Double): Unit
 AudioParam[JT] def setValueCurveAtTime(values: js.typedarray.Float32Array, startTime: Double, duration: Double): Unit
 AudioParam[JT] var value: Double
+AudioParamAutomationRate[JT]
+AudioParamAutomationRate[SO] val `a-rate`: AudioParamAutomationRate
+AudioParamAutomationRate[SO] val `k-rate`: AudioParamAutomationRate
+AudioParamDescriptor[JT] var automationRate: js.UndefOr[AudioParamAutomationRate]
+AudioParamDescriptor[JT] var defaultValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var maxValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var minValue: js.UndefOr[Double]
+AudioParamDescriptor[JT] var name: String
+AudioParamMap[JC] @JSBracketAccess def apply(index: K): V
+AudioParamMap[JC] def entries(): js.Iterator[js.Tuple2[K, V]]
+AudioParamMap[JC] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+AudioParamMap[JC] def has(key: K): Boolean
+AudioParamMap[JC] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+AudioParamMap[JC] def keys(): js.Iterator[K]
+AudioParamMap[JC] def size: Int
+AudioParamMap[JC] def values(): js.Iterator[V]
+AudioScheduledSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioScheduledSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioScheduledSourceNode[JC] var channelCount: Int
+AudioScheduledSourceNode[JC] var channelCountMode: AudioNodeChannelCountMode
+AudioScheduledSourceNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+AudioScheduledSourceNode[JC] def connect(audioNode: AudioNode): Unit
+AudioScheduledSourceNode[JC] def connect(audioParam: AudioParam): Unit
+AudioScheduledSourceNode[JC] val context: AudioContext
+AudioScheduledSourceNode[JC] def disconnect(output: AudioNode?): Unit
+AudioScheduledSourceNode[JC] def dispatchEvent(evt: Event): Boolean
+AudioScheduledSourceNode[JC] val numberOfInputs: Int
+AudioScheduledSourceNode[JC] val numberOfOutputs: Int
+AudioScheduledSourceNode[JC] var onended: js.Function1[Event, _]
+AudioScheduledSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioScheduledSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioScheduledSourceNode[JC] def start(): Unit
+AudioScheduledSourceNode[JC] def start(when: Double): Unit
+AudioScheduledSourceNode[JC] def stop(): Unit
+AudioScheduledSourceNode[JC] def stop(when: Double): Unit
+AudioTimestamp[JT] var contextTime: Double
+AudioTimestamp[JT] var performanceTime: Double
 AudioTrack[JT] var enabled: Boolean
 AudioTrack[JT] val id: String
 AudioTrack[JT] val kind: String
@@ -560,6 +621,68 @@ AudioTrackList[JT] var onchange: js.Function1[Event, Any]
 AudioTrackList[JT] var onremovetrack: js.Function1[TrackEvent, Any]
 AudioTrackList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 AudioTrackList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorklet[JC] def addModule(moduleURL: String, options: WorkletOptions?): js.Promise[Unit]
+AudioWorkletGlobalScope[JC] def currentFrame: Int
+AudioWorkletGlobalScope[JC] def currentTime: Double
+AudioWorkletGlobalScope[JC] def registerProcessor(name: String, processorCtor: js.Dynamic): Unit
+AudioWorkletGlobalScope[JC] def sampleRate: Float
+AudioWorkletGlobalScope[JO] def globalThis: AudioWorkletGlobalScope
+AudioWorkletNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioWorkletNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorkletNode[JC] var channelCount: Int
+AudioWorkletNode[JC] var channelCountMode: AudioNodeChannelCountMode
+AudioWorkletNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+AudioWorkletNode[JC] def connect(audioNode: AudioNode): Unit
+AudioWorkletNode[JC] def connect(audioParam: AudioParam): Unit
+AudioWorkletNode[JC] val context: AudioContext
+AudioWorkletNode[JC] def disconnect(output: AudioNode?): Unit
+AudioWorkletNode[JC] def dispatchEvent(evt: Event): Boolean
+AudioWorkletNode[JC] val numberOfInputs: Int
+AudioWorkletNode[JC] val numberOfOutputs: Int
+AudioWorkletNode[JC] var onprocessorerror: js.Function1[Event, _]
+AudioWorkletNode[JC] val parameters: AudioParamMap
+AudioWorkletNode[JC] val port: MessagePort
+AudioWorkletNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+AudioWorkletNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+AudioWorkletNodeOptions[JT] var numberOfInputs: js.UndefOr[Int]
+AudioWorkletNodeOptions[JT] var numberOfOutputs: js.UndefOr[Int]
+AudioWorkletNodeOptions[JT] var outputChannelCount: js.UndefOr[js.Array[Int]]
+AudioWorkletNodeOptions[JT] var parameterData: js.UndefOr[js.Object]
+AudioWorkletNodeOptions[JT] var processorOptions: js.UndefOr[js.Any]
+AudioWorkletProcessor[JC] val port: MessagePort
+AudioWorkletProcessor[JC] def process(inputs: js.Array[js.Array[js.typedarray.Float32Array]], outputs: js.Array[js.Array[js.typedarray.Float32Array]], parameters: js.Object): Boolean
+AudioWorkletProcessorOptions[JT] var numberOfInputs: Int
+AudioWorkletProcessorOptions[JT] var numberOfOutputs: Int
+AudioWorkletProcessorOptions[JT] var outputChannelCount: js.Array[Int]
+AudioWorkletProcessorOptions[JT] var parameterData: js.UndefOr[js.Object]
+AudioWorkletProcessorOptions[JT] var processorOptions: js.UndefOr[js.Any]
+BaseAudioContext[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+BaseAudioContext[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+BaseAudioContext[JT] val audioWorklet: AudioWorklet
+BaseAudioContext[JT] def createAnalyser(): AnalyserNode
+BaseAudioContext[JT] def createBiquadFilter(): BiquadFilterNode
+BaseAudioContext[JT] def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer
+BaseAudioContext[JT] def createBufferSource(): AudioBufferSourceNode
+BaseAudioContext[JT] def createChannelMerger(numberOfInputs: Int?): ChannelMergerNode
+BaseAudioContext[JT] def createChannelSplitter(numberOfOutputs: Int?): ChannelSplitterNode
+BaseAudioContext[JT] def createConvolver(): ConvolverNode
+BaseAudioContext[JT] def createDelay(maxDelayTime: Int): DelayNode
+BaseAudioContext[JT] def createDynamicsCompressor(): DynamicsCompressorNode
+BaseAudioContext[JT] def createGain(): GainNode
+BaseAudioContext[JT] def createOscillator(): OscillatorNode
+BaseAudioContext[JT] def createPanner(): PannerNode
+BaseAudioContext[JT] def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave
+BaseAudioContext[JT] def createStereoPanner(): StereoPannerNode
+BaseAudioContext[JT] def createWaveShaper(): WaveShaperNode
+BaseAudioContext[JT] def currentTime: Double
+BaseAudioContext[JT] def decodeAudioData(audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _]?, errorCallback: js.Function0[_]?): js.Promise[AudioBuffer]
+BaseAudioContext[JT] val destination: AudioDestinationNode
+BaseAudioContext[JT] def dispatchEvent(evt: Event): Boolean
+BaseAudioContext[JT] val listener: AudioListener
+BaseAudioContext[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+BaseAudioContext[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+BaseAudioContext[JT] val sampleRate: Double
+BaseAudioContext[JT] def state: String
 BeforeUnloadEvent[JC] def bubbles: Boolean
 BeforeUnloadEvent[JC] def cancelBubble: Boolean
 BeforeUnloadEvent[JC] def cancelable: Boolean
@@ -579,8 +702,8 @@ BiquadFilterNode[JT] val Q: AudioParam
 BiquadFilterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 BiquadFilterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 BiquadFilterNode[JT] var channelCount: Int
-BiquadFilterNode[JT] var channelCountMode: Int
-BiquadFilterNode[JT] var channelInterpretation: String
+BiquadFilterNode[JT] var channelCountMode: AudioNodeChannelCountMode
+BiquadFilterNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 BiquadFilterNode[JT] def connect(audioNode: AudioNode): Unit
 BiquadFilterNode[JT] def connect(audioParam: AudioParam): Unit
 BiquadFilterNode[JT] val context: AudioContext
@@ -603,6 +726,26 @@ Blob[JC] def stream(): ReadableStream[Uint8Array]
 Blob[JC] def text(): js.Promise[String]
 Blob[JC] def `type`: String
 Blob[JO]
+BlobEvent[JC] def bubbles: Boolean
+BlobEvent[JC] def cancelBubble: Boolean
+BlobEvent[JC] def cancelable: Boolean
+BlobEvent[JC] def composed: Boolean
+BlobEvent[JC] def currentTarget: EventTarget
+BlobEvent[JC] def data: Blob
+BlobEvent[JC] def defaultPrevented: Boolean
+BlobEvent[JC] def eventPhase: Int
+BlobEvent[JC] def isTrusted: Boolean
+BlobEvent[JC] def preventDefault(): Unit
+BlobEvent[JC] def stopImmediatePropagation(): Unit
+BlobEvent[JC] def stopPropagation(): Unit
+BlobEvent[JC] def target: EventTarget
+BlobEvent[JC] def timeStamp: Double
+BlobEvent[JC] def `type`: String
+BlobEventInit[JT] var bubbles: js.UndefOr[Boolean]
+BlobEventInit[JT] var cancelable: js.UndefOr[Boolean]
+BlobEventInit[JT] var composed: js.UndefOr[Boolean]
+BlobEventInit[JT] var data: Blob
+BlobEventInit[JT] var scoped: js.UndefOr[Boolean]
 BlobPropertyBag[JT] var endings: js.UndefOr[EndingType]
 BlobPropertyBag[JT] var `type`: js.UndefOr[String]
 BlobPropertyBag[SO] def apply(`type`: js.UndefOr[String]?): BlobPropertyBag  (@deprecated in 2.0.0)
@@ -1138,8 +1281,8 @@ CanvasRenderingContext2D[JC] def translate(x: Double, y: Double): Unit
 ChannelMergerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ChannelMergerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ChannelMergerNode[JT] var channelCount: Int
-ChannelMergerNode[JT] var channelCountMode: Int
-ChannelMergerNode[JT] var channelInterpretation: String
+ChannelMergerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ChannelMergerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ChannelMergerNode[JT] def connect(audioNode: AudioNode): Unit
 ChannelMergerNode[JT] def connect(audioParam: AudioParam): Unit
 ChannelMergerNode[JT] val context: AudioContext
@@ -1152,8 +1295,8 @@ ChannelMergerNode[JT] def removeEventListener[T <: Event](`type`: String, listen
 ChannelSplitterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ChannelSplitterNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ChannelSplitterNode[JT] var channelCount: Int
-ChannelSplitterNode[JT] var channelCountMode: Int
-ChannelSplitterNode[JT] var channelInterpretation: String
+ChannelSplitterNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ChannelSplitterNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ChannelSplitterNode[JT] def connect(audioNode: AudioNode): Unit
 ChannelSplitterNode[JT] def connect(audioParam: AudioParam): Unit
 ChannelSplitterNode[JT] val context: AudioContext
@@ -1383,14 +1526,35 @@ Console[JT] def time(label: String): Unit
 Console[JT] def timeEnd(label: String): Unit
 Console[JT] def trace(): Unit
 Console[JT] def warn(message: Any, optionalParams: Any*): Unit
+ConstantSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+ConstantSourceNode[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+ConstantSourceNode[JC] var channelCount: Int
+ConstantSourceNode[JC] var channelCountMode: AudioNodeChannelCountMode
+ConstantSourceNode[JC] var channelInterpretation: AudioNodeChannelInterpretation
+ConstantSourceNode[JC] def connect(audioNode: AudioNode): Unit
+ConstantSourceNode[JC] def connect(audioParam: AudioParam): Unit
+ConstantSourceNode[JC] val context: AudioContext
+ConstantSourceNode[JC] def disconnect(output: AudioNode?): Unit
+ConstantSourceNode[JC] def dispatchEvent(evt: Event): Boolean
+ConstantSourceNode[JC] val numberOfInputs: Int
+ConstantSourceNode[JC] val numberOfOutputs: Int
+ConstantSourceNode[JC] val offset: AudioParam
+ConstantSourceNode[JC] var onended: js.Function1[Event, _]
+ConstantSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+ConstantSourceNode[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+ConstantSourceNode[JC] def start(): Unit
+ConstantSourceNode[JC] def start(when: Double): Unit
+ConstantSourceNode[JC] def stop(): Unit
+ConstantSourceNode[JC] def stop(when: Double): Unit
+ConstantSourceNodeOptions[JT] var offset: js.UndefOr[Double]
 ConvertToBlobOptions[JT] var quality: js.UndefOr[Double]
 ConvertToBlobOptions[JT] var `type`: js.UndefOr[String]
 ConvolverNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 ConvolverNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 ConvolverNode[JT] var buffer: AudioBuffer
 ConvolverNode[JT] var channelCount: Int
-ConvolverNode[JT] var channelCountMode: Int
-ConvolverNode[JT] var channelInterpretation: String
+ConvolverNode[JT] var channelCountMode: AudioNodeChannelCountMode
+ConvolverNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 ConvolverNode[JT] def connect(audioNode: AudioNode): Unit
 ConvolverNode[JT] def connect(audioParam: AudioParam): Unit
 ConvolverNode[JT] val context: AudioContext
@@ -1598,8 +1762,8 @@ DedicatedWorkerGlobalScope[JT] def setTimeout(handler: js.Function0[Any], timeou
 DelayNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 DelayNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 DelayNode[JT] var channelCount: Int
-DelayNode[JT] var channelCountMode: Int
-DelayNode[JT] var channelInterpretation: String
+DelayNode[JT] var channelCountMode: AudioNodeChannelCountMode
+DelayNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 DelayNode[JT] def connect(audioNode: AudioNode): Unit
 DelayNode[JT] def connect(audioParam: AudioParam): Unit
 DelayNode[JT] val context: AudioContext
@@ -1924,8 +2088,8 @@ DynamicsCompressorNode[JT] def addEventListener[T <: Event](`type`: String, list
 DynamicsCompressorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 DynamicsCompressorNode[JT] val attack: AudioParam
 DynamicsCompressorNode[JT] var channelCount: Int
-DynamicsCompressorNode[JT] var channelCountMode: Int
-DynamicsCompressorNode[JT] var channelInterpretation: String
+DynamicsCompressorNode[JT] var channelCountMode: AudioNodeChannelCountMode
+DynamicsCompressorNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 DynamicsCompressorNode[JT] def connect(audioNode: AudioNode): Unit
 DynamicsCompressorNode[JT] def connect(audioParam: AudioParam): Unit
 DynamicsCompressorNode[JT] val context: AudioContext
@@ -2299,8 +2463,8 @@ FullscreenOptions[JT] var navigationUI: js.UndefOr[String]
 GainNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 GainNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 GainNode[JT] var channelCount: Int
-GainNode[JT] var channelCountMode: Int
-GainNode[JT] var channelInterpretation: String
+GainNode[JT] var channelCountMode: AudioNodeChannelCountMode
+GainNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 GainNode[JT] def connect(audioNode: AudioNode): Unit
 GainNode[JT] def connect(audioParam: AudioParam): Unit
 GainNode[JT] val context: AudioContext
@@ -2311,6 +2475,8 @@ GainNode[JT] val numberOfInputs: Int
 GainNode[JT] val numberOfOutputs: Int
 GainNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 GainNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+GainNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[GainNodeOptions]?): GainNode
+GainNodeOptions[JT] var gain: js.UndefOr[Double]
 Gamepad[JT] val axes: js.Array[Double]
 Gamepad[JT] val buttons: js.Array[GamepadButton]
 Gamepad[JT] val connected: Boolean
@@ -16075,17 +16241,20 @@ MediaDevices[JT] def removeEventListener[T <: Event](`type`: String, listener: j
 MediaElementAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaElementAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaElementAudioSourceNode[JT] var channelCount: Int
-MediaElementAudioSourceNode[JT] var channelCountMode: Int
-MediaElementAudioSourceNode[JT] var channelInterpretation: String
+MediaElementAudioSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaElementAudioSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaElementAudioSourceNode[JT] def connect(audioNode: AudioNode): Unit
 MediaElementAudioSourceNode[JT] def connect(audioParam: AudioParam): Unit
 MediaElementAudioSourceNode[JT] val context: AudioContext
 MediaElementAudioSourceNode[JT] def disconnect(output: AudioNode?): Unit
 MediaElementAudioSourceNode[JT] def dispatchEvent(evt: Event): Boolean
+MediaElementAudioSourceNode[JT] def mediaElement: HTMLMediaElement
 MediaElementAudioSourceNode[JT] val numberOfInputs: Int
 MediaElementAudioSourceNode[JT] val numberOfOutputs: Int
 MediaElementAudioSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaElementAudioSourceNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaElementAudioSourceNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[MediaElementAudioSourceNodeOptions]?): MediaElementAudioSourceNode
+MediaElementAudioSourceNodeOptions[JT] var mediaElement: HTMLMediaElement
 MediaError[JC] def code: Int
 MediaError[JO] val MEDIA_ERR_ABORTED: Int
 MediaError[JO] val MEDIA_ERR_DECODE: Int
@@ -16108,6 +16277,20 @@ MediaQueryList[JT] def removeEventListener[T <: Event](`type`: String, listener:
 MediaQueryList[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaQueryList[JT] def removeListener(listener: MediaQueryListListener): Unit  (@deprecated in 2.4.0)
 MediaQueryListListener[JT] def apply(mql: MediaQueryList): Unit  (@deprecated in 2.4.0)
+MediaRecorder[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+MediaRecorder[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaRecorder[JC] def dispatchEvent(evt: Event): Boolean
+MediaRecorder[JC] var ondataavailable: js.Function1[BlobEvent, Any]
+MediaRecorder[JC] var onerror: js.Function1[Event, Any]
+MediaRecorder[JC] var onstop: js.Function1[Event, Any]
+MediaRecorder[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
+MediaRecorder[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
+MediaRecorder[JC] def resume(): Unit
+MediaRecorder[JC] def start(): Unit
+MediaRecorder[JC] def stop(): Unit
+MediaRecorderOptions[JT] var audioBitsPerSecond: js.UndefOr[Long]
+MediaRecorderOptions[JT] var bitsPerSecond: js.UndefOr[Long]
+MediaRecorderOptions[JT] var videoBitsPerSecond: js.UndefOr[Long]
 MediaSource[JC] def activeSourceBuffers: SourceBufferList
 MediaSource[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaSource[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
@@ -16149,8 +16332,8 @@ MediaStream[JC] def removeTrack(track: MediaStreamTrack): Unit
 MediaStreamAudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaStreamAudioDestinationNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaStreamAudioDestinationNode[JT] var channelCount: Int
-MediaStreamAudioDestinationNode[JT] var channelCountMode: Int
-MediaStreamAudioDestinationNode[JT] var channelInterpretation: String
+MediaStreamAudioDestinationNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaStreamAudioDestinationNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaStreamAudioDestinationNode[JT] def connect(audioNode: AudioNode): Unit
 MediaStreamAudioDestinationNode[JT] def connect(audioParam: AudioParam): Unit
 MediaStreamAudioDestinationNode[JT] val context: AudioContext
@@ -16164,8 +16347,8 @@ MediaStreamAudioDestinationNode[JT] var stream: MediaStream
 MediaStreamAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 MediaStreamAudioSourceNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 MediaStreamAudioSourceNode[JT] var channelCount: Int
-MediaStreamAudioSourceNode[JT] var channelCountMode: Int
-MediaStreamAudioSourceNode[JT] var channelInterpretation: String
+MediaStreamAudioSourceNode[JT] var channelCountMode: AudioNodeChannelCountMode
+MediaStreamAudioSourceNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 MediaStreamAudioSourceNode[JT] def connect(audioNode: AudioNode): Unit
 MediaStreamAudioSourceNode[JT] def connect(audioParam: AudioParam): Unit
 MediaStreamAudioSourceNode[JT] val context: AudioContext
@@ -16577,7 +16760,7 @@ OfflineAudioCompletionEvent[JT] def timeStamp: Double
 OfflineAudioCompletionEvent[JT] def `type`: String
 OfflineAudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OfflineAudioContext[JC] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-OfflineAudioContext[JC] def close(): js.Promise[Unit]
+OfflineAudioContext[JC] val audioWorklet: AudioWorklet
 OfflineAudioContext[JC] def createAnalyser(): AnalyserNode
 OfflineAudioContext[JC] def createBiquadFilter(): BiquadFilterNode
 OfflineAudioContext[JC] def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer
@@ -16588,9 +16771,6 @@ OfflineAudioContext[JC] def createConvolver(): ConvolverNode
 OfflineAudioContext[JC] def createDelay(maxDelayTime: Int): DelayNode
 OfflineAudioContext[JC] def createDynamicsCompressor(): DynamicsCompressorNode
 OfflineAudioContext[JC] def createGain(): GainNode
-OfflineAudioContext[JC] def createMediaElementSource(myMediaElement: HTMLMediaElement): MediaElementAudioSourceNode
-OfflineAudioContext[JC] def createMediaStreamDestination(): MediaStreamAudioDestinationNode
-OfflineAudioContext[JC] def createMediaStreamSource(stream: MediaStream): MediaStreamAudioSourceNode
 OfflineAudioContext[JC] def createOscillator(): OscillatorNode
 OfflineAudioContext[JC] def createPanner(): PannerNode
 OfflineAudioContext[JC] def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave
@@ -16603,11 +16783,10 @@ OfflineAudioContext[JC] def dispatchEvent(evt: Event): Boolean
 OfflineAudioContext[JC] val listener: AudioListener
 OfflineAudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OfflineAudioContext[JC] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
-OfflineAudioContext[JC] def resume(): js.Promise[Unit]
 OfflineAudioContext[JC] val sampleRate: Double
 OfflineAudioContext[JC] def startRendering(): js.Promise[AudioBuffer]
 OfflineAudioContext[JC] def state: String
-OfflineAudioContext[JC] def suspend(): js.Promise[Unit]
+OfflineAudioContext[JC] def suspend(suspendTime: Double): js.Promise[Unit]
 OffscreenCanvas[JC] def convertToBlob(options: ConvertToBlobOptions?): js.Promise[Blob]
 OffscreenCanvas[JC] def getContext(contextType: String): js.Dynamic
 OffscreenCanvas[JC] def getContext(contextType: String, contextAttributes: TwoDContextAttributes): js.Dynamic
@@ -16616,8 +16795,8 @@ OffscreenCanvas[JC] def transferToImageBitmap(): ImageBitmap
 OscillatorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OscillatorNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 OscillatorNode[JT] var channelCount: Int
-OscillatorNode[JT] var channelCountMode: Int
-OscillatorNode[JT] var channelInterpretation: String
+OscillatorNode[JT] var channelCountMode: AudioNodeChannelCountMode
+OscillatorNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 OscillatorNode[JT] def connect(audioNode: AudioNode): Unit
 OscillatorNode[JT] def connect(audioParam: AudioParam): Unit
 OscillatorNode[JT] val context: AudioContext
@@ -16631,9 +16810,22 @@ OscillatorNode[JT] var onended: js.Function1[Event, _]
 OscillatorNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 OscillatorNode[JT] def removeEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 OscillatorNode[JT] def setPeriodicWave(wave: PeriodicWave): Unit
-OscillatorNode[JT] def start(when: Double?): Unit
-OscillatorNode[JT] def stop(when: Double?): Unit
-OscillatorNode[JT] var `type`: String
+OscillatorNode[JT] def start(): Unit
+OscillatorNode[JT] def start(when: Double): Unit
+OscillatorNode[JT] def stop(): Unit
+OscillatorNode[JT] def stop(when: Double): Unit
+OscillatorNode[JT] var `type`: OscillatorNodeType
+OscillatorNode[SO] def apply(context: BaseAudioContext, options: js.UndefOr[OscillatorNodeOptions]?): OscillatorNode
+OscillatorNodeOptions[JT] var detune: js.UndefOr[Double]
+OscillatorNodeOptions[JT] var frequency: js.UndefOr[Double]
+OscillatorNodeOptions[JT] var periodicWave: js.UndefOr[PeriodicWave]
+OscillatorNodeOptions[JT] var `type`: js.UndefOr[OscillatorNodeType]
+OscillatorNodeType[JT]
+OscillatorNodeType[SO] val custom: OscillatorNodeType
+OscillatorNodeType[SO] val sawtooth: OscillatorNodeType
+OscillatorNodeType[SO] val sine: OscillatorNodeType
+OscillatorNodeType[SO] val square: OscillatorNodeType
+OscillatorNodeType[SO] val triangle: OscillatorNodeType
 PageTransitionEvent[JT] def bubbles: Boolean
 PageTransitionEvent[JT] def cancelBubble: Boolean
 PageTransitionEvent[JT] def cancelable: Boolean
@@ -16655,8 +16847,8 @@ PageVisibility[JT] var visibilitychange: js.Function1[Event, _]
 PannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 PannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 PannerNode[JT] var channelCount: Int
-PannerNode[JT] var channelCountMode: Int
-PannerNode[JT] var channelInterpretation: String
+PannerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+PannerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 PannerNode[JT] var coneInnerAngle: Double
 PannerNode[JT] var coneOuterAngle: Double
 PannerNode[JT] var coneOuterGain: Double
@@ -17181,7 +17373,14 @@ RTCSignalingState[SO] val stable: RTCSignalingState
 RTCStats[JT] val id: String
 RTCStats[JT] val timestamp: Double
 RTCStats[JT] val `type`: RTCStatsType
-RTCStatsReport[JT] def apply(id: String): RTCStats
+RTCStatsReport[JT] @JSBracketAccess def apply(index: K): V
+RTCStatsReport[JT] def entries(): js.Iterator[js.Tuple2[K, V]]
+RTCStatsReport[JT] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+RTCStatsReport[JT] def has(key: K): Boolean
+RTCStatsReport[JT] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+RTCStatsReport[JT] def keys(): js.Iterator[K]
+RTCStatsReport[JT] def size: Int
+RTCStatsReport[JT] def values(): js.Iterator[V]
 RTCStatsType[JT]
 RTCStatsType[SO] val `inbound-rtp` = "inbound-rtp".asInstanceOf[RTCStatsType]
 RTCStatsType[SO] val `outbound-rtp` = "outbound-rtp".asInstanceOf[RTCStatsType]
@@ -17219,6 +17418,14 @@ Range[JO] val END_TO_END: Int
 Range[JO] val END_TO_START: Int
 Range[JO] val START_TO_END: Int
 Range[JO] val START_TO_START: Int
+ReadOnlyMapLike[JT] @JSBracketAccess def apply(index: K): V
+ReadOnlyMapLike[JT] def entries(): js.Iterator[js.Tuple2[K, V]]
+ReadOnlyMapLike[JT] def forEach(callbackFn: js.Function2[V, K, Unit]): Unit
+ReadOnlyMapLike[JT] def has(key: K): Boolean
+ReadOnlyMapLike[JT] @JSName(js.Symbol.iterator) override def jsIterator(): js.Iterator[js.Tuple2[K, V]]
+ReadOnlyMapLike[JT] def keys(): js.Iterator[K]
+ReadOnlyMapLike[JT] def size: Int
+ReadOnlyMapLike[JT] def values(): js.Iterator[V]
 ReadableStream[JT] def cancel(reason: js.UndefOr[Any]?): js.Promise[Unit]
 ReadableStream[JT] def getReader(): ReadableStreamReader[T]
 ReadableStream[JT] def locked: Boolean
@@ -25823,8 +26030,8 @@ StaticRangeInit[JT] val startOffset: Int
 StereoPannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 StereoPannerNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 StereoPannerNode[JT] var channelCount: Int
-StereoPannerNode[JT] var channelCountMode: Int
-StereoPannerNode[JT] var channelInterpretation: String
+StereoPannerNode[JT] var channelCountMode: AudioNodeChannelCountMode
+StereoPannerNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 StereoPannerNode[JT] def connect(audioNode: AudioNode): Unit
 StereoPannerNode[JT] def connect(audioParam: AudioParam): Unit
 StereoPannerNode[JT] val context: AudioContext
@@ -26230,8 +26437,8 @@ VisibilityState[SO] val visible: VisibilityState
 WaveShaperNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], options: EventListenerOptions): Unit
 WaveShaperNode[JT] def addEventListener[T <: Event](`type`: String, listener: js.Function1[T, _], useCapture: Boolean?): Unit
 WaveShaperNode[JT] var channelCount: Int
-WaveShaperNode[JT] var channelCountMode: Int
-WaveShaperNode[JT] var channelInterpretation: String
+WaveShaperNode[JT] var channelCountMode: AudioNodeChannelCountMode
+WaveShaperNode[JT] var channelInterpretation: AudioNodeChannelInterpretation
 WaveShaperNode[JT] def connect(audioNode: AudioNode): Unit
 WaveShaperNode[JT] def connect(audioParam: AudioParam): Unit
 WaveShaperNode[JT] val context: AudioContext
@@ -27061,6 +27268,9 @@ WorkerOptions[JT] var `type`: js.UndefOr[WorkerType]
 WorkerType[JT]
 WorkerType[SO] val classic: WorkerType
 WorkerType[SO] val module: WorkerType
+Worklet[JC] def addModule(moduleURL: String, options: WorkletOptions?): js.Promise[Unit]
+WorkletGlobalScope[JT]
+WorkletOptions[JT] var credentials: js.UndefOr[RequestCredentials]
 WriteableState[JT]
 WriteableState[SO] val closed: WriteableState
 WriteableState[SO] val closing: WriteableState
diff --git a/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelCountMode.scala b/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelCountMode.scala
new file mode 100644
index 000000000..2f442f012
--- /dev/null
+++ b/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelCountMode.scala
@@ -0,0 +1,30 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+/** Represents an enumerated value describing the way channels must be matched between the AudioNode's inputs and
+  * outputs.
+  */
+sealed trait AudioNodeChannelCountMode extends js.Any
+
+object AudioNodeChannelCountMode {
+
+  /** The number of channels is equal to the maximum number of channels of all connections. In this case, channelCount
+    * is ignored and only up-mixing happens.
+    */
+  val max: AudioNodeChannelCountMode = "max".asInstanceOf[AudioNodeChannelCountMode]
+
+  /** The number of channels is equal to the maximum number of channels of all connections, clamped to the value of
+    * channelCount.
+    */
+  val `clamped-max`: AudioNodeChannelCountMode = "clamped-max".asInstanceOf[AudioNodeChannelCountMode]
+
+  /** The number of channels is defined by the value of channelCount. */
+  val explicit: AudioNodeChannelCountMode = "explicit".asInstanceOf[AudioNodeChannelCountMode]
+}
diff --git a/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelInterpretation.scala b/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelInterpretation.scala
new file mode 100644
index 000000000..a229d6e00
--- /dev/null
+++ b/dom/src/main/scala-2/org/scalajs/dom/AudioNodeChannelInterpretation.scala
@@ -0,0 +1,28 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+/** Represents an enumerated value describing how input channels are mapped to output channels when the number of
+  * inputs/outputs is different. For example, this setting defines how a mono input will be up-mixed to a stereo or 5.1
+  * channel output, or how a quad channel input will be down-mixed to a stereo or mono output.
+  */
+sealed trait AudioNodeChannelInterpretation extends js.Any
+
+object AudioNodeChannelInterpretation {
+
+  /** Use a set of "standard" mappings for combinations of common speaker input and output setups (mono, stereo, quad,
+    * 5.1). For example, with this setting a mono channel input will output to both channels of a stereo output.
+    */
+  val speakers: AudioNodeChannelInterpretation = "speakers".asInstanceOf[AudioNodeChannelInterpretation]
+
+  /** Input channels are mapped to output channels in order. If there are more inputs than outputs, the additional
+    * inputs are dropped; if there are fewer, the unused outputs are silent.
+    */
+  val discrete: AudioNodeChannelInterpretation = "discrete".asInstanceOf[AudioNodeChannelInterpretation]
+}
diff --git a/dom/src/main/scala-2/org/scalajs/dom/AudioParamAutomationRate.scala b/dom/src/main/scala-2/org/scalajs/dom/AudioParamAutomationRate.scala
new file mode 100644
index 000000000..2b2b22eae
--- /dev/null
+++ b/dom/src/main/scala-2/org/scalajs/dom/AudioParamAutomationRate.scala
@@ -0,0 +1,22 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+sealed trait AudioParamAutomationRate extends js.Any
+
+object AudioParamAutomationRate {
+
+  /** An a-rate [[AudioParam]] takes the current audio parameter value for each sample frame of the audio signal. */
+  val `a-rate`: AudioParamAutomationRate = "a-rate".asInstanceOf[AudioParamAutomationRate]
+
+  /** A k-rate [[AudioParam]] uses the same initial audio parameter value for the whole block processed; that is, 128
+    * sample frames. In other words, the same value applies to every frame in the audio as it's processed by the node.
+    */
+  val `k-rate`: AudioParamAutomationRate = "k-rate".asInstanceOf[AudioParamAutomationRate]
+}
diff --git a/dom/src/main/scala-2/org/scalajs/dom/OscillatorNodeType.scala b/dom/src/main/scala-2/org/scalajs/dom/OscillatorNodeType.scala
new file mode 100644
index 000000000..de4b2dcf6
--- /dev/null
+++ b/dom/src/main/scala-2/org/scalajs/dom/OscillatorNodeType.scala
@@ -0,0 +1,31 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+sealed trait OscillatorNodeType extends js.Any
+
+object OscillatorNodeType {
+
+  /** A sine wave. This is the default value. */
+  val sine: OscillatorNodeType = "sine".asInstanceOf[OscillatorNodeType]
+
+  /** A square wave with a duty cycle of 0.5; that is, the signal is "high" for half of each period. */
+  val square: OscillatorNodeType = "square".asInstanceOf[OscillatorNodeType]
+
+  /** A sawtooth wave. */
+  val sawtooth: OscillatorNodeType = "sawtooth".asInstanceOf[OscillatorNodeType]
+
+  /** A triangle wave. */
+  val triangle: OscillatorNodeType = "triangle".asInstanceOf[OscillatorNodeType]
+
+  /** A custom waveform. You never set type to custom manually; instead, use the setPeriodicWave() method to provide the
+    * data representing the waveform. Doing so automatically sets the type to custom.
+    */
+  val custom: OscillatorNodeType = "custom".asInstanceOf[OscillatorNodeType]
+}
diff --git a/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelCountMode.scala b/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelCountMode.scala
new file mode 100644
index 000000000..be34d2342
--- /dev/null
+++ b/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelCountMode.scala
@@ -0,0 +1,29 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** Represents an enumerated value describing the way channels must be matched between the AudioNode's inputs and
+  * outputs.
+  */
+opaque type AudioNodeChannelCountMode <: String = String
+
+object AudioNodeChannelCountMode {
+
+  /** The number of channels is equal to the maximum number of channels of all connections. In this case, channelCount
+    * is ignored and only up-mixing happens.
+    */
+  val max: AudioNodeChannelCountMode = "max"
+
+  /** The number of channels is equal to the maximum number of channels of all connections, clamped to the value of
+    * channelCount.
+    */
+  val `clamped-max`: AudioNodeChannelCountMode = "clamped-max"
+
+  /** The number of channels is defined by the value of channelCount. */
+  val explicit: AudioNodeChannelCountMode = "explicit"
+}
diff --git a/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelInterpretation.scala b/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelInterpretation.scala
new file mode 100644
index 000000000..bcf5485d0
--- /dev/null
+++ b/dom/src/main/scala-3/org/scalajs/dom/AudioNodeChannelInterpretation.scala
@@ -0,0 +1,27 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** Represents an enumerated value describing how input channels are mapped to output channels when the number of
+  * inputs/outputs is different. For example, this setting defines how a mono input will be up-mixed to a stereo or 5.1
+  * channel output, or how a quad channel input will be down-mixed to a stereo or mono output.
+  */
+opaque type AudioNodeChannelInterpretation <: String = String
+
+object AudioNodeChannelInterpretation {
+
+  /** Use a set of "standard" mappings for combinations of common speaker input and output setups (mono, stereo, quad,
+    * 5.1). For example, with this setting a mono channel input will output to both channels of a stereo output.
+    */
+  val speakers: AudioNodeChannelInterpretation = "speakers"
+
+  /** Input channels are mapped to output channels in order. If there are more inputs than outputs, the additional
+    * inputs are dropped; if there are fewer, the unused outputs are silent.
+    */
+  val discrete: AudioNodeChannelInterpretation = "discrete"
+}
diff --git a/dom/src/main/scala-3/org/scalajs/dom/AudioParamAutomationRate.scala b/dom/src/main/scala-3/org/scalajs/dom/AudioParamAutomationRate.scala
new file mode 100644
index 000000000..7778ac94e
--- /dev/null
+++ b/dom/src/main/scala-3/org/scalajs/dom/AudioParamAutomationRate.scala
@@ -0,0 +1,21 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+opaque type AudioParamAutomationRate <: String = String
+
+object AudioParamAutomationRate {
+
+  /** An a-rate [[AudioParam]] takes the current audio parameter value for each sample frame of the audio signal. */
+  val `a-rate`: AudioParamAutomationRate = "a-rate"
+
+  /** A k-rate [[AudioParam]] uses the same initial audio parameter value for the whole block processed; that is, 128
+    * sample frames. In other words, the same value applies to every frame in the audio as it's processed by the node.
+    */
+  val `k-rate`: AudioParamAutomationRate = "k-rate"
+}
diff --git a/dom/src/main/scala-3/org/scalajs/dom/OscillatorNodeType.scala b/dom/src/main/scala-3/org/scalajs/dom/OscillatorNodeType.scala
new file mode 100644
index 000000000..e0dd4d09a
--- /dev/null
+++ b/dom/src/main/scala-3/org/scalajs/dom/OscillatorNodeType.scala
@@ -0,0 +1,30 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+opaque type OscillatorNodeType <: String = String
+
+object OscillatorNodeType {
+
+  /** A sine wave. This is the default value. */
+  val sine: OscillatorNodeType = "sine"
+
+  /** A square wave with a duty cycle of 0.5; that is, the signal is "high" for half of each period. */
+  val square: OscillatorNodeType = "square"
+
+  /** A sawtooth wave. */
+  val sawtooth: OscillatorNodeType = "sawtooth"
+
+  /** A triangle wave. */
+  val triangle: OscillatorNodeType = "triangle"
+
+  /** A custom waveform. You never set type to custom manually; instead, use the setPeriodicWave() method to provide the
+    * data representing the waveform. Doing so automatically sets the type to custom.
+    */
+  val custom: OscillatorNodeType = "custom"
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNode.scala b/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNode.scala
index 1fc346b2b..8e37ee468 100644
--- a/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNode.scala
+++ b/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNode.scala
@@ -24,7 +24,7 @@ import scala.scalajs.js
   *   - Channel count: defined by the associated AudioBuffer
   */
 @js.native
-trait AudioBufferSourceNode extends AudioNode {
+trait AudioBufferSourceNode extends AudioScheduledSourceNode {
 
   /** Is an AudioBuffer that defines the audio asset to be played, or when set to the value null, defines a single
     * channel of silence.
@@ -63,16 +63,20 @@ trait AudioBufferSourceNode extends AudioNode {
     *   The duration parameter, which defaults to the length of the asset minus the value of offset, defines the length
     *   of the portion of the asset to be played.
     */
-  def start(when: Double = js.native, offset: Double = js.native, duration: Double = js.native): Unit = js.native
+  def start(when: Double, offset: Double, duration: Double): Unit = js.native
 
-  /** Schedules the end of the playback of an audio asset.
-    *
-    * @param when
-    *   The when parameter defines when the playback will stop. If it represents a time in the past, the playback will
-    *   end immediately. If this method is called twice or more, an exception is raised.
-    */
-  def stop(when: Double = js.native): Unit = js.native
+  def start(when: Double, offset: Double): Unit = js.native
+
+}
+
+object AudioBufferSourceNode {
+
+  import js.`|`.undefOr2jsAny
 
-  /** Is an EventHandler containing the callback associated with the ended event. */
-  var onended: js.Function1[Event, _] = js.native
+  def apply(context: BaseAudioContext,
+      options: js.UndefOr[AudioBufferSourceNodeOptions] = js.undefined): AudioBufferSourceNode = {
+    js.Dynamic
+      .newInstance(js.Dynamic.global.AudioBufferSourceNode)(context, options)
+      .asInstanceOf[AudioBufferSourceNode]
+  }
 }
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNodeOptions.scala
new file mode 100644
index 000000000..1f0239d3e
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNodeOptions.scala
@@ -0,0 +1,48 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait AudioBufferSourceNodeOptions extends js.Object {
+
+  /** An instance of [[AudioBuffer]] to be played. */
+  var buffer: js.UndefOr[AudioBuffer] = js.undefined
+
+  /** Indicates whether the audio should play in a loop. The default is false. If the loop is dynamically modified
+    * during playback, the new value will take effect on the next processing block of audio.
+    */
+  var loop: js.UndefOr[Boolean] = js.undefined
+
+  /** An optional value in seconds, where looping should begin if the loop attribute is true. The default is 0. It's
+    * sensible to set this to a value between 0 and the duration of the buffer. If loopStart is less than 0, looping
+    * will begin at 0. If loopStart is greater than the duration of the buffer, looping will begin at the end of the
+    * buffer. This attribute is converted to an exact sample frame offset within the buffer, by multiplying by the
+    * buffer's sample rate and rounding to the nearest integer value. Thus, its behavior is independent of the value of
+    * the playbackRate parameter.
+    */
+  var loopStart: js.UndefOr[Double] = js.undefined
+
+  /** An optional value, in seconds, where looping should end if the loop attribute is true. The default is 0. Its value
+    * is exclusive to the content of the loop. The sample frames, comprising the loop, run from the values loopStart to
+    * loopEnd-(1/sampleRate). It's sensible to set this to a value between 0 and the duration of the buffer. If loopEnd
+    * is less than 0, looping will end at 0. If loopEnd is greater than the duration of the buffer, looping will end at
+    * the end of the buffer. This attribute is converted to an exact sample frame offset within the buffer, by
+    * multiplying by the buffer's sample rate and rounding to the nearest integer value. Thus, its behavior is
+    * independent of the value of the playbackRate parameter.
+    */
+  var loopEnd: js.UndefOr[Double] = js.undefined
+
+  /** A value in cents to modulate the speed of audio stream rendering. Its nominal range is (-∞ to +∞). The default is
+    * 0.
+    */
+  var detune: js.UndefOr[Double] = js.undefined
+
+  /** The speed at which to render the audio stream. Its default value is 1. This parameter is k-rate. This is a
+    * compound parameter with detune. Its nominal range is (-∞ to +∞).
+    */
+  var playbackRate: js.UndefOr[Double] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioContext.scala b/dom/src/main/scala/org/scalajs/dom/AudioContext.scala
index f56f6d956..150f21959 100644
--- a/dom/src/main/scala/org/scalajs/dom/AudioContext.scala
+++ b/dom/src/main/scala/org/scalajs/dom/AudioContext.scala
@@ -17,98 +17,15 @@ import scala.scalajs.js.annotation._
   */
 @js.native
 @JSGlobal
-class AudioContext extends EventTarget {
+class AudioContext extends BaseAudioContext {
 
-  /** Returns a double representing an ever-increasing hardware time in seconds used for scheduling. It starts at 0 and
-    * cannot be stopped, paused or reset.
+  /** Returns the number of seconds of processing latency incurred by the AudioContext passing the audio from the
+    * AudioDestinationNode to the audio subsystem.
     */
-  def currentTime: Double = js.native
+  def baseLatency: Double = js.native
 
-  /** Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought
-    * of as the audio-rendering device.
-    */
-  val destination: AudioDestinationNode = js.native
-
-  /** Returns the AudioListener object, used for 3D spatialization. */
-  val listener: AudioListener = js.native
-
-  /** Returns a float representing the sample rate (in samples per second) used by all nodes in this context. The
-    * sample-rate of an AudioContext cannot be changed.
-    */
-  val sampleRate: Double = js.native
-
-  /** Returns the current state of the AudioContext. */
-  def state: String = js.native
-
-  /** Closes the audio context, releasing any system audio resources that it uses. */
-  def close(): js.Promise[Unit] = js.native
-
-  /** Creates an AnalyserNode, which can be used to expose audio time and frequency data and for example to create data
-    * visualisations.
-    */
-  def createAnalyser(): AnalyserNode = js.native
-
-  /** Creates a BiquadFilterNode, which represents a second order filter configurable as several different common filter
-    * types: high-pass, low-pass, band-pass, etc.
-    */
-  def createBiquadFilter(): BiquadFilterNode = js.native
-
-  /** Creates a new, empty AudioBuffer object, which can then be populated by data and played via an
-    * AudioBufferSourceNode.
-    *
-    * @param numOfChannels
-    *   An integer representing the number of channels this buffer should have. Implementations must support a minimum
-    *   32 channels.
-    * @param length
-    *   An integer representing the size of the buffer in sample-frames.
-    * @param sampleRate
-    *   The sample-rate of the linear audio data in sample-frames per second. An implementation must support
-    *   sample-rates in at least the range 22050 to 96000.
-    */
-  def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer = js.native
-
-  /** Creates an AudioBufferSourceNode, which can be used to play and manipulate audio data contained within an
-    * AudioBuffer object. AudioBuffers are created using AudioContext.createBuffer or returned by
-    * AudioContext.decodeAudioData when it successfully decodes an audio track.
-    */
-  def createBufferSource(): AudioBufferSourceNode = js.native
-
-  /** Creates a ChannelMergerNode, which is used to combine channels from multiple audio streams into a single audio
-    * stream.
-    *
-    * @param numberOfInputs
-    *   The number of channels in the input audio streams, which the output stream will contain; the default is 6 is
-    *   this parameter is not specified.
-    */
-  def createChannelMerger(numberOfInputs: Int = js.native): ChannelMergerNode = js.native
-
-  /** Creates a ChannelSplitterNode, which is used to access the individual channels of an audio stream and process them
-    * separately.
-    *
-    * @param numberOfOutputs
-    *   The number of channels in the input audio stream that you want to output separately; the default is 6 is this
-    *   parameter is not specified.
-    */
-  def createChannelSplitter(numberOfOutputs: Int = js.native): ChannelSplitterNode = js.native
-
-  /** Creates a ConvolverNode, which can be used to apply convolution effects to your audio graph, for example a
-    * reverberation effect.
-    */
-  def createConvolver(): ConvolverNode = js.native
-
-  /** Creates a DelayNode, which is used to delay the incoming audio signal by a certain amount. This node is also
-    * useful to create feedback loops in a Web Audio API graph.
-    *
-    * @param maxDelayTime
-    *   The maximum amount of time, in seconds, that the audio signal can be delayed by. The default value is 0.
-    */
-  def createDelay(maxDelayTime: Int): DelayNode = js.native
-
-  /** Creates a DynamicsCompressorNode, which can be used to apply acoustic compression to an audio signal. */
-  def createDynamicsCompressor(): DynamicsCompressorNode = js.native
-
-  /** Creates a GainNode, which can be used to control the overall volume of the audio graph. */
-  def createGain(): GainNode = js.native
+  /** Returns an estimation of the output latency of the current audio context. */
+  def outputLatency: Double = js.native
 
   /** Creates a MediaElementAudioSourceNode associated with an HTMLMediaElement. This can be used to play and manipulate
     * audio from &lt;video&gt; or &lt;audio&gt; elements.
@@ -131,42 +48,6 @@ class AudioContext extends EventTarget {
     */
   def createMediaStreamDestination(): MediaStreamAudioDestinationNode = js.native
 
-  /** Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone. */
-  def createOscillator(): OscillatorNode = js.native
-
-  /** Creates a PannerNode, which is used to spatialise an incoming audio stream in 3D space. */
-  def createPanner(): PannerNode = js.native
-
-  /** Creates a PeriodicWave, used to define a periodic waveform that can be used to determine the output of an
-    * OscillatorNode.
-    */
-  def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave = js.native
-
-  /** Creates a StereoPannerNode, which can be used to apply stereo panning to an audio source. */
-  def createStereoPanner(): StereoPannerNode = js.native
-
-  /** Creates a WaveShaperNode, which is used to implement non-linear distortion effects. */
-  def createWaveShaper(): WaveShaperNode = js.native
-
-  /** Asynchronously decodes audio file data contained in an ArrayBuffer. In this case, the ArrayBuffer is usually
-    * loaded from an XMLHttpRequest's response attribute after setting the responseType to arraybuffer. This method only
-    * works on complete files, not fragments of audio files.
-    *
-    * @param audioData
-    *   An ArrayBuffer containing the audio data to be decoded, usually grabbed from an XMLHttpRequest's response
-    *   attribute after setting the responseType to arraybuffer.
-    * @param successCallback
-    *   A callback function to be invoked when the decoding successfully finishes. The single argument to this callback
-    *   is an AudioBuffer representing the decoded PCM audio data. Usually you'll want to put the decoded data into an
-    *   AudioBufferSourceNode, from which it can be played and manipulated how you want.
-    * @param errorCallback
-    *   An optional error callback, to be invoked if an error occurs when the audio data is being decoded.
-    */
-  def decodeAudioData(
-      audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _] = js.native,
-      errorCallback: js.Function0[_] = js.native
-  ): js.Promise[AudioBuffer] = js.native
-
   /** Resumes the progression of time in an audio context that has previously been suspended. */
   def resume(): js.Promise[Unit] = js.native
 
@@ -174,4 +55,11 @@ class AudioContext extends EventTarget {
     * CPU/battery usage in the process.
     */
   def suspend(): js.Promise[Unit] = js.native
+
+  /** Closes the audio context, releasing any system audio resources that it uses. */
+  def close(): js.Promise[Unit] = js.native
+
+  /** Returns a new AudioTimestamp object containing two audio timestamp values relating to the current audio context.
+    */
+  def getOutputTimestamp: AudioTimestamp = js.native
 }
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioNode.scala b/dom/src/main/scala/org/scalajs/dom/AudioNode.scala
index 283f604dc..9bd380287 100644
--- a/dom/src/main/scala/org/scalajs/dom/AudioNode.scala
+++ b/dom/src/main/scala/org/scalajs/dom/AudioNode.scala
@@ -47,14 +47,12 @@ trait AudioNode extends EventTarget {
 
   /** Represents an enumerated value describing the way channels must be matched between the node's inputs and outputs.
     */
-  var channelCountMode: Int = js.native
+  var channelCountMode: AudioNodeChannelCountMode = js.native
 
   /** Represents an enumerated value describing the meaning of the channels. This interpretation will define how audio
     * up-mixing and down-mixing will happen.
-    *
-    * The possible values are "speakers" or "discrete".
     */
-  var channelInterpretation: String = js.native
+  var channelInterpretation: AudioNodeChannelInterpretation = js.native
 
   /** Allows us to connect one output of this node to one input of another node. */
   def connect(audioNode: AudioNode): Unit = js.native
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioParam.scala b/dom/src/main/scala/org/scalajs/dom/AudioParam.scala
index 79b16030b..2a5d1bd37 100644
--- a/dom/src/main/scala/org/scalajs/dom/AudioParam.scala
+++ b/dom/src/main/scala/org/scalajs/dom/AudioParam.scala
@@ -36,6 +36,12 @@ trait AudioParam extends AudioNode {
   /** Represents the initial value of the attributes as defined by the specific AudioNode creating the AudioParam. */
   val defaultValue: Double = js.native
 
+  /** Represents the maximum possible value for the parameter's nominal (effective) range. */
+  val maxValue: Double = js.native
+
+  /** Represents the minimum possible value for the parameter's nominal (effective) range. */
+  val minValue: Double = js.native
+
   /** Schedules an instant change to the value of the AudioParam at a precise time, as measured against
     * AudioContext.currentTime. The new value is given in the value parameter.
     *
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioParamDescriptor.scala b/dom/src/main/scala/org/scalajs/dom/AudioParamDescriptor.scala
new file mode 100644
index 000000000..d411a50a6
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioParamDescriptor.scala
@@ -0,0 +1,35 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** Specifies properties for AudioParam objects.
+  *
+  * It is used to create custom AudioParams on an AudioWorkletNode. If the underlying AudioWorkletProcessor has a
+  * parameterDescriptors static getter, then the returned array of objects based on this dictionary is used internally
+  * by AudioWorkletNode constructor to populate its parameters property accordingly.
+  */
+trait AudioParamDescriptor extends js.Object {
+
+  /** The string which represents the name of the [[AudioParam]]. Under this name the [[AudioParam]] will be available
+    * in the parameters property of the node, and under this name the [[AudioWorkletProcessor#process]] method will
+    * acquire the calculated values of this [[AudioParam]].
+    */
+  var name: String
+
+  /** Defaults to -3.4028235e38. */
+  var minValue: js.UndefOr[Double] = js.undefined
+
+  /** Defaults to 3.4028235e38. */
+  var maxValue: js.UndefOr[Double] = js.undefined
+
+  /** Represents initial value of the AudioParam. Defaults to 0. */
+  var defaultValue: js.UndefOr[Double] = js.undefined
+
+  /** Represents an automation rate of this AudioParam. Defaults to "a-rate". */
+  var automationRate: js.UndefOr[AudioParamAutomationRate] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioParamMap.scala b/dom/src/main/scala/org/scalajs/dom/AudioParamMap.scala
new file mode 100644
index 000000000..2bc770d8f
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioParamMap.scala
@@ -0,0 +1,13 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@JSGlobal
+@js.native
+class AudioParamMap extends ReadOnlyMapLike[String, AudioParam] {}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioScheduledSourceNode.scala b/dom/src/main/scala/org/scalajs/dom/AudioScheduledSourceNode.scala
new file mode 100644
index 000000000..d860b3237
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioScheduledSourceNode.scala
@@ -0,0 +1,35 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+/** A parent interface for several types of audio source node interfaces which share the ability to be started and
+  * stopped, optionally at specified times. Unless stated otherwise, nodes based upon AudioScheduledSourceNode output
+  * silence when not playing (that is, before start() is called and after stop() is called). Silence is represented, as
+  * always, by a stream of samples with the value zero (0).
+  */
+@js.native
+@JSGlobal
+abstract class AudioScheduledSourceNode extends AudioNode {
+
+  /** Starts playing the sound as soon as possible. */
+  def start(): Unit = js.native
+
+  /** Stops playing the sound as soon as possible. */
+  def stop(): Unit = js.native
+
+  /** Schedules the sound to start playing at the specified time, in seconds relative to the context's currentTime. */
+  def start(when: Double): Unit = js.native
+
+  /** Schedules the sound to stop playing at the specified time, in seconds relative to the context's currentTime. */
+  def stop(when: Double): Unit = js.native
+
+  /** Used to set the event handler for the ended event, which fires when the tone has stopped playing. */
+  var onended: js.Function1[Event, _] = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioTimestamp.scala b/dom/src/main/scala/org/scalajs/dom/AudioTimestamp.scala
new file mode 100644
index 000000000..d374bdf25
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioTimestamp.scala
@@ -0,0 +1,24 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+trait AudioTimestamp extends js.Object {
+
+  /** The time of the sample frame currently being rendered by the audio output device (i.e., output audio stream
+    * position), in the same units and origin as the context's AudioContext.currentTime. Basically, this is the time
+    * after the audio context was first created.
+    */
+  var contextTime: Double
+
+  /** An estimation of the moment when the sample frame corresponding to the stored contextTime value was rendered by
+    * the audio output device, in the same units and origin as performance.now(). This is the time after the document
+    * containing the audio context was first rendered.
+    */
+  var performanceTime: Double
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorklet.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorklet.scala
new file mode 100644
index 000000000..14d73a144
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorklet.scala
@@ -0,0 +1,21 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+/** Used to supply custom audio processing scripts that execute in a separate thread to provide very low latency audio
+  * processing.
+  *
+  * The worklet's code is run in the AudioWorkletGlobalScope global execution context, using a separate Web Audio thread
+  * which is shared by the worklet and other audio nodes.
+  *
+  * Access the audio context's instance of AudioWorklet through the BaseAudioContext.audioWorklet property.
+  */
+@JSGlobal
+@js.native
+abstract class AudioWorklet extends Worklet {}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorkletGlobalScope.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorkletGlobalScope.scala
new file mode 100644
index 000000000..4293a69c0
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorkletGlobalScope.scala
@@ -0,0 +1,49 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+/** Represents a global execution context for user-supplied code, which defines custom AudioWorkletProcessor-derived
+  * classes.
+  *
+  * Each BaseAudioContext has a single AudioWorklet available under the audioWorklet property, which runs its code in a
+  * single AudioWorkletGlobalScope.
+  *
+  * As the global execution context is shared across the current BaseAudioContext, it's possible to define any other
+  * variables and perform any actions allowed in worklets — apart from defining AudioWorkletProcessor derived classes.
+  */
+@js.native
+@JSGlobal
+abstract class AudioWorkletGlobalScope extends WorkletGlobalScope {
+
+  /** Returns an integer that represents the ever-increasing current sample-frame of the audio block being processed. It
+    * is incremented by 128 (the size of a render quantum) after the processing of each audio block.
+    */
+  def currentFrame: Int = js.native
+
+  /** Returns a double that represents the ever-increasing context time of the audio block being processed. It is equal
+    * to the currentTime property of the BaseAudioContext the worklet belongs to.
+    */
+  def currentTime: Double = js.native
+
+  /** Returns a float that represents the sample rate of the associated BaseAudioContext. */
+  def sampleRate: Float = js.native
+
+  /** Registers a class derived from the AudioWorkletProcessor interface. The class can then be used by creating an
+    * AudioWorkletNode, providing its registered name.
+    */
+  def registerProcessor(name: String, processorCtor: js.Dynamic): Unit = js.native
+}
+
+@js.native
+@JSGlobalScope
+object AudioWorkletGlobalScope extends js.Object {
+
+  /** See issue https://github.com/whatwg/html/issues/6059 */
+  def globalThis: AudioWorkletGlobalScope = js.native
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorkletNode.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorkletNode.scala
new file mode 100644
index 000000000..36eb67896
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorkletNode.scala
@@ -0,0 +1,32 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@JSGlobal
+@js.native
+class AudioWorkletNode(context: BaseAudioContext, name: String, options: AudioWorkletNodeOptions = js.native)
+    extends AudioNode {
+
+  /** The [[MessagePort]] object that is connecting the [[AudioWorkletNode]] and its associated
+    * [[AudioWorkletProcessor]].
+    */
+  val port: MessagePort = js.native
+
+  /** The [[AudioParamMap]] object containing [[AudioParam]] instances. They can be automated in the same way as with
+    * default [[AudioNode]], and their calculated values can be used in the [[AudioWorkletProcessor#process]] method of
+    * your [[AudioWorkletProcessor]].
+    */
+  val parameters: AudioParamMap = js.native
+
+  /** Fires when the underlying [[AudioWorkletProcessor]] behind the node throws an exception in its constructor, the
+    * process method, or any user-defined class method.
+    */
+  var onprocessorerror: js.Function1[Event, _] = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorkletNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorkletNodeOptions.scala
new file mode 100644
index 000000000..493facbce
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorkletNodeOptions.scala
@@ -0,0 +1,31 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait AudioWorkletNodeOptions extends js.Object {
+
+  /** The value to initialize the [[AudioNode#numberOfInputs]] property. Defaults to 1. */
+  var numberOfInputs: js.UndefOr[Int] = js.undefined
+
+  /** The value to initialize the [[AudioNode#numberOfOutputs]] property. Defaults to 1. */
+  var numberOfOutputs: js.UndefOr[Int] = js.undefined
+
+  /** An array defining the number of channels for each output. For example, outputChannelCount: [n, m] specifies the
+    * number of channels in the first output to be n and the second output to be m. The array length must match
+    * numberOfOutputs.
+    */
+  var outputChannelCount: js.UndefOr[js.Array[Int]] = js.undefined
+
+  /** An object containing the initial values of custom AudioParam objects on this node (in its parameters property),
+    * with key being the name of a custom parameter and value being its initial value.
+    */
+  var parameterData: js.UndefOr[js.Object] = js.undefined
+
+  /** Any additional data that can be used for custom initialization of the underlying AudioWorkletProcessor. */
+  var processorOptions: js.UndefOr[js.Any] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessor.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessor.scala
new file mode 100644
index 000000000..795b2c372
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessor.scala
@@ -0,0 +1,53 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@js.native
+@JSGlobal
+abstract class AudioWorkletProcessor(options: AudioWorkletProcessorOptions) extends js.Object {
+
+  /** The [[MessagePort]] object that is connecting the [[AudioWorkletProcessor]] and the associated
+    * [[AudioWorkletNode]].
+    */
+  val port: MessagePort = js.native
+
+  /** Implements the audio processing algorithm for the audio processor worklet.
+    *
+    * The method is called synchronously from the audio rendering thread, once for each block of audio (also known as a
+    * rendering quantum) being directed through the processor's corresponding AudioWorkletNode. In other words, every
+    * time a new block of audio is ready for your processor to manipulate, your process() function is invoked to do so.
+    * @param inputs
+    *   An array of inputs connected to the node, each item of which is, in turn, an array of channels. Each channel is
+    *   a Float32Array containing 128 samples. For example, inputs[n][m][i] will access n-th input, m-th channel of that
+    *   input, and i-th sample of that channel. Each sample value is in range of [-1 .. 1]. The number of inputs and
+    *   thus the length of that array is fixed at the construction of the node (see AudioWorkletNode). If there is no
+    *   active node connected to the n-th input of the node, inputs[n] will be an empty array (zero input channels
+    *   available). The number of channels in each input may vary, depending on channelCount and channelCountMode
+    *   properties.
+    * @param outputs
+    *   An array of outputs that is similar to the inputs parameter in structure. It is intended to be filled during the
+    *   execution of the process() method. Each of the output channels is filled with zeros by default — the processor
+    *   will output silence unless the output arrays are modified.
+    * @param parameters
+    *   An object containing string keys and Float32Array values. For each custom AudioParam defined using the
+    *   parameterDescriptors getter, the key in the object is a name of that AudioParam, and the value is a
+    *   Float32Array. The values of the array are calculated by taking scheduled automation events into consideration.
+    *   If the automation rate of the parameter is "a-rate", the array will contain 128 values — one for each frame in
+    *   the current audio block. If there's no automation happening during the time represented by the current block,
+    *   the array may contain a single value that is constant for the entire block, instead of 128 identical values. If
+    *   the automation rate is "k-rate", the array will contain a single value, which is to be used for each of 128
+    *   frames.
+    * @return
+    *   A Boolean value indicating whether or not to force the AudioWorkletNode to remain active even if the user
+    *   agent's internal logic would otherwise decide that it's safe to shut down the node.
+    */
+  def process(inputs: js.Array[js.Array[js.typedarray.Float32Array]],
+      outputs: js.Array[js.Array[js.typedarray.Float32Array]], parameters: js.Object): Boolean
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessorOptions.scala b/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessorOptions.scala
new file mode 100644
index 000000000..f2f190920
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/AudioWorkletProcessorOptions.scala
@@ -0,0 +1,31 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait AudioWorkletProcessorOptions extends js.Object {
+
+  /** The value to initialize the [[AudioNode#numberOfInputs]] property. Defaults to 1. */
+  var numberOfInputs: Int
+
+  /** The value to initialize the [[AudioNode#numberOfOutputs]] property. Defaults to 1. */
+  var numberOfOutputs: Int
+
+  /** An array defining the number of channels for each output. For example, outputChannelCount: [n, m] specifies the
+    * number of channels in the first output to be n and the second output to be m. The array length must match
+    * numberOfOutputs.
+    */
+  var outputChannelCount: js.Array[Int]
+
+  /** An object containing the initial values of custom AudioParam objects on this node (in its parameters property),
+    * with key being the name of a custom parameter and value being its initial value.
+    */
+  var parameterData: js.UndefOr[js.Object] = js.undefined
+
+  /** Any additional data that can be used for custom initialization of the underlying AudioWorkletProcessor. */
+  var processorOptions: js.UndefOr[js.Any] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/BaseAudioContext.scala b/dom/src/main/scala/org/scalajs/dom/BaseAudioContext.scala
new file mode 100644
index 000000000..e9ce9114f
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/BaseAudioContext.scala
@@ -0,0 +1,148 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** The BaseAudioContext interface of the Web Audio API acts as a base definition for online and offline
+  * audio-processing graphs, as represented by AudioContext and OfflineAudioContext respectively. You wouldn't use
+  * BaseAudioContext directly — you'd use its features via one of these two inheriting interfaces.
+  *
+  * A BaseAudioContext can be a target of events, therefore it implements the EventTarget interface.
+  */
+@js.native
+trait BaseAudioContext extends EventTarget {
+
+  /** Returns a double representing an ever-increasing hardware time in seconds used for scheduling. It starts at 0 and
+    * cannot be stopped, paused or reset.
+    */
+  def currentTime: Double = js.native
+
+  /** Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought
+    * of as the audio-rendering device.
+    */
+  val destination: AudioDestinationNode = js.native
+
+  /** Returns the AudioListener object, used for 3D spatialization. */
+  val listener: AudioListener = js.native
+
+  /** Returns a float representing the sample rate (in samples per second) used by all nodes in this context. The
+    * sample-rate of an AudioContext cannot be changed.
+    */
+  val sampleRate: Double = js.native
+
+  /** Returns an instance of [[AudioWorklet]] that can be used for adding [[AudioWorkletProcessor]]-derived classes
+    * which implement custom audio processing.
+    */
+  val audioWorklet: AudioWorklet = js.native
+
+  /** Returns the current state of the AudioContext. */
+  def state: String = js.native
+
+  /** Creates an AnalyserNode, which can be used to expose audio time and frequency data and for example to create data
+    * visualisations.
+    */
+  def createAnalyser(): AnalyserNode = js.native
+
+  /** Creates a BiquadFilterNode, which represents a second order filter configurable as several different common filter
+    * types: high-pass, low-pass, band-pass, etc.
+    */
+  def createBiquadFilter(): BiquadFilterNode = js.native
+
+  /** Creates a new, empty AudioBuffer object, which can then be populated by data and played via an
+    * AudioBufferSourceNode.
+    *
+    * @param numOfChannels
+    *   An integer representing the number of channels this buffer should have. Implementations must support a minimum
+    *   of 32 channels.
+    * @param length
+    *   An integer representing the size of the buffer in sample-frames.
+    * @param sampleRate
+    *   The sample-rate of the linear audio data in sample-frames per second. An implementation must support
+    *   sample-rates in at least the range 22050 to 96000.
+    */
+  def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer = js.native
+
+  /** Creates an AudioBufferSourceNode, which can be used to play and manipulate audio data contained within an
+    * AudioBuffer object. AudioBuffers are created using AudioContext.createBuffer or returned by
+    * AudioContext.decodeAudioData when it successfully decodes an audio track.
+    */
+  def createBufferSource(): AudioBufferSourceNode = js.native
+
+  /** Creates a ChannelMergerNode, which is used to combine channels from multiple audio streams into a single audio
+    * stream.
+    *
+    * @param numberOfInputs
+    *   The number of channels in the input audio streams, which the output stream will contain; the default is 6 if
+    *   this parameter is not specified.
+    */
+  def createChannelMerger(numberOfInputs: Int = js.native): ChannelMergerNode = js.native
+
+  /** Creates a ChannelSplitterNode, which is used to access the individual channels of an audio stream and process them
+    * separately.
+    *
+    * @param numberOfOutputs
+    *   The number of channels in the input audio stream that you want to output separately; the default is 6 if this
+    *   parameter is not specified.
+    */
+  def createChannelSplitter(numberOfOutputs: Int = js.native): ChannelSplitterNode = js.native
+
+  /** Creates a ConvolverNode, which can be used to apply convolution effects to your audio graph, for example a
+    * reverberation effect.
+    */
+  def createConvolver(): ConvolverNode = js.native
+
+  /** Creates a DelayNode, which is used to delay the incoming audio signal by a certain amount. This node is also
+    * useful to create feedback loops in a Web Audio API graph.
+    *
+    * @param maxDelayTime
+    *   The maximum amount of time, in seconds, that the audio signal can be delayed by. The default value is 1.
+    */
+  def createDelay(maxDelayTime: Int): DelayNode = js.native
+
+  /** Creates a DynamicsCompressorNode, which can be used to apply acoustic compression to an audio signal. */
+  def createDynamicsCompressor(): DynamicsCompressorNode = js.native
+
+  /** Creates a GainNode, which can be used to control the overall volume of the audio graph. */
+  def createGain(): GainNode = js.native
+
+  /** Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone. */
+  def createOscillator(): OscillatorNode = js.native
+
+  /** Creates a PannerNode, which is used to spatialise an incoming audio stream in 3D space. */
+  def createPanner(): PannerNode = js.native
+
+  /** Creates a PeriodicWave, used to define a periodic waveform that can be used to determine the output of an
+    * OscillatorNode.
+    */
+  def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave = js.native
+
+  /** Creates a StereoPannerNode, which can be used to apply stereo panning to an audio source. */
+  def createStereoPanner(): StereoPannerNode = js.native
+
+  /** Creates a WaveShaperNode, which is used to implement non-linear distortion effects. */
+  def createWaveShaper(): WaveShaperNode = js.native
+
+  /** Asynchronously decodes audio file data contained in an ArrayBuffer. In this case, the ArrayBuffer is usually
+    * loaded from an XMLHttpRequest's response attribute after setting the responseType to arraybuffer. This method only
+    * works on complete files, not fragments of audio files.
+    *
+    * @param audioData
+    *   An ArrayBuffer containing the audio data to be decoded, usually grabbed from an XMLHttpRequest's response
+    *   attribute after setting the responseType to arraybuffer.
+    * @param successCallback
+    *   A callback function to be invoked when the decoding successfully finishes. The single argument to this callback
+    *   is an AudioBuffer representing the decoded PCM audio data. Usually you'll want to put the decoded data into an
+    *   AudioBufferSourceNode, from which it can be played and manipulated how you want.
+    * @param errorCallback
+    *   An optional error callback, to be invoked if an error occurs when the audio data is being decoded.
+    */
+  def decodeAudioData(
+      audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _] = js.native,
+      errorCallback: js.Function0[_] = js.native
+  ): js.Promise[AudioBuffer] = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/BlobEvent.scala b/dom/src/main/scala/org/scalajs/dom/BlobEvent.scala
new file mode 100644
index 000000000..0e67a32ca
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/BlobEvent.scala
@@ -0,0 +1,19 @@
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+/** The BlobEvent interface represents events associated with a Blob. These blobs are typically, but not necessarily,
+  * associated with media content.
+  */
+@JSGlobal
+@js.native
+class BlobEvent(typeArg: String, init: BlobEventInit) extends Event(typeArg, init) {
+
+  def this(init: BlobEventInit) = {
+    this("dataavailable", init)
+  }
+
+  /** Represents a Blob associated with the event. */
+  def data: Blob = js.native
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/BlobEventInit.scala b/dom/src/main/scala/org/scalajs/dom/BlobEventInit.scala
new file mode 100644
index 000000000..866bfca4b
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/BlobEventInit.scala
@@ -0,0 +1,9 @@
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** Init dictionary for the [[BlobEvent]] constructor. Non-native so callers can instantiate it from Scala code. */
+trait BlobEventInit extends EventInit {
+
+  /** The Blob associated with the event. */
+  var data: Blob
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/ConstantSourceNode.scala b/dom/src/main/scala/org/scalajs/dom/ConstantSourceNode.scala
new file mode 100644
index 000000000..e0b490c81
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/ConstantSourceNode.scala
@@ -0,0 +1,26 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@JSGlobal
+@js.native
+/** Represents an audio source (based upon AudioScheduledSourceNode) whose output is a single unchanging value. This
+  * it useful for cases in which you need a constant value coming in from an audio source. In addition, it can be used
+  * like a constructible AudioParam by automating the value of its offset or by connecting another node to it; see
+  * Controlling multiple parameters with ConstantSourceNode.
+  */
+class ConstantSourceNode(context: BaseAudioContext, options: ConstantSourceNodeOptions = js.native)
+    extends AudioScheduledSourceNode {
+
+  /** Returns an AudioParam object indicating the numeric a-rate value which is always returned by the source when asked
+    * for the next sample.
+    */
+  val offset: AudioParam = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/ConstantSourceNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/ConstantSourceNodeOptions.scala
new file mode 100644
index 000000000..af6c73cfe
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/ConstantSourceNodeOptions.scala
@@ -0,0 +1,16 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait ConstantSourceNodeOptions extends js.Object {
+
+  /** A read-only AudioParam specifying the constant value generated by the source. The default is 1.0. The normal range
+    * is -1.0 to 1.0, but the value can be anywhere in the range from -Infinity to +Infinity.
+    */
+  var offset: js.UndefOr[Double] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/GainNode.scala b/dom/src/main/scala/org/scalajs/dom/GainNode.scala
index c70234e8b..adeb38d9e 100644
--- a/dom/src/main/scala/org/scalajs/dom/GainNode.scala
+++ b/dom/src/main/scala/org/scalajs/dom/GainNode.scala
@@ -29,3 +29,12 @@ trait GainNode extends AudioNode {
   /** Is an a-rate AudioParam representing the amount of gain to apply. */
   val gain: AudioParam = js.native
 }
+
+object GainNode {
+
+  import js.`|`.undefOr2jsAny
+
+  def apply(context: BaseAudioContext, options: js.UndefOr[GainNodeOptions] = js.undefined): GainNode = {
+    js.Dynamic.newInstance(js.Dynamic.global.GainNode)(context, options).asInstanceOf[GainNode]
+  }
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/GainNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/GainNodeOptions.scala
new file mode 100644
index 000000000..feaf889ec
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/GainNodeOptions.scala
@@ -0,0 +1,14 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait GainNodeOptions extends js.Object {
+
+  /** The amount of gain to apply. This parameter is a-rate and its nominal range is (-∞,+∞). The default is 1. */
+  var gain: js.UndefOr[Double] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNode.scala b/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNode.scala
index 99e1e6cb1..f306ef8cb 100644
--- a/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNode.scala
+++ b/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNode.scala
@@ -21,4 +21,18 @@ import scala.scalajs.js
   *     method that created it.
   */
 @js.native
-trait MediaElementAudioSourceNode extends AudioNode
+trait MediaElementAudioSourceNode extends AudioNode {
+  def mediaElement: HTMLMediaElement = js.native
+}
+
+object MediaElementAudioSourceNode {
+
+  import js.`|`.undefOr2jsAny
+
+  def apply(context: BaseAudioContext,
+      options: js.UndefOr[MediaElementAudioSourceNodeOptions] = js.undefined): MediaElementAudioSourceNode = {
+    js.Dynamic
+      .newInstance(js.Dynamic.global.MediaElementAudioSourceNode)(context, options)
+      .asInstanceOf[MediaElementAudioSourceNode]
+  }
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNodeOptions.scala
new file mode 100644
index 000000000..6fe47b069
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/MediaElementAudioSourceNodeOptions.scala
@@ -0,0 +1,15 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait MediaElementAudioSourceNodeOptions extends js.Object {
+
+  /** An HTMLMediaElement that will be used as the source for the audio. */
+  var mediaElement: HTMLMediaElement
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/MediaRecorder.scala b/dom/src/main/scala/org/scalajs/dom/MediaRecorder.scala
new file mode 100644
index 000000000..61c27a535
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/MediaRecorder.scala
@@ -0,0 +1,43 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@js.native
+@JSGlobal
+/** Provides functionality to easily record media */
+class MediaRecorder(stream: MediaStream, options: MediaRecorderOptions) extends EventTarget {
+
+  /** Fires periodically each time timeslice milliseconds of media have been recorded (or when the entire media has been
+    * recorded, if timeslice wasn't specified). The event, of type BlobEvent, contains the recorded media in its data
+    * property.
+    */
+  var ondataavailable: js.Function1[BlobEvent, Any] = js.native
+
+  /** Fired when there are fatal errors that stop recording. The received event is based on the MediaRecorderErrorEvent
+    * interface, whose error property contains a DOMException that describes the actual error that occurred.
+    */
+  var onerror: js.Function1[Event, Any] = js.native
+
+  /** Fired when media recording ends, either when the MediaStream ends, or after the MediaRecorder.stop() method is
+    * called.
+    */
+  var onstop: js.Function1[Event, Any] = js.native
+
+  def this(stream: MediaStream) = this(stream, js.native)
+
+  /** Used to resume media recording when it has been previously paused. */
+  def resume(): Unit = js.native
+
+  /** Begins recording media into one or more Blob objects. */
+  def start(): Unit = js.native
+
+  /** Used to stop media capture. */
+  def stop(): Unit = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/MediaRecorderOptions.scala b/dom/src/main/scala/org/scalajs/dom/MediaRecorderOptions.scala
new file mode 100644
index 000000000..d71d31644
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/MediaRecorderOptions.scala
@@ -0,0 +1,26 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait MediaRecorderOptions extends js.Object {
+
+  /** The MIME type to use for the recording, e.g. "video/webm" or "audio/webm". */
+  var mimeType: js.UndefOr[String] = js.undefined
+
+  /** The chosen bitrate for the audio component of the media. */
+  var audioBitsPerSecond: js.UndefOr[Double] = js.undefined
+
+  /** The chosen bitrate for the video component of the media. */
+  var videoBitsPerSecond: js.UndefOr[Double] = js.undefined
+
+  /** The chosen bitrate for the audio and video components of the media. This can be specified instead of the above two
+    * properties. If this is specified along with one or the other of the above properties, this will be used for the
+    * one that isn't specified.
+    */
+  var bitsPerSecond: js.UndefOr[Double] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/OfflineAudioContext.scala b/dom/src/main/scala/org/scalajs/dom/OfflineAudioContext.scala
index d8ccc27e6..72668a61f 100644
--- a/dom/src/main/scala/org/scalajs/dom/OfflineAudioContext.scala
+++ b/dom/src/main/scala/org/scalajs/dom/OfflineAudioContext.scala
@@ -32,7 +32,7 @@ import scala.scalajs.js.annotation._
   */
 @js.native
 @JSGlobal
-class OfflineAudioContext(numOfChannels: Int, length: Int, sampleRate: Int) extends AudioContext {
+class OfflineAudioContext(numOfChannels: Int, length: Int, sampleRate: Int) extends BaseAudioContext {
 
   /** The promise-based startRendering() method of the OfflineAudioContext Interface starts rendering the audio graph,
     * taking into account the current connections and the current scheduled changes.
@@ -41,4 +41,9 @@ class OfflineAudioContext(numOfChannels: Int, length: Int, sampleRate: Int) exte
     * promise resolves with an AudioBuffer containing the rendered audio.
     */
   def startRendering(): js.Promise[AudioBuffer] = js.native
+
+  /** Schedules a suspension of the time progression in the audio context at the specified time and returns a promise.
+    */
+  def suspend(suspendTime: Double): js.Promise[Unit] = js.native
+
 }
diff --git a/dom/src/main/scala/org/scalajs/dom/OscillatorNode.scala b/dom/src/main/scala/org/scalajs/dom/OscillatorNode.scala
index afaaaf53a..b7d99be58 100644
--- a/dom/src/main/scala/org/scalajs/dom/OscillatorNode.scala
+++ b/dom/src/main/scala/org/scalajs/dom/OscillatorNode.scala
@@ -20,7 +20,7 @@ import scala.scalajs.js
   *   - Channel interpretation: speakers
   */
 @js.native
-trait OscillatorNode extends AudioNode {
+trait OscillatorNode extends AudioScheduledSourceNode {
 
   /** An a-rate AudioParam representing the frequency of oscillation in hertz (though the AudioParam returned is
     * read-only, the value it represents is not.)
@@ -33,13 +33,7 @@ trait OscillatorNode extends AudioNode {
   var detune: AudioParam = js.native
 
   /** Represents the shape of the oscillator wave generated. Different waves will produce different tones. */
-  var `type`: String = js.native // Not sure if this is correct ...
-
-  /** This method specifies the exact time to start playing the tone. */
-  def start(when: Double = js.native): Unit = js.native
-
-  /** This method specifies the exact time to stop playing the tone. */
-  def stop(when: Double = js.native): Unit = js.native
+  var `type`: OscillatorNodeType = js.native
 
   /** Used to point to a PeriodicWave defining a periodic waveform that can be used to shape the oscillator's output,
     * when type = "custom" is used.
@@ -47,7 +41,12 @@ trait OscillatorNode extends AudioNode {
     * This replaces the now-obsolete OscillatorNode.setWaveTable.
     */
   def setPeriodicWave(wave: PeriodicWave): Unit = js.native
+}
+
+object OscillatorNode {
+
+  import js.`|`.undefOr2jsAny
 
-  /** Used to set the event handler for the ended event, which fires when the tone has stopped playing. */
-  var onended: js.Function1[Event, _] = js.native
+  def apply(context: BaseAudioContext, options: js.UndefOr[OscillatorNodeOptions] = js.undefined): OscillatorNode =
+    js.Dynamic.newInstance(js.Dynamic.global.OscillatorNode)(context, options).asInstanceOf[OscillatorNode]
 }
diff --git a/dom/src/main/scala/org/scalajs/dom/OscillatorNodeOptions.scala b/dom/src/main/scala/org/scalajs/dom/OscillatorNodeOptions.scala
new file mode 100644
index 000000000..83e999a82
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/OscillatorNodeOptions.scala
@@ -0,0 +1,25 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait OscillatorNodeOptions extends js.Object {
+
+  /** The shape of the wave produced by the node. Valid values are 'sine', 'square', 'sawtooth', 'triangle' and
+    * 'custom'. The default is 'sine'.
+    */
+  var `type`: js.UndefOr[OscillatorNodeType] = js.undefined
+
+  /** A detuning value (in cents) which will offset the frequency by the given amount. Its default is 0. */
+  var detune: js.UndefOr[Double] = js.undefined
+
+  /** The frequency (in hertz) of the periodic waveform. Its default is 440. */
+  var frequency: js.UndefOr[Double] = js.undefined
+
+  /** An arbitrary period waveform described by a PeriodicWave object. */
+  var periodicWave: js.UndefOr[PeriodicWave] = js.undefined
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/RTCStatsReport.scala b/dom/src/main/scala/org/scalajs/dom/RTCStatsReport.scala
index 0d5c65884..94b93c8d2 100644
--- a/dom/src/main/scala/org/scalajs/dom/RTCStatsReport.scala
+++ b/dom/src/main/scala/org/scalajs/dom/RTCStatsReport.scala
@@ -5,6 +5,4 @@ import scala.scalajs.js
 
 //https://www.w3.org/TR/2015/WD-webrtc-20150210/#idl-def-RTCStatsReport
 @js.native
-trait RTCStatsReport extends js.Object {
-  def apply(id: String): RTCStats = js.native
-}
+trait RTCStatsReport extends ReadOnlyMapLike[String, RTCStats] {}
diff --git a/dom/src/main/scala/org/scalajs/dom/ReadOnlyMapLike.scala b/dom/src/main/scala/org/scalajs/dom/ReadOnlyMapLike.scala
new file mode 100644
index 000000000..cf274c2aa
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/ReadOnlyMapLike.scala
@@ -0,0 +1,39 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+/** Browser Map-like objects (or "maplike objects") are Web API interfaces that behave in many ways like a Map.
+  *
+  * Just like Map, entries can be iterated in the same order that they were added to the object. Map-like objects and
+  * Map also have properties and methods that share the same name and behavior. However unlike Map they only allow
+  * specific predefined types for the keys and values of each entry.
+  */
+@js.native
+trait ReadOnlyMapLike[K, V] extends js.Iterable[js.Tuple2[K, V]] {
+
+  @JSBracketAccess
+  def apply(index: K): V = js.native
+
+  /** Returns a boolean indicating whether a value has been associated with the passed key in the Map object or not. */
+  def has(key: K): Boolean = js.native
+
+  def forEach(callbackFn: js.Function2[V, K, Unit]): Unit = js.native
+
+  def size: Int = js.native
+
+  def keys(): js.Iterator[K] = js.native
+
+  def entries(): js.Iterator[js.Tuple2[K, V]] = js.native
+
+  def values(): js.Iterator[V] = js.native
+
+  @JSName(js.Symbol.iterator)
+  override def jsIterator(): js.Iterator[js.Tuple2[K, V]] = js.native
+
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/Worklet.scala b/dom/src/main/scala/org/scalajs/dom/Worklet.scala
new file mode 100644
index 000000000..918cb7650
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/Worklet.scala
@@ -0,0 +1,23 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+import scala.scalajs.js.annotation._
+
+@JSGlobal
+@js.native
+/** The Worklet interface is a lightweight version of Web Workers and gives developers access to low-level parts of the
+  * rendering pipeline.
+  */
+abstract class Worklet extends js.Object {
+
+  /** Loads the module in the given JavaScript file and adds it to the current Worklet.
+    * @param moduleURL
+    *   A String containing the URL of a JavaScript file with the module to add.
+    */
+  def addModule(moduleURL: String, options: WorkletOptions = js.native): js.Promise[Unit] = js.native
+}
diff --git a/dom/src/main/scala/org/scalajs/dom/WorkletGlobalScope.scala b/dom/src/main/scala/org/scalajs/dom/WorkletGlobalScope.scala
new file mode 100644
index 000000000..b191a537f
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/WorkletGlobalScope.scala
@@ -0,0 +1,15 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+/** An abstract class that specific worklet scope classes inherit from. Each WorkletGlobalScope defines a new global
+  * environment.
+  */
+trait WorkletGlobalScope extends js.Object {}
diff --git a/dom/src/main/scala/org/scalajs/dom/WorkletOptions.scala b/dom/src/main/scala/org/scalajs/dom/WorkletOptions.scala
new file mode 100644
index 000000000..0d872ba3c
--- /dev/null
+++ b/dom/src/main/scala/org/scalajs/dom/WorkletOptions.scala
@@ -0,0 +1,16 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+  * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+  *
+  * Everything else is under the MIT License http://opensource.org/licenses/MIT
+  */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait WorkletOptions extends js.Object {
+
+  /** Indicates whether to send credentials (e.g. cookies and HTTP authentication) when loading the module. Can be one
+    * of "omit", "same-origin", or "include". Defaults to "same-origin". See also Request.credentials.
+    */
+  var credentials: js.UndefOr[RequestCredentials] = js.undefined
+}