
Commit 4f524b8

backup
1 parent 053fa19 commit 4f524b8

13 files changed: 275 additions, 151 deletions

dom/src/main/scala/org/scalajs/dom/AudioBufferSourceNode.scala

Lines changed: 3 additions & 11 deletions

@@ -24,7 +24,7 @@ import scala.scalajs.js
  * - Channel count: defined by the associated AudioBuffer
  */
 @js.native
-trait AudioBufferSourceNode extends AudioNode {
+trait AudioBufferSourceNode extends AudioScheduledSourceNode {
 
   /** Is an AudioBuffer that defines the audio asset to be played, or when set to the value null, defines a single
    * channel of silence.
@@ -63,16 +63,8 @@ trait AudioBufferSourceNode extends AudioNode {
    * The duration parameter, which defaults to the length of the asset minus the value of offset, defines the length
    * of the portion of the asset to be played.
    */
-  def start(when: Double = js.native, offset: Double = js.native, duration: Double = js.native): Unit = js.native
+  def start(when: Double, offset: Double, duration: Double): Unit = js.native
 
-  /** Schedules the end of the playback of an audio asset.
-   *
-   * @param when
-   *   The when parameter defines when the playback will stop. If it represents a time in the past, the playback will
-   *   end immediately. If this method is called twice or more, an exception is raised.
-   */
-  def stop(when: Double = js.native): Unit = js.native
+  def start(when: Double, offset: Double): Unit = js.native
 
-  /** Is an EventHandler containing the callback associated with the ended event. */
-  var onended: js.Function1[Event, _] = js.native
 }
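
For context, a minimal sketch of how the reworked node might be driven after this change: the parameterless start()/stop() overloads and the onended handler now come from AudioScheduledSourceNode, while the explicit start overloads above lose their js.native defaults. The helper name and the timing values are illustrative, not part of the commit.

import org.scalajs.dom._

// Hypothetical helper: play a 2-second slice of an already-decoded buffer,
// starting 1 second into the asset.
def playSlice(ctx: AudioContext, buf: AudioBuffer): Unit = {
  val src = ctx.createBufferSource()
  src.buffer = buf // the asset to play
  src.connect(ctx.destination)
  src.onended = (_: Event) => println("playback ended") // inherited from AudioScheduledSourceNode
  src.start(ctx.currentTime, 1.0, 2.0) // when, offset, duration: all now required
}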

dom/src/main/scala/org/scalajs/dom/AudioContext.scala

Lines changed: 12 additions & 126 deletions

@@ -17,98 +17,13 @@ import scala.scalajs.js.annotation._
  */
 @js.native
 @JSGlobal
-class AudioContext extends EventTarget {
+class AudioContext extends BaseAudioContext {
 
-  /** Returns a double representing an ever-increasing hardware time in seconds used for scheduling. It starts at 0 and
-   * cannot be stopped, paused or reset.
-   */
-  def currentTime: Double = js.native
-
-  /** Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought
-   * of as the audio-rendering device.
-   */
-  val destination: AudioDestinationNode = js.native
-
-  /** Returns the AudioListener object, used for 3D spatialization. */
-  val listener: AudioListener = js.native
-
-  /** Returns a float representing the sample rate (in samples per second) used by all nodes in this context. The
-   * sample-rate of an AudioContext cannot be changed.
-   */
-  val sampleRate: Double = js.native
+  /** Returns the number of seconds of processing latency incurred by the AudioContext passing the audio from the
+   * AudioDestinationNode to the audio subsystem.
+   */
+  def baseLatency: Double = js.native
 
-  /** Returns the current state of the AudioContext. */
-  def state: String = js.native
-
-  /** Closes the audio context, releasing any system audio resources that it uses. */
-  def close(): js.Promise[Unit] = js.native
-
-  /** Creates an AnalyserNode, which can be used to expose audio time and frequency data and for example to create data
-   * visualisations.
-   */
-  def createAnalyser(): AnalyserNode = js.native
-
-  /** Creates a BiquadFilterNode, which represents a second order filter configurable as several different common filter
-   * types: high-pass, low-pass, band-pass, etc.
-   */
-  def createBiquadFilter(): BiquadFilterNode = js.native
-
-  /** Creates a new, empty AudioBuffer object, which can then be populated by data and played via an
-   * AudioBufferSourceNode.
-   *
-   * @param numOfChannels
-   *   An integer representing the number of channels this buffer should have. Implementations must support a minimum
-   *   of 32 channels.
-   * @param length
-   *   An integer representing the size of the buffer in sample-frames.
-   * @param sampleRate
-   *   The sample-rate of the linear audio data in sample-frames per second. An implementation must support
-   *   sample-rates in at least the range 22050 to 96000.
-   */
-  def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer = js.native
-
-  /** Creates an AudioBufferSourceNode, which can be used to play and manipulate audio data contained within an
-   * AudioBuffer object. AudioBuffers are created using AudioContext.createBuffer or returned by
-   * AudioContext.decodeAudioData when it successfully decodes an audio track.
-   */
-  def createBufferSource(): AudioBufferSourceNode = js.native
-
-  /** Creates a ChannelMergerNode, which is used to combine channels from multiple audio streams into a single audio
-   * stream.
-   *
-   * @param numberOfInputs
-   *   The number of channels in the input audio streams, which the output stream will contain; the default is 6 if
-   *   this parameter is not specified.
-   */
-  def createChannelMerger(numberOfInputs: Int = js.native): ChannelMergerNode = js.native
-
-  /** Creates a ChannelSplitterNode, which is used to access the individual channels of an audio stream and process them
-   * separately.
-   *
-   * @param numberOfOutputs
-   *   The number of channels in the input audio stream that you want to output separately; the default is 6 if this
-   *   parameter is not specified.
-   */
-  def createChannelSplitter(numberOfOutputs: Int = js.native): ChannelSplitterNode = js.native
-
-  /** Creates a ConvolverNode, which can be used to apply convolution effects to your audio graph, for example a
-   * reverberation effect.
-   */
-  def createConvolver(): ConvolverNode = js.native
-
-  /** Creates a DelayNode, which is used to delay the incoming audio signal by a certain amount. This node is also
-   * useful to create feedback loops in a Web Audio API graph.
-   *
-   * @param maxDelayTime
-   *   The maximum amount of time, in seconds, that the audio signal can be delayed by. The default value is 0.
-   */
-  def createDelay(maxDelayTime: Int): DelayNode = js.native
-
-  /** Creates a DynamicsCompressorNode, which can be used to apply acoustic compression to an audio signal. */
-  def createDynamicsCompressor(): DynamicsCompressorNode = js.native
-
-  /** Creates a GainNode, which can be used to control the overall volume of the audio graph. */
-  def createGain(): GainNode = js.native
+  /** Returns an estimation of the output latency of the current audio context. */
+  def outputLatency: Double = js.native
 
   /** Creates a MediaElementAudioSourceNode associated with an HTMLMediaElement. This can be used to play and manipulate
    * audio from <video> or <audio> elements.
@@ -131,47 +46,18 @@ class AudioContext extends EventTarget {
    */
   def createMediaStreamDestination(): MediaStreamAudioDestinationNode = js.native
 
-  /** Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone. */
-  def createOscillator(): OscillatorNode = js.native
-
-  /** Creates a PannerNode, which is used to spatialise an incoming audio stream in 3D space. */
-  def createPanner(): PannerNode = js.native
-
-  /** Creates a PeriodicWave, used to define a periodic waveform that can be used to determine the output of an
-   * OscillatorNode.
-   */
-  def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave = js.native
-
-  /** Creates a StereoPannerNode, which can be used to apply stereo panning to an audio source. */
-  def createStereoPanner(): StereoPannerNode = js.native
-
-  /** Creates a WaveShaperNode, which is used to implement non-linear distortion effects. */
-  def createWaveShaper(): WaveShaperNode = js.native
-
-  /** Asynchronously decodes audio file data contained in an ArrayBuffer. In this case, the ArrayBuffer is usually
-   * loaded from an XMLHttpRequest's response attribute after setting the responseType to arraybuffer. This method only
-   * works on complete files, not fragments of audio files.
-   *
-   * @param audioData
-   *   An ArrayBuffer containing the audio data to be decoded, usually grabbed from an XMLHttpRequest's response
-   *   attribute after setting the responseType to arraybuffer.
-   * @param successCallback
-   *   A callback function to be invoked when the decoding successfully finishes. The single argument to this callback
-   *   is an AudioBuffer representing the decoded PCM audio data. Usually you'll want to put the decoded data into an
-   *   AudioBufferSourceNode, from which it can be played and manipulated how you want.
-   * @param errorCallback
-   *   An optional error callback, to be invoked if an error occurs when the audio data is being decoded.
-   */
-  def decodeAudioData(
-      audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _] = js.native,
-      errorCallback: js.Function0[_] = js.native
-  ): js.Promise[AudioBuffer] = js.native
-
   /** Resumes the progression of time in an audio context that has previously been suspended. */
   def resume(): js.Promise[Unit] = js.native
 
   /** Suspends the progression of time in the audio context, temporarily halting audio hardware access and reducing
    * CPU/battery usage in the process.
    */
   def suspend(): js.Promise[Unit] = js.native
+
+  /** Closes the audio context, releasing any system audio resources that it uses. */
+  def close(): js.Promise[Unit] = js.native
+
+  // TODO docs
+  def getOutputTimestamp: AudioTimestamp = js.native
 }
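
A quick sketch of the new members in use. Note that getOutputTimestamp is declared without parentheses, so it reads like a property; the printed labels are illustrative only.

import org.scalajs.dom._

val ctx = new AudioContext()
println(s"base latency: ${ctx.baseLatency} s")     // graph-to-destination processing latency
println(s"output latency: ${ctx.outputLatency} s") // estimated destination-to-speakers latency
val ts = ctx.getOutputTimestamp                    // pairs the context clock with performance.now()
println(s"context ${ts.contextTime} s at performance ${ts.performanceTime} ms")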

dom/src/main/scala/org/scalajs/dom/AudioNode.scala

Lines changed: 2 additions & 2 deletions

@@ -47,14 +47,14 @@ trait AudioNode extends EventTarget {
 
   /** Represents an enumerated value describing the way channels must be matched between the node's inputs and outputs.
    */
-  var channelCountMode: Int = js.native
+  var channelCountMode: ChannelCountMode = js.native
 
   /** Represents an enumerated value describing the meaning of the channels. This interpretation will define how audio
    * up-mixing and down-mixing will happen.
    *
    * The possible values are "speakers" or "discrete".
    */
-  var channelInterpretation: String = js.native
+  var channelInterpretation: ChannelInterpretation = js.native
 
   /** Allows us to connect one output of this node to one input of another node. */
   def connect(audioNode: AudioNode): Unit = js.native
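
The two enum-like types referenced here, ChannelCountMode and ChannelInterpretation, are not shown in this excerpt of the commit. Assuming their companions expose the Web Audio spec values ("max", "clamped-max", "explicit" and "speakers", "discrete") as members, usage might look like the sketch below; the gain node is just a convenient AudioNode to configure.

import org.scalajs.dom._

val ctx = new AudioContext()
val gain = ctx.createGain()
// Assumed companion members named after the spec's string values:
gain.channelCountMode = ChannelCountMode.explicit           // was an Int before this change
gain.channelInterpretation = ChannelInterpretation.discrete // was a raw String before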
dom/src/main/scala/org/scalajs/dom/AudioScheduledSourceNode.scala

Lines changed: 28 additions & 0 deletions

@@ -0,0 +1,28 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+ * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+ *
+ * Everything else is under the MIT License http://opensource.org/licenses/MIT
+ */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+@js.native
+trait AudioScheduledSourceNode extends AudioNode {
+
+  /** This method specifies the exact time to start playing the tone. */
+  def start(): Unit = js.native
+
+  /** This method specifies the exact time to stop playing the tone. */
+  def stop(): Unit = js.native
+
+  /** This method specifies the exact time to start playing the tone. */
+  def start(when: Double): Unit = js.native
+
+  /** This method specifies the exact time to stop playing the tone. */
+  def stop(when: Double): Unit = js.native
+
+  /** Used to set the event handler for the ended event, which fires when the tone has stopped playing. */
+  var onended: js.Function1[Event, _] = js.native
+
+}
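
A minimal sketch of the shared lifecycle this trait factors out, using an oscillator as the concrete source (OscillatorNode extends AudioScheduledSourceNode in the Web Audio API hierarchy, and already exposed these members in this binding); the timing value is illustrative:

import org.scalajs.dom._

val ctx = new AudioContext()
val osc = ctx.createOscillator()
osc.connect(ctx.destination)
osc.onended = (_: Event) => println("tone finished")
osc.start()                     // begin immediately
osc.stop(ctx.currentTime + 1.0) // schedule the stop one second out
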
dom/src/main/scala/org/scalajs/dom/AudioTimestamp.scala

Lines changed: 13 additions & 0 deletions

@@ -0,0 +1,13 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+ * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+ *
+ * Everything else is under the MIT License http://opensource.org/licenses/MIT
+ */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+trait AudioTimestamp extends js.Object {
+  var contextTime: Double
+  var performanceTime: Double
+}
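
Because the trait is non-native and extends js.Object, an instance can also be built on the Scala side. A hypothetical construction, with the two clock domains noted in comments:

import scala.scalajs.js

val ts = new AudioTimestamp {
  var contextTime: Double = 0.0     // seconds, on the AudioContext.currentTime clock
  var performanceTime: Double = 0.0 // milliseconds, on the performance.now() clock
}
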
dom/src/main/scala/org/scalajs/dom/BaseAudioContext.scala

Lines changed: 140 additions & 0 deletions

@@ -0,0 +1,140 @@
+/** Documentation is thanks to Mozilla Contributors at https://developer.mozilla.org/en-US/docs/Web/API and available
+ * under the Creative Commons Attribution-ShareAlike v2.5 or later. http://creativecommons.org/licenses/by-sa/2.5/
+ *
+ * Everything else is under the MIT License http://opensource.org/licenses/MIT
+ */
+package org.scalajs.dom
+
+import scala.scalajs.js
+
+/** The BaseAudioContext interface of the Web Audio API acts as a base definition for online and offline
+ * audio-processing graphs, as represented by AudioContext and OfflineAudioContext respectively. You wouldn't use
+ * BaseAudioContext directly; you'd use its features via one of these two inheriting interfaces.
+ *
+ * A BaseAudioContext can be a target of events, therefore it implements the EventTarget interface.
+ */
+@js.native
+trait BaseAudioContext extends EventTarget {
+
+  /** Returns a double representing an ever-increasing hardware time in seconds used for scheduling. It starts at 0 and
+   * cannot be stopped, paused or reset.
+   */
+  def currentTime: Double = js.native
+
+  /** Returns an AudioDestinationNode representing the final destination of all audio in the context. It can be thought
+   * of as the audio-rendering device.
+   */
+  val destination: AudioDestinationNode = js.native
+
+  /** Returns the AudioListener object, used for 3D spatialization. */
+  val listener: AudioListener = js.native
+
+  /** Returns a float representing the sample rate (in samples per second) used by all nodes in this context. The
+   * sample-rate of an AudioContext cannot be changed.
+   */
+  val sampleRate: Double = js.native
+
+  /** Returns the current state of the AudioContext. */
+  def state: String = js.native
+
+  /** Creates an AnalyserNode, which can be used to expose audio time and frequency data and for example to create data
+   * visualisations.
+   */
+  def createAnalyser(): AnalyserNode = js.native
+
+  /** Creates a BiquadFilterNode, which represents a second order filter configurable as several different common filter
+   * types: high-pass, low-pass, band-pass, etc.
+   */
+  def createBiquadFilter(): BiquadFilterNode = js.native
+
+  /** Creates a new, empty AudioBuffer object, which can then be populated by data and played via an
+   * AudioBufferSourceNode.
+   *
+   * @param numOfChannels
+   *   An integer representing the number of channels this buffer should have. Implementations must support a minimum
+   *   of 32 channels.
+   * @param length
+   *   An integer representing the size of the buffer in sample-frames.
+   * @param sampleRate
+   *   The sample-rate of the linear audio data in sample-frames per second. An implementation must support
+   *   sample-rates in at least the range 22050 to 96000.
+   */
+  def createBuffer(numOfChannels: Int, length: Int, sampleRate: Int): AudioBuffer = js.native
+
+  /** Creates an AudioBufferSourceNode, which can be used to play and manipulate audio data contained within an
+   * AudioBuffer object. AudioBuffers are created using AudioContext.createBuffer or returned by
+   * AudioContext.decodeAudioData when it successfully decodes an audio track.
+   */
+  def createBufferSource(): AudioBufferSourceNode = js.native
+
+  /** Creates a ChannelMergerNode, which is used to combine channels from multiple audio streams into a single audio
+   * stream.
+   *
+   * @param numberOfInputs
+   *   The number of channels in the input audio streams, which the output stream will contain; the default is 6 if
+   *   this parameter is not specified.
+   */
+  def createChannelMerger(numberOfInputs: Int = js.native): ChannelMergerNode = js.native
+
+  /** Creates a ChannelSplitterNode, which is used to access the individual channels of an audio stream and process them
+   * separately.
+   *
+   * @param numberOfOutputs
+   *   The number of channels in the input audio stream that you want to output separately; the default is 6 if this
+   *   parameter is not specified.
+   */
+  def createChannelSplitter(numberOfOutputs: Int = js.native): ChannelSplitterNode = js.native
+
+  /** Creates a ConvolverNode, which can be used to apply convolution effects to your audio graph, for example a
+   * reverberation effect.
+   */
+  def createConvolver(): ConvolverNode = js.native
+
+  /** Creates a DelayNode, which is used to delay the incoming audio signal by a certain amount. This node is also
+   * useful to create feedback loops in a Web Audio API graph.
+   *
+   * @param maxDelayTime
+   *   The maximum amount of time, in seconds, that the audio signal can be delayed by. The default value is 0.
+   */
+  def createDelay(maxDelayTime: Int): DelayNode = js.native
+
+  /** Creates a DynamicsCompressorNode, which can be used to apply acoustic compression to an audio signal. */
+  def createDynamicsCompressor(): DynamicsCompressorNode = js.native
+
+  /** Creates a GainNode, which can be used to control the overall volume of the audio graph. */
+  def createGain(): GainNode = js.native
+
+  /** Creates an OscillatorNode, a source representing a periodic waveform. It basically generates a tone. */
+  def createOscillator(): OscillatorNode = js.native
+
+  /** Creates a PannerNode, which is used to spatialise an incoming audio stream in 3D space. */
+  def createPanner(): PannerNode = js.native
+
+  /** Creates a PeriodicWave, used to define a periodic waveform that can be used to determine the output of an
+   * OscillatorNode.
+   */
+  def createPeriodicWave(real: js.typedarray.Float32Array, imag: js.typedarray.Float32Array): PeriodicWave = js.native
+
+  /** Creates a StereoPannerNode, which can be used to apply stereo panning to an audio source. */
+  def createStereoPanner(): StereoPannerNode = js.native
+
+  /** Creates a WaveShaperNode, which is used to implement non-linear distortion effects. */
+  def createWaveShaper(): WaveShaperNode = js.native
+
+  /** Asynchronously decodes audio file data contained in an ArrayBuffer. In this case, the ArrayBuffer is usually
+   * loaded from an XMLHttpRequest's response attribute after setting the responseType to arraybuffer. This method only
+   * works on complete files, not fragments of audio files.
+   *
+   * @param audioData
+   *   An ArrayBuffer containing the audio data to be decoded, usually grabbed from an XMLHttpRequest's response
+   *   attribute after setting the responseType to arraybuffer.
+   * @param successCallback
+   *   A callback function to be invoked when the decoding successfully finishes. The single argument to this callback
+   *   is an AudioBuffer representing the decoded PCM audio data. Usually you'll want to put the decoded data into an
+   *   AudioBufferSourceNode, from which it can be played and manipulated how you want.
+   * @param errorCallback
+   *   An optional error callback, to be invoked if an error occurs when the audio data is being decoded.
+   */
+  def decodeAudioData(
+      audioData: js.typedarray.ArrayBuffer, successCallback: js.Function1[AudioBuffer, _] = js.native,
+      errorCallback: js.Function0[_] = js.native
+  ): js.Promise[AudioBuffer] = js.native
+
+}
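
Since AudioContext (and, per the doc comment above, OfflineAudioContext) now inherits this surface, graph-building code can be written once against BaseAudioContext. A minimal sketch, assuming OscillatorNode exposes the scheduled start/stop overloads; the tone length is illustrative:

import org.scalajs.dom._

// Works for any BaseAudioContext, online or offline rendering alike.
def beep(ctx: BaseAudioContext): Unit = {
  val osc = ctx.createOscillator()
  val gain = ctx.createGain()
  osc.connect(gain)
  gain.connect(ctx.destination)
  osc.start(ctx.currentTime)
  osc.stop(ctx.currentTime + 0.25) // quarter-second tone
}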
