AudioContext.cpp 32 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /*
  2. * Copyright (C) 2010, Google Inc. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. * 1. Redistributions of source code must retain the above copyright
  8. * notice, this list of conditions and the following disclaimer.
  9. * 2. Redistributions in binary form must reproduce the above copyright
  10. * notice, this list of conditions and the following disclaimer in the
  11. * documentation and/or other materials provided with the distribution.
  12. *
  13. * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
  14. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  15. * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  16. * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
  17. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  18. * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
  19. * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
  20. * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  21. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  22. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  23. */
  24. #include "config.h"
  25. #if ENABLE(WEB_AUDIO)
  26. #include "AudioContext.h"
  27. #include "AnalyserNode.h"
  28. #include "AsyncAudioDecoder.h"
  29. #include "AudioBuffer.h"
  30. #include "AudioBufferCallback.h"
  31. #include "AudioBufferSourceNode.h"
  32. #include "AudioListener.h"
  33. #include "AudioNodeInput.h"
  34. #include "AudioNodeOutput.h"
  35. #include "BiquadFilterNode.h"
  36. #include "ChannelMergerNode.h"
  37. #include "ChannelSplitterNode.h"
  38. #include "ConvolverNode.h"
  39. #include "DefaultAudioDestinationNode.h"
  40. #include "DelayNode.h"
  41. #include "Document.h"
  42. #include "DynamicsCompressorNode.h"
  43. #include "ExceptionCode.h"
  44. #include "FFTFrame.h"
  45. #include "GainNode.h"
  46. #include "HRTFDatabaseLoader.h"
  47. #include "HRTFPanner.h"
  48. #include "OfflineAudioCompletionEvent.h"
  49. #include "OfflineAudioDestinationNode.h"
  50. #include "OscillatorNode.h"
  51. #include "Page.h"
  52. #include "PannerNode.h"
  53. #include "ScriptCallStack.h"
  54. #include "ScriptController.h"
  55. #include "ScriptProcessorNode.h"
  56. #include "WaveShaperNode.h"
  57. #include "WaveTable.h"
  58. #if ENABLE(MEDIA_STREAM)
  59. #include "MediaStream.h"
  60. #include "MediaStreamAudioDestinationNode.h"
  61. #include "MediaStreamAudioSourceNode.h"
  62. #endif
  63. #if ENABLE(VIDEO)
  64. #include "HTMLMediaElement.h"
  65. #include "MediaElementAudioSourceNode.h"
  66. #endif
  67. #if DEBUG_AUDIONODE_REFERENCES
  68. #include <stdio.h>
  69. #endif
  70. #if USE(GSTREAMER)
  71. #include "GStreamerUtilities.h"
  72. #endif
  73. #include <wtf/ArrayBuffer.h>
  74. #include <wtf/Atomics.h>
  75. #include <wtf/MainThread.h>
  76. #include <wtf/OwnPtr.h>
  77. #include <wtf/PassOwnPtr.h>
  78. #include <wtf/RefCounted.h>
  79. #include <wtf/text/WTFString.h>
// FIXME: check the proper way to reference an undefined thread ID
// NOTE(review): 0xffffffff does not fit in a 32-bit signed int, so this
// initialization relies on implementation-defined narrowing (typically -1).
// An unsigned type matching WTF::ThreadIdentifier would express the sentinel
// more safely — confirm against the ThreadIdentifier typedef before changing.
const int UndefinedThreadIdentifier = 0xffffffff;

// Upper bound on node teardown work per render quantum.
// NOTE(review): not referenced in the visible portion of this file —
// presumably consumed further down; verify before removing.
const unsigned MaxNodesToDeletePerQuantum = 10;
  83. namespace WebCore {
  84. bool AudioContext::isSampleRateRangeGood(float sampleRate)
  85. {
  86. // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
  87. // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
  88. return sampleRate >= 44100 && sampleRate <= 96000;
  89. }
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;

// Count of live hardware-backed (non-offline) contexts; incremented in
// lazyInitialize() and decremented in uninitialize(). Main-thread only.
unsigned AudioContext::s_hardwareContextCount = 0;
  93. PassRefPtr<AudioContext> AudioContext::create(Document* document, ExceptionCode& ec)
  94. {
  95. UNUSED_PARAM(ec);
  96. ASSERT(document);
  97. ASSERT(isMainThread());
  98. if (s_hardwareContextCount >= MaxHardwareContexts)
  99. return 0;
  100. RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
  101. audioContext->suspendIfNeeded();
  102. return audioContext.release();
  103. }
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document* document)
    : ActiveDOMObject(document)
    , m_isStopScheduled(false)
    , m_isInitialized(false)
    , m_isAudioThreadFinished(false)
    , m_destinationNode(0)
    , m_isDeletionScheduled(false)
    , m_automaticPullNodesNeedUpdating(false)
    , m_connectionCount(0)
    , m_audioThread(0)
    , m_graphOwnerThread(UndefinedThreadIdentifier)
    , m_isOfflineContext(false)
    , m_activeSourceCount(0)
    , m_restrictions(NoRestrictions)
{
    // Shared setup (pending-activity, FFT init, listener, platform restrictions).
    constructCommon();

    m_destinationNode = DefaultAudioDestinationNode::create(this);

    // This sets in motion an asynchronous loading mechanism on another thread.
    // We can check m_hrtfDatabaseLoader->isLoaded() to find out whether or not it has been fully loaded.
    // It's not that useful to have a callback function for this since the audio thread automatically starts rendering on the graph
    // when this has finished (see AudioDestinationNode).
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate());
}
  128. // Constructor for offline (non-realtime) rendering.
  129. AudioContext::AudioContext(Document* document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
  130. : ActiveDOMObject(document)
  131. , m_isStopScheduled(false)
  132. , m_isInitialized(false)
  133. , m_isAudioThreadFinished(false)
  134. , m_destinationNode(0)
  135. , m_automaticPullNodesNeedUpdating(false)
  136. , m_connectionCount(0)
  137. , m_audioThread(0)
  138. , m_graphOwnerThread(UndefinedThreadIdentifier)
  139. , m_isOfflineContext(true)
  140. , m_activeSourceCount(0)
  141. , m_restrictions(NoRestrictions)
  142. {
  143. constructCommon();
  144. // FIXME: the passed in sampleRate MUST match the hardware sample-rate since HRTFDatabaseLoader is a singleton.
  145. m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(sampleRate);
  146. // Create a new destination for offline rendering.
  147. m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
  148. m_destinationNode = OfflineAudioDestinationNode::create(this, m_renderTarget.get());
  149. }
// Initialization shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(this);

#if USE(GSTREAMER)
    initializeGStreamer();
#endif

    FFTFrame::initialize();

    m_listener = AudioListener::create();

#if PLATFORM(IOS)
    // iOS requires a user gesture before audio can start unless the settings
    // explicitly relax that requirement.
    if (!document()->settings() || document()->settings()->mediaPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;
#endif

#if PLATFORM(MAC)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // AudioNodes keep a reference to their context, so there should be no way to be in the destructor if there are still AudioNodes around.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(!m_nodesToDelete.size());
    ASSERT(!m_referencedNodes.size());
    ASSERT(!m_finishedNodes.size());
    ASSERT(!m_automaticPullNodes.size());
    // NOTE(review): resizing m_renderingAutomaticPullNodes in a destructor is
    // surprising; presumably it exists only so the following ASSERT holds when
    // an update was still pending — confirm intent.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(!m_renderingAutomaticPullNodes.size());
}
// One-time initialization performed on first use (node creation). Starts the
// destination node and, for realtime contexts, the audio rendering thread.
void AudioContext::lazyInitialize()
{
    if (!m_isInitialized) {
        // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
        ASSERT(!m_isAudioThreadFinished);
        if (!m_isAudioThreadFinished) {
            if (m_destinationNode.get()) {
                m_destinationNode->initialize();

                if (!isOfflineContext()) {
                    // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
                    // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
                    // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
                    // We may want to consider requiring it for symmetry with OfflineAudioContext.
                    startRendering();
                    ++s_hardwareContextCount;
                }
            }
            m_isInitialized = true;
        }
    }
}
// Final teardown, run after uninitialize(): releases the destination node,
// drains the node-deletion queues, and drops the pending-activity reference.
void AudioContext::clear()
{
    // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
    if (m_destinationNode)
        m_destinationNode.clear();

    // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
    // Deleting nodes can mark more nodes for deletion, so loop until both lists drain.
    do {
        deleteMarkedNodes();
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();
    } while (m_nodesToDelete.size());

    // It was set in constructCommon.
    unsetPendingActivity(this);
}
// Stops all rendering and releases source-node references. Safe to call more
// than once (no-op when not initialized). Main thread only.
void AudioContext::uninitialize()
{
    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        // Balance the increment made in lazyInitialize() for realtime contexts.
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
// True once lazyInitialize() has run and uninitialize() has not.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
  242. bool AudioContext::isRunnable() const
  243. {
  244. if (!isInitialized())
  245. return false;
  246. // Check with the HRTF spatialization system to see if it's finished loading.
  247. return m_hrtfDatabaseLoader->isLoaded();
  248. }
  249. void AudioContext::stopDispatch(void* userData)
  250. {
  251. AudioContext* context = reinterpret_cast<AudioContext*>(userData);
  252. ASSERT(context);
  253. if (!context)
  254. return;
  255. context->uninitialize();
  256. context->clear();
  257. }
// Requests teardown of the context; the real work happens later on the main
// thread via stopDispatch(). Idempotent.
void AudioContext::stop()
{
    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    // Don't call uninitialize() immediately here because the ScriptExecutionContext is in the middle
    // of dealing with all of its ActiveDOMObjects at this point. uninitialize() can de-reference other
    // ActiveDOMObjects so let's schedule uninitialize() to be called later.
    // FIXME: see if there's a more direct way to handle this issue.
    callOnMainThread(stopDispatch, this);
}
// Returns the owning Document. An AudioContext's script execution context is
// always a Document (asserted below).
Document* AudioContext::document() const
{
    ASSERT(m_scriptExecutionContext && m_scriptExecutionContext->isDocument());
    return static_cast<Document*>(m_scriptExecutionContext);
}
  275. PassRefPtr<AudioBuffer> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate, ExceptionCode& ec)
  276. {
  277. RefPtr<AudioBuffer> audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
  278. if (!audioBuffer.get()) {
  279. ec = SYNTAX_ERR;
  280. return 0;
  281. }
  282. return audioBuffer;
  283. }
  284. PassRefPtr<AudioBuffer> AudioContext::createBuffer(ArrayBuffer* arrayBuffer, bool mixToMono, ExceptionCode& ec)
  285. {
  286. ASSERT(arrayBuffer);
  287. if (!arrayBuffer) {
  288. ec = SYNTAX_ERR;
  289. return 0;
  290. }
  291. RefPtr<AudioBuffer> audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer->data(), arrayBuffer->byteLength(), mixToMono, sampleRate());
  292. if (!audioBuffer.get()) {
  293. ec = SYNTAX_ERR;
  294. return 0;
  295. }
  296. return audioBuffer;
  297. }
// Asynchronously decodes encoded audio data on the decoder's worker;
// successCallback/errorCallback fire when decoding completes or fails.
// Sets ec to SYNTAX_ERR when audioData is null.
void AudioContext::decodeAudioData(ArrayBuffer* audioData, PassRefPtr<AudioBufferCallback> successCallback, PassRefPtr<AudioBufferCallback> errorCallback, ExceptionCode& ec)
{
    if (!audioData) {
        ec = SYNTAX_ERR;
        return;
    }
    m_audioDecoder.decodeAsync(audioData, sampleRate(), successCallback, errorCallback);
}
  306. PassRefPtr<AudioBufferSourceNode> AudioContext::createBufferSource()
  307. {
  308. ASSERT(isMainThread());
  309. lazyInitialize();
  310. RefPtr<AudioBufferSourceNode> node = AudioBufferSourceNode::create(this, m_destinationNode->sampleRate());
  311. // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
  312. // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
  313. refNode(node.get());
  314. return node;
  315. }
#if ENABLE(VIDEO)
// Creates a source node wired to an HTMLMediaElement. Each media element may
// feed at most one source node; a second attempt yields INVALID_STATE_ERR.
PassRefPtr<MediaElementAudioSourceNode> AudioContext::createMediaElementSource(HTMLMediaElement* mediaElement, ExceptionCode& ec)
{
    ASSERT(mediaElement);
    if (!mediaElement) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    // First check if this media element already has a source node.
    if (mediaElement->audioSourceNode()) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    RefPtr<MediaElementAudioSourceNode> node = MediaElementAudioSourceNode::create(this, mediaElement);

    mediaElement->setAudioSourceNode(node.get());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
#endif
#if ENABLE(MEDIA_STREAM)
// Creates a source node fed by a MediaStream. For local streams with audio
// tracks, input is enabled on the destination's local audio device; remote
// streams currently get no provider (see FIXME below).
PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    if (mediaStream->isLocal() && audioTracks.size()) {
        // Enable input for the specific local audio device specified in the MediaStreamSource.
        // NOTE(review): only the first audio track is consulted; additional
        // tracks are ignored — confirm that is intended.
        RefPtr<MediaStreamTrack> localAudio = audioTracks[0];
        MediaStreamSource* source = localAudio->component()->source();
        destination()->enableInput(source->deviceId());
        provider = destination()->localAudioInputProvider();
    } else {
        // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
        provider = 0;
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
// Creates a destination node that exposes rendered audio as a MediaStream.
// Currently fixed at one channel (mono).
PassRefPtr<MediaStreamAudioDestinationNode> AudioContext::createMediaStreamDestination()
{
    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(this, 1);
}
#endif
  372. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, ExceptionCode& ec)
  373. {
  374. // Set number of input/output channels to stereo by default.
  375. return createScriptProcessor(bufferSize, 2, 2, ec);
  376. }
  377. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, ExceptionCode& ec)
  378. {
  379. // Set number of output channels to stereo by default.
  380. return createScriptProcessor(bufferSize, numberOfInputChannels, 2, ec);
  381. }
  382. PassRefPtr<ScriptProcessorNode> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels, ExceptionCode& ec)
  383. {
  384. ASSERT(isMainThread());
  385. lazyInitialize();
  386. RefPtr<ScriptProcessorNode> node = ScriptProcessorNode::create(this, m_destinationNode->sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);
  387. if (!node.get()) {
  388. ec = SYNTAX_ERR;
  389. return 0;
  390. }
  391. refNode(node.get()); // context keeps reference until we stop making javascript rendering callbacks
  392. return node;
  393. }
  394. PassRefPtr<BiquadFilterNode> AudioContext::createBiquadFilter()
  395. {
  396. ASSERT(isMainThread());
  397. lazyInitialize();
  398. return BiquadFilterNode::create(this, m_destinationNode->sampleRate());
  399. }
  400. PassRefPtr<WaveShaperNode> AudioContext::createWaveShaper()
  401. {
  402. ASSERT(isMainThread());
  403. lazyInitialize();
  404. return WaveShaperNode::create(this);
  405. }
  406. PassRefPtr<PannerNode> AudioContext::createPanner()
  407. {
  408. ASSERT(isMainThread());
  409. lazyInitialize();
  410. return PannerNode::create(this, m_destinationNode->sampleRate());
  411. }
  412. PassRefPtr<ConvolverNode> AudioContext::createConvolver()
  413. {
  414. ASSERT(isMainThread());
  415. lazyInitialize();
  416. return ConvolverNode::create(this, m_destinationNode->sampleRate());
  417. }
  418. PassRefPtr<DynamicsCompressorNode> AudioContext::createDynamicsCompressor()
  419. {
  420. ASSERT(isMainThread());
  421. lazyInitialize();
  422. return DynamicsCompressorNode::create(this, m_destinationNode->sampleRate());
  423. }
  424. PassRefPtr<AnalyserNode> AudioContext::createAnalyser()
  425. {
  426. ASSERT(isMainThread());
  427. lazyInitialize();
  428. return AnalyserNode::create(this, m_destinationNode->sampleRate());
  429. }
  430. PassRefPtr<GainNode> AudioContext::createGain()
  431. {
  432. ASSERT(isMainThread());
  433. lazyInitialize();
  434. return GainNode::create(this, m_destinationNode->sampleRate());
  435. }
  436. PassRefPtr<DelayNode> AudioContext::createDelay(ExceptionCode& ec)
  437. {
  438. const double defaultMaxDelayTime = 1;
  439. return createDelay(defaultMaxDelayTime, ec);
  440. }
  441. PassRefPtr<DelayNode> AudioContext::createDelay(double maxDelayTime, ExceptionCode& ec)
  442. {
  443. ASSERT(isMainThread());
  444. lazyInitialize();
  445. RefPtr<DelayNode> node = DelayNode::create(this, m_destinationNode->sampleRate(), maxDelayTime, ec);
  446. if (ec)
  447. return 0;
  448. return node;
  449. }
  450. PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(ExceptionCode& ec)
  451. {
  452. const unsigned ChannelSplitterDefaultNumberOfOutputs = 6;
  453. return createChannelSplitter(ChannelSplitterDefaultNumberOfOutputs, ec);
  454. }
  455. PassRefPtr<ChannelSplitterNode> AudioContext::createChannelSplitter(size_t numberOfOutputs, ExceptionCode& ec)
  456. {
  457. ASSERT(isMainThread());
  458. lazyInitialize();
  459. RefPtr<ChannelSplitterNode> node = ChannelSplitterNode::create(this, m_destinationNode->sampleRate(), numberOfOutputs);
  460. if (!node.get()) {
  461. ec = SYNTAX_ERR;
  462. return 0;
  463. }
  464. return node;
  465. }
  466. PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(ExceptionCode& ec)
  467. {
  468. const unsigned ChannelMergerDefaultNumberOfInputs = 6;
  469. return createChannelMerger(ChannelMergerDefaultNumberOfInputs, ec);
  470. }
  471. PassRefPtr<ChannelMergerNode> AudioContext::createChannelMerger(size_t numberOfInputs, ExceptionCode& ec)
  472. {
  473. ASSERT(isMainThread());
  474. lazyInitialize();
  475. RefPtr<ChannelMergerNode> node = ChannelMergerNode::create(this, m_destinationNode->sampleRate(), numberOfInputs);
  476. if (!node.get()) {
  477. ec = SYNTAX_ERR;
  478. return 0;
  479. }
  480. return node;
  481. }
  482. PassRefPtr<OscillatorNode> AudioContext::createOscillator()
  483. {
  484. ASSERT(isMainThread());
  485. lazyInitialize();
  486. RefPtr<OscillatorNode> node = OscillatorNode::create(this, m_destinationNode->sampleRate());
  487. // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
  488. // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
  489. refNode(node.get());
  490. return node;
  491. }
  492. PassRefPtr<WaveTable> AudioContext::createWaveTable(Float32Array* real, Float32Array* imag, ExceptionCode& ec)
  493. {
  494. ASSERT(isMainThread());
  495. if (!real || !imag || (real->length() != imag->length())) {
  496. ec = SYNTAX_ERR;
  497. return 0;
  498. }
  499. lazyInitialize();
  500. return WaveTable::create(sampleRate(), real, imag);
  501. }
// Called on the audio thread when a scheduled source node finishes playing;
// queues the node so derefFinishedSourceNodes() can release its reference.
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
  507. void AudioContext::derefFinishedSourceNodes()
  508. {
  509. ASSERT(isGraphOwner());
  510. ASSERT(isAudioThread() || isAudioThreadFinished());
  511. for (unsigned i = 0; i < m_finishedNodes.size(); i++)
  512. derefNode(m_finishedNodes[i]);
  513. m_finishedNodes.clear();
  514. }
// Adds a connection reference to the node and tracks it so the context can
// release it later (see derefNode / derefUnfinishedSourceNodes).
void AudioContext::refNode(AudioNode* node)
{
    ASSERT(isMainThread());
    // Take the graph lock so the audio thread can't observe the list mid-mutation.
    AutoLocker locker(this);

    node->ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(node);
}
  522. void AudioContext::derefNode(AudioNode* node)
  523. {
  524. ASSERT(isGraphOwner());
  525. node->deref(AudioNode::RefTypeConnection);
  526. for (unsigned i = 0; i < m_referencedNodes.size(); ++i) {
  527. if (node == m_referencedNodes[i]) {
  528. m_referencedNodes.remove(i);
  529. break;
  530. }
  531. }
  532. }
// Releases references to all still-referenced nodes during uninitialize().
// Only legal once the audio thread has finished, so no graph lock is needed.
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (unsigned i = 0; i < m_referencedNodes.size(); ++i)
        m_referencedNodes[i]->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
// Blocking acquisition of the graph mutex; reentrant via the owner-thread
// check. mustReleaseLock tells the caller whether a matching unlock() is due.
void AudioContext::lock(bool& mustReleaseLock)
{
    // Don't allow regular lock in real-time audio thread.
    ASSERT(isMainThread());

    ThreadIdentifier thisThread = currentThread();

    if (thisThread == m_graphOwnerThread) {
        // We already have the lock.
        mustReleaseLock = false;
    } else {
        // Acquire the lock.
        m_contextGraphMutex.lock();
        m_graphOwnerThread = thisThread;
        mustReleaseLock = true;
    }
}
// Non-blocking acquisition of the graph mutex for the real-time audio thread
// (which must never block on the main thread). Returns true if the lock is
// held on return; mustReleaseLock reports whether unlock() is required.
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    ThreadIdentifier thisThread = currentThread();
    bool isAudioThread = thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
// Releases the graph mutex. The owner marker must be cleared BEFORE the
// mutex is released so no other thread sees a stale owner.
void AudioContext::unlock()
{
    ASSERT(currentThread() == m_graphOwnerThread);

    m_graphOwnerThread = UndefinedThreadIdentifier;
    m_contextGraphMutex.unlock();
}
// True when called from the real-time audio rendering thread.
bool AudioContext::isAudioThread() const
{
    return currentThread() == m_audioThread;
}
// True when the calling thread currently holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return currentThread() == m_graphOwnerThread;
}
// Queues a node whose finishDeref() must be retried later (processed by
// handleDeferredFinishDerefs() once the graph lock is available).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
// Audio-thread housekeeping run before each render quantum.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
// Audio-thread housekeeping run after each render quantum: deferred derefs,
// releasing finished sources, and scheduling node deletion on the main thread.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
  637. void AudioContext::handleDeferredFinishDerefs()
  638. {
  639. ASSERT(isAudioThread() && isGraphOwner());
  640. for (unsigned i = 0; i < m_deferredFinishDerefList.size(); ++i) {
  641. AudioNode* node = m_deferredFinishDerefList[i];
  642. node->finishDeref(AudioNode::RefTypeConnection);
  643. }
  644. m_deferredFinishDerefList.clear();
  645. }
// Queues a node for deletion. While the audio thread is alive the node goes
// to m_nodesMarkedForDeletion (drained by scheduleNodeDeletion()); once the
// audio thread has finished it goes straight to m_nodesToDelete.
void AudioContext::markForDeletion(AudioNode* node)
{
    ASSERT(isGraphOwner());
    if (isAudioThreadFinished())
        m_nodesToDelete.append(node);
    else
        m_nodesMarkedForDeletion.append(node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
// Moves marked nodes to the deletion queue and schedules deleteMarkedNodes()
// on the main thread (at most one dispatch in flight, via m_isDeletionScheduled).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        // Don't let ourself get deleted before the callback.
        // See matching deref() in deleteMarkedNodesDispatch().
        ref();
        callOnMainThread(deleteMarkedNodesDispatch, this);
    }
}
  676. void AudioContext::deleteMarkedNodesDispatch(void* userData)
  677. {
  678. AudioContext* context = reinterpret_cast<AudioContext*>(userData);
  679. ASSERT(context);
  680. if (!context)
  681. return;
  682. context->deleteMarkedNodes();
  683. context->deref();
  684. }
// Deletes all queued nodes on the main thread, first scrubbing their inputs
// and outputs from the dirty sets so the audio thread never touches freed memory.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    RefPtr<AudioContext> protect(this);
    {
        AutoLocker locker(this);

        // Drain from the back so removeLast() is O(1).
        while (size_t n = m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete[n - 1];
            m_nodesToDelete.removeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        // Allow scheduleNodeDeletion() to dispatch another round.
        m_isDeletionScheduled = false;
    }
}
// Queues a summing junction so handleDirtyAudioSummingJunctions() will refresh
// its rendering state. Caller must hold the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
// Removes a junction from the dirty set (main thread only), taking the graph
// lock so handleDirtyAudioSummingJunctions() can't touch it concurrently.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(this);
    m_dirtySummingJunctions.remove(summingJunction);
}
// Queues an output so handleDirtyAudioNodeOutputs() will refresh its rendering
// state. Caller must hold the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
  725. void AudioContext::handleDirtyAudioSummingJunctions()
  726. {
  727. ASSERT(isGraphOwner());
  728. for (HashSet<AudioSummingJunction*>::iterator i = m_dirtySummingJunctions.begin(); i != m_dirtySummingJunctions.end(); ++i)
  729. (*i)->updateRenderingState();
  730. m_dirtySummingJunctions.clear();
  731. }
  732. void AudioContext::handleDirtyAudioNodeOutputs()
  733. {
  734. ASSERT(isGraphOwner());
  735. for (HashSet<AudioNodeOutput*>::iterator i = m_dirtyAudioNodeOutputs.begin(); i != m_dirtyAudioNodeOutputs.end(); ++i)
  736. (*i)->updateRenderingState();
  737. m_dirtyAudioNodeOutputs.clear();
  738. }
  739. void AudioContext::addAutomaticPullNode(AudioNode* node)
  740. {
  741. ASSERT(isGraphOwner());
  742. if (!m_automaticPullNodes.contains(node)) {
  743. m_automaticPullNodes.add(node);
  744. m_automaticPullNodesNeedUpdating = true;
  745. }
  746. }
  747. void AudioContext::removeAutomaticPullNode(AudioNode* node)
  748. {
  749. ASSERT(isGraphOwner());
  750. if (m_automaticPullNodes.contains(node)) {
  751. m_automaticPullNodes.remove(node);
  752. m_automaticPullNodesNeedUpdating = true;
  753. }
  754. }
  755. void AudioContext::updateAutomaticPullNodes()
  756. {
  757. ASSERT(isGraphOwner());
  758. if (m_automaticPullNodesNeedUpdating) {
  759. // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
  760. m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
  761. unsigned j = 0;
  762. for (HashSet<AudioNode*>::iterator i = m_automaticPullNodes.begin(); i != m_automaticPullNodes.end(); ++i, ++j) {
  763. AudioNode* output = *i;
  764. m_renderingAutomaticPullNodes[j] = output;
  765. }
  766. m_automaticPullNodesNeedUpdating = false;
  767. }
  768. }
  769. void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
  770. {
  771. ASSERT(isAudioThread());
  772. for (unsigned i = 0; i < m_renderingAutomaticPullNodes.size(); ++i)
  773. m_renderingAutomaticPullNodes[i]->processIfNecessary(framesToProcess);
  774. }
// EventTarget override: the interface name used for event-dispatch bookkeeping.
const AtomicString& AudioContext::interfaceName() const
{
    return eventNames().interfaceForAudioContext;
}
  779. ScriptExecutionContext* AudioContext::scriptExecutionContext() const
  780. {
  781. return m_isStopScheduled ? 0 : ActiveDOMObject::scriptExecutionContext();
  782. }
// Kicks off rendering on the destination node, first clearing any behavior
// restrictions that the current situation allows.
void AudioContext::startRendering()
{
    // A user gesture lifts the user-gesture restriction immediately.
    if (ScriptController::processingUserGesture())
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);

    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia())
            document()->addMediaCanStartListener(this); // wait for mediaCanStart()
        else
            removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }
    destination()->startRendering();
}
// MediaCanStartListener callback: the page now permits media, so lift the
// page-consent restriction registered in startRendering().
void AudioContext::mediaCanStart()
{
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
}
  800. void AudioContext::fireCompletionEvent()
  801. {
  802. ASSERT(isMainThread());
  803. if (!isMainThread())
  804. return;
  805. AudioBuffer* renderedBuffer = m_renderTarget.get();
  806. ASSERT(renderedBuffer);
  807. if (!renderedBuffer)
  808. return;
  809. // Avoid firing the event if the document has already gone away.
  810. if (scriptExecutionContext()) {
  811. // Call the offline rendering completion event listener.
  812. dispatchEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
  813. }
  814. }
// Atomically bumps m_activeSourceCount; safe to call without the graph lock.
void AudioContext::incrementActiveSourceCount()
{
    atomicIncrement(&m_activeSourceCount);
}
// Atomically drops m_activeSourceCount; pairs with incrementActiveSourceCount().
void AudioContext::decrementActiveSourceCount()
{
    atomicDecrement(&m_activeSourceCount);
}
  823. } // namespace WebCore
  824. #endif // ENABLE(WEB_AUDIO)