diff --git a/dom/base/BodyConsumer.cpp b/dom/base/BodyConsumer.cpp index fbee9fac3d..e60f2dfbd9 100644 --- a/dom/base/BodyConsumer.cpp +++ b/dom/base/BodyConsumer.cpp @@ -239,8 +239,8 @@ class ConsumeBodyDoneObserver final : public nsIStreamLoaderObserver, return NS_OK; } - virtual void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, Blob* aBlob, - nsresult aRv) override { + virtual void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, + BlobImpl* aBlobImpl, nsresult aRv) override { // On error. if (NS_FAILED(aRv)) { OnStreamComplete(nullptr, nullptr, aRv, 0, nullptr); @@ -251,7 +251,7 @@ class ConsumeBodyDoneObserver final : public nsIStreamLoaderObserver, // consuming of the body. mBodyConsumer->NullifyConsumeBodyPump(); - mBodyConsumer->OnBlobResult(aBlob, mWorkerRef); + mBodyConsumer->OnBlobResult(aBlobImpl, mWorkerRef); } private: @@ -405,7 +405,7 @@ class FileCreationHandler final : public PromiseNativeHandler { return; } - mConsumer->OnBlobResult(blob, mWorkerRef); + mConsumer->OnBlobResult(blob->Impl(), mWorkerRef); } void RejectedCallback(JSContext* aCx, JS::Handle aValue) override { @@ -531,8 +531,8 @@ void BodyConsumer::BeginConsumeBodyMainThread(ThreadSafeWorkerRef* aWorkerRef) { nsCOMPtr listener; if (mConsumeType == CONSUME_BLOB) { - listener = new MutableBlobStreamListener( - mBlobStorageType, nullptr, mBodyMimeType, p, mMainThreadEventTarget); + listener = new MutableBlobStreamListener(mBlobStorageType, mBodyMimeType, p, + mMainThreadEventTarget); } else { nsCOMPtr loader; rv = NS_NewStreamLoader(getter_AddRefs(loader), p); @@ -573,10 +573,11 @@ void BodyConsumer::BeginConsumeBodyMainThread(ThreadSafeWorkerRef* aWorkerRef) { * been wrapped by FileCreationHandler). The blob is sent to the target thread * and ContinueConsumeBody is called. 
*/ -void BodyConsumer::OnBlobResult(Blob* aBlob, ThreadSafeWorkerRef* aWorkerRef) { +void BodyConsumer::OnBlobResult(BlobImpl* aBlobImpl, + ThreadSafeWorkerRef* aWorkerRef) { AssertIsOnMainThread(); - DispatchContinueConsumeBlobBody(aBlob ? aBlob->Impl() : nullptr, aWorkerRef); + DispatchContinueConsumeBlobBody(aBlobImpl, aWorkerRef); } void BodyConsumer::DispatchContinueConsumeBlobBody( @@ -753,7 +754,10 @@ void BodyConsumer::ContinueConsumeBlobBody(BlobImpl* aBlobImpl, if (!aShuttingDown) { RefPtr blob = dom::Blob::Create(mGlobal, aBlobImpl); - MOZ_ASSERT(blob); + if (NS_WARN_IF(!blob)) { + localPromise->MaybeReject(NS_ERROR_FAILURE); + return; + } localPromise->MaybeResolve(blob); } diff --git a/dom/base/BodyConsumer.h b/dom/base/BodyConsumer.h index a08d48395e..075581298c 100644 --- a/dom/base/BodyConsumer.h +++ b/dom/base/BodyConsumer.h @@ -68,7 +68,8 @@ class BodyConsumer final : public nsIObserver, void BeginConsumeBodyMainThread(ThreadSafeWorkerRef* aWorkerRef); - void OnBlobResult(Blob* aBlob, ThreadSafeWorkerRef* aWorkerRef = nullptr); + void OnBlobResult(BlobImpl* aBlobImpl, + ThreadSafeWorkerRef* aWorkerRef = nullptr); void ContinueConsumeBody(nsresult aStatus, uint32_t aLength, uint8_t* aResult, bool aShuttingDown = false); diff --git a/dom/base/BodyUtil.cpp b/dom/base/BodyUtil.cpp index eb7eb03af1..0070286e51 100644 --- a/dom/base/BodyUtil.cpp +++ b/dom/base/BodyUtil.cpp @@ -254,6 +254,10 @@ class MOZ_STACK_CLASS FormDataParser { mParentObject, reinterpret_cast(copy), body.Length(), NS_ConvertUTF8toUTF16(mFilename), NS_ConvertUTF8toUTF16(mContentType), /* aLastModifiedDate */ 0); + if (NS_WARN_IF(!file)) { + return false; + } + Optional dummy; ErrorResult rv; mFormData->Append(name, *file, dummy, rv); @@ -396,7 +400,7 @@ void BodyUtil::ConsumeArrayBuffer(JSContext* aCx, } // static -already_AddRefed BodyUtil::ConsumeBlob(nsISupports* aParent, +already_AddRefed BodyUtil::ConsumeBlob(nsIGlobalObject* aParent, const nsString& aMimeType, uint32_t 
aInputLength, uint8_t* aInput, diff --git a/dom/base/BodyUtil.h b/dom/base/BodyUtil.h index e2b8a4dbd4..a87d8c1ca8 100644 --- a/dom/base/BodyUtil.h +++ b/dom/base/BodyUtil.h @@ -34,7 +34,7 @@ class BodyUtil final { * Creates an in-memory blob from an array. The blob takes ownership of * |aInput|, which must be allocated by |malloc|. */ - static already_AddRefed ConsumeBlob(nsISupports* aParent, + static already_AddRefed ConsumeBlob(nsIGlobalObject* aParent, const nsString& aMimeType, uint32_t aInputLength, uint8_t* aInput, ErrorResult& aRv); diff --git a/dom/base/ImageEncoder.cpp b/dom/base/ImageEncoder.cpp index 930b01d78d..94704e883d 100644 --- a/dom/base/ImageEncoder.cpp +++ b/dom/base/ImageEncoder.cpp @@ -4,6 +4,7 @@ #include "ImageEncoder.h" #include "mozilla/dom/CanvasRenderingContext2D.h" +#include "mozilla/dom/MemoryBlobImpl.h" #include "mozilla/dom/WorkerPrivate.h" #include "mozilla/gfx/2D.h" #include "mozilla/gfx/DataSurfaceHelpers.h" @@ -98,14 +99,10 @@ class EncodingCompleteEvent : public CancelableRunnable { // We want to null out mEncodeCompleteCallback no matter what. RefPtr callback(mEncodeCompleteCallback.forget()); if (!mFailed) { - // The correct parentObject has to be set by the mEncodeCompleteCallback. 
- RefPtr blob = - Blob::CreateMemoryBlob(nullptr, mImgData, mImgSize, mType); - MOZ_ASSERT(blob); - - rv = callback->ReceiveBlob(blob.forget()); + RefPtr blobImpl = new MemoryBlobImpl(mImgData, mImgSize, mType); + rv = callback->ReceiveBlobImpl(blobImpl.forget()); } else { - rv = callback->ReceiveBlob(nullptr); + rv = callback->ReceiveBlobImpl(nullptr); } return rv; @@ -247,7 +244,7 @@ nsresult ImageEncoder::ExtractDataFromLayersImageAsync( new EncodingRunnable(aType, aOptions, nullptr, aImage, encoder, completeEvent, imgIEncoder::INPUT_FORMAT_HOSTARGB, size, aUsePlaceholder, aUsingCustomOptions); - return NS_DispatchToBackgroundThread(event.forget()); + return NS_DispatchBackgroundTask(event.forget()); } /* static */ @@ -266,7 +263,7 @@ nsresult ImageEncoder::ExtractDataAsync( nsCOMPtr event = new EncodingRunnable( aType, aOptions, std::move(aImageBuffer), nullptr, encoder, completeEvent, aFormat, aSize, aUsePlaceholder, aUsingCustomOptions); - return NS_DispatchToBackgroundThread(event.forget()); + return NS_DispatchBackgroundTask(event.forget()); } /*static*/ diff --git a/dom/base/ImageEncoder.h b/dom/base/ImageEncoder.h index 8c81b82285..0c32eeb7f0 100644 --- a/dom/base/ImageEncoder.h +++ b/dom/base/ImageEncoder.h @@ -100,15 +100,15 @@ class ImageEncoder { /** * The callback interface of ExtractDataAsync and - * ExtractDataFromLayersImageAsync. ReceiveBlob() is called on main thread when - * encoding is complete. + * ExtractDataFromLayersImageAsync. ReceiveBlobImpl() is called on main thread + * when encoding is complete. 
*/ class EncodeCompleteCallback { public: NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncodeCompleteCallback) MOZ_CAN_RUN_SCRIPT - virtual nsresult ReceiveBlob(already_AddRefed aBlob) = 0; + virtual nsresult ReceiveBlobImpl(already_AddRefed aBlobImpl) = 0; protected: virtual ~EncodeCompleteCallback() {} diff --git a/dom/base/PostMessageEvent.cpp b/dom/base/PostMessageEvent.cpp index cc54878ca7..1601856457 100644 --- a/dom/base/PostMessageEvent.cpp +++ b/dom/base/PostMessageEvent.cpp @@ -163,7 +163,7 @@ PostMessageEvent::Run() { StructuredCloneHolder* holder; if (mHolder.constructed()) { - mHolder.ref().Read(ToSupports(targetWindow), cx, + mHolder.ref().Read(targetWindow->AsGlobal(), cx, &messageData, rv); holder = &mHolder.ref(); } else { diff --git a/dom/base/StructuredCloneHolder.cpp b/dom/base/StructuredCloneHolder.cpp index 4a2ef13cf7..0d5cb95af6 100644 --- a/dom/base/StructuredCloneHolder.cpp +++ b/dom/base/StructuredCloneHolder.cpp @@ -230,7 +230,7 @@ StructuredCloneHolder::StructuredCloneHolder( : StructuredCloneHolderBase(aScope), mSupportsCloning(aSupportsCloning == CloningSupported), mSupportsTransferring(aSupportsTransferring == TransferringSupported), - mParent(nullptr) + mGlobal(nullptr) #ifdef DEBUG , mCreationEventTarget(GetCurrentThreadEventTarget()) @@ -260,13 +260,13 @@ void StructuredCloneHolder::Write(JSContext* aCx, JS::Handle aValue, } } -void StructuredCloneHolder::Read(nsISupports* aParent, JSContext* aCx, +void StructuredCloneHolder::Read(nsIGlobalObject* aGlobal, JSContext* aCx, JS::MutableHandle aValue, ErrorResult& aRv) { - MOZ_ASSERT(aParent); + MOZ_ASSERT(aGlobal); - mozilla::AutoRestore guard(mParent); - mParent = aParent; + mozilla::AutoRestore guard(mGlobal); + mGlobal = aGlobal; if (!StructuredCloneHolderBase::Read(aCx, aValue)) { JS_ClearPendingException(aCx); @@ -284,23 +284,25 @@ void StructuredCloneHolder::Read(nsISupports* aParent, JSContext* aCx, } } -void StructuredCloneHolder::ReadFromBuffer(nsISupports* aParent, JSContext* aCx, 
+void StructuredCloneHolder::ReadFromBuffer(nsIGlobalObject* aGlobal, + JSContext* aCx, JSStructuredCloneData& aBuffer, JS::MutableHandle aValue, ErrorResult& aRv) { - ReadFromBuffer(aParent, aCx, aBuffer, JS_STRUCTURED_CLONE_VERSION, aValue, + ReadFromBuffer(aGlobal, aCx, aBuffer, JS_STRUCTURED_CLONE_VERSION, aValue, aRv); } -void StructuredCloneHolder::ReadFromBuffer(nsISupports* aParent, JSContext* aCx, +void StructuredCloneHolder::ReadFromBuffer(nsIGlobalObject* aGlobal, + JSContext* aCx, JSStructuredCloneData& aBuffer, uint32_t aAlgorithmVersion, JS::MutableHandle aValue, ErrorResult& aRv) { MOZ_ASSERT(!mBuffer, "ReadFromBuffer() must be called without a Write()."); - mozilla::AutoRestore guard(mParent); - mParent = aParent; + mozilla::AutoRestore guard(mGlobal); + mGlobal = aGlobal; if (!JS_ReadStructuredClone(aCx, aBuffer, aAlgorithmVersion, mStructuredCloneScope, aValue, &sCallbacks, @@ -340,10 +342,10 @@ JSObject* StructuredCloneHolder::ReadFullySerializableObjects( // the casting between JSPrincipals* and nsIPrincipal* we can't use // getter_AddRefs above and have to already_AddRefed here. nsCOMPtr principal = - already_AddRefed(nsJSPrincipals::get(prin)); + already_AddRefed(nsJSPrincipals::get(prin)); nsresult rv = nsContentUtils::WrapNative( - aCx, principal, &NS_GET_IID(nsIPrincipal), &result); + aCx, principal, &NS_GET_IID(nsIPrincipal), &result); if (NS_FAILED(rv)) { xpc::Throw(aCx, NS_ERROR_DOM_DATA_CLONE_ERR); return nullptr; @@ -434,7 +436,11 @@ JSObject* ReadBlob(JSContext* aCx, uint32_t aIndex, // pointer while destructors are running. 
RefPtr blobImpl = aHolder->BlobImpls()[aIndex]; - RefPtr blob = Blob::Create(aHolder->ParentDuringRead(), blobImpl); + RefPtr blob = Blob::Create(aHolder->GlobalDuringRead(), blobImpl); + if (NS_WARN_IF(!blob)) { + return nullptr; + } + if (!ToJSValue(aCx, blob, &val)) { return nullptr; } @@ -499,7 +505,7 @@ already_AddRefed ReadDirectoryInternal( } RefPtr directory = - Directory::Create(aHolder->ParentDuringRead(), file); + Directory::Create(aHolder->GlobalDuringRead(), file); return directory.forget(); } @@ -538,7 +544,7 @@ JSObject* ReadFileList(JSContext* aCx, JSStructuredCloneReader* aReader, JS::Rooted val(aCx); { - RefPtr fileList = new FileList(aHolder->ParentDuringRead()); + RefPtr fileList = new FileList(aHolder->GlobalDuringRead()); uint32_t zero, index; // |index| is the index of the first blobImpl. @@ -561,7 +567,11 @@ JSObject* ReadFileList(JSContext* aCx, JSStructuredCloneReader* aReader, RefPtr blobImpl = aHolder->BlobImpls()[pos]; MOZ_ASSERT(blobImpl->IsFile()); - RefPtr file = File::Create(aHolder->ParentDuringRead(), blobImpl); + RefPtr file = File::Create(aHolder->GlobalDuringRead(), blobImpl); + if (NS_WARN_IF(!file)) { + return nullptr; + } + if (!fileList->Append(file)) { return nullptr; } @@ -613,7 +623,7 @@ JSObject* ReadFormData(JSContext* aCx, JSStructuredCloneReader* aReader, // See the serialization of the FormData for the format. 
JS::Rooted val(aCx); { - RefPtr formData = new FormData(aHolder->ParentDuringRead()); + RefPtr formData = new FormData(aHolder->GlobalDuringRead()); Optional thirdArg; for (uint32_t i = 0; i < aCount; ++i) { @@ -637,8 +647,10 @@ JSObject* ReadFormData(JSContext* aCx, JSStructuredCloneReader* aReader, RefPtr blobImpl = aHolder->BlobImpls()[indexOrLengthOfString]; - RefPtr blob = Blob::Create(aHolder->ParentDuringRead(), blobImpl); - MOZ_ASSERT(blob); + RefPtr blob = Blob::Create(aHolder->GlobalDuringRead(), blobImpl); + if (NS_WARN_IF(!blob)) { + return nullptr; + } ErrorResult rv; formData->Append(name, *blob, thirdArg, rv); @@ -807,7 +819,7 @@ JSObject* ReadInputStream(JSContext* aCx, uint32_t aIndex, nsCOMPtr inputStream = aHolder->InputStreams()[aIndex]; nsresult rv = nsContentUtils::WrapNative( - aCx, inputStream, &NS_GET_IID(nsIInputStream), &result); + aCx, inputStream, &NS_GET_IID(nsIInputStream), &result); if (NS_FAILED(rv)) { return nullptr; } @@ -863,9 +875,8 @@ JSObject* StructuredCloneHolder::CustomReadHandler( // This can be null. JS::RootedObject result(aCx); { - nsCOMPtr parent = do_QueryInterface(mParent); // aIndex is the index of the cloned image. 
- result = ImageBitmap::ReadStructuredClone(aCx, aReader, parent, + result = ImageBitmap::ReadStructuredClone(aCx, aReader, mGlobal, GetSurfaces(), aIndex); } return result; @@ -982,12 +993,10 @@ bool StructuredCloneHolder::CustomReadTransferHandler( } #endif MOZ_ASSERT(aExtraData < mPortIdentifiers.Length()); - const MessagePortIdentifier& portIdentifier = mPortIdentifiers[aExtraData]; - - nsCOMPtr global = do_QueryInterface(mParent); + UniqueMessagePortId portIdentifier(mPortIdentifiers[aExtraData]); ErrorResult rv; - RefPtr port = MessagePort::Create(global, portIdentifier, rv); + RefPtr port = MessagePort::Create(mGlobal, portIdentifier, rv); if (NS_WARN_IF(rv.Failed())) { rv.SuppressException(); return false; @@ -1011,9 +1020,8 @@ bool StructuredCloneHolder::CustomReadTransferHandler( MOZ_ASSERT(aContent); OffscreenCanvasCloneData* data = static_cast(aContent); - nsCOMPtr parent = do_QueryInterface(mParent); RefPtr canvas = - OffscreenCanvas::CreateFromCloneData(parent, data); + OffscreenCanvas::CreateFromCloneData(mGlobal, data); delete data; JS::Rooted value(aCx); @@ -1031,8 +1039,8 @@ bool StructuredCloneHolder::CustomReadTransferHandler( StructuredCloneScope::SameProcessDifferentThread) { MOZ_ASSERT(aContent); ImageBitmapCloneData* data = static_cast(aContent); - nsCOMPtr parent = do_QueryInterface(mParent); - RefPtr bitmap = ImageBitmap::CreateFromCloneData(parent, data); + RefPtr bitmap = + ImageBitmap::CreateFromCloneData(mGlobal, data); delete data; JS::Rooted value(aCx); @@ -1062,15 +1070,16 @@ bool StructuredCloneHolder::CustomWriteTransferHandler( MessagePort* port = nullptr; nsresult rv = UNWRAP_OBJECT(MessagePort, &obj, port); if (NS_SUCCEEDED(rv)) { - // We use aExtraData to store the index of this new port identifier. 
- *aExtraData = mPortIdentifiers.Length(); - MessagePortIdentifier* identifier = mPortIdentifiers.AppendElement(); - if (!port->CanBeCloned()) { return false; } - port->CloneAndDisentangle(*identifier); + UniqueMessagePortId identifier; + port->CloneAndDisentangle(identifier); + + // We use aExtraData to store the index of this new port identifier. + *aExtraData = mPortIdentifiers.Length(); + mPortIdentifiers.AppendElement(identifier.release()); *aTag = SCTAG_DOM_MAP_MESSAGEPORT; *aOwnership = JS::SCTAG_TMO_CUSTOM; diff --git a/dom/base/StructuredCloneHolder.h b/dom/base/StructuredCloneHolder.h index 271a6a8a2a..329a0dc656 100644 --- a/dom/base/StructuredCloneHolder.h +++ b/dom/base/StructuredCloneHolder.h @@ -12,13 +12,13 @@ #include "mozilla/Move.h" #include "mozilla/UniquePtr.h" #include "mozilla/dom/BindingDeclarations.h" -#include "nsISupports.h" #include "nsTArray.h" #ifdef DEBUG # include "nsIThread.h" #endif +class nsIGlobalObject; class nsIInputStream; namespace mozilla { @@ -162,7 +162,7 @@ class StructuredCloneHolder : public StructuredCloneHolderBase { JS::Handle aTransfer, JS::CloneDataPolicy cloneDataPolicy, ErrorResult& aRv); - void Read(nsISupports* aParent, JSContext* aCx, + void Read(nsIGlobalObject* aGlobal, JSContext* aCx, JS::MutableHandle aValue, ErrorResult& aRv); // Call this method to know if this object is keeping some DOM object alive. @@ -191,9 +191,9 @@ class StructuredCloneHolder : public StructuredCloneHolderBase { StructuredCloneScope CloneScope() const { return mStructuredCloneScope; } - // The parent object is set internally just during the Read(). This method + // The global object is set internally just during the Read(). This method // can be used by read functions to retrieve it. - nsISupports* ParentDuringRead() const { return mParent; } + nsIGlobalObject* GlobalDuringRead() const { return mGlobal; } // This must be called if the transferring has ports generated by Read(). 
// MessagePorts are not thread-safe and they must be retrieved in the thread @@ -270,11 +270,11 @@ class StructuredCloneHolder : public StructuredCloneHolderBase { // If you receive a buffer from IPC, you can use this method to retrieve a // JS::Value. It can happen that you want to pre-populate the array of Blobs // and/or the PortIdentifiers. - void ReadFromBuffer(nsISupports* aParent, JSContext* aCx, + void ReadFromBuffer(nsIGlobalObject* aGlobal, JSContext* aCx, JSStructuredCloneData& aBuffer, JS::MutableHandle aValue, ErrorResult& aRv); - void ReadFromBuffer(nsISupports* aParent, JSContext* aCx, + void ReadFromBuffer(nsIGlobalObject* aGlobal, JSContext* aCx, JSStructuredCloneData& aBuffer, uint32_t aAlgorithmVersion, JS::MutableHandle aValue, ErrorResult& aRv); @@ -304,7 +304,7 @@ class StructuredCloneHolder : public StructuredCloneHolderBase { nsTArray> mClonedSurfaces; // This raw pointer is only set within ::Read() and is unset by the end. - nsISupports* MOZ_NON_OWNING_REF mParent; + nsIGlobalObject* MOZ_NON_OWNING_REF mGlobal; // This array contains the ports once we've finished the reading. It's // generated from the mPortIdentifiers array. 
diff --git a/dom/base/nsDOMDataChannel.cpp b/dom/base/nsDOMDataChannel.cpp index 0cb37c2e7e..0370d69d44 100644 --- a/dom/base/nsDOMDataChannel.cpp +++ b/dom/base/nsDOMDataChannel.cpp @@ -286,8 +286,10 @@ nsresult nsDOMDataChannel::DoOnMessageAvailable(const nsACString& aData, if (aBinary) { if (mBinaryType == DC_BINARY_TYPE_BLOB) { RefPtr blob = - Blob::CreateStringBlob(GetOwner(), aData, EmptyString()); - MOZ_ASSERT(blob); + Blob::CreateStringBlob(GetOwnerGlobal(), aData, EmptyString()); + if (NS_WARN_IF(!blob)) { + return NS_ERROR_FAILURE; + } if (!ToJSValue(cx, blob, &jsData)) { return NS_ERROR_FAILURE; diff --git a/dom/bindings/test/moz.build b/dom/bindings/test/moz.build index bcb054cdde..509c6affb8 100644 --- a/dom/bindings/test/moz.build +++ b/dom/bindings/test/moz.build @@ -53,6 +53,8 @@ LOCAL_INCLUDES += [ '/js/xpconnect/wrappers', ] +include('/ipc/chromium/chromium-config.mozbuild') + if CONFIG['CC_TYPE'] in ('clang', 'gcc'): CXXFLAGS += ['-Wno-error=shadow'] diff --git a/dom/canvas/CanvasRenderingContextHelper.cpp b/dom/canvas/CanvasRenderingContextHelper.cpp index 307b0bafd2..3dd0e1cc6a 100644 --- a/dom/canvas/CanvasRenderingContextHelper.cpp +++ b/dom/canvas/CanvasRenderingContextHelper.cpp @@ -31,19 +31,19 @@ void CanvasRenderingContextHelper::ToBlob( // This is called on main thread. 
MOZ_CAN_RUN_SCRIPT - nsresult ReceiveBlob(already_AddRefed aBlob) override { - RefPtr blob = aBlob; + nsresult ReceiveBlobImpl(already_AddRefed aBlobImpl) override { + RefPtr blobImpl = aBlobImpl; - RefPtr newBlob; + RefPtr blob; - if (blob) { - newBlob = Blob::Create(mGlobal, blob->Impl()); + if (blobImpl) { + blob = Blob::Create(mGlobal, blobImpl); } RefPtr callback(mBlobCallback.forget()); ErrorResult rv; - callback->Call(newBlob, rv); + callback->Call(blob, rv); mGlobal = nullptr; MOZ_ASSERT(!mBlobCallback); diff --git a/dom/canvas/OffscreenCanvas.cpp b/dom/canvas/OffscreenCanvas.cpp index d88cc6dbbb..53246a92d4 100644 --- a/dom/canvas/OffscreenCanvas.cpp +++ b/dom/canvas/OffscreenCanvas.cpp @@ -227,12 +227,16 @@ already_AddRefed OffscreenCanvas::ToBlob(JSContext* aCx, : mGlobal(aGlobal), mPromise(aPromise) {} // This is called on main thread. - nsresult ReceiveBlob(already_AddRefed aBlob) override { - RefPtr blob = aBlob; + nsresult ReceiveBlobImpl(already_AddRefed aBlobImpl) override { + RefPtr blobImpl = aBlobImpl; if (mPromise) { - RefPtr newBlob = Blob::Create(mGlobal, blob->Impl()); - mPromise->MaybeResolve(newBlob); + RefPtr blob = Blob::Create(mGlobal, blobImpl); + if (NS_WARN_IF(!blob)) { + mPromise->MaybeReject(NS_ERROR_FAILURE); + } else { + mPromise->MaybeResolve(blob); + } } mGlobal = nullptr; diff --git a/dom/canvas/WebGLContext.cpp b/dom/canvas/WebGLContext.cpp index d2f5c901a8..e7138ebab1 100644 --- a/dom/canvas/WebGLContext.cpp +++ b/dom/canvas/WebGLContext.cpp @@ -1927,59 +1927,62 @@ uint64_t IndexedBufferBinding::ByteCount() const { //////////////////////////////////////// ScopedUnpackReset::ScopedUnpackReset(const WebGLContext* const webgl) - : ScopedGLWrapper(webgl->gl), mWebGL(webgl) { + : mWebGL(webgl) { + const auto& gl = mWebGL->gl; // clang-format off - if (mWebGL->mPixelStore_UnpackAlignment != 4) mGL->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 4); + if (mWebGL->mPixelStore_UnpackAlignment != 4) 
gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 4); - if (mWebGL->IsWebGL2()) { - if (mWebGL->mPixelStore_UnpackRowLength != 0) mGL->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH , 0); - if (mWebGL->mPixelStore_UnpackImageHeight != 0) mGL->fPixelStorei(LOCAL_GL_UNPACK_IMAGE_HEIGHT, 0); - if (mWebGL->mPixelStore_UnpackSkipPixels != 0) mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_PIXELS , 0); - if (mWebGL->mPixelStore_UnpackSkipRows != 0) mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_ROWS , 0); - if (mWebGL->mPixelStore_UnpackSkipImages != 0) mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_IMAGES , 0); + if (mWebGL->IsWebGL2()) { + if (mWebGL->mPixelStore_UnpackRowLength != 0) gl->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH , 0); + if (mWebGL->mPixelStore_UnpackImageHeight != 0) gl->fPixelStorei(LOCAL_GL_UNPACK_IMAGE_HEIGHT, 0); + if (mWebGL->mPixelStore_UnpackSkipPixels != 0) gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_PIXELS , 0); + if (mWebGL->mPixelStore_UnpackSkipRows != 0) gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_ROWS , 0); + if (mWebGL->mPixelStore_UnpackSkipImages != 0) gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_IMAGES , 0); - if (mWebGL->mBoundPixelUnpackBuffer) mGL->fBindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, 0); - } + if (mWebGL->mBoundPixelUnpackBuffer) gl->fBindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, 0); + } // clang-format on } -void ScopedUnpackReset::UnwrapImpl() { +ScopedUnpackReset::~ScopedUnpackReset() { + const auto& gl = mWebGL->gl; // clang-format off - mGL->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, mWebGL->mPixelStore_UnpackAlignment); + gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, mWebGL->mPixelStore_UnpackAlignment); - if (mWebGL->IsWebGL2()) { - mGL->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH , mWebGL->mPixelStore_UnpackRowLength ); - mGL->fPixelStorei(LOCAL_GL_UNPACK_IMAGE_HEIGHT, mWebGL->mPixelStore_UnpackImageHeight); - mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_PIXELS , mWebGL->mPixelStore_UnpackSkipPixels ); - mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_ROWS , mWebGL->mPixelStore_UnpackSkipRows ); - 
mGL->fPixelStorei(LOCAL_GL_UNPACK_SKIP_IMAGES , mWebGL->mPixelStore_UnpackSkipImages ); + if (mWebGL->IsWebGL2()) { + gl->fPixelStorei(LOCAL_GL_UNPACK_ROW_LENGTH , mWebGL->mPixelStore_UnpackRowLength ); + gl->fPixelStorei(LOCAL_GL_UNPACK_IMAGE_HEIGHT, mWebGL->mPixelStore_UnpackImageHeight); + gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_PIXELS , mWebGL->mPixelStore_UnpackSkipPixels ); + gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_ROWS , mWebGL->mPixelStore_UnpackSkipRows ); + gl->fPixelStorei(LOCAL_GL_UNPACK_SKIP_IMAGES , mWebGL->mPixelStore_UnpackSkipImages ); - GLuint pbo = 0; - if (mWebGL->mBoundPixelUnpackBuffer) { - pbo = mWebGL->mBoundPixelUnpackBuffer->mGLName; - } - - mGL->fBindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, pbo); + GLuint pbo = 0; + if (mWebGL->mBoundPixelUnpackBuffer) { + pbo = mWebGL->mBoundPixelUnpackBuffer->mGLName; } + + gl->fBindBuffer(LOCAL_GL_PIXEL_UNPACK_BUFFER, pbo); + } // clang-format on } //////////////////// -void ScopedFBRebinder::UnwrapImpl() { +ScopedFBRebinder::~ScopedFBRebinder() { const auto fnName = [&](WebGLFramebuffer* fb) { return fb ? 
fb->mGLName : 0; }; + const auto& gl = mWebGL->gl; if (mWebGL->IsWebGL2()) { - mGL->fBindFramebuffer(LOCAL_GL_DRAW_FRAMEBUFFER, - fnName(mWebGL->mBoundDrawFramebuffer)); - mGL->fBindFramebuffer(LOCAL_GL_READ_FRAMEBUFFER, - fnName(mWebGL->mBoundReadFramebuffer)); + gl->fBindFramebuffer(LOCAL_GL_DRAW_FRAMEBUFFER, + fnName(mWebGL->mBoundDrawFramebuffer)); + gl->fBindFramebuffer(LOCAL_GL_READ_FRAMEBUFFER, + fnName(mWebGL->mBoundReadFramebuffer)); } else { MOZ_ASSERT(mWebGL->mBoundDrawFramebuffer == mWebGL->mBoundReadFramebuffer); - mGL->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, - fnName(mWebGL->mBoundDrawFramebuffer)); + gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, + fnName(mWebGL->mBoundDrawFramebuffer)); } } @@ -1996,17 +1999,15 @@ static GLenum TargetIfLazy(GLenum target) { } } -ScopedLazyBind::ScopedLazyBind(gl::GLContext* gl, GLenum target, +ScopedLazyBind::ScopedLazyBind(gl::GLContext* const gl, const GLenum target, const WebGLBuffer* buf) - : ScopedGLWrapper(gl), - mTarget(buf ? TargetIfLazy(target) : 0), - mBuf(buf) { + : mGL(gl), mTarget(buf ? 
TargetIfLazy(target) : 0), mBuf(buf) { if (mTarget) { mGL->fBindBuffer(mTarget, mBuf->mGLName); } } -void ScopedLazyBind::UnwrapImpl() { +ScopedLazyBind::~ScopedLazyBind() { if (mTarget) { mGL->fBindBuffer(mTarget, 0); } diff --git a/dom/canvas/WebGLContext.h b/dom/canvas/WebGLContext.h index cc87749f03..8b3ce226a5 100644 --- a/dom/canvas/WebGLContext.h +++ b/dom/canvas/WebGLContext.h @@ -2075,44 +2075,33 @@ bool ValidateTexImageTarget(WebGLContext* webgl, uint8_t funcDims, TexImageTarget* const out_texImageTarget, WebGLTexture** const out_tex); -class ScopedUnpackReset final : public gl::ScopedGLWrapper { - friend struct gl::ScopedGLWrapper; - +class ScopedUnpackReset final { private: const WebGLContext* const mWebGL; public: explicit ScopedUnpackReset(const WebGLContext* webgl); - - private: - void UnwrapImpl(); + ~ScopedUnpackReset(); }; -class ScopedFBRebinder final : public gl::ScopedGLWrapper { - friend struct gl::ScopedGLWrapper; - +class ScopedFBRebinder final { private: const WebGLContext* const mWebGL; public: - explicit ScopedFBRebinder(const WebGLContext* const webgl) - : ScopedGLWrapper(webgl->gl), mWebGL(webgl) {} - - private: - void UnwrapImpl(); + explicit ScopedFBRebinder(const WebGLContext* const webgl) : mWebGL(webgl) {} + ~ScopedFBRebinder(); }; -class ScopedLazyBind final : public gl::ScopedGLWrapper { - friend struct gl::ScopedGLWrapper; - +class ScopedLazyBind final { + private: + gl::GLContext* const mGL; const GLenum mTarget; const WebGLBuffer* const mBuf; public: ScopedLazyBind(gl::GLContext* gl, GLenum target, const WebGLBuffer* buf); - - private: - void UnwrapImpl(); + ~ScopedLazyBind(); }; //// diff --git a/dom/canvas/WebGLTextureUpload.cpp b/dom/canvas/WebGLTextureUpload.cpp index fe5e9876ed..94fbd7fa99 100644 --- a/dom/canvas/WebGLTextureUpload.cpp +++ b/dom/canvas/WebGLTextureUpload.cpp @@ -1735,25 +1735,25 @@ ScopedCopyTexImageSource::ScopedCopyTexImageSource( // Now create the swizzled FB we'll be exposing. 
GLuint rgbaRB = 0; - gl->fGenRenderbuffers(1, &rgbaRB); - gl::ScopedBindRenderbuffer scopedRB(gl, rgbaRB); - gl->fRenderbufferStorage(LOCAL_GL_RENDERBUFFER, sizedFormat, srcWidth, - srcHeight); - GLuint rgbaFB = 0; - gl->fGenFramebuffers(1, &rgbaFB); - gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, rgbaFB); - gl->fFramebufferRenderbuffer(LOCAL_GL_FRAMEBUFFER, LOCAL_GL_COLOR_ATTACHMENT0, - LOCAL_GL_RENDERBUFFER, rgbaRB); + { + gl->fGenRenderbuffers(1, &rgbaRB); + gl::ScopedBindRenderbuffer scopedRB(gl, rgbaRB); + gl->fRenderbufferStorage(LOCAL_GL_RENDERBUFFER, sizedFormat, srcWidth, + srcHeight); - const GLenum status = gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER); - if (status != LOCAL_GL_FRAMEBUFFER_COMPLETE) { - MOZ_CRASH("GFX: Temp framebuffer is not complete."); + gl->fGenFramebuffers(1, &rgbaFB); + gl->fBindFramebuffer(LOCAL_GL_FRAMEBUFFER, rgbaFB); + gl->fFramebufferRenderbuffer(LOCAL_GL_FRAMEBUFFER, + LOCAL_GL_COLOR_ATTACHMENT0, + LOCAL_GL_RENDERBUFFER, rgbaRB); + + const GLenum status = gl->fCheckFramebufferStatus(LOCAL_GL_FRAMEBUFFER); + if (status != LOCAL_GL_FRAMEBUFFER_COMPLETE) { + MOZ_CRASH("GFX: Temp framebuffer is not complete."); + } } - // Restore RB binding. - scopedRB.Unwrap(); // This function should really have a better name. - // Draw-blit rgbaTex into rgbaFB. const gfx::IntSize srcSize(srcWidth, srcHeight); { @@ -1762,10 +1762,6 @@ ScopedCopyTexImageSource::ScopedCopyTexImageSource( srcSize); } - // Restore Tex2D binding and destroy the temp tex. - scopedBindTex.Unwrap(); - scopedTex.Unwrap(); - // Leave RB and FB alive, and FB bound. 
mRB = rgbaRB; mFB = rgbaFB; diff --git a/dom/console/Console.cpp b/dom/console/Console.cpp index 916af6fc57..470ee72d9d 100644 --- a/dom/console/Console.cpp +++ b/dom/console/Console.cpp @@ -71,7 +71,7 @@ namespace mozilla { namespace dom { struct ConsoleStructuredCloneData { - nsCOMPtr mParent; + nsCOMPtr mGlobal; nsTArray> mBlobs; }; @@ -263,8 +263,9 @@ class ConsoleRunnable : public StructuredCloneHolderBase { JS::Rooted val(aCx); { - RefPtr blob = Blob::Create(mClonedData.mParent, - mClonedData.mBlobs.ElementAt(aIndex)); + nsCOMPtr global = mClonedData.mGlobal; + RefPtr blob = + Blob::Create(global, mClonedData.mBlobs.ElementAt(aIndex)); if (!ToJSValue(aCx, blob, &val)) { return nullptr; } @@ -385,7 +386,7 @@ class ConsoleRunnable : public StructuredCloneHolderBase { JS::Rooted argumentsValue(aCx); bool ok = Read(aCx, &argumentsValue); - mClonedData.mParent = nullptr; + mClonedData.mGlobal = nullptr; if (!ok) { return; @@ -586,7 +587,8 @@ class ConsoleWorkerRunnable : public WorkerProxyToMainThreadRunnable, return; } - RunConsole(jsapi.cx(), aWorkerPrivate, outerWindow, aWindow); + RunConsole(jsapi.cx(), aWindow->AsGlobal(), aWorkerPrivate, outerWindow, + aWindow); } void RunWindowless(WorkerPrivate* aWorkerPrivate) { @@ -617,7 +619,12 @@ class ConsoleWorkerRunnable : public WorkerProxyToMainThreadRunnable, JSAutoRealm ar(cx, global); - RunConsole(cx, aWorkerPrivate, nullptr, nullptr); + nsCOMPtr globalObject = xpc::NativeGlobal(global); + if (NS_WARN_IF(!globalObject)) { + return; + } + + RunConsole(cx, globalObject, aWorkerPrivate, nullptr, nullptr); } void RunBackOnWorkerThreadForCleanup(WorkerPrivate* aWorkerPrivate) override { @@ -627,7 +634,8 @@ class ConsoleWorkerRunnable : public WorkerProxyToMainThreadRunnable, } // This method is called in the main-thread. 
- virtual void RunConsole(JSContext* aCx, WorkerPrivate* aWorkerPrivate, + virtual void RunConsole(JSContext* aCx, nsIGlobalObject* aGlobal, + WorkerPrivate* aWorkerPrivate, nsPIDOMWindowOuter* aOuterWindow, nsPIDOMWindowInner* aInnerWindow) = 0; @@ -652,9 +660,11 @@ class ConsoleCallDataWorkerRunnable final : public ConsoleWorkerRunnable { private: ~ConsoleCallDataWorkerRunnable() override { MOZ_ASSERT(!mCallData); } - void RunConsole(JSContext* aCx, WorkerPrivate* aWorkerPrivate, + void RunConsole(JSContext* aCx, nsIGlobalObject* aGlobal, + WorkerPrivate* aWorkerPrivate, nsPIDOMWindowOuter* aOuterWindow, nsPIDOMWindowInner* aInnerWindow) override { + MOZ_ASSERT(aGlobal); MOZ_ASSERT(aWorkerPrivate); AssertIsOnMainThread(); @@ -685,12 +695,11 @@ class ConsoleCallDataWorkerRunnable final : public ConsoleWorkerRunnable { mCallData->SetIDs(id, innerID); } - // Now we could have the correct window (if we are not window-less). - mClonedData.mParent = aInnerWindow; + mClonedData.mGlobal = aGlobal; ProcessCallData(aCx, mConsoleData, mCallData); - mClonedData.mParent = nullptr; + mClonedData.mGlobal = nullptr; } virtual void ReleaseData() override { mCallData = nullptr; } @@ -762,17 +771,18 @@ class ConsoleProfileWorkerRunnable final : public ConsoleWorkerRunnable { } private: - void RunConsole(JSContext* aCx, WorkerPrivate* aWorkerPrivate, + void RunConsole(JSContext* aCx, nsIGlobalObject* aGlobal, + WorkerPrivate* aWorkerPrivate, nsPIDOMWindowOuter* aOuterWindow, nsPIDOMWindowInner* aInnerWindow) override { AssertIsOnMainThread(); + MOZ_ASSERT(aGlobal); - // Now we could have the correct window (if we are not window-less). 
- mClonedData.mParent = aInnerWindow; + mClonedData.mGlobal = aGlobal; ProcessProfileData(aCx, mName, mAction); - mClonedData.mParent = nullptr; + mClonedData.mGlobal = nullptr; } virtual void ReleaseData() override {} diff --git a/dom/crypto/WebCryptoTask.cpp b/dom/crypto/WebCryptoTask.cpp index 561baa0e6f..c731ae32b4 100644 --- a/dom/crypto/WebCryptoTask.cpp +++ b/dom/crypto/WebCryptoTask.cpp @@ -17,7 +17,6 @@ #include "mozilla/dom/TypedArray.h" #include "mozilla/dom/WebCryptoCommon.h" #include "mozilla/dom/WebCryptoTask.h" -#include "mozilla/dom/WebCryptoThreadPool.h" #include "mozilla/dom/WorkerRef.h" #include "mozilla/dom/WorkerPrivate.h" @@ -337,7 +336,13 @@ void WebCryptoTask::DispatchWithPromise(Promise* aResultPromise) { MAYBE_EARLY_FAIL(mEarlyRv); // dispatch to thread pool - mEarlyRv = WebCryptoThreadPool::Dispatch(this); + + if (!EnsureNSSInitializedChromeOrContent()) { + mEarlyRv = NS_ERROR_FAILURE; + } + MAYBE_EARLY_FAIL(mEarlyRv); + + mEarlyRv = NS_DispatchBackgroundTask(this); MAYBE_EARLY_FAIL(mEarlyRv) } diff --git a/dom/crypto/WebCryptoThreadPool.cpp b/dom/crypto/WebCryptoThreadPool.cpp deleted file mode 100644 index bfa4662239..0000000000 --- a/dom/crypto/WebCryptoThreadPool.cpp +++ /dev/null @@ -1,123 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#include "mozilla/dom/WebCryptoThreadPool.h" - -#include "MainThreadUtils.h" -#include "mozilla/Services.h" -#include "mozilla/StaticPtr.h" -#include "nsComponentManagerUtils.h" -#include "nsNSSComponent.h" -#include "nsXPCOMCIDInternal.h" -#include "nsXPCOMPrivate.h" -#include "nsIObserverService.h" -#include "nsThreadPool.h" - -namespace mozilla { -namespace dom { - -StaticRefPtr gInstance; - -NS_IMPL_ISUPPORTS(WebCryptoThreadPool, nsIObserver) - -/* static */ -void WebCryptoThreadPool::Initialize() { - MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); - MOZ_ASSERT(!gInstance, "More than one instance!"); - - gInstance = new WebCryptoThreadPool(); - NS_WARNING_ASSERTION(gInstance, "Failed create thread pool!"); - - if (gInstance && NS_FAILED(gInstance->Init())) { - NS_WARNING("Failed to initialize thread pool!"); - gInstance = nullptr; - } -} - -/* static */ -nsresult WebCryptoThreadPool::Dispatch(nsIRunnable* aRunnable) { - if (gInstance) { - return gInstance->DispatchInternal(aRunnable); - } - - // Fail if called on shutdown. - return NS_ERROR_FAILURE; -} - -nsresult WebCryptoThreadPool::Init() { - MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); - - nsCOMPtr obs = mozilla::services::GetObserverService(); - NS_ENSURE_TRUE(obs, NS_ERROR_FAILURE); - - // Need this observer to know when to shut down the thread pool. 
- return obs->AddObserver(this, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID, false); -} - -nsresult WebCryptoThreadPool::DispatchInternal(nsIRunnable* aRunnable) { - MutexAutoLock lock(mMutex); - - if (mShutdown) { - return NS_ERROR_FAILURE; - } - - if (!mPool) { - NS_ENSURE_TRUE(EnsureNSSInitializedChromeOrContent(), NS_ERROR_FAILURE); - - nsCOMPtr pool(new nsThreadPool()); - - nsresult rv = pool->SetName(NS_LITERAL_CSTRING("SubtleCrypto")); - NS_ENSURE_SUCCESS(rv, rv); - - pool.swap(mPool); - } - - return mPool->Dispatch(aRunnable, NS_DISPATCH_NORMAL); -} - -void WebCryptoThreadPool::Shutdown() { - MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); - - // Limit the scope of locking to avoid deadlocking if DispatchInternal ends - // up getting called during shutdown event processing. - nsCOMPtr pool; - { - MutexAutoLock lock(mMutex); - if (mShutdown) { - return; - } - pool = mPool; - mShutdown = true; - } - - if (pool) { - pool->Shutdown(); - } - - nsCOMPtr obs = mozilla::services::GetObserverService(); - NS_WARNING_ASSERTION(obs, "Failed to retrieve observer service!"); - - if (obs) { - if (NS_FAILED( - obs->RemoveObserver(this, NS_XPCOM_SHUTDOWN_THREADS_OBSERVER_ID))) { - NS_WARNING("Failed to remove shutdown observer!"); - } - } -} - -NS_IMETHODIMP -WebCryptoThreadPool::Observe(nsISupports* aSubject, const char* aTopic, - const char16_t* aData) { - MOZ_ASSERT(NS_IsMainThread(), "Wrong thread!"); - - if (gInstance) { - gInstance->Shutdown(); - gInstance = nullptr; - } - - return NS_OK; -} - -} // namespace dom -} // namespace mozilla diff --git a/dom/crypto/WebCryptoThreadPool.h b/dom/crypto/WebCryptoThreadPool.h deleted file mode 100644 index d29360e91f..0000000000 --- a/dom/crypto/WebCryptoThreadPool.h +++ /dev/null @@ -1,47 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this - * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ - -#ifndef mozilla_dom_WebCryptoThreadPool_h -#define mozilla_dom_WebCryptoThreadPool_h - -#include "mozilla/Mutex.h" -#include "nsIObserver.h" -#include "nsIThreadPool.h" - -namespace mozilla { -namespace dom { - -class WebCryptoThreadPool final : nsIObserver { - public: - NS_DECL_THREADSAFE_ISUPPORTS - - static void Initialize(); - - static nsresult Dispatch(nsIRunnable* aRunnable); - - private: - WebCryptoThreadPool() - : mMutex("WebCryptoThreadPool::mMutex"), - mPool(nullptr), - mShutdown(false) {} - virtual ~WebCryptoThreadPool() {} - - nsresult Init(); - - nsresult DispatchInternal(nsIRunnable* aRunnable); - - void Shutdown(); - - NS_IMETHOD Observe(nsISupports* aSubject, const char* aTopic, - const char16_t* aData) override; - - mozilla::Mutex mMutex; - nsCOMPtr mPool; - bool mShutdown; -}; - -} // namespace dom -} // namespace mozilla - -#endif // mozilla_dom_WebCryptoThreadPool_h diff --git a/dom/crypto/moz.build b/dom/crypto/moz.build index 521f75f7f4..16cf3e2780 100644 --- a/dom/crypto/moz.build +++ b/dom/crypto/moz.build @@ -11,7 +11,6 @@ EXPORTS.mozilla.dom += [ 'KeyAlgorithmProxy.h', 'WebCryptoCommon.h', 'WebCryptoTask.h', - 'WebCryptoThreadPool.h' ] UNIFIED_SOURCES += [ @@ -19,7 +18,6 @@ UNIFIED_SOURCES += [ 'CryptoKey.cpp', 'KeyAlgorithmProxy.cpp', 'WebCryptoTask.cpp', - 'WebCryptoThreadPool.cpp', ] include('/ipc/chromium/chromium-config.mozbuild') diff --git a/dom/events/DataTransferItem.cpp b/dom/events/DataTransferItem.cpp index 34be47fb3e..ddd53cbe83 100644 --- a/dom/events/DataTransferItem.cpp +++ b/dom/events/DataTransferItem.cpp @@ -285,14 +285,27 @@ already_AddRefed DataTransferItem::GetAsFile( if (RefPtr blob = do_QueryObject(supports)) { mCachedFile = blob->ToFile(); - } else if (nsCOMPtr blobImpl = do_QueryInterface(supports)) { - MOZ_ASSERT(blobImpl->IsFile()); - mCachedFile = File::Create(mDataTransfer, blobImpl); - } else if (nsCOMPtr ifile = do_QueryInterface(supports)) { - mCachedFile = File::CreateFromFile(mDataTransfer, ifile); 
} else { - MOZ_ASSERT(false, "One of the above code paths should be taken"); - return nullptr; + nsCOMPtr global = GetGlobalFromDataTransfer(); + if (NS_WARN_IF(!global)) { + return nullptr; + } + + if (nsCOMPtr blobImpl = do_QueryInterface(supports)) { + MOZ_ASSERT(blobImpl->IsFile()); + mCachedFile = File::Create(global, blobImpl); + if (NS_WARN_IF(!mCachedFile)) { + return nullptr; + } + } else if (nsCOMPtr ifile = do_QueryInterface(supports)) { + mCachedFile = File::CreateFromFile(global, ifile); + if (NS_WARN_IF(!mCachedFile)) { + return nullptr; + } + } else { + MOZ_ASSERT(false, "One of the above code paths should be taken"); + return nullptr; + } } } @@ -307,20 +320,8 @@ already_AddRefed DataTransferItem::GetAsEntry( return nullptr; } - nsCOMPtr global; - // This is annoying, but DataTransfer may have various things as parent. - nsCOMPtr target = - do_QueryInterface(mDataTransfer->GetParentObject()); - if (target) { - global = target->GetOwnerGlobal(); - } else { - RefPtr event = do_QueryObject(mDataTransfer->GetParentObject()); - if (event) { - global = event->GetParentObject(); - } - } - - if (!global) { + nsCOMPtr global = GetGlobalFromDataTransfer(); + if (NS_WARN_IF(!global)) { return nullptr; } @@ -389,7 +390,12 @@ already_AddRefed DataTransferItem::CreateFileFromInputStream( return nullptr; } - return File::CreateMemoryFile(mDataTransfer, data, available, fileName, mType, + nsCOMPtr global = GetGlobalFromDataTransfer(); + if (NS_WARN_IF(!global)) { + return nullptr; + } + + return File::CreateMemoryFile(global, data, available, fileName, mType, PR_Now()); } @@ -550,5 +556,23 @@ already_AddRefed DataTransferItem::Data(nsIPrincipal* aPrincipal, return variant.forget(); } +already_AddRefed +DataTransferItem::GetGlobalFromDataTransfer() { + nsCOMPtr global; + // This is annoying, but DataTransfer may have various things as parent. 
+ nsCOMPtr target = + do_QueryInterface(mDataTransfer->GetParentObject()); + if (target) { + global = target->GetOwnerGlobal(); + } else { + RefPtr event = do_QueryObject(mDataTransfer->GetParentObject()); + if (event) { + global = event->GetParentObject(); + } + } + + return global.forget(); +} + } // namespace dom } // namespace mozilla diff --git a/dom/events/DataTransferItem.h b/dom/events/DataTransferItem.h index abc2286fbb..3b0dd9a8a5 100644 --- a/dom/events/DataTransferItem.h +++ b/dom/events/DataTransferItem.h @@ -103,6 +103,8 @@ class DataTransferItem final : public nsISupports, public nsWrapperCache { ~DataTransferItem() {} already_AddRefed CreateFileFromInputStream(nsIInputStream* aStream); + already_AddRefed GetGlobalFromDataTransfer(); + // The index in the 2d mIndexedItems array uint32_t mIndex; diff --git a/dom/file/Blob.cpp b/dom/file/Blob.cpp index f1bede05ec..1835a1c421 100644 --- a/dom/file/Blob.cpp +++ b/dom/file/Blob.cpp @@ -3,6 +3,7 @@ * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ #include "Blob.h" +#include "EmptyBlobImpl.h" #include "File.h" #include "MemoryBlobImpl.h" #include "mozilla/dom/BlobBinding.h" @@ -10,6 +11,7 @@ #include "mozilla/dom/WorkerCommon.h" #include "mozilla/dom/WorkerPrivate.h" #include "MultipartBlobImpl.h" +#include "nsIGlobalObject.h" #include "nsIInputStream.h" #include "nsPIDOMWindow.h" #include "StreamBlobImpl.h" @@ -22,12 +24,12 @@ namespace dom { NS_IMPL_CYCLE_COLLECTION_CLASS(Blob) NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Blob) - NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mGlobal) NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER NS_IMPL_CYCLE_COLLECTION_UNLINK_END NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(Blob) - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGlobal) NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END NS_IMPL_CYCLE_COLLECTION_TRACE_BEGIN(Blob) @@ -63,36 +65,52 @@ void Blob::MakeValidBlobType(nsAString& aType) { } /* static */ -Blob* Blob::Create(nsISupports* aParent, BlobImpl* aImpl) { +Blob* Blob::Create(nsIGlobalObject* aGlobal, BlobImpl* aImpl) { MOZ_ASSERT(aImpl); - return aImpl->IsFile() ? new File(aParent, aImpl) : new Blob(aParent, aImpl); + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + + return aImpl->IsFile() ? 
new File(aGlobal, aImpl) : new Blob(aGlobal, aImpl); } /* static */ -already_AddRefed Blob::CreateStringBlob(nsISupports* aParent, +already_AddRefed Blob::CreateStringBlob(nsIGlobalObject* aGlobal, const nsACString& aData, const nsAString& aContentType) { + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + RefPtr blobImpl = StringBlobImpl::Create(aData, aContentType); - RefPtr blob = Blob::Create(aParent, blobImpl); + RefPtr blob = Blob::Create(aGlobal, blobImpl); MOZ_ASSERT(!blob->mImpl->IsFile()); return blob.forget(); } /* static */ -already_AddRefed Blob::CreateMemoryBlob(nsISupports* aParent, +already_AddRefed Blob::CreateMemoryBlob(nsIGlobalObject* aGlobal, void* aMemoryBuffer, uint64_t aLength, const nsAString& aContentType) { + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + RefPtr blob = Blob::Create( - aParent, new MemoryBlobImpl(aMemoryBuffer, aLength, aContentType)); + aGlobal, new MemoryBlobImpl(aMemoryBuffer, aLength, aContentType)); MOZ_ASSERT(!blob->mImpl->IsFile()); return blob.forget(); } -Blob::Blob(nsISupports* aParent, BlobImpl* aImpl) - : mImpl(aImpl), mParent(aParent) { +Blob::Blob(nsIGlobalObject* aGlobal, BlobImpl* aImpl) + : mImpl(aImpl), mGlobal(aGlobal) { MOZ_ASSERT(mImpl); + MOZ_ASSERT(mGlobal); } Blob::~Blob() = default; @@ -112,7 +130,7 @@ already_AddRefed Blob::ToFile() { if (HasFileInterface()) { file = static_cast(this); } else { - file = new File(mParent, mImpl); + file = new File(mGlobal, mImpl); } return file.forget(); @@ -131,7 +149,7 @@ already_AddRefed Blob::ToFile(const nsAString& aName, return nullptr; } - RefPtr file = new File(mParent, impl); + RefPtr file = new File(mGlobal, impl); return file.forget(); } @@ -144,7 +162,7 @@ already_AddRefed Blob::CreateSlice(uint64_t aStart, uint64_t aLength, return nullptr; } - RefPtr blob = Blob::Create(mParent, impl); + RefPtr blob = Blob::Create(mGlobal, impl); return blob.forget(); } @@ -170,7 +188,7 @@ already_AddRefed 
Blob::Slice(const Optional& aStart, return nullptr; } - RefPtr blob = Blob::Create(mParent, impl); + RefPtr blob = Blob::Create(mGlobal, impl); return blob.forget(); } @@ -210,7 +228,10 @@ already_AddRefed Blob::Constructor( MOZ_ASSERT(!impl->IsFile()); - RefPtr blob = Blob::Create(aGlobal.GetAsSupports(), impl); + nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports()); + MOZ_ASSERT(global); + + RefPtr blob = Blob::Create(global, impl); return blob.forget(); } @@ -243,8 +264,7 @@ already_AddRefed Blob::ArrayBuffer(ErrorResult& aRv) { already_AddRefed Blob::ConsumeBody( BodyConsumer::ConsumeType aConsumeType, ErrorResult& aRv) { - nsCOMPtr global = do_QueryInterface(mParent); - if (NS_WARN_IF(!global)) { + if (NS_WARN_IF(!mGlobal)) { aRv.Throw(NS_ERROR_FAILURE); return nullptr; } @@ -255,7 +275,7 @@ already_AddRefed Blob::ConsumeBody( MOZ_ASSERT(workerPrivate); mainThreadEventTarget = workerPrivate->MainThreadEventTarget(); } else { - mainThreadEventTarget = global->EventTargetFor(TaskCategory::Other); + mainThreadEventTarget = mGlobal->EventTargetFor(TaskCategory::Other); } MOZ_ASSERT(mainThreadEventTarget); @@ -266,7 +286,7 @@ already_AddRefed Blob::ConsumeBody( return nullptr; } - return BodyConsumer::Create(global, mainThreadEventTarget, inputStream, + return BodyConsumer::Create(mGlobal, mainThreadEventTarget, inputStream, nullptr, aConsumeType, VoidCString(), VoidString(), VoidCString(), MutableBlobStorage::eOnlyInMemory, aRv); @@ -335,15 +355,14 @@ void Blob::Stream(JSContext* aCx, JS::MutableHandle aStream, return; } - nsCOMPtr global = do_QueryInterface(GetParentObject()); - if (NS_WARN_IF(!global)) { + if (NS_WARN_IF(!mGlobal)) { aRv.Throw(NS_ERROR_FAILURE); return; } RefPtr holder = new BlobBodyStreamHolder(); - BodyStream::Create(aCx, holder, global, stream, aRv); + BodyStream::Create(aCx, holder, mGlobal, stream, aRv); if (NS_WARN_IF(aRv.Failed())) { return; } diff --git a/dom/file/Blob.h b/dom/file/Blob.h index 69b152ee0a..6c77e6352c 100644 --- 
a/dom/file/Blob.h +++ b/dom/file/Blob.h @@ -15,6 +15,7 @@ #include "nsWrapperCache.h" #include "nsWeakReference.h" +class nsIGlobalObject; class nsIInputStream; namespace mozilla { @@ -41,16 +42,16 @@ class Blob : public nsSupportsWeakReference, public nsWrapperCache { typedef OwningArrayBufferViewOrArrayBufferOrBlobOrUSVString BlobPart; // This creates a Blob or a File based on the type of BlobImpl. - static Blob* Create(nsISupports* aParent, BlobImpl* aImpl); + static Blob* Create(nsIGlobalObject* aGlobal, BlobImpl* aImpl); - static already_AddRefed CreateStringBlob(nsISupports* aParent, + static already_AddRefed CreateStringBlob(nsIGlobalObject* aGlobal, const nsACString& aData, const nsAString& aContentType); // The returned Blob takes ownership of aMemoryBuffer. aMemoryBuffer will be // freed by free so it must be allocated by malloc or something // compatible with it. - static already_AddRefed CreateMemoryBlob(nsISupports* aParent, + static already_AddRefed CreateMemoryBlob(nsIGlobalObject* aGlobal, void* aMemoryBuffer, uint64_t aLength, const nsAString& aContentType); @@ -87,7 +88,7 @@ class Blob : public nsSupportsWeakReference, public nsWrapperCache { static void MakeValidBlobType(nsAString& aType); // WebIDL methods - nsISupports* GetParentObject() const { return mParent; } + nsIGlobalObject* GetParentObject() const { return mGlobal; } bool IsMemoryFile() const; @@ -122,7 +123,7 @@ class Blob : public nsSupportsWeakReference, public nsWrapperCache { protected: // File constructor should never be used directly. Use Blob::Create instead. 
- Blob(nsISupports* aParent, BlobImpl* aImpl); + Blob(nsIGlobalObject* aGlobal, BlobImpl* aImpl); virtual ~Blob(); virtual bool HasFileInterface() const { return false; } @@ -137,7 +138,7 @@ class Blob : public nsSupportsWeakReference, public nsWrapperCache { RefPtr mImpl; private: - nsCOMPtr mParent; + nsCOMPtr mGlobal; }; NS_DEFINE_STATIC_IID_ACCESSOR(Blob, NS_DOM_BLOB_IID) diff --git a/dom/file/File.cpp b/dom/file/File.cpp index afdb7cc598..162d4968c2 100644 --- a/dom/file/File.cpp +++ b/dom/file/File.cpp @@ -16,61 +16,88 @@ namespace mozilla { namespace dom { -File::File(nsISupports* aParent, BlobImpl* aImpl) : Blob(aParent, aImpl) { +File::File(nsIGlobalObject* aGlobal, BlobImpl* aImpl) : Blob(aGlobal, aImpl) { MOZ_ASSERT(aImpl->IsFile()); } File::~File() {} /* static */ -File* File::Create(nsISupports* aParent, BlobImpl* aImpl) { +File* File::Create(nsIGlobalObject* aGlobal, BlobImpl* aImpl) { MOZ_ASSERT(aImpl); MOZ_ASSERT(aImpl->IsFile()); - return new File(aParent, aImpl); + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + + return new File(aGlobal, aImpl); } /* static */ -already_AddRefed File::Create(nsISupports* aParent, +already_AddRefed File::Create(nsIGlobalObject* aGlobal, const nsAString& aName, const nsAString& aContentType, uint64_t aLength, int64_t aLastModifiedDate) { + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + RefPtr file = new File( - aParent, new BaseBlobImpl(NS_LITERAL_STRING("BaseBlobImpl"), aName, + aGlobal, new BaseBlobImpl(NS_LITERAL_STRING("BaseBlobImpl"), aName, aContentType, aLength, aLastModifiedDate)); return file.forget(); } /* static */ -already_AddRefed File::CreateMemoryFile(nsISupports* aParent, +already_AddRefed File::CreateMemoryFile(nsIGlobalObject* aGlobal, void* aMemoryBuffer, uint64_t aLength, const nsAString& aName, const nsAString& aContentType, int64_t aLastModifiedDate) { + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + RefPtr file 
= - new File(aParent, new MemoryBlobImpl(aMemoryBuffer, aLength, aName, + new File(aGlobal, new MemoryBlobImpl(aMemoryBuffer, aLength, aName, aContentType, aLastModifiedDate)); return file.forget(); } /* static */ -already_AddRefed File::CreateFromFile(nsISupports* aParent, +already_AddRefed File::CreateFromFile(nsIGlobalObject* aGlobal, nsIFile* aFile) { MOZ_DIAGNOSTIC_ASSERT(XRE_IsParentProcess()); - RefPtr file = new File(aParent, new FileBlobImpl(aFile)); + + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + + RefPtr file = new File(aGlobal, new FileBlobImpl(aFile)); return file.forget(); } /* static */ -already_AddRefed File::CreateFromFile(nsISupports* aParent, +already_AddRefed File::CreateFromFile(nsIGlobalObject* aGlobal, nsIFile* aFile, const nsAString& aName, const nsAString& aContentType) { MOZ_DIAGNOSTIC_ASSERT(XRE_IsParentProcess()); + + MOZ_ASSERT(aGlobal); + if (NS_WARN_IF(!aGlobal)) { + return nullptr; + } + RefPtr file = - new File(aParent, new FileBlobImpl(aFile, aName, aContentType)); + new File(aGlobal, new FileBlobImpl(aFile, aName, aContentType)); return file.forget(); } @@ -130,7 +157,10 @@ already_AddRefed File::Constructor(const GlobalObject& aGlobal, impl->SetLastModified(aBag.mLastModified.Value()); } - RefPtr file = new File(aGlobal.GetAsSupports(), impl); + nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports()); + MOZ_ASSERT(global); + + RefPtr file = new File(global, impl); return file.forget(); } @@ -141,6 +171,12 @@ already_AddRefed File::CreateFromNsIFile( ErrorResult& aRv) { nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports()); + MOZ_ASSERT(global); + if (NS_WARN_IF(!global)) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + RefPtr promise = FileCreatorHelper::CreateFile(global, aData, aBag, true, aRv); return promise.forget(); @@ -159,6 +195,12 @@ already_AddRefed File::CreateFromFileName( nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports()); + MOZ_ASSERT(global); + if 
(NS_WARN_IF(!global)) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + RefPtr promise = FileCreatorHelper::CreateFile(global, file, aBag, false, aRv); return promise.forget(); diff --git a/dom/file/File.h b/dom/file/File.h index b1f53344b5..c8f168394f 100644 --- a/dom/file/File.h +++ b/dom/file/File.h @@ -22,9 +22,9 @@ class File final : public Blob { public: // Note: BlobImpl must be a File in order to use this method. // Check impl->IsFile(). - static File* Create(nsISupports* aParent, BlobImpl* aImpl); + static File* Create(nsIGlobalObject* aGlobal, BlobImpl* aImpl); - static already_AddRefed Create(nsISupports* aParent, + static already_AddRefed Create(nsIGlobalObject* aGlobal, const nsAString& aName, const nsAString& aContentType, uint64_t aLength, @@ -33,7 +33,7 @@ class File final : public Blob { // The returned File takes ownership of aMemoryBuffer. aMemoryBuffer will be // freed by free so it must be allocated by malloc or something // compatible with it. - static already_AddRefed CreateMemoryFile(nsISupports* aParent, + static already_AddRefed CreateMemoryFile(nsIGlobalObject* aGlobal, void* aMemoryBuffer, uint64_t aLength, const nsAString& aName, @@ -46,10 +46,10 @@ class File final : public Blob { // order to use nsIMIMEService. // Would be nice if we try to avoid to use this method outside the // main-thread to avoid extra runnables. - static already_AddRefed CreateFromFile(nsISupports* aParent, + static already_AddRefed CreateFromFile(nsIGlobalObject* aGlobal, nsIFile* aFile); - static already_AddRefed CreateFromFile(nsISupports* aParent, + static already_AddRefed CreateFromFile(nsIGlobalObject* aGlobal, nsIFile* aFile, const nsAString& aName, const nsAString& aContentType); @@ -95,7 +95,7 @@ class File final : public Blob { private: // File constructor should never be used directly. Use Blob::Create or // File::Create. 
- File(nsISupports* aParent, BlobImpl* aImpl); + File(nsIGlobalObject* aGlobal, BlobImpl* aImpl); ~File(); }; diff --git a/dom/file/MutableBlobStorage.cpp b/dom/file/MutableBlobStorage.cpp index 2b4937beda..1e980a0aea 100644 --- a/dom/file/MutableBlobStorage.cpp +++ b/dom/file/MutableBlobStorage.cpp @@ -29,25 +29,26 @@ namespace { class BlobCreationDoneRunnable final : public Runnable { public: BlobCreationDoneRunnable(MutableBlobStorage* aBlobStorage, - MutableBlobStorageCallback* aCallback, Blob* aBlob, - nsresult aRv) + MutableBlobStorageCallback* aCallback, + BlobImpl* aBlobImpl, nsresult aRv) : Runnable("dom::BlobCreationDoneRunnable"), mBlobStorage(aBlobStorage), mCallback(aCallback), - mBlob(aBlob), + mBlobImpl(aBlobImpl), mRv(aRv) { MOZ_ASSERT(aBlobStorage); MOZ_ASSERT(aCallback); - MOZ_ASSERT((NS_FAILED(aRv) && !aBlob) || (NS_SUCCEEDED(aRv) && aBlob)); + MOZ_ASSERT((NS_FAILED(aRv) && !aBlobImpl) || + (NS_SUCCEEDED(aRv) && aBlobImpl)); } NS_IMETHOD Run() override { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(mBlobStorage); - mCallback->BlobStoreCompleted(mBlobStorage, mBlob, mRv); + mCallback->BlobStoreCompleted(mBlobStorage, mBlobImpl, mRv); mCallback = nullptr; - mBlob = nullptr; + mBlobImpl = nullptr; return NS_OK; } @@ -58,13 +59,11 @@ class BlobCreationDoneRunnable final : public Runnable { // correct thread. 
NS_ProxyRelease("BlobCreationDoneRunnable::mCallback", mBlobStorage->EventTarget(), mCallback.forget()); - NS_ProxyRelease("BlobCreationDoneRunnable::mBlob", - mBlobStorage->EventTarget(), mBlob.forget()); } RefPtr mBlobStorage; RefPtr mCallback; - RefPtr mBlob; + RefPtr mBlobImpl; nsresult mRv; }; @@ -189,12 +188,10 @@ class CreateBlobRunnable final : public Runnable, NS_DECL_ISUPPORTS_INHERITED CreateBlobRunnable(MutableBlobStorage* aBlobStorage, - already_AddRefed aParent, const nsACString& aContentType, already_AddRefed aCallback) : Runnable("dom::CreateBlobRunnable"), mBlobStorage(aBlobStorage), - mParent(aParent), mContentType(aContentType), mCallback(aCallback) { MOZ_ASSERT(!NS_IsMainThread()); @@ -210,11 +207,8 @@ class CreateBlobRunnable final : public Runnable, } void OperationSucceeded(BlobImpl* aBlobImpl) override { - nsCOMPtr parent(std::move(mParent)); RefPtr callback(std::move(mCallback)); - - RefPtr blob = Blob::Create(parent, aBlobImpl); - callback->BlobStoreCompleted(mBlobStorage, blob, NS_OK); + callback->BlobStoreCompleted(mBlobStorage, aBlobImpl, NS_OK); } void OperationFailed(nsresult aRv) override { @@ -227,14 +221,11 @@ class CreateBlobRunnable final : public Runnable, MOZ_ASSERT(mBlobStorage); // If something when wrong, we still have to release data in the correct // thread. - NS_ProxyRelease("CreateBlobRunnable::mParent", mBlobStorage->EventTarget(), - mParent.forget()); NS_ProxyRelease("CreateBlobRunnable::mCallback", mBlobStorage->EventTarget(), mCallback.forget()); } RefPtr mBlobStorage; - nsCOMPtr mParent; nsCString mContentType; RefPtr mCallback; }; @@ -245,12 +236,10 @@ NS_IMPL_ISUPPORTS_INHERITED0(CreateBlobRunnable, Runnable) // it dispatches a CreateBlobRunnable to the main-thread. 
class LastRunnable final : public Runnable { public: - LastRunnable(MutableBlobStorage* aBlobStorage, nsISupports* aParent, - const nsACString& aContentType, + LastRunnable(MutableBlobStorage* aBlobStorage, const nsACString& aContentType, MutableBlobStorageCallback* aCallback) : Runnable("dom::LastRunnable"), mBlobStorage(aBlobStorage), - mParent(aParent), mContentType(aContentType), mCallback(aCallback) { MOZ_ASSERT(NS_IsMainThread()); @@ -262,8 +251,8 @@ class LastRunnable final : public Runnable { Run() override { MOZ_ASSERT(!NS_IsMainThread()); - RefPtr runnable = new CreateBlobRunnable( - mBlobStorage, mParent.forget(), mContentType, mCallback.forget()); + RefPtr runnable = + new CreateBlobRunnable(mBlobStorage, mContentType, mCallback.forget()); return mBlobStorage->EventTarget()->Dispatch(runnable, NS_DISPATCH_NORMAL); } @@ -272,14 +261,11 @@ class LastRunnable final : public Runnable { MOZ_ASSERT(mBlobStorage); // If something when wrong, we still have to release data in the correct // thread. - NS_ProxyRelease("LastRunnable::mParent", mBlobStorage->EventTarget(), - mParent.forget()); NS_ProxyRelease("LastRunnable::mCallback", mBlobStorage->EventTarget(), mCallback.forget()); } RefPtr mBlobStorage; - nsCOMPtr mParent; nsCString mContentType; RefPtr mCallback; }; @@ -330,9 +316,8 @@ MutableBlobStorage::~MutableBlobStorage() { } } -void MutableBlobStorage::GetBlobWhenReady( - nsISupports* aParent, const nsACString& aContentType, - MutableBlobStorageCallback* aCallback) { +void MutableBlobStorage::GetBlobImplWhenReady( + const nsACString& aContentType, MutableBlobStorageCallback* aCallback) { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(aCallback); @@ -359,8 +344,7 @@ void MutableBlobStorage::GetBlobWhenReady( // this is to go to the I/O thread and then we come back: the runnables are // executed in order and this LastRunnable will be... the last one. // This Runnable will also close the FD on the I/O thread. 
- RefPtr runnable = - new LastRunnable(this, aParent, aContentType, aCallback); + RefPtr runnable = new LastRunnable(this, aContentType, aCallback); // If the dispatching fails, we are shutting down and it's fine to do not // run the callback. @@ -370,7 +354,6 @@ void MutableBlobStorage::GetBlobWhenReady( // If we are waiting for the temporary file, it's better to wait... if (previousState == eWaitingForTemporaryFile) { - mPendingParent = aParent; mPendingContentType = aContentType; mPendingCallback = aCallback; return; @@ -389,9 +372,8 @@ void MutableBlobStorage::GetBlobWhenReady( blobImpl = new EmptyBlobImpl(NS_ConvertUTF8toUTF16(aContentType)); } - RefPtr blob = Blob::Create(aParent, blobImpl); RefPtr runnable = - new BlobCreationDoneRunnable(this, aCallback, blob, NS_OK); + new BlobCreationDoneRunnable(this, aCallback, blobImpl, NS_OK); nsresult error = EventTarget()->Dispatch(runnable.forget(), NS_DISPATCH_NORMAL); @@ -589,19 +571,18 @@ void MutableBlobStorage::TemporaryFileCreated(PRFileDesc* aFD) { return; } - // If we are closed, it means that GetBlobWhenReady() has been called when we - // were already waiting for a temporary file-descriptor. Finally we are here, - // AdoptBuffer runnable is going to write the current buffer into this file. - // After that, there is nothing else to write, and we dispatch LastRunnable - // which ends up calling mPendingCallback via CreateBlobRunnable. + // If we are closed, it means that GetBlobImplWhenReady() has been called when + // we were already waiting for a temporary file-descriptor. Finally we are + // here, AdoptBuffer runnable is going to write the current buffer into this + // file. After that, there is nothing else to write, and we dispatch + // LastRunnable which ends up calling mPendingCallback via CreateBlobRunnable. 
if (mStorageState == eClosed) { MOZ_ASSERT(mPendingCallback); - RefPtr runnable = new LastRunnable( - this, mPendingParent, mPendingContentType, mPendingCallback); + RefPtr runnable = + new LastRunnable(this, mPendingContentType, mPendingCallback); Unused << DispatchToIOThread(runnable.forget()); - mPendingParent = nullptr; mPendingCallback = nullptr; } } diff --git a/dom/file/MutableBlobStorage.h b/dom/file/MutableBlobStorage.h index 4acc998d6d..94c22a72fc 100644 --- a/dom/file/MutableBlobStorage.h +++ b/dom/file/MutableBlobStorage.h @@ -29,8 +29,8 @@ class MutableBlobStorageCallback { public: NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING - virtual void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, Blob* aBlob, - nsresult aRv) = 0; + virtual void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, + BlobImpl* aBlob, nsresult aRv) = 0; }; // This class is must be created and used on main-thread, except for Append() @@ -51,9 +51,9 @@ class MutableBlobStorage final { nsresult Append(const void* aData, uint32_t aLength); // This method can be called just once. - // The callback will be called when the Blob is ready. - void GetBlobWhenReady(nsISupports* aParent, const nsACString& aContentType, - MutableBlobStorageCallback* aCallback); + // The callback will be called when the BlobImpl is ready. 
+ void GetBlobImplWhenReady(const nsACString& aContentType, + MutableBlobStorageCallback* aCallback); void TemporaryFileCreated(PRFileDesc* aFD); @@ -115,7 +115,6 @@ class MutableBlobStorage final { RefPtr mTaskQueue; nsCOMPtr mEventTarget; - nsCOMPtr mPendingParent; nsCString mPendingContentType; RefPtr mPendingCallback; diff --git a/dom/file/MutableBlobStreamListener.cpp b/dom/file/MutableBlobStreamListener.cpp index e39bb30940..d14ee4ae31 100644 --- a/dom/file/MutableBlobStreamListener.cpp +++ b/dom/file/MutableBlobStreamListener.cpp @@ -12,10 +12,9 @@ namespace dom { MutableBlobStreamListener::MutableBlobStreamListener( MutableBlobStorage::MutableBlobStorageType aStorageType, - nsISupports* aParent, const nsACString& aContentType, - MutableBlobStorageCallback* aCallback, nsIEventTarget* aEventTarget) + const nsACString& aContentType, MutableBlobStorageCallback* aCallback, + nsIEventTarget* aEventTarget) : mCallback(aCallback), - mParent(aParent), mStorageType(aStorageType), mContentType(aContentType), mEventTarget(aEventTarget) { @@ -62,7 +61,7 @@ MutableBlobStreamListener::OnStopRequest(nsIRequest* aRequest, return NS_OK; } - storage->GetBlobWhenReady(mParent, mContentType, mCallback); + storage->GetBlobImplWhenReady(mContentType, mCallback); return NS_OK; } diff --git a/dom/file/MutableBlobStreamListener.h b/dom/file/MutableBlobStreamListener.h index 35f19efaa3..7bed51f91b 100644 --- a/dom/file/MutableBlobStreamListener.h +++ b/dom/file/MutableBlobStreamListener.h @@ -24,7 +24,6 @@ class MutableBlobStreamListener final NS_DECL_NSIREQUESTOBSERVER MutableBlobStreamListener(MutableBlobStorage::MutableBlobStorageType aType, - nsISupports* aParent, const nsACString& aContentType, MutableBlobStorageCallback* aCallback, nsIEventTarget* aEventTarget = nullptr); @@ -39,7 +38,6 @@ class MutableBlobStreamListener final RefPtr mStorage; RefPtr mCallback; - nsCOMPtr mParent; MutableBlobStorage::MutableBlobStorageType mStorageType; nsCString mContentType; nsCOMPtr 
mEventTarget; diff --git a/dom/file/ipc/FileCreatorChild.cpp b/dom/file/ipc/FileCreatorChild.cpp index b3a900e486..809e652b9e 100644 --- a/dom/file/ipc/FileCreatorChild.cpp +++ b/dom/file/ipc/FileCreatorChild.cpp @@ -38,6 +38,11 @@ mozilla::ipc::IPCResult FileCreatorChild::Recv__delete__( aResult.get_FileCreationSuccessResult().blob()); RefPtr file = File::Create(promise->GetParentObject(), impl); + if (NS_WARN_IF(!file)) { + promise->MaybeReject(NS_ERROR_FAILURE); + return IPC_OK(); + } + promise->MaybeResolve(file); return IPC_OK(); } diff --git a/dom/file/moz.build b/dom/file/moz.build index d040cddc37..2ab5f1b17a 100644 --- a/dom/file/moz.build +++ b/dom/file/moz.build @@ -12,6 +12,7 @@ EXPORTS.mozilla.dom += [ 'Blob.h', 'BlobImpl.h', 'BlobSet.h', + 'EmptyBlobImpl.h', 'File.h', 'FileBlobImpl.h', 'FileCreatorHelper.h', diff --git a/dom/filesystem/Directory.cpp b/dom/filesystem/Directory.cpp index 5681ee6faa..e04505e133 100644 --- a/dom/filesystem/Directory.cpp +++ b/dom/filesystem/Directory.cpp @@ -25,7 +25,7 @@ NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN(Directory) tmp->mFileSystem->Unlink(); tmp->mFileSystem = nullptr; } - NS_IMPL_CYCLE_COLLECTION_UNLINK(mParent) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mGlobal) NS_IMPL_CYCLE_COLLECTION_UNLINK_PRESERVED_WRAPPER NS_IMPL_CYCLE_COLLECTION_UNLINK_END @@ -33,7 +33,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN(Directory) if (tmp->mFileSystem) { tmp->mFileSystem->Traverse(cb); } - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGlobal) NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END NS_IMPL_CYCLE_COLLECTION_TRACE_WRAPPERCACHE(Directory) @@ -55,29 +55,35 @@ already_AddRefed Directory::Constructor(const GlobalObject& aGlobal, return nullptr; } - return Create(aGlobal.GetAsSupports(), path); + nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports()); + if (NS_WARN_IF(!global)) { + aRv.Throw(NS_ERROR_FAILURE); + return nullptr; + } + + return Create(global, path); } /* static */ -already_AddRefed 
Directory::Create(nsISupports* aParent, +already_AddRefed Directory::Create(nsIGlobalObject* aGlobal, nsIFile* aFile, FileSystemBase* aFileSystem) { - MOZ_ASSERT(aParent); + MOZ_ASSERT(aGlobal); MOZ_ASSERT(aFile); - RefPtr directory = new Directory(aParent, aFile, aFileSystem); + RefPtr directory = new Directory(aGlobal, aFile, aFileSystem); return directory.forget(); } -Directory::Directory(nsISupports* aParent, nsIFile* aFile, +Directory::Directory(nsIGlobalObject* aGlobal, nsIFile* aFile, FileSystemBase* aFileSystem) - : mParent(aParent), mFile(aFile) { + : mGlobal(aGlobal), mFile(aFile) { MOZ_ASSERT(aFile); // aFileSystem can be null. In this case we create a OSFileSystem when needed. if (aFileSystem) { // More likely, this is a OSFileSystem. This object keeps a reference of - // mParent but it's not cycle collectable and to avoid manual + // mGlobal but it's not cycle collectable and to avoid manual // addref/release, it's better to have 1 object per directory. For this // reason we clone it here. 
mFileSystem = aFileSystem->Clone(); @@ -86,7 +92,7 @@ Directory::Directory(nsISupports* aParent, nsIFile* aFile, Directory::~Directory() {} -nsISupports* Directory::GetParentObject() const { return mParent; } +nsIGlobalObject* Directory::GetParentObject() const { return mGlobal; } JSObject* Directory::WrapObject(JSContext* aCx, JS::Handle aGivenProto) { @@ -181,7 +187,7 @@ FileSystemBase* Directory::GetFileSystem(ErrorResult& aRv) { } RefPtr fs = new OSFileSystem(path); - fs->Init(mParent); + fs->Init(mGlobal); mFileSystem = fs; } diff --git a/dom/filesystem/Directory.h b/dom/filesystem/Directory.h index f271dcb06f..cfd05bf18b 100644 --- a/dom/filesystem/Directory.h +++ b/dom/filesystem/Directory.h @@ -28,13 +28,13 @@ class Directory final : public nsISupports, public nsWrapperCache { const nsAString& aRealPath, ErrorResult& aRv); - static already_AddRefed Create(nsISupports* aParent, + static already_AddRefed Create(nsIGlobalObject* aGlobal, nsIFile* aDirectory, FileSystemBase* aFileSystem = 0); // ========= Begin WebIDL bindings. 
=========== - nsISupports* GetParentObject() const; + nsIGlobalObject* GetParentObject() const; virtual JSObject* WrapObject(JSContext* aCx, JS::Handle aGivenProto) override; @@ -84,7 +84,7 @@ class Directory final : public nsISupports, public nsWrapperCache { nsIFile* GetInternalNsIFile() const { return mFile; } private: - Directory(nsISupports* aParent, nsIFile* aFile, + Directory(nsIGlobalObject* aGlobal, nsIFile* aFile, FileSystemBase* aFileSystem = nullptr); ~Directory(); @@ -93,7 +93,7 @@ class Directory final : public nsISupports, public nsWrapperCache { */ nsresult DOMPathToRealPath(const nsAString& aPath, nsIFile** aFile) const; - nsCOMPtr mParent; + nsCOMPtr mGlobal; RefPtr mFileSystem; nsCOMPtr mFile; diff --git a/dom/filesystem/FileSystemBase.cpp b/dom/filesystem/FileSystemBase.cpp index 7cefb77bdc..1e73a549de 100644 --- a/dom/filesystem/FileSystemBase.cpp +++ b/dom/filesystem/FileSystemBase.cpp @@ -19,7 +19,7 @@ void FileSystemBase::Shutdown() { mShutdown = true; } -nsISupports* FileSystemBase::GetParentObject() const { +nsIGlobalObject* FileSystemBase::GetParentObject() const { AssertIsOnOwningThread(); return nullptr; } diff --git a/dom/filesystem/FileSystemBase.h b/dom/filesystem/FileSystemBase.h index b7d893c78a..de680d09f6 100644 --- a/dom/filesystem/FileSystemBase.h +++ b/dom/filesystem/FileSystemBase.h @@ -28,7 +28,7 @@ class FileSystemBase { virtual bool ShouldCreateDirectory() = 0; - virtual nsISupports* GetParentObject() const; + virtual nsIGlobalObject* GetParentObject() const; virtual void GetDirectoryName(nsIFile* aFile, nsAString& aRetval, ErrorResult& aRv) const; diff --git a/dom/filesystem/GetDirectoryListingTask.cpp b/dom/filesystem/GetDirectoryListingTask.cpp index 8e421b7428..af614e91d4 100644 --- a/dom/filesystem/GetDirectoryListingTask.cpp +++ b/dom/filesystem/GetDirectoryListingTask.cpp @@ -34,12 +34,8 @@ GetDirectoryListingTaskChild::Create(FileSystemBase* aFileSystem, MOZ_ASSERT(aDirectory); 
aFileSystem->AssertIsOnOwningThread(); - nsCOMPtr globalObject = - do_QueryInterface(aFileSystem->GetParentObject()); - if (NS_WARN_IF(!globalObject)) { - aRv.Throw(NS_ERROR_FAILURE); - return nullptr; - } + nsCOMPtr globalObject = aFileSystem->GetParentObject(); + MOZ_ASSERT(globalObject); RefPtr task = new GetDirectoryListingTaskChild( globalObject, aFileSystem, aDirectory, aTargetPath, aFilters); @@ -120,8 +116,10 @@ void GetDirectoryListingTaskChild::SetSuccessRequestResult( RefPtr blobImpl = IPCBlobUtils::Deserialize(d.blob()); MOZ_ASSERT(blobImpl); - RefPtr file = - File::Create(mFileSystem->GetParentObject(), blobImpl); + nsCOMPtr globalObject = mFileSystem->GetParentObject(); + MOZ_ASSERT(globalObject); + + RefPtr file = File::Create(globalObject, blobImpl); MOZ_ASSERT(file); ofd->SetAsFile() = file; diff --git a/dom/filesystem/GetFileOrDirectoryTask.cpp b/dom/filesystem/GetFileOrDirectoryTask.cpp index 0f13cb9fce..d6432ced62 100644 --- a/dom/filesystem/GetFileOrDirectoryTask.cpp +++ b/dom/filesystem/GetFileOrDirectoryTask.cpp @@ -28,8 +28,7 @@ GetFileOrDirectoryTaskChild::Create(FileSystemBase* aFileSystem, MOZ_ASSERT(NS_IsMainThread(), "Only call on main thread!"); MOZ_ASSERT(aFileSystem); - nsCOMPtr globalObject = - do_QueryInterface(aFileSystem->GetParentObject()); + nsCOMPtr globalObject = aFileSystem->GetParentObject(); if (NS_WARN_IF(!globalObject)) { aRv.Throw(NS_ERROR_FAILURE); return nullptr; @@ -89,8 +88,13 @@ void GetFileOrDirectoryTaskChild::SetSuccessRequestResult( RefPtr blobImpl = IPCBlobUtils::Deserialize(r.blob()); MOZ_ASSERT(blobImpl); - mResultFile = File::Create(mFileSystem->GetParentObject(), blobImpl); - MOZ_ASSERT(mResultFile); + nsCOMPtr globalObject = mFileSystem->GetParentObject(); + MOZ_ASSERT(globalObject); + + mResultFile = File::Create(globalObject, blobImpl); + if (NS_WARN_IF(!mResultFile)) { + aRv.Throw(NS_ERROR_FAILURE); + } break; } case FileSystemResponseValue::TFileSystemDirectoryResponse: { diff --git 
a/dom/filesystem/GetFilesHelper.cpp b/dom/filesystem/GetFilesHelper.cpp index e9aa91b360..4e0bb80abd 100644 --- a/dom/filesystem/GetFilesHelper.cpp +++ b/dom/filesystem/GetFilesHelper.cpp @@ -22,16 +22,13 @@ class ReleaseRunnable final : public Runnable { public: static void MaybeReleaseOnMainThread( nsTArray>& aPromises, - nsTArray>& aCallbacks, - Sequence>& aFiles, - already_AddRefed aGlobal) { - nsCOMPtr global(aGlobal); + nsTArray>& aCallbacks) { if (NS_IsMainThread()) { return; } RefPtr runnable = - new ReleaseRunnable(aPromises, aCallbacks, aFiles, global.forget()); + new ReleaseRunnable(aPromises, aCallbacks); FileSystemUtils::DispatchRunnable(nullptr, runnable.forget()); } @@ -41,28 +38,20 @@ class ReleaseRunnable final : public Runnable { mPromises.Clear(); mCallbacks.Clear(); - mFiles.Clear(); - mGlobal = nullptr; return NS_OK; } private: ReleaseRunnable(nsTArray>& aPromises, - nsTArray>& aCallbacks, - Sequence>& aFiles, - already_AddRefed aGlobal) + nsTArray>& aCallbacks) : Runnable("dom::ReleaseRunnable") { mPromises.SwapElements(aPromises); mCallbacks.SwapElements(aCallbacks); - mFiles.SwapElements(aFiles); - mGlobal = aGlobal; } nsTArray> mPromises; nsTArray> mCallbacks; - Sequence> mFiles; - nsCOMPtr mGlobal; }; } // namespace @@ -71,15 +60,14 @@ class ReleaseRunnable final : public Runnable { // GetFilesHelper Base class already_AddRefed GetFilesHelper::Create( - nsIGlobalObject* aGlobal, const nsTArray& aFilesOrDirectory, bool aRecursiveFlag, ErrorResult& aRv) { RefPtr helper; if (XRE_IsParentProcess()) { - helper = new GetFilesHelper(aGlobal, aRecursiveFlag); + helper = new GetFilesHelper(aRecursiveFlag); } else { - helper = new GetFilesHelperChild(aGlobal, aRecursiveFlag); + helper = new GetFilesHelperChild(aRecursiveFlag); } nsAutoString directoryPath; @@ -87,7 +75,8 @@ already_AddRefed GetFilesHelper::Create( for (uint32_t i = 0; i < aFilesOrDirectory.Length(); ++i) { const OwningFileOrDirectory& data = aFilesOrDirectory[i]; if (data.IsFile()) 
{ - if (!helper->mFiles.AppendElement(data.GetAsFile(), fallible)) { + if (!helper->mTargetBlobImplArray.AppendElement(data.GetAsFile()->Impl(), + fallible)) { aRv.Throw(NS_ERROR_OUT_OF_MEMORY); return nullptr; } @@ -115,7 +104,7 @@ already_AddRefed GetFilesHelper::Create( return helper.forget(); } - MOZ_ASSERT(helper->mFiles.IsEmpty()); + MOZ_ASSERT(helper->mTargetBlobImplArray.IsEmpty()); helper->SetDirectoryPath(directoryPath); helper->Work(aRv); @@ -126,18 +115,16 @@ already_AddRefed GetFilesHelper::Create( return helper.forget(); } -GetFilesHelper::GetFilesHelper(nsIGlobalObject* aGlobal, bool aRecursiveFlag) +GetFilesHelper::GetFilesHelper(bool aRecursiveFlag) : Runnable("GetFilesHelper"), GetFilesHelperBase(aRecursiveFlag), - mGlobal(aGlobal), mListingCompleted(false), mErrorResult(NS_OK), mMutex("GetFilesHelper::mMutex"), mCanceled(false) {} GetFilesHelper::~GetFilesHelper() { - ReleaseRunnable::MaybeReleaseOnMainThread(mPromises, mCallbacks, mFiles, - mGlobal.forget()); + ReleaseRunnable::MaybeReleaseOnMainThread(mPromises, mCallbacks); } void GetFilesHelper::AddPromise(Promise* aPromise) { @@ -167,8 +154,6 @@ void GetFilesHelper::AddCallback(GetFilesCallback* aCallback) { } void GetFilesHelper::Unlink() { - mGlobal = nullptr; - mFiles.Clear(); mPromises.Clear(); mCallbacks.Clear(); @@ -182,8 +167,6 @@ void GetFilesHelper::Unlink() { void GetFilesHelper::Traverse(nsCycleCollectionTraversalCallback& cb) { GetFilesHelper* tmp = this; - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGlobal); - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mFiles); NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mPromises); } @@ -221,8 +204,6 @@ GetFilesHelper::Run() { return NS_OK; } - RunMainThread(); - OperationCompleted(); return NS_OK; } @@ -272,29 +253,6 @@ void GetFilesHelper::RunIO() { mErrorResult = ExploreDirectory(domPath, file); } -void GetFilesHelper::RunMainThread() { - MOZ_ASSERT(NS_IsMainThread()); - MOZ_ASSERT(!mDirectoryPath.IsEmpty()); - MOZ_ASSERT(!mListingCompleted); - - // If there is an 
error, do nothing. - if (NS_FAILED(mErrorResult)) { - return; - } - - // Create the sequence of Files. - for (uint32_t i = 0; i < mTargetBlobImplArray.Length(); ++i) { - RefPtr domFile = File::Create(mGlobal, mTargetBlobImplArray[i]); - MOZ_ASSERT(domFile); - - if (!mFiles.AppendElement(domFile, fallible)) { - mErrorResult = NS_ERROR_OUT_OF_MEMORY; - mFiles.Clear(); - return; - } - } -} - nsresult GetFilesHelperBase::ExploreDirectory(const nsAString& aDOMPath, nsIFile* aFile) { MOZ_ASSERT(!NS_IsMainThread()); @@ -439,13 +397,33 @@ void GetFilesHelper::ResolveOrRejectPromise(Promise* aPromise) { MOZ_ASSERT(mListingCompleted); MOZ_ASSERT(aPromise); + Sequence> files; + + if (NS_SUCCEEDED(mErrorResult)) { + for (uint32_t i = 0; i < mTargetBlobImplArray.Length(); ++i) { + RefPtr domFile = + File::Create(aPromise->GetParentObject(), mTargetBlobImplArray[i]); + if (NS_WARN_IF(!domFile)) { + mErrorResult = NS_ERROR_FAILURE; + files.Clear(); + break; + } + + if (!files.AppendElement(domFile, fallible)) { + mErrorResult = NS_ERROR_OUT_OF_MEMORY; + files.Clear(); + break; + } + } + } + // Error propagation. 
if (NS_FAILED(mErrorResult)) { aPromise->MaybeReject(mErrorResult); return; } - aPromise->MaybeResolve(mFiles); + aPromise->MaybeResolve(files); } void GetFilesHelper::RunCallback(GetFilesCallback* aCallback) { @@ -453,7 +431,7 @@ void GetFilesHelper::RunCallback(GetFilesCallback* aCallback) { MOZ_ASSERT(mListingCompleted); MOZ_ASSERT(aCallback); - aCallback->Callback(mErrorResult, mFiles); + aCallback->Callback(mErrorResult, mTargetBlobImplArray); } /////////////////////////////////////////////////////////////////////////////// @@ -494,10 +472,7 @@ bool GetFilesHelperChild::AppendBlobImpl(BlobImpl* aBlobImpl) { MOZ_ASSERT(aBlobImpl); MOZ_ASSERT(aBlobImpl->IsFile()); - RefPtr file = File::Create(mGlobal, aBlobImpl); - MOZ_ASSERT(file); - - return mFiles.AppendElement(file, fallible); + return mTargetBlobImplArray.AppendElement(aBlobImpl, fallible); } void GetFilesHelperChild::Finished(nsresult aError) { @@ -521,7 +496,7 @@ class GetFilesHelperParentCallback final : public GetFilesCallback { } void Callback(nsresult aStatus, - const Sequence>& aFiles) override { + const FallibleTArray>& aBlobImpls) override { if (NS_FAILED(aStatus)) { mParent->mContentParent->SendGetFilesResponseAndForget( mParent->mUUID, GetFilesResponseFailure(aStatus)); @@ -531,11 +506,11 @@ class GetFilesHelperParentCallback final : public GetFilesCallback { GetFilesResponseSuccess success; nsTArray& ipcBlobs = success.blobs(); - ipcBlobs.SetLength(aFiles.Length()); + ipcBlobs.SetLength(aBlobImpls.Length()); - for (uint32_t i = 0; i < aFiles.Length(); ++i) { + for (uint32_t i = 0; i < aBlobImpls.Length(); ++i) { nsresult rv = IPCBlobUtils::Serialize( - aFiles[i]->Impl(), mParent->mContentParent, ipcBlobs[i]); + aBlobImpls[i], mParent->mContentParent, ipcBlobs[i]); if (NS_WARN_IF(NS_FAILED(rv))) { mParent->mContentParent->SendGetFilesResponseAndForget( mParent->mUUID, GetFilesResponseFailure(NS_ERROR_OUT_OF_MEMORY)); @@ -555,7 +530,7 @@ class GetFilesHelperParentCallback final : public 
GetFilesCallback { GetFilesHelperParent::GetFilesHelperParent(const nsID& aUUID, ContentParent* aContentParent, bool aRecursiveFlag) - : GetFilesHelper(nullptr, aRecursiveFlag), + : GetFilesHelper(aRecursiveFlag), mContentParent(aContentParent), mUUID(aUUID) {} diff --git a/dom/filesystem/GetFilesHelper.h b/dom/filesystem/GetFilesHelper.h index cb87c4f45e..0624400ebb 100644 --- a/dom/filesystem/GetFilesHelper.h +++ b/dom/filesystem/GetFilesHelper.h @@ -28,7 +28,7 @@ class GetFilesCallback { NS_INLINE_DECL_REFCOUNTING(GetFilesCallback); virtual void Callback(nsresult aStatus, - const Sequence>& aFiles) = 0; + const FallibleTArray>& aBlobImpls) = 0; protected: virtual ~GetFilesCallback() {} @@ -63,7 +63,6 @@ class GetFilesHelper : public Runnable, public GetFilesHelperBase { public: static already_AddRefed Create( - nsIGlobalObject* aGlobal, const nsTArray& aFilesOrDirectory, bool aRecursiveFlag, ErrorResult& aRv); @@ -76,7 +75,7 @@ class GetFilesHelper : public Runnable, public GetFilesHelperBase { void Traverse(nsCycleCollectionTraversalCallback& cb); protected: - GetFilesHelper(nsIGlobalObject* aGlobal, bool aRecursiveFlag); + explicit GetFilesHelper(bool aRecursiveFlag); virtual ~GetFilesHelper(); @@ -98,22 +97,15 @@ class GetFilesHelper : public Runnable, public GetFilesHelperBase { void RunIO(); - void RunMainThread(); - void OperationCompleted(); void ResolveOrRejectPromise(Promise* aPromise); void RunCallback(GetFilesCallback* aCallback); - nsCOMPtr mGlobal; - bool mListingCompleted; nsString mDirectoryPath; - // This is the real File sequence that we expose via Promises. - Sequence> mFiles; - // Error code to propagate. 
nsresult mErrorResult; @@ -128,8 +120,8 @@ class GetFilesHelper : public Runnable, public GetFilesHelperBase { class GetFilesHelperChild final : public GetFilesHelper { public: - GetFilesHelperChild(nsIGlobalObject* aGlobal, bool aRecursiveFlag) - : GetFilesHelper(aGlobal, aRecursiveFlag), mPendingOperation(false) {} + explicit GetFilesHelperChild(bool aRecursiveFlag) + : GetFilesHelper(aRecursiveFlag), mPendingOperation(false) {} virtual void Work(ErrorResult& aRv) override; diff --git a/dom/filesystem/GetFilesTask.cpp b/dom/filesystem/GetFilesTask.cpp index ba9206e75b..8079733871 100644 --- a/dom/filesystem/GetFilesTask.cpp +++ b/dom/filesystem/GetFilesTask.cpp @@ -30,8 +30,7 @@ already_AddRefed GetFilesTaskChild::Create( MOZ_ASSERT(aDirectory); aFileSystem->AssertIsOnOwningThread(); - nsCOMPtr globalObject = - do_QueryInterface(aFileSystem->GetParentObject()); + nsCOMPtr globalObject = aFileSystem->GetParentObject(); if (NS_WARN_IF(!globalObject)) { aRv.Throw(NS_ERROR_FAILURE); return nullptr; @@ -105,12 +104,19 @@ void GetFilesTaskChild::SetSuccessRequestResult( return; } + nsCOMPtr globalObject = mFileSystem->GetParentObject(); + MOZ_ASSERT(globalObject); + for (uint32_t i = 0; i < r.data().Length(); ++i) { const FileSystemFileResponse& data = r.data()[i]; RefPtr blobImpl = IPCBlobUtils::Deserialize(data.blob()); MOZ_ASSERT(blobImpl); - mTargetData[i] = File::Create(mFileSystem->GetParentObject(), blobImpl); + mTargetData[i] = File::Create(globalObject, blobImpl); + if (NS_WARN_IF(!mTargetData[i])) { + aRv.Throw(NS_ERROR_FAILURE); + return; + } } } diff --git a/dom/filesystem/OSFileSystem.cpp b/dom/filesystem/OSFileSystem.cpp index 8d201a8f3f..455072c6c3 100644 --- a/dom/filesystem/OSFileSystem.cpp +++ b/dom/filesystem/OSFileSystem.cpp @@ -23,29 +23,24 @@ already_AddRefed OSFileSystem::Clone() { AssertIsOnOwningThread(); RefPtr fs = new OSFileSystem(mLocalRootPath); - if (mParent) { - fs->Init(mParent); + if (mGlobal) { + fs->Init(mGlobal); } return 
fs.forget(); } -void OSFileSystem::Init(nsISupports* aParent) { +void OSFileSystem::Init(nsIGlobalObject* aGlobal) { AssertIsOnOwningThread(); - MOZ_ASSERT(!mParent, "No duple Init() calls"); - MOZ_ASSERT(aParent); + MOZ_ASSERT(!mGlobal, "No duple Init() calls"); + MOZ_ASSERT(aGlobal); - mParent = aParent; - -#ifdef DEBUG - nsCOMPtr obj = do_QueryInterface(aParent); - MOZ_ASSERT(obj); -#endif + mGlobal = aGlobal; } -nsISupports* OSFileSystem::GetParentObject() const { +nsIGlobalObject* OSFileSystem::GetParentObject() const { AssertIsOnOwningThread(); - return mParent; + return mGlobal; } bool OSFileSystem::IsSafeFile(nsIFile* aFile) const { @@ -66,14 +61,14 @@ bool OSFileSystem::IsSafeDirectory(Directory* aDir) const { void OSFileSystem::Unlink() { AssertIsOnOwningThread(); - mParent = nullptr; + mGlobal = nullptr; } void OSFileSystem::Traverse(nsCycleCollectionTraversalCallback& cb) { AssertIsOnOwningThread(); OSFileSystem* tmp = this; - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mParent); + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mGlobal); } void OSFileSystem::SerializeDOMPath(nsAString& aOutput) const { diff --git a/dom/filesystem/OSFileSystem.h b/dom/filesystem/OSFileSystem.h index d3936c0cc0..dc01e6bc11 100644 --- a/dom/filesystem/OSFileSystem.h +++ b/dom/filesystem/OSFileSystem.h @@ -14,7 +14,7 @@ class OSFileSystem final : public FileSystemBase { public: explicit OSFileSystem(const nsAString& aRootDir); - void Init(nsISupports* aParent); + void Init(nsIGlobalObject* aGlobal); // Overrides FileSystemBase @@ -27,7 +27,7 @@ class OSFileSystem final : public FileSystemBase { return false; } - virtual nsISupports* GetParentObject() const override; + virtual nsIGlobalObject* GetParentObject() const override; virtual bool IsSafeFile(nsIFile* aFile) const override; @@ -42,7 +42,7 @@ class OSFileSystem final : public FileSystemBase { private: virtual ~OSFileSystem() {} - nsCOMPtr mParent; + nsCOMPtr mGlobal; }; class OSFileSystemParent final : public FileSystemBase { @@ -58,7 +58,7 
@@ class OSFileSystemParent final : public FileSystemBase { virtual bool ShouldCreateDirectory() override { return false; } - virtual nsISupports* GetParentObject() const override { + virtual nsIGlobalObject* GetParentObject() const override { MOZ_CRASH("This should not be called on the PBackground thread."); return nullptr; } diff --git a/dom/filesystem/compat/FileSystemFileEntry.cpp b/dom/filesystem/compat/FileSystemFileEntry.cpp index 5d7206173e..44e96999b4 100644 --- a/dom/filesystem/compat/FileSystemFileEntry.cpp +++ b/dom/filesystem/compat/FileSystemFileEntry.cpp @@ -28,8 +28,6 @@ class FileCallbackRunnable final : public Runnable { // Here we clone the File object. RefPtr file = File::Create(mFile->GetParentObject(), mFile->Impl()); - MOZ_ASSERT(file); - mCallback->Call(*file); return NS_OK; } diff --git a/dom/html/HTMLCanvasElement.cpp b/dom/html/HTMLCanvasElement.cpp index 037330f4cc..9d31384ee4 100644 --- a/dom/html/HTMLCanvasElement.cpp +++ b/dom/html/HTMLCanvasElement.cpp @@ -879,8 +879,11 @@ nsresult HTMLCanvasElement::MozGetAsFileImpl(const nsAString& aName, do_QueryInterface(OwnerDoc()->GetScopeObject()); // The File takes ownership of the buffer - RefPtr file = - File::CreateMemoryFile(win, imgData, imgSize, aName, type, PR_Now()); + RefPtr file = File::CreateMemoryFile(win->AsGlobal(), imgData, imgSize, + aName, type, PR_Now()); + if (NS_WARN_IF(!file)) { + return NS_ERROR_FAILURE; + } file.forget(aResult); return NS_OK; diff --git a/dom/html/HTMLInputElement.cpp b/dom/html/HTMLInputElement.cpp index 96c9b8cd9d..3992beca93 100644 --- a/dom/html/HTMLInputElement.cpp +++ b/dom/html/HTMLInputElement.cpp @@ -227,12 +227,23 @@ class DispatchChangeEventCallback final : public GetFilesCallback { MOZ_ASSERT(aInputElement); } - virtual void Callback(nsresult aStatus, - const Sequence>& aFiles) override { + virtual void Callback( + nsresult aStatus, + const FallibleTArray>& aBlobImpls) override { + if (!mInputElement->GetOwnerGlobal()) { + return; + } + 
nsTArray array; - for (uint32_t i = 0; i < aFiles.Length(); ++i) { + for (uint32_t i = 0; i < aBlobImpls.Length(); ++i) { OwningFileOrDirectory* element = array.AppendElement(); - element->SetAsFile() = aFiles[i]; + RefPtr file = + File::Create(mInputElement->GetOwnerGlobal(), aBlobImpls[i]); + if (NS_WARN_IF(!file)) { + return; + } + + element->SetAsFile() = file; } mInputElement->SetFilesOrDirectories(array, true); @@ -522,6 +533,11 @@ HTMLInputElement::nsFilePickerShownCallback::Done(int16_t aResult) { // So, we can safely send one by ourself. mInput->SetFilesOrDirectories(newFilesOrDirectories, true); + // mInput(HTMLInputElement) has no scriptGlobalObject, don't create + // DispatchChangeEventCallback + if (!mInput->GetOwnerGlobal()) { + return NS_OK; + } RefPtr dispatchChangeEventCallback = new DispatchChangeEventCallback(mInput); @@ -1989,7 +2005,9 @@ void HTMLInputElement::MozSetFileArray( nsTArray files; for (uint32_t i = 0; i < aFiles.Length(); ++i) { RefPtr file = File::Create(global, aFiles[i].get()->Impl()); - MOZ_ASSERT(file); + if (NS_WARN_IF(!file)) { + return; + } OwningFileOrDirectory* element = files.AppendElement(); element->SetAsFile() = file; @@ -2037,6 +2055,10 @@ void HTMLInputElement::MozSetFileNameArray(const Sequence& aFileNames, } RefPtr domFile = File::CreateFromFile(global, file); + if (NS_WARN_IF(!domFile)) { + aRv.Throw(NS_ERROR_FAILURE); + return; + } OwningFileOrDirectory* element = files.AppendElement(); element->SetAsFile() = domFile; @@ -2063,7 +2085,7 @@ void HTMLInputElement::MozSetDirectory(const nsAString& aDirectoryPath, return; } - RefPtr directory = Directory::Create(window, file); + RefPtr directory = Directory::Create(window->AsGlobal(), file); MOZ_ASSERT(directory); nsTArray array; @@ -6059,8 +6081,10 @@ static nsTArray RestoreFileContentData( continue; } - RefPtr file = File::Create(aWindow, it.get_BlobImpl()); - MOZ_ASSERT(file); + RefPtr file = File::Create(aWindow->AsGlobal(), it.get_BlobImpl()); + if 
(NS_WARN_IF(!file)) { + continue; + } OwningFileOrDirectory* element = res.AppendElement(); element->SetAsFile() = file; @@ -6073,7 +6097,8 @@ static nsTArray RestoreFileContentData( continue; } - RefPtr directory = Directory::Create(aWindow, file); + RefPtr directory = + Directory::Create(aWindow->AsGlobal(), file); MOZ_ASSERT(directory); OwningFileOrDirectory* element = res.AppendElement(); @@ -7074,17 +7099,10 @@ GetFilesHelper* HTMLInputElement::GetOrCreateGetFilesHelper(bool aRecursiveFlag, ErrorResult& aRv) { MOZ_ASSERT(mFileData); - nsCOMPtr global = OwnerDoc()->GetScopeObject(); - MOZ_ASSERT(global); - if (!global) { - aRv.Throw(NS_ERROR_FAILURE); - return nullptr; - } - if (aRecursiveFlag) { if (!mFileData->mGetFilesRecursiveHelper) { mFileData->mGetFilesRecursiveHelper = GetFilesHelper::Create( - global, GetFilesOrDirectoriesInternal(), aRecursiveFlag, aRv); + GetFilesOrDirectoriesInternal(), aRecursiveFlag, aRv); if (NS_WARN_IF(aRv.Failed())) { return nullptr; } @@ -7095,7 +7113,7 @@ GetFilesHelper* HTMLInputElement::GetOrCreateGetFilesHelper(bool aRecursiveFlag, if (!mFileData->mGetFilesNonRecursiveHelper) { mFileData->mGetFilesNonRecursiveHelper = GetFilesHelper::Create( - global, GetFilesOrDirectoriesInternal(), aRecursiveFlag, aRv); + GetFilesOrDirectoriesInternal(), aRecursiveFlag, aRv); if (NS_WARN_IF(aRv.Failed())) { return nullptr; } diff --git a/dom/html/HTMLMediaElement.cpp b/dom/html/HTMLMediaElement.cpp index 52148b81c3..148423bd79 100644 --- a/dom/html/HTMLMediaElement.cpp +++ b/dom/html/HTMLMediaElement.cpp @@ -4995,6 +4995,14 @@ nsresult HTMLMediaElement::FinishDecoderSetup(MediaDecoder* aDecoder) { // This will also do an AddRemoveSelfReference. NotifyOwnerDocumentActivityChanged(); + if (!mDecoder) { + // NotifyOwnerDocumentActivityChanged may shutdown the decoder if the + // owning document is inactive and we're in the EME case. We could try and + // handle this, but at the time of writing it's a pretty niche case, so just + // bail. 
+ return NS_ERROR_FAILURE; + } + if (mPausedForInactiveDocumentOrChannel) { mDecoder->Suspend(); } diff --git a/dom/indexedDB/ActorsChild.cpp b/dom/indexedDB/ActorsChild.cpp index 835802d246..6ddeb2f1f8 100644 --- a/dom/indexedDB/ActorsChild.cpp +++ b/dom/indexedDB/ActorsChild.cpp @@ -589,6 +589,7 @@ auto DeserializeStructuredCloneFiles( RefPtr blob = Blob::Create(aDatabase->GetOwnerGlobal(), blobImpl); + MOZ_ASSERT(blob); files.EmplaceBack(StructuredCloneFile::eStructuredClone, std::move(blob)); diff --git a/dom/indexedDB/IDBObjectStore.cpp b/dom/indexedDB/IDBObjectStore.cpp index 0412256b8e..d646588f28 100644 --- a/dom/indexedDB/IDBObjectStore.cpp +++ b/dom/indexedDB/IDBObjectStore.cpp @@ -623,6 +623,27 @@ class ValueDeserializationHelper { RefPtr blob = aFile.mBlob; + // It can happen that this IDB is chrome code, so there is no parent, but + // still we want to set a correct parent for the new File object. + nsCOMPtr global; + if (NS_IsMainThread()) { + if (aDatabase && aDatabase->GetParentObject()) { + global = aDatabase->GetParentObject(); + } else { + global = xpc::CurrentNativeGlobal(aCx); + } + } else { + WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate(); + MOZ_ASSERT(workerPrivate); + + WorkerGlobalScope* globalScope = workerPrivate->GlobalScope(); + MOZ_ASSERT(globalScope); + + global = do_QueryObject(globalScope); + } + + MOZ_ASSERT(global); + /* If we are creating an index, we do not have an mBlob but do have an * mInfo. Unlike other index or upgrade cases, we do need a real-looking * Blob/File instance because the index's key path can reference their @@ -638,30 +659,12 @@ class ValueDeserializationHelper { const RefPtr impl = new FileBlobImpl(file); impl->SetFileId(aFile.mFileInfo->Id()); - blob = File::Create(nullptr, impl); - } - - // It can happen that this IDB is chrome code, so there is no parent, but - // still we want to set a correct parent for the new File object. 
- nsCOMPtr parent; - if (NS_IsMainThread()) { - if (aDatabase && aDatabase->GetParentObject()) { - parent = aDatabase->GetParentObject(); - } else { - parent = xpc::CurrentNativeGlobal(aCx); + blob = File::Create(global, impl); + if (NS_WARN_IF(!blob)) { + return false; } - } else { - WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate(); - MOZ_ASSERT(workerPrivate); - - WorkerGlobalScope* globalScope = workerPrivate->GlobalScope(); - MOZ_ASSERT(globalScope); - - parent = do_QueryObject(globalScope); } - MOZ_ASSERT(parent); - if (aData.tag == SCTAG_DOM_BLOB) { blob->Impl()->SetLazyData(VoidString(), aData.type, aData.size, INT64_MAX); @@ -676,6 +679,10 @@ class ValueDeserializationHelper { const RefPtr exposedBlob = Blob::Create(blob->GetParentObject(), blob->Impl()); + if (NS_WARN_IF(!exposedBlob)) { + return false; + } + MOZ_ASSERT(exposedBlob); JS::Rooted wrappedBlob(aCx); if (!ToJSValue(aCx, exposedBlob, &wrappedBlob)) { diff --git a/dom/media/AudioCaptureTrack.cpp b/dom/media/AudioCaptureTrack.cpp index cc0a419d92..1ccd81a8b4 100644 --- a/dom/media/AudioCaptureTrack.cpp +++ b/dom/media/AudioCaptureTrack.cpp @@ -33,7 +33,7 @@ AudioCaptureTrack::AudioCaptureTrack(TrackRate aRate) mStarted(false) { MOZ_ASSERT(NS_IsMainThread()); MOZ_COUNT_CTOR(AudioCaptureTrack); - mMixer.AddCallback(this); + mMixer.AddCallback(WrapNotNull(this)); } AudioCaptureTrack::~AudioCaptureTrack() { diff --git a/dom/media/AudioMixer.h b/dom/media/AudioMixer.h index 7f11d15ec8..8f7f761f13 100644 --- a/dom/media/AudioMixer.h +++ b/dom/media/AudioMixer.h @@ -6,10 +6,11 @@ #define MOZILLA_AUDIOMIXER_H_ #include "AudioSampleFormat.h" -#include "nsTArray.h" -#include "mozilla/PodOperations.h" -#include "mozilla/LinkedList.h" #include "AudioStream.h" +#include "nsTArray.h" +#include "mozilla/LinkedList.h" +#include "mozilla/NotNull.h" +#include "mozilla/PodOperations.h" namespace mozilla { @@ -53,6 +54,7 @@ class AudioMixer { for (MixerCallback* cb = mCallbacks.getFirst(); cb != 
nullptr; cb = cb->getNext()) { MixerCallbackReceiver* receiver = cb->mReceiver; + MOZ_ASSERT(receiver); receiver->MixerCallback(mMixedAudio.Elements(), AudioSampleTypeToFormat::Format, mChannels, mFrames, mSampleRate); @@ -85,7 +87,7 @@ class AudioMixer { } } - void AddCallback(MixerCallbackReceiver* aReceiver) { + void AddCallback(NotNull aReceiver) { mCallbacks.insertBack(new MixerCallback(aReceiver)); } @@ -121,9 +123,9 @@ class AudioMixer { class MixerCallback : public LinkedListElement { public: - explicit MixerCallback(MixerCallbackReceiver* aReceiver) + explicit MixerCallback(NotNull aReceiver) : mReceiver(aReceiver) {} - MixerCallbackReceiver* mReceiver; + NotNull mReceiver; }; /* Function that is called when the mixing is done. */ diff --git a/dom/media/AudioSegment.cpp b/dom/media/AudioSegment.cpp index cd8cd9d59f..73ce4fe4a8 100644 --- a/dom/media/AudioSegment.cpp +++ b/dom/media/AudioSegment.cpp @@ -3,7 +3,6 @@ * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "AudioSegment.h" - #include "AudioMixer.h" #include "AudioChannelFormat.h" #include @@ -30,16 +29,13 @@ void AudioSegment::ApplyVolume(float aVolume) { } } -void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, +void AudioSegment::ResampleChunks(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, uint32_t aOutRate) { if (mChunks.IsEmpty()) { return; } - MOZ_ASSERT( - aResampler || IsNull(), - "We can only be here without a resampler if this segment is null."); - AudioSampleFormat format = AUDIO_FORMAT_SILENCE; for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { if (ci->mBufferFormat != AUDIO_FORMAT_SILENCE) { @@ -53,10 +49,10 @@ void AudioSegment::ResampleChunks(SpeexResamplerState* aResampler, // the chunks duration. 
case AUDIO_FORMAT_SILENCE: case AUDIO_FORMAT_FLOAT32: - Resample(aResampler, aInRate, aOutRate); + Resample(aResampler, aResamplerChannelCount, aInRate, aOutRate); break; case AUDIO_FORMAT_S16: - Resample(aResampler, aInRate, aOutRate); + Resample(aResampler, aResamplerChannelCount, aInRate, aOutRate); break; default: MOZ_ASSERT(false); diff --git a/dom/media/AudioSegment.h b/dom/media/AudioSegment.h index deca9c5542..edc983ad5a 100644 --- a/dom/media/AudioSegment.h +++ b/dom/media/AudioSegment.h @@ -5,11 +5,14 @@ #ifndef MOZILLA_AUDIOSEGMENT_H_ #define MOZILLA_AUDIOSEGMENT_H_ +#include +#include "MediaTrackGraph.h" #include "MediaSegment.h" #include "AudioSampleFormat.h" #include "AudioChannelFormat.h" #include "SharedBuffer.h" #include "WebAudioUtils.h" +#include "nsAutoRef.h" #ifdef MOZILLA_INTERNAL_API # include "mozilla/TimeStamp.h" #endif @@ -201,7 +204,7 @@ struct AudioChunk { mPrincipalHandle = PRINCIPAL_HANDLE_NONE; } - size_t ChannelCount() const { return mChannelData.Length(); } + uint32_t ChannelCount() const { return mChannelData.Length(); } bool IsMuted() const { return mVolume == 0.0f; } @@ -292,14 +295,16 @@ class AudioSegment : public MediaSegmentBase { ~AudioSegment() {} - // Resample the whole segment in place. + // Resample the whole segment in place. `aResampler` is an instance of a + // resampler, initialized with `aResamplerChannelCount` channels. If this + // function finds a chunk with more channels, `aResampler` is destroyed and a + // new resampler is created, and `aResamplerChannelCount` is updated with the + // new channel count value. 
template - void Resample(SpeexResamplerState* aResampler, uint32_t aInRate, + void Resample(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, uint32_t aOutRate) { mDuration = 0; -#ifdef DEBUG - uint32_t segmentChannelCount = ChannelCount(); -#endif for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { AutoTArray, GUESS_AUDIO_CHANNELS> output; @@ -312,7 +317,17 @@ class AudioSegment : public MediaSegmentBase { continue; } uint32_t channels = c.mChannelData.Length(); - MOZ_ASSERT(channels == segmentChannelCount); + // This might introduce a discontinuity, but a channel count change in the + // middle of a stream is not that common. This also initializes the + // resampler as late as possible. + if (channels != *aResamplerChannelCount) { + SpeexResamplerState* state = + speex_resampler_init(channels, aInRate, aOutRate, + SPEEX_RESAMPLER_QUALITY_DEFAULT, nullptr); + MOZ_ASSERT(state); + aResampler.own(state); + *aResamplerChannelCount = channels; + } output.SetLength(channels); bufferPtrs.SetLength(channels); uint32_t inFrames = c.mDuration; @@ -325,8 +340,8 @@ class AudioSegment : public MediaSegmentBase { uint32_t outFrames = outSize; const T* in = static_cast(c.mChannelData[i]); - dom::WebAudioUtils::SpeexResamplerProcess(aResampler, i, in, &inFrames, - out, &outFrames); + dom::WebAudioUtils::SpeexResamplerProcess(aResampler.get(), i, in, + &inFrames, out, &outFrames); MOZ_ASSERT(inFrames == c.mDuration); bufferPtrs[i] = out; @@ -342,7 +357,8 @@ class AudioSegment : public MediaSegmentBase { } } - void ResampleChunks(SpeexResamplerState* aResampler, uint32_t aInRate, + void ResampleChunks(nsAutoRef& aResampler, + uint32_t* aResamplerChannelCount, uint32_t aInRate, uint32_t aOutRate); void AppendFrames(already_AddRefed aBuffer, const nsTArray& aChannelData, @@ -401,18 +417,17 @@ class AudioSegment : public MediaSegmentBase { // aChannelCount channels. 
void Mix(AudioMixer& aMixer, uint32_t aChannelCount, uint32_t aSampleRate); - int ChannelCount() { - NS_WARNING_ASSERTION( - !mChunks.IsEmpty(), - "Cannot query channel count on a AudioSegment with no chunks."); + // Returns the maximum channel count across all chunks in this segment. + uint32_t MaxChannelCount() { // Find the first chunk that has non-zero channels. A chunk that has zero // channels is just silence and we can simply discard it. + uint32_t channelCount = 0; for (ChunkIterator ci(*this); !ci.IsEnded(); ci.Next()) { if (ci->ChannelCount()) { - return ci->ChannelCount(); + channelCount = std::max(channelCount, ci->ChannelCount()); } } - return 0; + return channelCount; } static Type StaticType() { return AUDIO; } diff --git a/dom/media/GraphDriver.cpp b/dom/media/GraphDriver.cpp index 4ac6107031..01672152b4 100644 --- a/dom/media/GraphDriver.cpp +++ b/dom/media/GraphDriver.cpp @@ -453,10 +453,11 @@ TrackAndPromiseForOperation::TrackAndPromiseForOperation( mFlags(aFlags) {} AudioCallbackDriver::AudioCallbackDriver(MediaTrackGraphImpl* aGraphImpl, + uint32_t aOutputChannelCount, uint32_t aInputChannelCount, AudioInputType aAudioInputType) : GraphDriver(aGraphImpl), - mOutputChannels(0), + mOutputChannels(aOutputChannelCount), mSampleRate(0), mInputChannelCount(aInputChannelCount), mIterationDurationMS(MEDIA_GRAPH_TARGET_PERIOD_MS), @@ -553,8 +554,6 @@ bool AudioCallbackDriver::Init() { output.format = CUBEB_SAMPLE_FLOAT32NE; } - // Query and set the number of channels this AudioCallbackDriver will use.
- mOutputChannels = GraphImpl()->AudioOutputChannelCount(); if (!mOutputChannels) { LOG(LogLevel::Warning, ("Output number of channels is 0.")); Monitor2AutoLock lock(GraphImpl()->GetMonitor()); @@ -579,7 +578,10 @@ bool AudioCallbackDriver::Init() { SpillBuffer(mOutputChannels); output.channels = mOutputChannels; - output.layout = CUBEB_LAYOUT_UNDEFINED; + AudioConfig::ChannelLayout::ChannelMap channelMap = + AudioConfig::ChannelLayout(mOutputChannels).Map(); + + output.layout = static_cast(channelMap); output.prefs = CubebUtils::GetDefaultStreamPrefs(); #if !defined(XP_WIN) if (mInputDevicePreference == CUBEB_DEVICE_PREF_VOICE) { @@ -725,7 +727,7 @@ void AudioCallbackDriver::AddMixerCallback() { MOZ_ASSERT(OnGraphThread()); if (!mAddedMixer) { - mGraphImpl->mMixer.AddCallback(this); + mGraphImpl->mMixer.AddCallback(WrapNotNull(this)); mAddedMixer = true; } } @@ -799,7 +801,7 @@ long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer, // Don't add the callback until we're inited and ready if (!mAddedMixer) { - GraphImpl()->mMixer.AddCallback(this); + GraphImpl()->mMixer.AddCallback(WrapNotNull(this)); mAddedMixer = true; } @@ -896,6 +898,18 @@ long AudioCallbackDriver::DataCallback(const AudioDataValue* aInputBuffer, GraphImpl()->NotifyOutputData(aOutputBuffer, static_cast(aFrames), mSampleRate, mOutputChannels); +#ifdef XP_MACOSX + // This only happens when the output is on a macbookpro's external speaker, + // that are stereo, but let's just be safe. + if (mNeedsPanning && mOutputChannels == 2) { + // hard pan to the right + for (uint32_t i = 0; i < aFrames * 2; i += 2) { + aOutputBuffer[i + 1] += aOutputBuffer[i]; + aOutputBuffer[i] = 0.0; + } + } +#endif + if (!stillProcessing) { // About to hand over control of the graph. 
Do not start a new driver if // StateCallback() receives an error for this stream while the main thread @@ -996,7 +1010,7 @@ void AudioCallbackDriver::MixerCallback(AudioDataValue* aMixedBuffer, void AudioCallbackDriver::PanOutputIfNeeded(bool aMicrophoneActive) { #ifdef XP_MACOSX - cubeb_device* out; + cubeb_device* out = nullptr; int rv; char name[128]; size_t length = sizeof(name); @@ -1008,22 +1022,16 @@ void AudioCallbackDriver::PanOutputIfNeeded(bool aMicrophoneActive) { if (!strncmp(name, "MacBookPro", 10)) { if (cubeb_stream_get_current_device(mAudioStream, &out) == CUBEB_OK) { + MOZ_ASSERT(out); // Check if we are currently outputing sound on external speakers. - if (!strcmp(out->output_name, "ispk")) { + if (out->output_name && !strcmp(out->output_name, "ispk")) { // Pan everything to the right speaker. - if (aMicrophoneActive) { - if (cubeb_stream_set_panning(mAudioStream, 1.0) != CUBEB_OK) { - NS_WARNING("Could not pan audio output to the right."); - } - } else { - if (cubeb_stream_set_panning(mAudioStream, 0.0) != CUBEB_OK) { - NS_WARNING("Could not pan audio output to the center."); - } - } + LOG(LogLevel::Debug, ("Using the built-in speakers, with%s audio input", + aMicrophoneActive ? 
"" : "out")); + mNeedsPanning = aMicrophoneActive; } else { - if (cubeb_stream_set_panning(mAudioStream, 0.0) != CUBEB_OK) { - NS_WARNING("Could not pan audio output to the center."); - } + LOG(LogLevel::Debug, ("Using an external output device")); + mNeedsPanning = false; } cubeb_stream_device_destroy(mAudioStream, out); } @@ -1038,7 +1046,12 @@ void AudioCallbackDriver::DeviceChangedCallback() { Monitor2AutoLock mon(mGraphImpl->GetMonitor()); GraphImpl()->DeviceChanged(); #ifdef XP_MACOSX - PanOutputIfNeeded(mInputChannelCount); + RefPtr self(this); + bool hasInput = mInputChannelCount; + NS_DispatchBackgroundTask(NS_NewRunnableFunction( + "PanOutputIfNeeded", [self{std::move(self)}, hasInput]() { + self->PanOutputIfNeeded(hasInput); + })); #endif } diff --git a/dom/media/GraphDriver.h b/dom/media/GraphDriver.h index 146da89269..795a75c462 100644 --- a/dom/media/GraphDriver.h +++ b/dom/media/GraphDriver.h @@ -129,7 +129,7 @@ class GraphDriver { * before being started again. */ virtual void Start() = 0; /* Shutdown GraphDriver (synchronously) */ - virtual void Shutdown() = 0; + MOZ_CAN_RUN_SCRIPT virtual void Shutdown() = 0; /* Rate at which the GraphDriver runs, in ms. This can either be user * controlled (because we are using a {System,Offline}ClockDriver, and decide * how often we want to wakeup/how much we want to process per iteration), or @@ -231,7 +231,7 @@ class ThreadedDriver : public GraphDriver { void WaitForNextIteration() override; void WakeUp() override; void Start() override; - void Shutdown() override; + MOZ_CAN_RUN_SCRIPT void Shutdown() override; /** * Runs main control loop on the graph thread. Normally a single invocation * of this runs for the entire lifetime of the graph thread. @@ -352,14 +352,14 @@ class AudioCallbackDriver : public GraphDriver, public: /** If aInputChannelCount is zero, then this driver is output-only. 
*/ AudioCallbackDriver(MediaTrackGraphImpl* aGraphImpl, - uint32_t aInputChannelCount, + uint32_t aOutputChannelCount, uint32_t aInputChannelCount, AudioInputType aAudioInputType); virtual ~AudioCallbackDriver(); void Start() override; void WaitForNextIteration() override; void WakeUp() override; - void Shutdown() override; + MOZ_CAN_RUN_SCRIPT void Shutdown() override; #if defined(XP_WIN) void ResetDefaultDevice() override; #endif @@ -459,7 +459,7 @@ class AudioCallbackDriver : public GraphDriver, } /* MediaTrackGraphs are always down/up mixed to output channels. */ - uint32_t mOutputChannels; + const uint32_t mOutputChannels; /* The size of this buffer comes from the fact that some audio backends can * call back with a number of frames lower than one block (128 frames), so we * need to keep at most two block in the SpillBuffer, because we always round @@ -533,6 +533,12 @@ class AudioCallbackDriver : public GraphDriver, /* True if this driver was created from a driver created because of a previous * AudioCallbackDriver failure. */ bool mFromFallback; +#ifdef XP_MACOSX + /* When using the built-in speakers on macbook pro (13 and 15, all models), + * it's best to hard pan the audio on the right, to avoid feedback into the + * microphone that is located next to the left speaker. 
*/ + Atomic mNeedsPanning; +#endif }; class AsyncCubebTask : public Runnable { diff --git a/dom/media/GraphRunner.cpp b/dom/media/GraphRunner.cpp index 361333121c..be445dec85 100644 --- a/dom/media/GraphRunner.cpp +++ b/dom/media/GraphRunner.cpp @@ -14,30 +14,37 @@ namespace mozilla { -static void Start(void* aArg) { - GraphRunner* th = static_cast(aArg); - th->Run(); -} - -GraphRunner::GraphRunner(MediaTrackGraphImpl* aGraph) - : mMonitor("GraphRunner::mMonitor"), +GraphRunner::GraphRunner(MediaTrackGraphImpl* aGraph, + already_AddRefed aThread) + : Runnable("GraphRunner"), + mMonitor("GraphRunner::mMonitor"), mGraph(aGraph), mStateEnd(0), mStillProcessing(true), mThreadState(ThreadState::Wait), - // Note that mThread needs to be initialized last, as it may pre-empt the - // thread running this ctor and enter Run() with uninitialized members. - mThread(PR_CreateThread(PR_SYSTEM_THREAD, &Start, this, - PR_PRIORITY_URGENT, PR_GLOBAL_THREAD, - PR_JOINABLE_THREAD, 0)) { - MOZ_COUNT_CTOR(GraphRunner); + mThread(aThread) { + mThread->Dispatch(do_AddRef(this)); } GraphRunner::~GraphRunner() { - MOZ_COUNT_DTOR(GraphRunner); MOZ_ASSERT(mThreadState == ThreadState::Shutdown); } +/* static */ +already_AddRefed GraphRunner::Create(MediaTrackGraphImpl* aGraph) { + nsCOMPtr thread; + if (NS_WARN_IF(NS_FAILED( + NS_NewNamedThread("GraphRunner", getter_AddRefs(thread))))) { + return nullptr; + } + nsCOMPtr supportsPriority = do_QueryInterface(thread); + MOZ_ASSERT(supportsPriority); + MOZ_ALWAYS_SUCCEEDS( + supportsPriority->SetPriority(nsISupportsPriority::PRIORITY_HIGHEST)); + + return do_AddRef(new GraphRunner(aGraph, thread.forget())); +} + void GraphRunner::Shutdown() { { Monitor2AutoLock lock(mMonitor); @@ -45,12 +52,7 @@ void GraphRunner::Shutdown() { mThreadState = ThreadState::Shutdown; mMonitor.Signal(); } - // We need to wait for runner thread shutdown here for the sake of the - // xpcomWillShutdown case, so that the main thread is not shut down before - // cleanup 
messages are sent for objects destroyed in - // CycleCollectedJSContext shutdown. - PR_JoinThread(mThread); - mThread = nullptr; + mThread->Shutdown(); } bool GraphRunner::OneIteration(GraphTime aStateEnd) { @@ -86,8 +88,7 @@ bool GraphRunner::OneIteration(GraphTime aStateEnd) { return mStillProcessing; } -void GraphRunner::Run() { - PR_SetCurrentThreadName("GraphRunner"); +NS_IMETHODIMP GraphRunner::Run() { Monitor2AutoLock lock(mMonitor); while (true) { while (mThreadState == ThreadState::Wait) { @@ -104,9 +105,13 @@ void GraphRunner::Run() { } dom::WorkletThread::DeleteCycleCollectedJSContext(); + + return NS_OK; } -bool GraphRunner::OnThread() { return PR_GetCurrentThread() == mThread; } +bool GraphRunner::OnThread() { + return mThread->EventTarget()->IsOnCurrentThread(); +} #ifdef DEBUG bool GraphRunner::RunByGraphDriver(GraphDriver* aDriver) { diff --git a/dom/media/GraphRunner.h b/dom/media/GraphRunner.h index 087ebd4cbe..8f6ac300e2 100644 --- a/dom/media/GraphRunner.h +++ b/dom/media/GraphRunner.h @@ -17,15 +17,14 @@ namespace mozilla { class GraphDriver; class MediaTrackGraphImpl; -class GraphRunner { +class GraphRunner final : public Runnable { public: - explicit GraphRunner(MediaTrackGraphImpl* aGraph); - ~GraphRunner(); + static already_AddRefed Create(MediaTrackGraphImpl* aGraph); /** * Marks us as shut down and signals mThread, so that it runs until the end. */ - void Shutdown(); + MOZ_CAN_RUN_SCRIPT void Shutdown(); /** * Signals one iteration of mGraph. Hands aStateEnd over to mThread and runs @@ -36,7 +35,7 @@ class GraphRunner { /** * Runs mGraph until it shuts down. */ - void Run(); + NS_IMETHOD Run(); /** * Returns true if called on mThread. @@ -52,6 +51,10 @@ class GraphRunner { #endif private: + explicit GraphRunner(MediaTrackGraphImpl* aGraph, + already_AddRefed aThread); + ~GraphRunner(); + // Monitor used for yielding mThread through Wait(), and scheduling mThread // through Signal() from a GraphDriver. 
Monitor2 mMonitor; @@ -78,7 +81,7 @@ class GraphRunner { // The thread running mGraph. Set on construction, after other members are // initialized. Cleared at the end of Shutdown(). - PRThread* mThread; + const nsCOMPtr mThread; #ifdef DEBUG // Set to mGraph's audio callback driver's thread id, if run by an diff --git a/dom/media/MediaData.cpp b/dom/media/MediaData.cpp index a83e87cdc2..19be64722c 100644 --- a/dom/media/MediaData.cpp +++ b/dom/media/MediaData.cpp @@ -222,25 +222,6 @@ VideoData::VideoData(int64_t aOffset, const TimeUnit& aTime, VideoData::~VideoData() {} -void VideoData::SetListener(UniquePtr aListener) { - MOZ_ASSERT(!mSentToCompositor, - "Listener should be registered before sending data"); - - mListener = std::move(aListener); -} - -void VideoData::MarkSentToCompositor() { - if (mSentToCompositor) { - return; - } - - mSentToCompositor = true; - if (mListener != nullptr) { - mListener->OnSentToCompositor(); - mListener = nullptr; - } -} - size_t VideoData::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const { size_t size = aMallocSizeOf(this); diff --git a/dom/media/MediaData.h b/dom/media/MediaData.h index f4c70fa7f3..3106132f55 100644 --- a/dom/media/MediaData.h +++ b/dom/media/MediaData.h @@ -449,12 +449,6 @@ class VideoData : public MediaData { ColorRange mColorRange = ColorRange::LIMITED; }; - class Listener { - public: - virtual void OnSentToCompositor() = 0; - virtual ~Listener() {} - }; - // Constructs a VideoData object. If aImage is nullptr, creates a new Image // holding a copy of the YCbCr data passed in aBuffer. 
If aImage is not // nullptr, it's stored as the underlying video image and aBuffer is assumed @@ -509,8 +503,7 @@ class VideoData : public MediaData { const media::TimeUnit& aTimecode, IntSize aDisplay, uint32_t aFrameID); - void SetListener(UniquePtr aListener); - void MarkSentToCompositor(); + void MarkSentToCompositor() { mSentToCompositor = true; } bool IsSentToCompositor() { return mSentToCompositor; } void UpdateDuration(const media::TimeUnit& aDuration); @@ -526,7 +519,6 @@ class VideoData : public MediaData { ~VideoData(); bool mSentToCompositor; - UniquePtr mListener; media::TimeUnit mNextKeyFrameTime; }; diff --git a/dom/media/MediaFormatReader.cpp b/dom/media/MediaFormatReader.cpp index 8c38160fe9..010ba041f2 100644 --- a/dom/media/MediaFormatReader.cpp +++ b/dom/media/MediaFormatReader.cpp @@ -3000,3 +3000,5 @@ void MediaFormatReader::OnFirstDemuxFailed(TrackInfo::TrackType aType, } // namespace mozilla #undef NS_DispatchToMainThread +#undef LOGV +#undef LOG diff --git a/dom/media/MediaManager.cpp b/dom/media/MediaManager.cpp index 6a32016fbd..70e13b5da2 100644 --- a/dom/media/MediaManager.cpp +++ b/dom/media/MediaManager.cpp @@ -159,10 +159,6 @@ class nsMainThreadPtrHolder< namespace mozilla { -#ifdef LOG -# undef LOG -#endif - LazyLogModule gMediaManagerLog("MediaManager"); #define LOG(...) 
MOZ_LOG(gMediaManagerLog, LogLevel::Debug, (__VA_ARGS__)) @@ -4703,4 +4699,6 @@ void GetUserMediaWindowListener::NotifyChrome() { })); } +#undef LOG + } // namespace mozilla diff --git a/dom/media/MediaRecorder.cpp b/dom/media/MediaRecorder.cpp index 6c03afc31d..27fca306c7 100644 --- a/dom/media/MediaRecorder.cpp +++ b/dom/media/MediaRecorder.cpp @@ -15,6 +15,7 @@ #include "mozilla/DOMEventTargetHelper.h" #include "mozilla/dom/AudioStreamTrack.h" #include "mozilla/dom/BlobEvent.h" +#include "mozilla/dom/EmptyBlobImpl.h" #include "mozilla/dom/File.h" #include "mozilla/dom/MediaRecorderErrorEvent.h" #include "mozilla/dom/MutableBlobStorage.h" @@ -37,23 +38,23 @@ #include "nsProxyRelease.h" #include "nsTArray.h" -#ifdef LOG -# undef LOG -#endif - mozilla::LazyLogModule gMediaRecorderLog("MediaRecorder"); #define LOG(type, msg) MOZ_LOG(gMediaRecorderLog, type, msg) +#define MIN_VIDEO_BITRATE_BPS 10e3 // 10kbps +#define DEFAULT_VIDEO_BITRATE_BPS 2500e3 // 2.5Mbps +#define MAX_VIDEO_BITRATE_BPS 100e6 // 100Mbps + +#define MIN_AUDIO_BITRATE_BPS 500 // 500bps +#define DEFAULT_AUDIO_BITRATE_BPS 128e3 // 128kbps +#define MAX_AUDIO_BITRATE_BPS 512e3 // 512kbps + namespace mozilla { namespace dom { using namespace mozilla::media; -/* static */ StaticRefPtr - gMediaRecorderShutdownBlocker; -static nsTHashtable> gSessions; - /** * MediaRecorderReporter measures memory being used by the Media Recorder. 
* @@ -142,7 +143,7 @@ NS_IMPL_CYCLE_COLLECTION_CLASS(MediaRecorder) NS_IMPL_CYCLE_COLLECTION_TRAVERSE_BEGIN_INHERITED(MediaRecorder, DOMEventTargetHelper) - NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mDOMStream) + NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mStream) NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mAudioNode) NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mSecurityDomException) NS_IMPL_CYCLE_COLLECTION_TRAVERSE(mUnknownDomException) @@ -151,7 +152,7 @@ NS_IMPL_CYCLE_COLLECTION_TRAVERSE_END NS_IMPL_CYCLE_COLLECTION_UNLINK_BEGIN_INHERITED(MediaRecorder, DOMEventTargetHelper) - NS_IMPL_CYCLE_COLLECTION_UNLINK(mDOMStream) + NS_IMPL_CYCLE_COLLECTION_UNLINK(mStream) NS_IMPL_CYCLE_COLLECTION_UNLINK(mAudioNode) NS_IMPL_CYCLE_COLLECTION_UNLINK(mSecurityDomException) NS_IMPL_CYCLE_COLLECTION_UNLINK(mUnknownDomException) @@ -166,6 +167,381 @@ NS_INTERFACE_MAP_END_INHERITING(DOMEventTargetHelper) NS_IMPL_ADDREF_INHERITED(MediaRecorder, DOMEventTargetHelper) NS_IMPL_RELEASE_INHERITED(MediaRecorder, DOMEventTargetHelper) +namespace { +bool PrincipalSubsumes(MediaRecorder* aRecorder, nsIPrincipal* aPrincipal) { + if (!aRecorder->GetOwner()) { + return false; + } + nsCOMPtr doc = aRecorder->GetOwner()->GetExtantDoc(); + if (!doc) { + return false; + } + if (!aPrincipal) { + return false; + } + bool subsumes; + if (NS_FAILED(doc->NodePrincipal()->Subsumes(aPrincipal, &subsumes))) { + return false; + } + return subsumes; +} + +bool MediaStreamTracksPrincipalSubsumes( + MediaRecorder* aRecorder, + const nsTArray>& aTracks) { + nsCOMPtr principal = nullptr; + for (const auto& track : aTracks) { + nsContentUtils::CombineResourcePrincipals(&principal, + track->GetPrincipal()); + } + return PrincipalSubsumes(aRecorder, principal); +} + +bool AudioNodePrincipalSubsumes(MediaRecorder* aRecorder, + AudioNode* aAudioNode) { + MOZ_ASSERT(aAudioNode); + Document* doc = + aAudioNode->GetOwner() ? aAudioNode->GetOwner()->GetExtantDoc() : nullptr; + nsCOMPtr principal = doc ? 
doc->NodePrincipal() : nullptr; + return PrincipalSubsumes(aRecorder, principal); +} + +enum class TypeSupport { + Supported, + MediaTypeInvalid, + NoVideoWithAudioType, + ContainersDisabled, + CodecsDisabled, + ContainerUnsupported, + CodecUnsupported, + CodecDuplicated, +}; + +nsCString TypeSupportToCString(TypeSupport aSupport, + const nsAString& aMimeType) { + nsAutoCString mime = NS_ConvertUTF16toUTF8(aMimeType); + switch (aSupport) { + case TypeSupport::Supported: + return nsPrintfCString("%s is supported", mime.get()); + case TypeSupport::MediaTypeInvalid: + return nsPrintfCString("%s is not a valid media type", mime.get()); + case TypeSupport::NoVideoWithAudioType: + return nsPrintfCString( + "Video cannot be recorded with %s as it is an audio type", + mime.get()); + case TypeSupport::ContainersDisabled: + return NS_LITERAL_CSTRING("All containers are disabled"); + case TypeSupport::CodecsDisabled: + return NS_LITERAL_CSTRING("All codecs are disabled"); + case TypeSupport::ContainerUnsupported: + return nsPrintfCString("%s indicates an unsupported container", + mime.get()); + case TypeSupport::CodecUnsupported: + return nsPrintfCString("%s indicates an unsupported codec", mime.get()); + case TypeSupport::CodecDuplicated: + return nsPrintfCString("%s contains the same codec multiple times", + mime.get()); + default: + MOZ_ASSERT_UNREACHABLE("Unknown TypeSupport"); + return NS_LITERAL_CSTRING("Unknown error"); + } +} + +TypeSupport CanRecordAudioTrackWith(const Maybe& aMimeType, + const nsAString& aMimeTypeString) { + if (aMimeTypeString.IsEmpty()) { + // For the empty string we just need to check whether we have support for an + // audio container and an audio codec. + if (!MediaEncoder::IsWebMEncoderEnabled() && + !MediaDecoder::IsOggEnabled()) { + // No container support for audio. + return TypeSupport::ContainersDisabled; + } + + if (!MediaDecoder::IsOpusEnabled()) { + // No codec support for audio. 
+ return TypeSupport::CodecsDisabled; + } + + return TypeSupport::Supported; + } + + if (!aMimeType) { + // A mime type string was set, but it couldn't be parsed to a valid + // MediaContainerType. + return TypeSupport::MediaTypeInvalid; + } + + if (aMimeType->Type() != MEDIAMIMETYPE(VIDEO_WEBM) && + aMimeType->Type() != MEDIAMIMETYPE(AUDIO_WEBM) && + aMimeType->Type() != MEDIAMIMETYPE(AUDIO_OGG)) { + // Any currently supported container can record audio. + return TypeSupport::ContainerUnsupported; + } + + if (aMimeType->Type() == MEDIAMIMETYPE(VIDEO_WEBM) && + !MediaEncoder::IsWebMEncoderEnabled()) { + return TypeSupport::ContainerUnsupported; + } + + if (aMimeType->Type() == MEDIAMIMETYPE(AUDIO_WEBM) && + !MediaEncoder::IsWebMEncoderEnabled()) { + return TypeSupport::ContainerUnsupported; + } + + if (aMimeType->Type() == MEDIAMIMETYPE(AUDIO_OGG) && + !MediaDecoder::IsOggEnabled()) { + return TypeSupport::ContainerUnsupported; + } + + if (!MediaDecoder::IsOpusEnabled()) { + return TypeSupport::CodecUnsupported; + } + + if (!aMimeType->ExtendedType().HaveCodecs()) { + // No codecs constrained, we can pick opus. + return TypeSupport::Supported; + } + + size_t opus = 0; + size_t unknown = 0; + for (const auto& codec : aMimeType->ExtendedType().Codecs().Range()) { + // Ignore video codecs. + if (codec.EqualsLiteral("vp8")) { + continue; + } + if (codec.EqualsLiteral("vp8.0")) { + continue; + } + if (codec.EqualsLiteral("opus")) { + // All containers support opus + opus++; + continue; + } + unknown++; + } + + if (unknown > 0) { + // Unsupported codec. + return TypeSupport::CodecUnsupported; + } + + if (opus == 0) { + // Codecs specified but not opus. Unsupported for audio. + return TypeSupport::CodecUnsupported; + } + + if (opus > 1) { + // Opus specified more than once. Bad form. 
+ return TypeSupport::CodecDuplicated; + } + + return TypeSupport::Supported; +} + +TypeSupport CanRecordVideoTrackWith(const Maybe& aMimeType, + const nsAString& aMimeTypeString) { + if (aMimeTypeString.IsEmpty()) { + // For the empty string we just need to check whether we have support for a + // video container and a video codec. The VP8 encoder is always available. + if (!MediaEncoder::IsWebMEncoderEnabled()) { + // No container support for video. + return TypeSupport::ContainersDisabled; + } + + return TypeSupport::Supported; + } + + if (!aMimeType) { + // A mime type string was set, but it couldn't be parsed to a valid + // MediaContainerType. + return TypeSupport::MediaTypeInvalid; + } + + if (!aMimeType->Type().HasVideoMajorType()) { + return TypeSupport::NoVideoWithAudioType; + } + + if (aMimeType->Type() != MEDIAMIMETYPE(VIDEO_WEBM)) { + return TypeSupport::ContainerUnsupported; + } + + if (!MediaEncoder::IsWebMEncoderEnabled()) { + return TypeSupport::ContainerUnsupported; + } + + if (!aMimeType->ExtendedType().HaveCodecs()) { + // No codecs constrained, we can pick vp8. + return TypeSupport::Supported; + } + + size_t vp8 = 0; + size_t unknown = 0; + for (const auto& codec : aMimeType->ExtendedType().Codecs().Range()) { + if (codec.EqualsLiteral("opus")) { + // Ignore audio codecs. + continue; + } + if (codec.EqualsLiteral("vp8")) { + vp8++; + continue; + } + if (codec.EqualsLiteral("vp8.0")) { + vp8++; + continue; + } + unknown++; + } + + if (unknown > 0) { + // Unsupported codec. + return TypeSupport::CodecUnsupported; + } + + if (vp8 == 0) { + // Codecs specified but not vp8. Unsupported for video. + return TypeSupport::CodecUnsupported; + } + + if (vp8 > 1) { + // Vp8 specified more than once. Bad form. 
+ return TypeSupport::CodecDuplicated; + } + + return TypeSupport::Supported; +} + +TypeSupport CanRecordWith(MediaStreamTrack* aTrack, + const Maybe& aMimeType, + const nsAString& aMimeTypeString) { + if (aTrack->AsAudioStreamTrack()) { + return CanRecordAudioTrackWith(aMimeType, aMimeTypeString); + } + + if (aTrack->AsVideoStreamTrack()) { + return CanRecordVideoTrackWith(aMimeType, aMimeTypeString); + } + + MOZ_CRASH("Unexpected track type"); +} + +TypeSupport IsTypeSupportedImpl(const nsAString& aMIMEType) { + if (aMIMEType.IsEmpty()) { + // Lie and return true even if no container/codec support is enabled, + // because the spec mandates it. + return TypeSupport::Supported; + } + Maybe mime = MakeMediaContainerType(aMIMEType); + TypeSupport rv = CanRecordAudioTrackWith(mime, aMIMEType); + if (rv == TypeSupport::Supported) { + return rv; + } + return CanRecordVideoTrackWith(mime, aMIMEType); +} + +nsString SelectMimeType(bool aHasVideo, bool aHasAudio, + const nsString& aConstrainedMimeType) { + MOZ_ASSERT(aHasVideo || aHasAudio); + + Maybe constrainedType = + MakeMediaContainerType(aConstrainedMimeType); + + // If we are recording video, Start() should have rejected any non-video mime + // types. + MOZ_ASSERT_IF(constrainedType && aHasVideo, + constrainedType->Type().HasVideoMajorType()); + // IsTypeSupported() rejects application mime types. + MOZ_ASSERT_IF(constrainedType, + !constrainedType->Type().HasApplicationMajorType()); + + nsString result; + if (constrainedType && constrainedType->ExtendedType().HaveCodecs()) { + // The constrained mime type is fully defined (it has codecs!). No need to + // select anything. + result = NS_ConvertUTF8toUTF16(constrainedType->OriginalString()); + } else { + // There is no constrained mime type, or there is and it is not fully + // defined but still valid. Select what's missing, so that we have major + // type, container and codecs. 
+ + // If there is a constrained mime type it should not have codecs defined, + // because then it is fully defined and used unchanged (covered earlier). + MOZ_ASSERT_IF(constrainedType, + !constrainedType->ExtendedType().HaveCodecs()); + + nsCString majorType; + { + if (constrainedType) { + // There is a constrained type. It has both major type and container in + // order to be valid. Use them as is. + majorType = constrainedType->Type().AsString(); + } else if (aHasVideo) { + majorType = NS_LITERAL_CSTRING(VIDEO_WEBM); + } else { + majorType = NS_LITERAL_CSTRING(AUDIO_OGG); + } + } + + nsCString codecs; + { + if (aHasVideo && aHasAudio) { + codecs = NS_LITERAL_CSTRING("\"vp8, opus\""); + } else if (aHasVideo) { + codecs = NS_LITERAL_CSTRING("vp8"); + } else { + codecs = NS_LITERAL_CSTRING("opus"); + } + } + result = NS_ConvertUTF8toUTF16( + nsPrintfCString("%s; codecs=%s", majorType.get(), codecs.get())); + } + + MOZ_ASSERT_IF(aHasAudio, + CanRecordAudioTrackWith(MakeMediaContainerType(result), + result) == TypeSupport::Supported); + MOZ_ASSERT_IF(aHasVideo, + CanRecordVideoTrackWith(MakeMediaContainerType(result), + result) == TypeSupport::Supported); + return result; +} + +void SelectBitrates(uint32_t aBitsPerSecond, uint8_t aNumVideoTracks, + uint32_t* aOutVideoBps, uint8_t aNumAudioTracks, + uint32_t* aOutAudioBps) { + uint32_t vbps = 0; + uint32_t abps = 0; + + const uint32_t minVideoBps = MIN_VIDEO_BITRATE_BPS * aNumVideoTracks; + const uint32_t maxVideoBps = MAX_VIDEO_BITRATE_BPS * aNumVideoTracks; + + const uint32_t minAudioBps = MIN_AUDIO_BITRATE_BPS * aNumAudioTracks; + const uint32_t maxAudioBps = MAX_AUDIO_BITRATE_BPS * aNumAudioTracks; + + if (aNumVideoTracks == 0) { + MOZ_DIAGNOSTIC_ASSERT(aNumAudioTracks > 0); + abps = std::min(maxAudioBps, std::max(minAudioBps, aBitsPerSecond)); + } else if (aNumAudioTracks == 0) { + vbps = std::min(maxVideoBps, std::max(minVideoBps, aBitsPerSecond)); + } else { + // Scale the bits so that video gets 20 times the 
bits of audio. + // Since we must account for varying number of tracks of each type we weight + // them by type; video = weight 20, audio = weight 1. + const uint32_t videoWeight = aNumVideoTracks * 20; + const uint32_t audioWeight = aNumAudioTracks; + const uint32_t totalWeights = audioWeight + videoWeight; + const uint32_t videoBitrate = + uint64_t(aBitsPerSecond) * videoWeight / totalWeights; + const uint32_t audioBitrate = + uint64_t(aBitsPerSecond) * audioWeight / totalWeights; + vbps = std::min(maxVideoBps, std::max(minVideoBps, videoBitrate)); + abps = std::min(maxAudioBps, std::max(minAudioBps, audioBitrate)); + } + + *aOutVideoBps = vbps; + *aOutAudioBps = abps; +} +} // namespace + /** * Session is an object to represent a single recording event. * In original design, all recording context is stored in MediaRecorder, which @@ -198,72 +574,15 @@ NS_IMPL_RELEASE_INHERITED(MediaRecorder, DOMEventTargetHelper) * Therefore, the reference dependency in gecko is: * ShutdownBlocker -> Session <-> MediaRecorder, note that there is a cycle * reference between Session and MediaRecorder. - * 2) A Session is destroyed in DestroyRunnable after MediaRecorder::Stop being - * called _and_ all encoded media data been passed to OnDataAvailable handler. - * 3) MediaRecorder::Stop is called by user or the document is going to - * inactive or invisible. + * 2) A Session is destroyed after MediaRecorder::Stop has been called _and_ all + * encoded media data has been passed to OnDataAvailable handler. 3) + * MediaRecorder::Stop is called by user or the document is going to inactive or + * invisible. */ class MediaRecorder::Session : public PrincipalChangeObserver, public DOMMediaStream::TrackListener { NS_INLINE_DECL_THREADSAFE_REFCOUNTING(Session) - // Main thread task. - // Create a blob event and send back to client. 
- class PushBlobRunnable : public Runnable, public MutableBlobStorageCallback { - public: - // We need to always declare refcounting because - // MutableBlobStorageCallback has pure-virtual refcounting. - NS_DECL_ISUPPORTS_INHERITED - - // aDestroyRunnable can be null. If it's not, it will be dispatched after - // the PushBlobRunnable::Run(). - PushBlobRunnable(Session* aSession, Runnable* aDestroyRunnable) - : Runnable("dom::MediaRecorder::Session::PushBlobRunnable"), - mSession(aSession), - mDestroyRunnable(aDestroyRunnable) {} - - NS_IMETHOD Run() override { - LOG(LogLevel::Debug, ("Session.PushBlobRunnable s=(%p)", mSession.get())); - MOZ_ASSERT(NS_IsMainThread()); - - mSession->GetBlobWhenReady(this); - return NS_OK; - } - - void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, Blob* aBlob, - nsresult aRv) override { - RefPtr recorder = mSession->mRecorder; - if (!recorder) { - return; - } - - if (NS_FAILED(aRv)) { - mSession->DoSessionEndTask(aRv); - return; - } - - nsresult rv = recorder->CreateAndDispatchBlobEvent(aBlob); - if (NS_FAILED(rv)) { - mSession->DoSessionEndTask(aRv); - } - - if (mDestroyRunnable && - NS_FAILED(NS_DispatchToMainThread(mDestroyRunnable.forget()))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread failed"); - } - } - - private: - ~PushBlobRunnable() = default; - - RefPtr mSession; - - // The generation of the blob is async. In order to avoid dispatching the - // DestroyRunnable before pushing the blob event, we store the runnable - // here. 
- RefPtr mDestroyRunnable; - }; - class StoreEncodedBufferRunnable final : public Runnable { RefPtr mSession; nsTArray> mBuffer; @@ -279,13 +598,13 @@ class MediaRecorder::Session : public PrincipalChangeObserver, Run() override { MOZ_ASSERT(NS_IsMainThread()); mSession->MaybeCreateMutableBlobStorage(); - for (uint32_t i = 0; i < mBuffer.Length(); i++) { - if (mBuffer[i].IsEmpty()) { + for (const auto& part : mBuffer) { + if (part.IsEmpty()) { continue; } - nsresult rv = mSession->mMutableBlobStorage->Append( - mBuffer[i].Elements(), mBuffer[i].Length()); + nsresult rv = mSession->mMutableBlobStorage->Append(part.Elements(), + part.Length()); if (NS_WARN_IF(NS_FAILED(rv))) { mSession->DoSessionEndTask(rv); break; @@ -296,126 +615,6 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } }; - // Notify encoder error, run in main thread task. (Bug 1095381) - class EncoderErrorNotifierRunnable : public Runnable { - public: - explicit EncoderErrorNotifierRunnable(Session* aSession) - : Runnable("dom::MediaRecorder::Session::EncoderErrorNotifierRunnable"), - mSession(aSession) {} - - NS_IMETHOD Run() override { - LOG(LogLevel::Debug, - ("Session.ErrorNotifyRunnable s=(%p)", mSession.get())); - MOZ_ASSERT(NS_IsMainThread()); - - RefPtr recorder = mSession->mRecorder; - if (!recorder) { - return NS_OK; - } - - recorder->NotifyError(NS_ERROR_UNEXPECTED); - return NS_OK; - } - - private: - RefPtr mSession; - }; - - // Fire a named event, run in main thread task. 
- class DispatchEventRunnable : public Runnable { - public: - explicit DispatchEventRunnable(Session* aSession, - const nsAString& aEventName) - : Runnable("dom::MediaRecorder::Session::DispatchEventRunnable"), - mSession(aSession), - mEventName(aEventName) {} - - NS_IMETHOD Run() override { - LOG(LogLevel::Debug, - ("Session.DispatchEventRunnable s=(%p) e=(%s)", mSession.get(), - NS_ConvertUTF16toUTF8(mEventName).get())); - MOZ_ASSERT(NS_IsMainThread()); - - NS_ENSURE_TRUE(mSession->mRecorder, NS_OK); - mSession->mRecorder->DispatchSimpleEvent(mEventName); - - return NS_OK; - } - - private: - RefPtr mSession; - nsString mEventName; - }; - - // Main thread task. - // To delete RecordingSession object. - class DestroyRunnable : public Runnable { - public: - explicit DestroyRunnable(Session* aSession) - : Runnable("dom::MediaRecorder::Session::DestroyRunnable"), - mSession(aSession) {} - - explicit DestroyRunnable(already_AddRefed aSession) - : Runnable("dom::MediaRecorder::Session::DestroyRunnable"), - mSession(aSession) {} - - NS_IMETHOD Run() override { - LOG(LogLevel::Debug, - ("Session.DestroyRunnable session refcnt = (%d) s=(%p)", - static_cast(mSession->mRefCnt), mSession.get())); - MOZ_ASSERT(NS_IsMainThread() && mSession); - RefPtr recorder = mSession->mRecorder; - if (!recorder) { - return NS_OK; - } - // SourceMediaStream is ended, and send out TRACK_EVENT_END notification. - // Read Thread will be terminate soon. - // We need to switch MediaRecorder to "Stop" state first to make sure - // MediaRecorder is not associated with this Session anymore, then, it's - // safe to delete this Session. 
- // Also avoid to run if this session already call stop before - if (mSession->mRunningState.isOk() && - mSession->mRunningState.unwrap() != RunningState::Stopping && - mSession->mRunningState.unwrap() != RunningState::Stopped) { - recorder->StopForSessionDestruction(); - if (NS_FAILED(NS_DispatchToMainThread( - new DestroyRunnable(mSession.forget())))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread failed"); - } - return NS_OK; - } - - if (mSession->mRunningState.isOk()) { - mSession->mRunningState = RunningState::Stopped; - } - - // Dispatch stop event and clear MIME type. - mSession->mMimeType = NS_LITERAL_STRING(""); - recorder->SetMimeType(mSession->mMimeType); - recorder->DispatchSimpleEvent(NS_LITERAL_STRING("stop")); - - RefPtr session = mSession.forget(); - session->Shutdown()->Then( - GetCurrentThreadSerialEventTarget(), __func__, - [session]() { - gSessions.RemoveEntry(session); - if (gSessions.Count() == 0 && gMediaRecorderShutdownBlocker) { - // All sessions finished before shutdown, no need to keep the - // blocker. - RefPtr barrier = GetShutdownBarrier(); - barrier->RemoveBlocker(gMediaRecorderShutdownBlocker); - gMediaRecorderShutdownBlocker = nullptr; - } - }, - []() { MOZ_CRASH("Not reached"); }); - return NS_OK; - } - - private: - // Call mSession::Release automatically while DestroyRunnable be destroy. 
- RefPtr mSession; - }; - class EncoderListener : public MediaEncoderListener { public: EncoderListener(TaskQueue* aEncoderThread, Session* aSession) @@ -459,28 +658,48 @@ class MediaRecorder::Session : public PrincipalChangeObserver, RefPtr mSession; }; - friend class EncoderErrorNotifierRunnable; - friend class PushBlobRunnable; - friend class DestroyRunnable; + struct TrackTypeComparator { + enum Type { + AUDIO, + VIDEO, + }; + static bool Equals(const RefPtr& aTrack, Type aType) { + return (aType == AUDIO && aTrack->AsAudioStreamTrack()) || + (aType == VIDEO && aTrack->AsVideoStreamTrack()); + } + }; public: - Session(MediaRecorder* aRecorder, uint32_t aTimeSlice) + Session(MediaRecorder* aRecorder, + nsTArray> aMediaStreamTracks, + TimeDuration aTimeslice, uint32_t aVideoBitsPerSecond, + uint32_t aAudioBitsPerSecond) : mRecorder(aRecorder), - mMediaStreamReady(false), - mTimeSlice(aTimeSlice), + mMediaStreamTracks(std::move(aMediaStreamTracks)), + mMainThread(mRecorder->GetOwner()->EventTargetFor(TaskCategory::Other)), + mMimeType(SelectMimeType( + mMediaStreamTracks.Contains(TrackTypeComparator::VIDEO, + TrackTypeComparator()), + mRecorder->mAudioNode || + mMediaStreamTracks.Contains(TrackTypeComparator::AUDIO, + TrackTypeComparator()), + mRecorder->mConstrainedMimeType)), + mTimeslice(aTimeslice), + mVideoBitsPerSecond(aVideoBitsPerSecond), + mAudioBitsPerSecond(aAudioBitsPerSecond), + mStartTime(TimeStamp::Now()), mRunningState(RunningState::Idling) { MOZ_ASSERT(NS_IsMainThread()); - aRecorder->GetMimeType(mMimeType); mMaxMemory = Preferences::GetUint("media.recorder.max_memory", MAX_ALLOW_MEMORY_BUFFER); - mLastBlobTimeStamp = TimeStamp::Now(); + Telemetry::ScalarAdd(Telemetry::ScalarID::MEDIARECORDER_RECORDING_COUNT, 1); } void PrincipalChanged(MediaStreamTrack* aTrack) override { NS_ASSERTION(mMediaStreamTracks.Contains(aTrack), "Principal changed for unrecorded track"); - if (!MediaStreamTracksPrincipalSubsumes()) { + if 
(!MediaStreamTracksPrincipalSubsumes(mRecorder, mMediaStreamTracks)) { DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR); } } @@ -489,32 +708,14 @@ class MediaRecorder::Session : public PrincipalChangeObserver, LOG(LogLevel::Warning, ("Session.NotifyTrackAdded %p Raising error due to track set change", this)); - if (mMediaStreamReady) { - DoSessionEndTask(NS_ERROR_ABORT); - } - - NS_DispatchToMainThread( - NewRunnableMethod("MediaRecorder::Session::MediaStreamReady", this, - &Session::MediaStreamReady)); - return; + DoSessionEndTask(NS_ERROR_ABORT); } void NotifyTrackRemoved(const RefPtr& aTrack) override { - if (!mMediaStreamReady) { - // We haven't chosen the track set to record yet. - return; - } - if (aTrack->Ended()) { // TrackEncoder will pickup tracks that end itself. return; } - - MOZ_ASSERT(mEncoder); - if (mEncoder) { - mEncoder->RemoveMediaStreamTrack(aTrack); - } - LOG(LogLevel::Warning, ("Session.NotifyTrackRemoved %p Raising error due to track set change", this)); @@ -525,32 +726,51 @@ class MediaRecorder::Session : public PrincipalChangeObserver, LOG(LogLevel::Debug, ("Session.Start %p", this)); MOZ_ASSERT(NS_IsMainThread()); - DOMMediaStream* domStream = mRecorder->Stream(); - if (domStream) { - // The callback reports back when tracks are available and can be - // attached to MediaEncoder. This allows `recorder.start()` before any - // tracks are available. We have supported this historically and have - // mochitests assuming this behavior. - mMediaStream = domStream; + if (mRecorder->mStream) { + // The TrackListener reports back when tracks are added or removed from + // the MediaStream. + mMediaStream = mRecorder->mStream; mMediaStream->RegisterTrackListener(this); - nsTArray> tracks(2); - mMediaStream->GetTracks(tracks); - for (const auto& track : tracks) { - // Notify of existing tracks, as the stream doesn't do this by itself. 
- NotifyTrackAdded(track); + + uint8_t trackTypes = 0; + int32_t audioTracks = 0; + int32_t videoTracks = 0; + for (const auto& track : mMediaStreamTracks) { + if (track->AsAudioStreamTrack()) { + ++audioTracks; + trackTypes |= ContainerWriter::CREATE_AUDIO_TRACK; + } else if (track->AsVideoStreamTrack()) { + ++videoTracks; + trackTypes |= ContainerWriter::CREATE_VIDEO_TRACK; + } else { + MOZ_CRASH("Unexpected track type"); + } } + + if (audioTracks > 1 || videoTracks > 1) { + // When MediaRecorder supports multiple tracks, we should set up a + // single MediaInputPort from the input stream, and let main thread + // check track principals async later. + nsPIDOMWindowInner* window = mRecorder->GetOwner(); + Document* document = window ? window->GetExtantDoc() : nullptr; + nsContentUtils::ReportToConsole(nsIScriptError::errorFlag, + NS_LITERAL_CSTRING("Media"), document, + nsContentUtils::eDOM_PROPERTIES, + "MediaRecorderMultiTracksNotSupported"); + DoSessionEndTask(NS_ERROR_ABORT); + return; + } + + for (const auto& t : mMediaStreamTracks) { + t->AddPrincipalChangeObserver(this); + } + + LOG(LogLevel::Debug, ("Session.Start track types = (%d)", trackTypes)); + InitEncoder(trackTypes, mMediaStreamTracks[0]->Graph()->GraphRate()); return; } if (mRecorder->mAudioNode) { - // Check that we may access the audio node's content. 
- if (!AudioNodePrincipalSubsumes()) { - LOG(LogLevel::Warning, - ("Session.Start AudioNode principal check failed")); - DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR); - return; - } - TrackRate trackRate = mRecorder->mAudioNode->Context()->Graph()->GraphRate(); @@ -577,8 +797,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } { - auto tracks(std::move(mMediaStreamTracks)); - for (RefPtr& track : tracks) { + for (const auto& track : mMediaStreamTracks) { track->RemovePrincipalChangeObserver(this); } } @@ -586,7 +805,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver, if (mRunningState.isOk() && mRunningState.inspect() == RunningState::Idling) { LOG(LogLevel::Debug, ("Session.Stop Explicit end task %p", this)); - // End the Session directly if there is no ExtractRunnable. + // End the Session directly if there is no encoder. DoSessionEndTask(NS_OK); } else if (mRunningState.isOk() && (mRunningState.inspect() == RunningState::Starting || @@ -595,45 +814,54 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } } - nsresult Pause() { + void Pause() { LOG(LogLevel::Debug, ("Session.Pause")); MOZ_ASSERT(NS_IsMainThread()); - - if (!mEncoder) { - return NS_ERROR_FAILURE; + MOZ_ASSERT_IF(mRunningState.isOk(), + mRunningState.unwrap() != RunningState::Idling); + if (mRunningState.isErr() || + mRunningState.unwrap() == RunningState::Stopping || + mRunningState.unwrap() == RunningState::Stopped) { + return; } - + MOZ_ASSERT(mEncoder); mEncoder->Suspend(); - NS_DispatchToMainThread( - new DispatchEventRunnable(this, NS_LITERAL_STRING("pause"))); - return NS_OK; } - nsresult Resume() { + void Resume() { LOG(LogLevel::Debug, ("Session.Resume")); MOZ_ASSERT(NS_IsMainThread()); - - if (!mEncoder) { - return NS_ERROR_FAILURE; + MOZ_ASSERT_IF(mRunningState.isOk(), + mRunningState.unwrap() != RunningState::Idling); + if (mRunningState.isErr() || + mRunningState.unwrap() == RunningState::Stopping || + mRunningState.unwrap() == 
RunningState::Stopped) { + return; } - + MOZ_ASSERT(mEncoder); mEncoder->Resume(); - NS_DispatchToMainThread( - new DispatchEventRunnable(this, NS_LITERAL_STRING("resume"))); - return NS_OK; } - nsresult RequestData() { + void RequestData() { LOG(LogLevel::Debug, ("Session.RequestData")); MOZ_ASSERT(NS_IsMainThread()); - if (NS_FAILED( - NS_DispatchToMainThread(new PushBlobRunnable(this, nullptr)))) { - MOZ_ASSERT(false, "RequestData NS_DispatchToMainThread failed"); - return NS_ERROR_FAILURE; - } + GatherBlob()->Then( + mMainThread, __func__, + [this, self = RefPtr(this)]( + const BlobPromise::ResolveOrRejectValue& aResult) { + if (aResult.IsReject()) { + LOG(LogLevel::Warning, ("GatherBlob failed for RequestData()")); + DoSessionEndTask(aResult.RejectValue()); + return; + } - return NS_OK; + nsresult rv = + mRecorder->CreateAndDispatchBlobEvent(aResult.ResolveValue()); + if (NS_FAILED(rv)) { + DoSessionEndTask(NS_OK); + } + }); } void MaybeCreateMutableBlobStorage() { @@ -643,14 +871,66 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } } - void GetBlobWhenReady(MutableBlobStorageCallback* aCallback) { - MOZ_ASSERT(NS_IsMainThread()); + static const bool IsExclusive = false; + using BlobPromise = MozPromise, nsresult, IsExclusive>; + class BlobStorer : public MutableBlobStorageCallback { + MozPromiseHolder mHolder; + virtual ~BlobStorer() = default; + + public: + BlobStorer() = default; + + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(BlobStorer, override) + + void BlobStoreCompleted(MutableBlobStorage*, BlobImpl* aBlobImpl, + nsresult aRv) override { + MOZ_ASSERT(NS_IsMainThread()); + if (NS_FAILED(aRv)) { + mHolder.Reject(aRv, __func__); + return; + } + + mHolder.Resolve(aBlobImpl, __func__); + } + + RefPtr Promise() { return mHolder.Ensure(__func__); } + }; + + protected: + RefPtr GatherBlobImpl() { + RefPtr storer = MakeAndAddRef(); MaybeCreateMutableBlobStorage(); - mMutableBlobStorage->GetBlobWhenReady(mRecorder->GetParentObject(), - 
NS_ConvertUTF16toUTF8(mMimeType), - aCallback); + mMutableBlobStorage->GetBlobImplWhenReady(NS_ConvertUTF16toUTF8(mMimeType), + storer); mMutableBlobStorage = nullptr; + + storer->Promise()->Then( + mMainThread, __func__, + [self = RefPtr(this), p = storer->Promise()] { + if (self->mBlobPromise == p) { + // Reset BlobPromise. + self->mBlobPromise = nullptr; + } + }); + + return storer->Promise(); + } + + public: + // Stops gathering data into the current blob and resolves when the current + // blob is available. Future data will be stored in a new blob. + // Should a previous async GatherBlob() operation still be in progress, we'll + // wait for it to finish before starting this one. + RefPtr GatherBlob() { + MOZ_ASSERT(NS_IsMainThread()); + if (!mBlobPromise) { + return mBlobPromise = GatherBlobImpl(); + } + return mBlobPromise = mBlobPromise->Then(mMainThread, __func__, + [self = RefPtr(this)] { + return self->GatherBlobImpl(); + }); } RefPtr SizeOfExcludingThis( @@ -675,17 +955,17 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } private: - // Only DestroyRunnable is allowed to delete Session object on main thread. virtual ~Session() { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(mShutdownPromise); + MOZ_ASSERT(!mShutdownBlocker); LOG(LogLevel::Debug, ("Session.~Session (%p)", this)); } + // Pull encoded media data from MediaEncoder and put into MutableBlobStorage. - // Destroy this session object in the end of this function. - // If the bool aForceFlush is true, we will force to dispatch a - // PushBlobRunnable to main thread. - void Extract(bool aForceFlush, Runnable* aDestroyRunnable) { + // If the bool aForceFlush is true, we will force a dispatch of a blob to + // main thread. 
+ void Extract(TimeStamp aNow, bool aForceFlush) { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); LOG(LogLevel::Debug, ("Session.Extract %p", this)); @@ -708,147 +988,38 @@ class MediaRecorder::Session : public PrincipalChangeObserver, // Whether push encoded data back to onDataAvailable automatically or we // need a flush. bool pushBlob = aForceFlush; - if (!pushBlob && mTimeSlice > 0 && - (TimeStamp::Now() - mLastBlobTimeStamp).ToMilliseconds() > mTimeSlice) { + if (!pushBlob && !mLastBlobTimeStamp.IsNull() && + (aNow - mLastBlobTimeStamp) > mTimeslice) { pushBlob = true; } if (pushBlob) { - if (NS_FAILED(NS_DispatchToMainThread( - new PushBlobRunnable(this, aDestroyRunnable)))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed"); - } else { - mLastBlobTimeStamp = TimeStamp::Now(); - } - } else if (aDestroyRunnable) { - if (NS_FAILED(NS_DispatchToMainThread(aDestroyRunnable))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed"); - } + MOZ_ASSERT(!mLastBlobTimeStamp.IsNull(), + "The encoder must have been initialized if there's data"); + mLastBlobTimeStamp = aNow; + InvokeAsync(mMainThread, this, __func__, &Session::GatherBlob) + ->Then(mMainThread, __func__, + [this, self = RefPtr(this)]( + const BlobPromise::ResolveOrRejectValue& aResult) { + // Assert that we've seen the start event + MOZ_ASSERT_IF( + mRunningState.isOk(), + mRunningState.inspect() != RunningState::Starting); + if (aResult.IsReject()) { + LOG(LogLevel::Warning, + ("GatherBlob failed for pushing blob")); + DoSessionEndTask(aResult.RejectValue()); + return; + } + + nsresult rv = mRecorder->CreateAndDispatchBlobEvent( + aResult.ResolveValue()); + if (NS_FAILED(rv)) { + DoSessionEndTask(NS_OK); + } + }); } } - void MediaStreamReady() { - if (!mMediaStream) { - // Already shut down. This can happen because MediaStreamReady is async. 
- return; - } - - if (mMediaStreamReady) { - return; - } - - if (!mRunningState.isOk() || - mRunningState.inspect() != RunningState::Idling) { - return; - } - - nsTArray> tracks; - mMediaStream->GetTracks(tracks); - uint8_t trackTypes = 0; - int32_t audioTracks = 0; - int32_t videoTracks = 0; - for (auto& track : tracks) { - if (track->Ended()) { - continue; - } - - ConnectMediaStreamTrack(*track); - - if (track->AsAudioStreamTrack()) { - ++audioTracks; - trackTypes |= ContainerWriter::CREATE_AUDIO_TRACK; - } else if (track->AsVideoStreamTrack()) { - ++videoTracks; - trackTypes |= ContainerWriter::CREATE_VIDEO_TRACK; - } else { - MOZ_CRASH("Unexpected track type"); - } - } - - if (trackTypes == 0) { - MOZ_ASSERT(audioTracks == 0); - MOZ_ASSERT(videoTracks == 0); - return; - } - - mMediaStreamReady = true; - - if (audioTracks > 1 || videoTracks > 1) { - // When MediaRecorder supports multiple tracks, we should set up a single - // MediaInputPort from the input stream, and let main thread check - // track principals async later. - nsPIDOMWindowInner* window = mRecorder->GetParentObject(); - Document* document = window ? window->GetExtantDoc() : nullptr; - nsContentUtils::ReportToConsole(nsIScriptError::errorFlag, - NS_LITERAL_CSTRING("Media"), document, - nsContentUtils::eDOM_PROPERTIES, - "MediaRecorderMultiTracksNotSupported"); - DoSessionEndTask(NS_ERROR_ABORT); - return; - } - - // Check that we may access the tracks' content. 
- if (!MediaStreamTracksPrincipalSubsumes()) { - LOG(LogLevel::Warning, ("Session.MediaTracksReady MediaStreamTracks " - "principal check failed")); - DoSessionEndTask(NS_ERROR_DOM_SECURITY_ERR); - return; - } - - LOG(LogLevel::Debug, - ("Session.MediaTracksReady track type = (%d)", trackTypes)); - InitEncoder(trackTypes, mMediaStreamTracks[0]->Graph()->GraphRate()); - } - - void ConnectMediaStreamTrack(MediaStreamTrack& aTrack) { - for (auto& track : mMediaStreamTracks) { - if (track->AsAudioStreamTrack() && aTrack.AsAudioStreamTrack()) { - // We only allow one audio track. See bug 1276928. - return; - } - if (track->AsVideoStreamTrack() && aTrack.AsVideoStreamTrack()) { - // We only allow one video track. See bug 1276928. - return; - } - } - mMediaStreamTracks.AppendElement(&aTrack); - aTrack.AddPrincipalChangeObserver(this); - } - - bool PrincipalSubsumes(nsIPrincipal* aPrincipal) { - if (!mRecorder->GetOwner()) return false; - nsCOMPtr doc = mRecorder->GetOwner()->GetExtantDoc(); - if (!doc) { - return false; - } - if (!aPrincipal) { - return false; - } - bool subsumes; - if (NS_FAILED(doc->NodePrincipal()->Subsumes(aPrincipal, &subsumes))) { - return false; - } - return subsumes; - } - - bool MediaStreamTracksPrincipalSubsumes() { - MOZ_ASSERT(mRecorder->mDOMStream); - nsCOMPtr principal = nullptr; - for (RefPtr& track : mMediaStreamTracks) { - nsContentUtils::CombineResourcePrincipals(&principal, - track->GetPrincipal()); - } - return PrincipalSubsumes(principal); - } - - bool AudioNodePrincipalSubsumes() { - MOZ_ASSERT(mRecorder->mAudioNode); - Document* doc = mRecorder->mAudioNode->GetOwner() - ? mRecorder->mAudioNode->GetOwner()->GetExtantDoc() - : nullptr; - nsCOMPtr principal = doc ? 
doc->NodePrincipal() : nullptr; - return PrincipalSubsumes(principal); - } - void InitEncoder(uint8_t aTrackTypes, TrackRate aTrackRate) { LOG(LogLevel::Debug, ("Session.InitEncoder %p", this)); MOZ_ASSERT(NS_IsMainThread()); @@ -874,83 +1045,32 @@ class MediaRecorder::Session : public PrincipalChangeObserver, mEncoderThread = MakeAndAddRef(pool.forget(), "MediaRecorderReadThread"); - if (!gMediaRecorderShutdownBlocker) { - // Add a shutdown blocker so mEncoderThread can be shutdown async. - class Blocker : public ShutdownBlocker { - public: - Blocker() - : ShutdownBlocker( - NS_LITERAL_STRING("MediaRecorder::Session: shutdown")) {} + MOZ_DIAGNOSTIC_ASSERT(!mShutdownBlocker); + // Add a shutdown blocker so mEncoderThread can be shutdown async. + class Blocker : public ShutdownBlocker { + const RefPtr mSession; - NS_IMETHOD BlockShutdown(nsIAsyncShutdownClient*) override { - // Distribute the global async shutdown blocker in a ticket. If there - // are zero graphs then shutdown is unblocked when we go out of scope. 
- RefPtr ticket = - MakeAndAddRef(gMediaRecorderShutdownBlocker); - gMediaRecorderShutdownBlocker = nullptr; + public: + Blocker(RefPtr aSession, const nsString& aName) + : ShutdownBlocker(aName), mSession(std::move(aSession)) {} - nsTArray> promises(gSessions.Count()); - for (auto iter = gSessions.Iter(); !iter.Done(); iter.Next()) { - promises.AppendElement(iter.Get()->GetKey()->Shutdown()); - } - gSessions.Clear(); - ShutdownPromise::All(GetCurrentThreadSerialEventTarget(), promises) - ->Then( - GetCurrentThreadSerialEventTarget(), __func__, - [ticket]() mutable { - MOZ_ASSERT(gSessions.Count() == 0); - // Unblock shutdown - ticket = nullptr; - }, - []() { MOZ_CRASH("Not reached"); }); - return NS_OK; - } - }; - - gMediaRecorderShutdownBlocker = MakeAndAddRef(); - RefPtr barrier = GetShutdownBarrier(); - nsresult rv = barrier->AddBlocker( - gMediaRecorderShutdownBlocker, NS_LITERAL_STRING(__FILE__), __LINE__, - NS_LITERAL_STRING("MediaRecorder::Session: shutdown")); - MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); - } - - gSessions.PutEntry(this); - - uint32_t audioBitrate = mRecorder->GetAudioBitrate(); - uint32_t videoBitrate = mRecorder->GetVideoBitrate(); - uint32_t bitrate = mRecorder->GetBitrate(); - if (bitrate > 0) { - // There's a total cap set. We have to make sure the type-specific limits - // are within range. - if ((aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) && - (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK) && - audioBitrate + videoBitrate > bitrate) { - LOG(LogLevel::Info, ("Session.InitEncoder Bitrates higher than total " - "cap. 
Recalculating.")); - double factor = - bitrate / static_cast(audioBitrate + videoBitrate); - audioBitrate = static_cast(audioBitrate * factor); - videoBitrate = static_cast(videoBitrate * factor); - } else if ((aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) && - !(aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK)) { - audioBitrate = std::min(audioBitrate, bitrate); - videoBitrate = 0; - } else if (!(aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) && - (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK)) { - audioBitrate = 0; - videoBitrate = std::min(videoBitrate, bitrate); + NS_IMETHOD BlockShutdown(nsIAsyncShutdownClient*) override { + Unused << mSession->Shutdown(); + return NS_OK; } - MOZ_ASSERT(audioBitrate + videoBitrate <= bitrate); - } + }; - // Allocate encoder and bind with union stream. - // At this stage, the API doesn't allow UA to choose the output mimeType - // format. + nsString name; + name.AppendPrintf("MediaRecorder::Session %p shutdown", this); + mShutdownBlocker = MakeAndAddRef(this, name); + nsresult rv = GetShutdownBarrier()->AddBlocker( + mShutdownBlocker, NS_LITERAL_STRING(__FILE__), __LINE__, + NS_LITERAL_STRING("MediaRecorder::Session: shutdown")); + MOZ_RELEASE_ASSERT(NS_SUCCEEDED(rv)); - mEncoder = - MediaEncoder::CreateEncoder(mEncoderThread, mMimeType, audioBitrate, - videoBitrate, aTrackTypes, aTrackRate); + mEncoder = MediaEncoder::CreateEncoder( + mEncoderThread, mMimeType, mAudioBitsPerSecond, mVideoBitsPerSecond, + aTrackTypes, aTrackRate); if (!mEncoder) { LOG(LogLevel::Error, ("Session.InitEncoder !mEncoder %p", this)); @@ -959,10 +1079,9 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } mEncoderListener = MakeAndAddRef(mEncoderThread, this); - nsresult rv = - mEncoderThread->Dispatch(NewRunnableMethod>( - "mozilla::MediaEncoder::RegisterListener", mEncoder, - &MediaEncoder::RegisterListener, mEncoderListener)); + rv = mEncoderThread->Dispatch(NewRunnableMethod>( + 
"mozilla::MediaEncoder::RegisterListener", mEncoder, + &MediaEncoder::RegisterListener, mEncoderListener)); MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); Unused << rv; @@ -971,20 +1090,29 @@ class MediaRecorder::Session : public PrincipalChangeObserver, mRecorder->mAudioNodeOutput); } - for (auto& track : mMediaStreamTracks) { + for (const auto& track : mMediaStreamTracks) { mEncoder->ConnectMediaStreamTrack(track); } - // If user defines timeslice interval for video blobs we have to set - // appropriate video keyframe interval defined in milliseconds. - mEncoder->SetVideoKeyFrameInterval(mTimeSlice); + // If a timeslice is defined we set an appropriate video keyframe interval. + // This allows users to get blobs regularly when the timeslice interval is + // shorter than the default key frame interval, as we'd normally wait for a + // key frame before sending data to the blob. + mEncoder->SetVideoKeyFrameInterval( + std::max(TimeDuration::FromSeconds(1), mTimeslice).ToMilliseconds()); - // Set mRunningState to Running so that ExtractRunnable/DestroyRunnable will + // Set mRunningState to Running so that DoSessionEndTask will // take the responsibility to end the session. 
mRunningState = RunningState::Starting; } - // application should get blob and onstop event + // This is the task that will stop recording per spec: + // - Stop gathering data (this is inherently async) + // - Set state to "inactive" + // - Fire an error event, if NS_FAILED(rv) + // - Discard blob data if rv is NS_ERROR_DOM_SECURITY_ERR + // - Fire a Blob event + // - Fire an event named stop void DoSessionEndTask(nsresult rv) { MOZ_ASSERT(NS_IsMainThread()); if (mRunningState.isErr()) { @@ -998,11 +1126,11 @@ class MediaRecorder::Session : public PrincipalChangeObserver, return; } + bool needsStartEvent = false; if (mRunningState.isOk() && (mRunningState.inspect() == RunningState::Idling || mRunningState.inspect() == RunningState::Starting)) { - NS_DispatchToMainThread( - new DispatchEventRunnable(this, NS_LITERAL_STRING("start"))); + needsStartEvent = true; } if (rv == NS_OK) { @@ -1011,68 +1139,85 @@ class MediaRecorder::Session : public PrincipalChangeObserver, mRunningState = Err(rv); } - if (NS_FAILED(rv)) { - mRecorder->ForceInactive(); - NS_DispatchToMainThread(NewRunnableMethod( - "dom::MediaRecorder::NotifyError", mRecorder, - &MediaRecorder::NotifyError, rv)); - } + GatherBlob() + ->Then( + mMainThread, __func__, + [this, self = RefPtr(this), rv, needsStartEvent]( + const BlobPromise::ResolveOrRejectValue& aResult) { + if (mRecorder->mSessions.LastElement() == this) { + // Set state to inactive, but only if the recorder is not + // controlled by another session already. + mRecorder->Inactivate(); + } - RefPtr destroyRunnable = new DestroyRunnable(this); + if (needsStartEvent) { + mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("start")); + } - if (rv != NS_ERROR_DOM_SECURITY_ERR) { - // Don't push a blob if there was a security error. 
- if (NS_FAILED(NS_DispatchToMainThread( - new PushBlobRunnable(this, destroyRunnable)))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread PushBlobRunnable failed"); - } - } else { - if (NS_FAILED(NS_DispatchToMainThread(destroyRunnable))) { - MOZ_ASSERT(false, "NS_DispatchToMainThread DestroyRunnable failed"); - } - } + // If there was an error, Fire the appropriate one + if (NS_FAILED(rv)) { + mRecorder->NotifyError(rv); + } + + // Fire a blob event named dataavailable + RefPtr blobImpl; + if (rv == NS_ERROR_DOM_SECURITY_ERR || aResult.IsReject()) { + // In case of SecurityError, the blob data must be discarded. + // We create a new empty one and throw the blob with its data + // away. + // In case we failed to gather blob data, we create an empty + // memory blob instead. + blobImpl = new EmptyBlobImpl(mMimeType); + } else { + blobImpl = aResult.ResolveValue(); + } + if (NS_FAILED(mRecorder->CreateAndDispatchBlobEvent(blobImpl))) { + // Failed to dispatch blob event. That's unexpected. It's + // probably all right to fire an error event if we haven't + // already. + if (NS_SUCCEEDED(rv)) { + mRecorder->NotifyError(NS_ERROR_FAILURE); + } + } + + // Fire an event named stop + mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("stop")); + + // And finally, Shutdown and destroy the Session + return Shutdown(); + }) + ->Then(mMainThread, __func__, [this, self = RefPtr(this)] { + GetShutdownBarrier()->RemoveBlocker(mShutdownBlocker); + mShutdownBlocker = nullptr; + }); } void MediaEncoderInitialized() { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - // Pull encoded metadata from MediaEncoder - nsTArray> encodedBuf; - nsString mime; - nsresult rv = mEncoder->GetEncodedMetadata(&encodedBuf, mime); + // Start issuing timeslice-based blobs. + MOZ_ASSERT(mLastBlobTimeStamp.IsNull()); + mLastBlobTimeStamp = TimeStamp::Now(); - if (NS_FAILED(rv)) { - MOZ_ASSERT(false); - return; - } + Extract(mLastBlobTimeStamp, false); - // Append pulled data into cache buffer. 
- NS_DispatchToMainThread( - new StoreEncodedBufferRunnable(this, std::move(encodedBuf))); - - RefPtr self = this; - NS_DispatchToMainThread(NewRunnableFrom([self, mime]() { - if (!self->mRecorder) { - MOZ_ASSERT_UNREACHABLE("Recorder should be live"); + NS_DispatchToMainThread(NewRunnableFrom([self = RefPtr(this), this, + mime = mEncoder->MimeType()]() { + if (mRunningState.isErr()) { return NS_OK; } - if (self->mRunningState.isOk()) { - RunningState state = self->mRunningState.inspect(); - if (state == RunningState::Starting || - state == RunningState::Stopping) { - if (state == RunningState::Starting) { - // We set it to Running in the runnable since we can only assign - // mRunningState on main thread. We set it before running the start - // event runnable since that dispatches synchronously (and may cause - // js calls to methods depending on mRunningState). - self->mRunningState = RunningState::Running; - } - self->mMimeType = mime; - self->mRecorder->SetMimeType(self->mMimeType); - auto startEvent = MakeRefPtr( - self, NS_LITERAL_STRING("start")); - startEvent->Run(); + RunningState state = self->mRunningState.inspect(); + if (state == RunningState::Starting || state == RunningState::Stopping) { + if (state == RunningState::Starting) { + // We set it to Running in the runnable since we can only assign + // mRunningState on main thread. We set it before running the start + // event runnable since that dispatches synchronously (and may cause + // js calls to methods depending on mRunningState). 
+ mRunningState = RunningState::Running; + + mRecorder->mMimeType = mMimeType; } + mRecorder->DispatchSimpleEvent(NS_LITERAL_STRING("start")); } return NS_OK; })); @@ -1081,7 +1226,7 @@ class MediaRecorder::Session : public PrincipalChangeObserver, void MediaEncoderDataAvailable() { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - Extract(false, nullptr); + Extract(TimeStamp::Now(), false); } void MediaEncoderError() { @@ -1093,14 +1238,11 @@ class MediaRecorder::Session : public PrincipalChangeObserver, void MediaEncoderShutdown() { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - MOZ_ASSERT(mEncoder->IsShutdown()); + mEncoder->AssertShutdownCalled(); - // For the stop event. Let's the creation of the blob to dispatch this - // runnable. - RefPtr destroyRunnable = new DestroyRunnable(this); - - // Forces the last blob even if it's not time for it yet. - Extract(true, destroyRunnable); + mMainThread->Dispatch(NewRunnableMethod( + "MediaRecorder::Session::MediaEncoderShutdown->DoSessionEndTask", this, + &Session::DoSessionEndTask, NS_OK)); // Clean up. mEncoderListener->Forget(); @@ -1117,26 +1259,34 @@ class MediaRecorder::Session : public PrincipalChangeObserver, return mShutdownPromise; } + // This is a coarse calculation and does not reflect the duration of the + // final recording for reasons such as pauses. However it allows us an + // idea of how long people are running their recorders for. 
+ TimeDuration timeDelta = TimeStamp::Now() - mStartTime; + Telemetry::Accumulate(Telemetry::MEDIA_RECORDER_RECORDING_DURATION, + timeDelta.ToSeconds()); + mShutdownPromise = ShutdownPromise::CreateAndResolve(true, __func__); - RefPtr self = this; if (mEncoder) { - auto& encoder = mEncoder; - encoder->Cancel(); - MOZ_RELEASE_ASSERT(mEncoderListener); - auto& encoderListener = mEncoderListener; - mShutdownPromise = mShutdownPromise->Then( - mEncoderThread, __func__, - [encoder, encoderListener]() { - encoder->UnregisterListener(encoderListener); - encoderListener->Forget(); - return ShutdownPromise::CreateAndResolve(true, __func__); - }, - []() { - MOZ_ASSERT_UNREACHABLE("Unexpected reject"); - return ShutdownPromise::CreateAndReject(false, __func__); - }); + mShutdownPromise = + mShutdownPromise + ->Then(mEncoderThread, __func__, + [encoder = mEncoder, encoderListener = mEncoderListener] { + // Unregister the listener before canceling so that we + // don't get the Shutdown notification from Cancel(). + encoder->UnregisterListener(encoderListener); + encoderListener->Forget(); + return ShutdownPromise::CreateAndResolve(true, __func__); + }) + ->Then(mMainThread, __func__, + [encoder = mEncoder] { return encoder->Cancel(); }) + ->Then(mEncoderThread, __func__, [] { + // Meh, this is just to convert the promise type to match + // mShutdownPromise. + return ShutdownPromise::CreateAndResolve(true, __func__); + }); } // Remove main thread state. This could be needed if Stop() wasn't called. @@ -1153,25 +1303,23 @@ class MediaRecorder::Session : public PrincipalChangeObserver, } // Break the cycle reference between Session and MediaRecorder. 
- if (mRecorder) { - mShutdownPromise = mShutdownPromise->Then( - GetCurrentThreadSerialEventTarget(), __func__, - [self]() { - self->mRecorder->RemoveSession(self); - self->mRecorder = nullptr; - return ShutdownPromise::CreateAndResolve(true, __func__); - }, - []() { - MOZ_ASSERT_UNREACHABLE("Unexpected reject"); - return ShutdownPromise::CreateAndReject(false, __func__); - }); - } + mShutdownPromise = mShutdownPromise->Then( + mMainThread, __func__, + [self = RefPtr(this)]() { + self->mRecorder->RemoveSession(self); + return ShutdownPromise::CreateAndResolve(true, __func__); + }, + []() { + MOZ_ASSERT_UNREACHABLE("Unexpected reject"); + return ShutdownPromise::CreateAndReject(false, __func__); + }); if (mEncoderThread) { - RefPtr& encoderThread = mEncoderThread; mShutdownPromise = mShutdownPromise->Then( - GetCurrentThreadSerialEventTarget(), __func__, - [encoderThread]() { return encoderThread->BeginShutdown(); }, + mMainThread, __func__, + [encoderThread = mEncoderThread]() { + return encoderThread->BeginShutdown(); + }, []() { MOZ_ASSERT_UNREACHABLE("Unexpected reject"); return ShutdownPromise::CreateAndReject(false, __func__); @@ -1190,20 +1338,18 @@ class MediaRecorder::Session : public PrincipalChangeObserver, Stopped, // Session has stopped without any error }; - // Hold reference to MediaRecorder that ensure MediaRecorder is alive - // if there is an active session. Access ONLY on main thread. - RefPtr mRecorder; + // Our associated MediaRecorder. + const RefPtr mRecorder; // Stream currently recorded. RefPtr mMediaStream; - // True after we have decided on the track set to use for the recording. - bool mMediaStreamReady; - // Tracks currently recorded. This should be a subset of mMediaStream's track // set. nsTArray> mMediaStreamTracks; + // Main thread used for MozPromise operations. + const RefPtr mMainThread; // Runnable thread for reading data from MediaEncoder. RefPtr mEncoderThread; // MediaEncoder pipeline. 
@@ -1216,52 +1362,38 @@ class MediaRecorder::Session : public PrincipalChangeObserver, RefPtr mMutableBlobStorage; // Max memory to use for the MutableBlobStorage. uint64_t mMaxMemory; - // Current session mimeType - nsString mMimeType; + // If set, is a promise for the latest GatherBlob() operation. Allows + // GatherBlob() operations to be serialized in order to avoid races. + RefPtr mBlobPromise; + // Session mimeType + const nsString mMimeType; // Timestamp of the last fired dataavailable event. TimeStamp mLastBlobTimeStamp; // The interval of passing encoded data from MutableBlobStorage to // onDataAvailable handler. - const uint32_t mTimeSlice; - // The session's current main thread state. The error type gets setwhen ending - // a recording with an error. An NS_OK error is invalid. + const TimeDuration mTimeslice; + // The video bitrate the recorder was configured with. + const uint32_t mVideoBitsPerSecond; + // The audio bitrate the recorder was configured with. + const uint32_t mAudioBitsPerSecond; + // The time this session started, for telemetry. + const TimeStamp mStartTime; + // The session's current main thread state. The error type gets set when + // ending a recording with an error. An NS_OK error is invalid. // Main thread only. Result mRunningState; + // Shutdown blocker unique for this Session. Main thread only. 
+ RefPtr mShutdownBlocker; }; -NS_IMPL_ISUPPORTS_INHERITED0(MediaRecorder::Session::PushBlobRunnable, Runnable) - MediaRecorder::~MediaRecorder() { LOG(LogLevel::Debug, ("~MediaRecorder (%p)", this)); UnRegisterActivityObserver(); } -MediaRecorder::MediaRecorder(DOMMediaStream& aSourceMediaTrack, - nsPIDOMWindowInner* aOwnerWindow) - : DOMEventTargetHelper(aOwnerWindow), - mAudioNodeOutput(0), - mState(RecordingState::Inactive), - mAudioBitsPerSecond(0), - mVideoBitsPerSecond(0), - mBitsPerSecond(0) { +MediaRecorder::MediaRecorder(nsPIDOMWindowInner* aOwnerWindow) + : DOMEventTargetHelper(aOwnerWindow) { MOZ_ASSERT(aOwnerWindow); - mDOMStream = &aSourceMediaTrack; - - RegisterActivityObserver(); -} - -MediaRecorder::MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput, - nsPIDOMWindowInner* aOwnerWindow) - : DOMEventTargetHelper(aOwnerWindow), - mAudioNodeOutput(aSrcOutput), - mState(RecordingState::Inactive), - mAudioBitsPerSecond(0), - mVideoBitsPerSecond(0), - mBitsPerSecond(0) { - MOZ_ASSERT(aOwnerWindow); - - mAudioNode = &aSrcAudioNode; - RegisterActivityObserver(); } @@ -1282,116 +1414,306 @@ void MediaRecorder::UnRegisterActivityObserver() { } } -void MediaRecorder::SetMimeType(const nsString& aMimeType) { - mMimeType = aMimeType; -} - void MediaRecorder::GetMimeType(nsString& aMimeType) { aMimeType = mMimeType; } -void MediaRecorder::Start(const Optional& aTimeSlice, +void MediaRecorder::Start(const Optional& aTimeslice, ErrorResult& aResult) { LOG(LogLevel::Debug, ("MediaRecorder.Start %p", this)); InitializeDomExceptions(); + // When a MediaRecorder object’s start() method is invoked, the UA MUST run + // the following steps: + + // 1. Let recorder be the MediaRecorder object on which the method was + // invoked. + + // 2. Let timeslice be the method’s first argument, if provided, or undefined. + TimeDuration timeslice = + aTimeslice.WasPassed() + ? TimeDuration::FromMilliseconds(aTimeslice.Value()) + : TimeDuration::Forever(); + + // 3. 
Let stream be the value of recorder’s stream attribute. + + // 4. Let tracks be the set of live tracks in stream’s track set. + nsTArray> tracks; + if (mStream) { + mStream->GetTracks(tracks); + } + for (const auto& t : nsTArray>(tracks)) { + if (t->Ended()) { + tracks.RemoveElement(t); + } + } + + // 5. If the value of recorder’s state attribute is not inactive, throw an + // InvalidStateError DOMException and abort these steps. if (mState != RecordingState::Inactive) { - aResult.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + aResult.ThrowDOMException( + NS_ERROR_DOM_INVALID_STATE_ERR, + NS_LITERAL_CSTRING("The MediaRecorder has already been started")); return; } - nsTArray> tracks; - if (mDOMStream) { - mDOMStream->GetTracks(tracks); + // 6. If the isolation properties of stream disallow access from recorder, + // throw a SecurityError DOMException and abort these steps. + if (mStream) { + RefPtr streamPrincipal = mStream->GetPrincipal(); + if (!PrincipalSubsumes(this, streamPrincipal)) { + aResult.ThrowDOMException( + NS_ERROR_DOM_SECURITY_ERR, + NS_LITERAL_CSTRING("The MediaStream's isolation properties disallow " + "access from MediaRecorder")); + return; + } } - if (!tracks.IsEmpty()) { - // If there are tracks already available that we're not allowed - // to record, we should throw a security error. - RefPtr streamPrincipal = mDOMStream->GetPrincipal(); - bool subsumes = false; - nsPIDOMWindowInner* window; - Document* doc; - if (!(window = GetOwner()) || !(doc = window->GetExtantDoc()) || - NS_FAILED(doc->NodePrincipal()->Subsumes(streamPrincipal, &subsumes)) || - !subsumes) { - aResult.Throw(NS_ERROR_DOM_SECURITY_ERR); + if (mAudioNode && !AudioNodePrincipalSubsumes(this, mAudioNode)) { + LOG(LogLevel::Warning, + ("MediaRecorder %p Start AudioNode principal check failed", this)); + aResult.ThrowDOMException( + NS_ERROR_DOM_SECURITY_ERR, + NS_LITERAL_CSTRING("The AudioNode's isolation properties disallow " + "access from MediaRecorder")); + return; + } + + // 7. 
If stream is inactive, throw a NotSupportedError DOMException and abort + // these steps. + if (mStream && !mStream->Active()) { + aResult.ThrowDOMException( + NS_ERROR_DOM_NOT_SUPPORTED_ERR, + NS_LITERAL_CSTRING("The MediaStream is inactive")); + return; + } + + // 8. If the [[ConstrainedMimeType]] slot specifies a media type, container, + // or codec, then run the following sub steps: + // 1. Constrain the configuration of recorder to the media type, container, + // and codec specified in the [[ConstrainedMimeType]] slot. + // 2. For each track in tracks, if the User Agent cannot record the track + // using the current configuration, then throw a NotSupportedError + // DOMException and abort all steps. + Maybe mime; + if (mConstrainedMimeType.Length() > 0) { + mime = MakeMediaContainerType(mConstrainedMimeType); + MOZ_DIAGNOSTIC_ASSERT( + mime, + "Invalid media MIME type should have been caught by IsTypeSupported"); + } + for (const auto& track : tracks) { + TypeSupport support = CanRecordWith(track, mime, mConstrainedMimeType); + if (support != TypeSupport::Supported) { + nsString id; + track->GetId(id); + aResult.ThrowDOMException( + NS_ERROR_DOM_NOT_SUPPORTED_ERR, + nsPrintfCString( + "%s track cannot be recorded: %s", + track->AsAudioStreamTrack() ? "An audio" : "A video", + TypeSupportToCString(support, mConstrainedMimeType).get())); + return; + } + } + if (mAudioNode) { + TypeSupport support = CanRecordAudioTrackWith(mime, mConstrainedMimeType); + if (support != TypeSupport::Supported) { + aResult.ThrowDOMException( + NS_ERROR_DOM_NOT_SUPPORTED_ERR, + nsPrintfCString( + "An AudioNode cannot be recorded: %s", + TypeSupportToCString(support, mConstrainedMimeType).get())); return; } } - uint32_t timeSlice = aTimeSlice.WasPassed() ? aTimeSlice.Value() : 0; - MediaRecorderReporter::AddMediaRecorder(this); + // 9. 
If recorder’s [[ConstrainedBitsPerSecond]] slot is not undefined, set + // recorder’s videoBitsPerSecond and audioBitsPerSecond attributes to + // values the User Agent deems reasonable for the respective media types, + // for recording all tracks in tracks, such that the sum of + // videoBitsPerSecond and audioBitsPerSecond is close to the value of + // recorder’s + // [[ConstrainedBitsPerSecond]] slot. + if (mConstrainedBitsPerSecond) { + uint8_t numVideoTracks = 0; + uint8_t numAudioTracks = 0; + for (const auto& t : tracks) { + if (t->AsVideoStreamTrack() && numVideoTracks < UINT8_MAX) { + ++numVideoTracks; + } else if (t->AsAudioStreamTrack() && numAudioTracks < UINT8_MAX) { + ++numAudioTracks; + } + } + if (mAudioNode) { + MOZ_DIAGNOSTIC_ASSERT(!mStream); + ++numAudioTracks; + } + SelectBitrates(*mConstrainedBitsPerSecond, numVideoTracks, + &mVideoBitsPerSecond, numAudioTracks, &mAudioBitsPerSecond); + } + + // 10. Let videoBitrate be the value of recorder’s videoBitsPerSecond + // attribute, and constrain the configuration of recorder to target an + // aggregate bitrate of videoBitrate bits per second for all video tracks + // recorder will be recording. videoBitrate is a hint for the encoder and + // the value might be surpassed, not achieved, or only be achieved over a + // long period of time. + const uint32_t videoBitrate = mVideoBitsPerSecond; + + // 11. Let audioBitrate be the value of recorder’s audioBitsPerSecond + // attribute, and constrain the configuration of recorder to target an + // aggregate bitrate of audioBitrate bits per second for all audio tracks + // recorder will be recording. audioBitrate is a hint for the encoder and + // the value might be surpassed, not achieved, or only be achieved over a + // long period of time. + const uint32_t audioBitrate = mAudioBitsPerSecond; + + // 12. Set recorder’s state to recording mState = RecordingState::Recording; + + MediaRecorderReporter::AddMediaRecorder(this); // Start a session. 
mSessions.AppendElement(); - mSessions.LastElement() = new Session(this, timeSlice); + mSessions.LastElement() = new Session(this, std::move(tracks), timeslice, + videoBitrate, audioBitrate); mSessions.LastElement()->Start(); - mStartTime = TimeStamp::Now(); - Telemetry::ScalarAdd(Telemetry::ScalarID::MEDIARECORDER_RECORDING_COUNT, 1); } void MediaRecorder::Stop(ErrorResult& aResult) { LOG(LogLevel::Debug, ("MediaRecorder.Stop %p", this)); MediaRecorderReporter::RemoveMediaRecorder(this); + + // When a MediaRecorder object’s stop() method is invoked, the UA MUST run the + // following steps: + + // 1. Let recorder be the MediaRecorder object on which the method was + // invoked. + + // 2. If recorder’s state attribute is inactive, abort these steps. if (mState == RecordingState::Inactive) { return; } - mState = RecordingState::Inactive; + + // 3. Inactivate the recorder with recorder. + Inactivate(); + + // 4. Queue a task, using the DOM manipulation task source, that runs the + // following steps: + // 1. Stop gathering data. + // 2. Let blob be the Blob of collected data so far, then fire a blob event + // named dataavailable at recorder with blob. + // 3. Fire an event named stop at recorder. MOZ_ASSERT(mSessions.Length() > 0); mSessions.LastElement()->Stop(); + + // 5. return undefined. } void MediaRecorder::Pause(ErrorResult& aResult) { LOG(LogLevel::Debug, ("MediaRecorder.Pause %p", this)); + + // When a MediaRecorder object’s pause() method is invoked, the UA MUST run + // the following steps: + + // 1. If state is inactive, throw an InvalidStateError DOMException and abort + // these steps. if (mState == RecordingState::Inactive) { - aResult.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + aResult.ThrowDOMException( + NS_ERROR_DOM_INVALID_STATE_ERR, + NS_LITERAL_CSTRING("The MediaRecorder is inactive")); return; } + // 2. If state is paused, abort these steps. 
if (mState == RecordingState::Paused) { return; } - MOZ_ASSERT(mSessions.Length() > 0); - nsresult rv = mSessions.LastElement()->Pause(); - if (NS_FAILED(rv)) { - NotifyError(rv); - return; - } - + // 3. Set state to paused, and queue a task, using the DOM manipulation task + // source, that runs the following steps: mState = RecordingState::Paused; + + // XXX - We pause synchronously pending spec issue + // https://github.com/w3c/mediacapture-record/issues/131 + // 1. Stop gathering data into blob (but keep it available so that + // recording can be resumed in the future). + MOZ_ASSERT(!mSessions.IsEmpty()); + mSessions.LastElement()->Pause(); + + NS_DispatchToMainThread(NS_NewRunnableFunction( + "MediaRecorder::Pause", [recorder = RefPtr(this)] { + // 2. Let target be the MediaRecorder context object. Fire an event + // named pause at target. + recorder->DispatchSimpleEvent(NS_LITERAL_STRING("pause")); + })); + + // 4. return undefined. } void MediaRecorder::Resume(ErrorResult& aResult) { LOG(LogLevel::Debug, ("MediaRecorder.Resume %p", this)); + + // When a MediaRecorder object’s resume() method is invoked, the UA MUST run + // the following steps: + + // 1. If state is inactive, throw an InvalidStateError DOMException and abort + // these steps. if (mState == RecordingState::Inactive) { - aResult.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + aResult.ThrowDOMException( + NS_ERROR_DOM_INVALID_STATE_ERR, + NS_LITERAL_CSTRING("The MediaRecorder is inactive")); return; } + // 2. If state is recording, abort these steps. if (mState == RecordingState::Recording) { return; } - MOZ_ASSERT(mSessions.Length() > 0); - nsresult rv = mSessions.LastElement()->Resume(); - if (NS_FAILED(rv)) { - NotifyError(rv); - return; - } - + // 3. 
Set state to recording, and queue a task, using the DOM manipulation + // task source, that runs the following steps: mState = RecordingState::Recording; + + // XXX - We resume synchronously pending spec issue + // https://github.com/w3c/mediacapture-record/issues/131 + // 1. Resume (or continue) gathering data into the current blob. + MOZ_ASSERT(!mSessions.IsEmpty()); + mSessions.LastElement()->Resume(); + + NS_DispatchToMainThread(NS_NewRunnableFunction( + "MediaRecorder::Resume", [recorder = RefPtr(this)] { + // 2. Let target be the MediaRecorder context object. Fire an event + // named resume at target. + recorder->DispatchSimpleEvent(NS_LITERAL_STRING("resume")); + })); + + // 4. return undefined. } void MediaRecorder::RequestData(ErrorResult& aResult) { + LOG(LogLevel::Debug, ("MediaRecorder.RequestData %p", this)); + + // When a MediaRecorder object’s requestData() method is invoked, the UA MUST + // run the following steps: + + // 1. If state is inactive throw an InvalidStateError DOMException and + // terminate these steps. Otherwise the UA MUST queue a task, using the DOM + // manipulation task source, that runs the following steps: + // 1. Let blob be the Blob of collected data so far and let target be the + // MediaRecorder context object, then fire a blob event named + // dataavailable at target with blob. (Note that blob will be empty if no + // data has been gathered yet.) + // 2. Create a new Blob and gather subsequent data into it. if (mState == RecordingState::Inactive) { - aResult.Throw(NS_ERROR_DOM_INVALID_STATE_ERR); + aResult.ThrowDOMException( + NS_ERROR_DOM_INVALID_STATE_ERR, + NS_LITERAL_CSTRING("The MediaRecorder is inactive")); return; } MOZ_ASSERT(mSessions.Length() > 0); - nsresult rv = mSessions.LastElement()->RequestData(); - if (NS_FAILED(rv)) { - NotifyError(rv); - } + mSessions.LastElement()->RequestData(); + + // 2. return undefined. 
} JSObject* MediaRecorder::WrapObject(JSContext* aCx, @@ -1402,7 +1724,7 @@ JSObject* MediaRecorder::WrapObject(JSContext* aCx, /* static */ already_AddRefed MediaRecorder::Constructor( const GlobalObject& aGlobal, DOMMediaStream& aStream, - const MediaRecorderOptions& aInitDict, ErrorResult& aRv) { + const MediaRecorderOptions& aOptions, ErrorResult& aRv) { nsCOMPtr ownerWindow = do_QueryInterface(aGlobal.GetAsSupports()); if (!ownerWindow) { @@ -1410,20 +1732,84 @@ already_AddRefed MediaRecorder::Constructor( return nullptr; } - if (!IsTypeSupported(aInitDict.mMimeType)) { - aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + // When the MediaRecorder() constructor is invoked, the User Agent MUST run + // the following steps: + + // 1. Let stream be the constructor’s first argument. + + // 2. Let options be the constructor’s second argument. + + // 3. If invoking is type supported with options’ mimeType member as its + // argument returns false, throw a NotSupportedError DOMException and abort + // these steps. + TypeSupport support = IsTypeSupportedImpl(aOptions.mMimeType); + if (support != TypeSupport::Supported) { + // This catches also the empty string mimeType when support for any encoders + // has been disabled. + aRv.ThrowDOMException(NS_ERROR_DOM_NOT_SUPPORTED_ERR, + TypeSupportToCString(support, aOptions.mMimeType)); return nullptr; } - RefPtr object = new MediaRecorder(aStream, ownerWindow); - object->SetOptions(aInitDict); - return object.forget(); + // 4. Let recorder be a newly constructed MediaRecorder object. + RefPtr recorder = new MediaRecorder(ownerWindow); + + // 5. Let recorder have a [[ConstrainedMimeType]] internal slot, initialized + // to the value of options' mimeType member. + recorder->mConstrainedMimeType = aOptions.mMimeType; + + // 6. Let recorder have a [[ConstrainedBitsPerSecond]] internal slot, + // initialized to the value of options’ bitsPerSecond member, if it is + // present, otherwise undefined. 
+ recorder->mConstrainedBitsPerSecond = + aOptions.mBitsPerSecond.WasPassed() + ? Some(aOptions.mBitsPerSecond.Value()) + : Nothing(); + + // 7. Initialize recorder’s stream attribute to stream. + recorder->mStream = &aStream; + + // 8. Initialize recorder’s mimeType attribute to the value of recorder’s + // [[ConstrainedMimeType]] slot. + recorder->mMimeType = recorder->mConstrainedMimeType; + + // 9. Initialize recorder’s state attribute to inactive. + recorder->mState = RecordingState::Inactive; + + // 10. Initialize recorder’s videoBitsPerSecond attribute to the value of + // options’ videoBitsPerSecond member, if it is present. Otherwise, choose + // a target value the User Agent deems reasonable for video. + recorder->mVideoBitsPerSecond = aOptions.mVideoBitsPerSecond.WasPassed() + ? aOptions.mVideoBitsPerSecond.Value() + : DEFAULT_VIDEO_BITRATE_BPS; + + // 11. Initialize recorder’s audioBitsPerSecond attribute to the value of + // options’ audioBitsPerSecond member, if it is present. Otherwise, choose + // a target value the User Agent deems reasonable for audio. + recorder->mAudioBitsPerSecond = aOptions.mAudioBitsPerSecond.WasPassed() + ? aOptions.mAudioBitsPerSecond.Value() + : DEFAULT_AUDIO_BITRATE_BPS; + + // 12. If recorder’s [[ConstrainedBitsPerSecond]] slot is not undefined, set + // recorder’s videoBitsPerSecond and audioBitsPerSecond attributes to + // values the User Agent deems reasonable for the respective media types, + // such that the sum of videoBitsPerSecond and audioBitsPerSecond is close + // to the value of recorder’s [[ConstrainedBitsPerSecond]] slot. + if (recorder->mConstrainedBitsPerSecond) { + SelectBitrates(*recorder->mConstrainedBitsPerSecond, 1, + &recorder->mVideoBitsPerSecond, 1, + &recorder->mAudioBitsPerSecond); + } + + // 13. Return recorder. 
+ return recorder.forget(); } /* static */ already_AddRefed MediaRecorder::Constructor( - const GlobalObject& aGlobal, AudioNode& aSrcAudioNode, uint32_t aSrcOutput, - const MediaRecorderOptions& aInitDict, ErrorResult& aRv) { + const GlobalObject& aGlobal, AudioNode& aAudioNode, + uint32_t aAudioNodeOutput, const MediaRecorderOptions& aOptions, + ErrorResult& aRv) { // Allow recording from audio node only when pref is on. if (!Preferences::GetBool("media.recorder.audio_node.enabled", false)) { // Pretending that this constructor is not defined. @@ -1440,143 +1826,120 @@ already_AddRefed MediaRecorder::Constructor( return nullptr; } - // aSrcOutput doesn't matter to destination node because it has no output. - if (aSrcAudioNode.NumberOfOutputs() > 0 && - aSrcOutput >= aSrcAudioNode.NumberOfOutputs()) { - aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR); + // aAudioNodeOutput doesn't matter to destination node because it has no + // output. + if (aAudioNode.NumberOfOutputs() > 0 && + aAudioNodeOutput >= aAudioNode.NumberOfOutputs()) { + aRv.ThrowDOMException(NS_ERROR_DOM_INDEX_SIZE_ERR, + NS_LITERAL_CSTRING("Invalid AudioNode output index")); return nullptr; } - if (!IsTypeSupported(aInitDict.mMimeType)) { - aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR); + // When the MediaRecorder() constructor is invoked, the User Agent MUST run + // the following steps: + + // 1. Let stream be the constructor’s first argument. (we'll let audioNode be + // the first arg, and audioNodeOutput the second) + + // 2. Let options be the constructor’s second argument. (we'll let options be + // the third arg) + + // 3. If invoking is type supported with options’ mimeType member as its + // argument returns false, throw a NotSupportedError DOMException and abort + // these steps. + TypeSupport support = IsTypeSupportedImpl(aOptions.mMimeType); + if (support != TypeSupport::Supported) { + // This catches also the empty string mimeType when support for any encoders + // has been disabled. 
+ aRv.ThrowDOMException(NS_ERROR_DOM_NOT_SUPPORTED_ERR, + TypeSupportToCString(support, aOptions.mMimeType)); return nullptr; } - RefPtr object = - new MediaRecorder(aSrcAudioNode, aSrcOutput, ownerWindow); - object->SetOptions(aInitDict); - return object.forget(); -} + // 4. Let recorder be a newly constructed MediaRecorder object. + RefPtr recorder = new MediaRecorder(ownerWindow); -void MediaRecorder::SetOptions(const MediaRecorderOptions& aInitDict) { - SetMimeType(aInitDict.mMimeType); - mAudioBitsPerSecond = aInitDict.mAudioBitsPerSecond.WasPassed() - ? aInitDict.mAudioBitsPerSecond.Value() - : 0; - mVideoBitsPerSecond = aInitDict.mVideoBitsPerSecond.WasPassed() - ? aInitDict.mVideoBitsPerSecond.Value() - : 0; - mBitsPerSecond = aInitDict.mBitsPerSecond.WasPassed() - ? aInitDict.mBitsPerSecond.Value() - : 0; - // We're not handling dynamic changes yet. Eventually we'll handle - // setting audio, video and/or total -- and anything that isn't set, - // we'll derive. Calculated versions require querying bitrates after - // the encoder is Init()ed. This happens only after data is - // available and thus requires dynamic changes. - // - // Until dynamic changes are supported, I prefer to be safe and err - // slightly high - if (aInitDict.mBitsPerSecond.WasPassed() && - !aInitDict.mVideoBitsPerSecond.WasPassed()) { - mVideoBitsPerSecond = mBitsPerSecond; + // 5. Let recorder have a [[ConstrainedMimeType]] internal slot, initialized + // to the value of options' mimeType member. + recorder->mConstrainedMimeType = aOptions.mMimeType; + + // 6. Let recorder have a [[ConstrainedBitsPerSecond]] internal slot, + // initialized to the value of options’ bitsPerSecond member, if it is + // present, otherwise undefined. + recorder->mConstrainedBitsPerSecond = + aOptions.mBitsPerSecond.WasPassed() + ? Some(aOptions.mBitsPerSecond.Value()) + : Nothing(); + + // 7. Initialize recorder’s stream attribute to stream. 
(make that the + // audioNode and audioNodeOutput equivalents) + recorder->mAudioNode = &aAudioNode; + recorder->mAudioNodeOutput = aAudioNodeOutput; + + // 8. Initialize recorder’s mimeType attribute to the value of recorder’s + // [[ConstrainedMimeType]] slot. + recorder->mMimeType = recorder->mConstrainedMimeType; + + // 9. Initialize recorder’s state attribute to inactive. + recorder->mState = RecordingState::Inactive; + + // 10. Initialize recorder’s videoBitsPerSecond attribute to the value of + // options’ videoBitsPerSecond member, if it is present. Otherwise, choose + // a target value the User Agent deems reasonable for video. + recorder->mVideoBitsPerSecond = aOptions.mVideoBitsPerSecond.WasPassed() + ? aOptions.mVideoBitsPerSecond.Value() + : DEFAULT_VIDEO_BITRATE_BPS; + + // 11. Initialize recorder’s audioBitsPerSecond attribute to the value of + // options’ audioBitsPerSecond member, if it is present. Otherwise, choose + // a target value the User Agent deems reasonable for audio. + recorder->mAudioBitsPerSecond = aOptions.mAudioBitsPerSecond.WasPassed() + ? aOptions.mAudioBitsPerSecond.Value() + : DEFAULT_AUDIO_BITRATE_BPS; + + // 12. If recorder’s [[ConstrainedBitsPerSecond]] slot is not undefined, set + // recorder’s videoBitsPerSecond and audioBitsPerSecond attributes to + // values the User Agent deems reasonable for the respective media types, + // such that the sum of videoBitsPerSecond and audioBitsPerSecond is close + // to the value of recorder’s [[ConstrainedBitsPerSecond]] slot. 
+ if (recorder->mConstrainedBitsPerSecond) { + SelectBitrates(*recorder->mConstrainedBitsPerSecond, 1, + &recorder->mVideoBitsPerSecond, 1, + &recorder->mAudioBitsPerSecond); } -} -static char const* const gWebMVideoEncoderCodecs[4] = { - "opus", - "vp8", - "vp8.0", - // no VP9 yet - nullptr, -}; -static char const* const gWebMAudioEncoderCodecs[4] = { - "opus", - nullptr, -}; -static char const* const gOggAudioEncoderCodecs[2] = { - "opus", - // we could support vorbis here too, but don't - nullptr, -}; - -template -static bool CodecListContains(char const* const* aCodecs, - const String& aCodec) { - for (int32_t i = 0; aCodecs[i]; ++i) { - if (aCodec.EqualsASCII(aCodecs[i])) return true; - } - return false; + // 13. Return recorder. + return recorder.forget(); } /* static */ bool MediaRecorder::IsTypeSupported(GlobalObject& aGlobal, const nsAString& aMIMEType) { - return IsTypeSupported(aMIMEType); + return MediaRecorder::IsTypeSupported(aMIMEType); } /* static */ bool MediaRecorder::IsTypeSupported(const nsAString& aMIMEType) { - char const* const* codeclist = nullptr; - - if (aMIMEType.IsEmpty()) { - return true; - } - - nsContentTypeParser parser(aMIMEType); - nsAutoString mimeType; - nsresult rv = parser.GetType(mimeType); - if (NS_FAILED(rv)) { - return false; - } - - // effectively a 'switch (mimeType) {' - if (mimeType.EqualsLiteral(AUDIO_OGG)) { - if (MediaDecoder::IsOggEnabled() && MediaDecoder::IsOpusEnabled()) { - codeclist = gOggAudioEncoderCodecs; - } - } -#ifdef MOZ_WEBM_ENCODER - else if ((mimeType.EqualsLiteral(VIDEO_WEBM) || - mimeType.EqualsLiteral(AUDIO_WEBM)) && - MediaEncoder::IsWebMEncoderEnabled()) { - if (mimeType.EqualsLiteral(AUDIO_WEBM)) { - codeclist = gWebMAudioEncoderCodecs; - } else { - codeclist = gWebMVideoEncoderCodecs; - } - } -#endif - - // codecs don't matter if we don't support the container - if (!codeclist) { - return false; - } - // now filter on codecs, and if needed rescind support - nsAutoString codecstring; - rv = 
parser.GetParameter("codecs", codecstring); - - nsTArray codecs; - if (!ParseCodecsString(codecstring, codecs)) { - return false; - } - for (const nsString& codec : codecs) { - if (!CodecListContains(codeclist, codec)) { - // Totally unsupported codec - return false; - } - } - - return true; + return IsTypeSupportedImpl(aMIMEType) == TypeSupport::Supported; } -nsresult MediaRecorder::CreateAndDispatchBlobEvent(Blob* aBlob) { +nsresult MediaRecorder::CreateAndDispatchBlobEvent(BlobImpl* aBlobImpl) { MOZ_ASSERT(NS_IsMainThread(), "Not running on main thread"); + if (!GetOwnerGlobal()) { + // This MediaRecorder has been disconnected in the meantime. + return NS_ERROR_FAILURE; + } + + RefPtr blob = Blob::Create(GetOwnerGlobal(), aBlobImpl); + if (NS_WARN_IF(!blob)) { + return NS_ERROR_FAILURE; + } + BlobEventInit init; init.mBubbles = false; init.mCancelable = false; - init.mData = aBlob; + init.mData = blob; RefPtr event = BlobEvent::Constructor(this, NS_LITERAL_STRING("dataavailable"), init); @@ -1669,25 +2032,26 @@ void MediaRecorder::NotifyOwnerDocumentActivityChanged() { } } -void MediaRecorder::ForceInactive() { - LOG(LogLevel::Debug, ("MediaRecorder.ForceInactive %p", this)); - mState = RecordingState::Inactive; -} +void MediaRecorder::Inactivate() { + LOG(LogLevel::Debug, ("MediaRecorder.Inactivate %p", this)); + // The Inactivate the recorder algorithm given a recorder, is as follows: -void MediaRecorder::StopForSessionDestruction() { - LOG(LogLevel::Debug, ("MediaRecorder.StopForSessionDestruction %p", this)); - MediaRecorderReporter::RemoveMediaRecorder(this); - // We do not perform a mState != RecordingState::Recording) check here as - // we may already be inactive due to ForceInactive(). + // 1. Set recorder’s mimeType attribute to the value of the + // [[ConstrainedMimeType]] slot. + mMimeType = mConstrainedMimeType; + + // 2. Set recorder’s state attribute to inactive. 
mState = RecordingState::Inactive; - MOZ_ASSERT(mSessions.Length() > 0); - mSessions.LastElement()->Stop(); - // This is a coarse calculation and does not reflect the duration of the - // final recording for reasons such as pauses. However it allows us an idea - // of how long people are running their recorders for. - TimeDuration timeDelta = TimeStamp::Now() - mStartTime; - Telemetry::Accumulate(Telemetry::MEDIA_RECORDER_RECORDING_DURATION, - timeDelta.ToSeconds()); + + // 3. If recorder’s [[ConstrainedBitsPerSecond]] slot is not undefined, set + // recorder’s videoBitsPerSecond and audioBitsPerSecond attributes to + // values the User Agent deems reasonable for the respective media types, + // such that the sum of videoBitsPerSecond and audioBitsPerSecond is close + // to the value of recorder’s [[ConstrainedBitsPerSecond]] slot. + if (mConstrainedBitsPerSecond) { + SelectBitrates(*mConstrainedBitsPerSecond, 1, &mVideoBitsPerSecond, 1, + &mAudioBitsPerSecond); + } } void MediaRecorder::InitializeDomExceptions() { @@ -1728,3 +2092,5 @@ StaticRefPtr MediaRecorderReporter::sUniqueInstance; } // namespace dom } // namespace mozilla + +#undef LOG diff --git a/dom/media/MediaRecorder.h b/dom/media/MediaRecorder.h index 886c7419b6..ede6a63ca7 100644 --- a/dom/media/MediaRecorder.h +++ b/dom/media/MediaRecorder.h @@ -23,7 +23,7 @@ class GlobalObject; namespace dom { class AudioNode; -class Blob; +class BlobImpl; class Document; class DOMException; @@ -47,10 +47,7 @@ class MediaRecorder final : public DOMEventTargetHelper, public: class Session; - MediaRecorder(DOMMediaStream& aSourceMediaTrack, - nsPIDOMWindowInner* aOwnerWindow); - MediaRecorder(AudioNode& aSrcAudioNode, uint32_t aSrcOutput, - nsPIDOMWindowInner* aOwnerWindow); + explicit MediaRecorder(nsPIDOMWindowInner* aOwnerWindow); static nsTArray> GetSessions(); @@ -58,8 +55,6 @@ class MediaRecorder final : public DOMEventTargetHelper, JSObject* WrapObject(JSContext* aCx, JS::Handle aGivenProto) override; - 
nsPIDOMWindowInner* GetParentObject() { return GetOwner(); } - NS_DECL_ISUPPORTS_INHERITED NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(MediaRecorder, DOMEventTargetHelper) @@ -79,23 +74,24 @@ class MediaRecorder final : public DOMEventTargetHelper, // Extract encoded data Blob from MutableBlobStorage. void RequestData(ErrorResult& aResult); // Return the The DOMMediaStream passed from UA. - DOMMediaStream* Stream() const { return mDOMStream; } + DOMMediaStream* Stream() const { return mStream; } // Return the current encoding MIME type selected by the MediaEncoder. void GetMimeType(nsString& aMimeType); // The current state of the MediaRecorder object. RecordingState State() const { return mState; } - static bool IsTypeSupported(GlobalObject& aGlobal, const nsAString& aType); - static bool IsTypeSupported(const nsAString& aType); + static bool IsTypeSupported(GlobalObject& aGlobal, + const nsAString& aMIMEType); + static bool IsTypeSupported(const nsAString& aMIMEType); // Construct a recorder with a DOM media stream object as its source. static already_AddRefed Constructor( const GlobalObject& aGlobal, DOMMediaStream& aStream, - const MediaRecorderOptions& aInitDict, ErrorResult& aRv); + const MediaRecorderOptions& aOptions, ErrorResult& aRv); // Construct a recorder with a Web Audio destination node as its source. 
static already_AddRefed Constructor( - const GlobalObject& aGlobal, AudioNode& aSrcAudioNode, - uint32_t aSrcOutput, const MediaRecorderOptions& aInitDict, + const GlobalObject& aGlobal, AudioNode& aAudioNode, + uint32_t aAudioNodeOutput, const MediaRecorderOptions& aOptions, ErrorResult& aRv); /* @@ -112,27 +108,22 @@ class MediaRecorder final : public DOMEventTargetHelper, IMPL_EVENT_HANDLER(pause) IMPL_EVENT_HANDLER(resume) IMPL_EVENT_HANDLER(error) - IMPL_EVENT_HANDLER(warning) NS_DECL_NSIDOCUMENTACTIVITY - uint32_t GetAudioBitrate() { return mAudioBitsPerSecond; } - uint32_t GetVideoBitrate() { return mVideoBitsPerSecond; } - uint32_t GetBitrate() { return mBitsPerSecond; } + uint32_t AudioBitsPerSecond() const { return mAudioBitsPerSecond; } + uint32_t VideoBitsPerSecond() const { return mVideoBitsPerSecond; } protected: virtual ~MediaRecorder(); MediaRecorder& operator=(const MediaRecorder& x) = delete; // Create dataavailable event with Blob data and it runs in main thread - nsresult CreateAndDispatchBlobEvent(Blob* aBlob); + nsresult CreateAndDispatchBlobEvent(BlobImpl* aBlobImpl); // Creating a simple event to notify UA simple event. void DispatchSimpleEvent(const nsAString& aStr); // Creating a error event with message. void NotifyError(nsresult aRv); - // Set encoded MIME type. - void SetMimeType(const nsString& aMimeType); - void SetOptions(const MediaRecorderOptions& aInitDict); MediaRecorder(const MediaRecorder& x) = delete; // prevent bad usage // Remove session pointer. @@ -143,38 +134,33 @@ class MediaRecorder final : public DOMEventTargetHelper, // available at the time the error event is fired. Note, depending on when // this is called there may not be a JS stack to capture. void InitializeDomExceptions(); - // Set the recorder state to inactive. This is needed to handle error states - // in the recorder where state must transition to inactive before full - // stoppage can be reached. 
- void ForceInactive(); + // Runs the "Inactivate the recorder" algorithm. + void Inactivate(); // Stop the recorder and its internal session. This should be used by // sessions that are in the process of being destroyed. void StopForSessionDestruction(); // DOM wrapper for source media stream. Will be null when input is audio node. - RefPtr mDOMStream; + RefPtr mStream; // Source audio node. Will be null when input is a media stream. RefPtr mAudioNode; // Source audio node's output index. Will be zero when input is a media // stream. - const uint32_t mAudioNodeOutput; + uint32_t mAudioNodeOutput = 0; // The current state of the MediaRecorder object. - RecordingState mState; + RecordingState mState = RecordingState::Inactive; // Hold the sessions reference and clean it when the DestroyRunnable for a // session is running. nsTArray> mSessions; RefPtr mDocument; - // It specifies the container format as well as the audio and video capture - // formats. nsString mMimeType; + nsString mConstrainedMimeType; - uint32_t mAudioBitsPerSecond; - uint32_t mVideoBitsPerSecond; - uint32_t mBitsPerSecond; - - TimeStamp mStartTime; + uint32_t mAudioBitsPerSecond = 0; + uint32_t mVideoBitsPerSecond = 0; + Maybe mConstrainedBitsPerSecond; // DOMExceptions that are created early and possibly thrown in NotifyError. // Creating them early allows us to capture the JS stack for which cannot be diff --git a/dom/media/MediaTrackGraph.cpp b/dom/media/MediaTrackGraph.cpp index 4198ff1ab9..3b52f5c9c1 100644 --- a/dom/media/MediaTrackGraph.cpp +++ b/dom/media/MediaTrackGraph.cpp @@ -99,6 +99,8 @@ void MediaTrackGraphImpl::RemoveTrackGraphThread(MediaTrack* aTrack) { // Ensure that mFirstCycleBreaker and mMixer are updated when necessary. 
SetTrackOrderDirty(); + UnregisterAllAudioOutputs(aTrack); + if (aTrack->IsSuspended()) { mSuspendedTracks.RemoveElement(aTrack); } else { @@ -282,6 +284,7 @@ bool MediaTrackGraphImpl::AudioTrackPresent() { void MediaTrackGraphImpl::UpdateTrackOrder() { MOZ_ASSERT(OnGraphThread()); bool audioTrackPresent = AudioTrackPresent(); + uint32_t graphOutputChannelCount = AudioOutputChannelCount(); // Note that this looks for any audio tracks, input or output, and switches // to a SystemClockDriver if there are none. However, if another is already @@ -306,11 +309,30 @@ void MediaTrackGraphImpl::UpdateTrackOrder() { } if (audioTrackPresent && mRealtime && - !CurrentDriver()->AsAudioCallbackDriver() && !switching) { + !CurrentDriver()->AsAudioCallbackDriver() && !switching && + graphOutputChannelCount > 0) { Monitor2AutoLock mon(mMonitor); if (LifecycleStateRef() == LIFECYCLE_RUNNING) { AudioCallbackDriver* driver = new AudioCallbackDriver( - this, AudioInputChannelCount(), AudioInputDevicePreference()); + this, graphOutputChannelCount, AudioInputChannelCount(), + AudioInputDevicePreference()); + CurrentDriver()->SwitchAtNextIteration(driver); + } + } + + // Check if this graph should switch to a different number of output channels. + // Generally, a driver switch is explicitly made by an event (e.g., setting + // the AudioDestinationNode channelCount), but if an HTMLMediaElement is + // directly playing back via another HTMLMediaElement, the number of channels + // of the media determines how many channels to output, and it can change + // dynamically. 
+ if (CurrentDriver()->AsAudioCallbackDriver() && !switching) { + if (graphOutputChannelCount != + CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount()) { + AudioCallbackDriver* driver = new AudioCallbackDriver( + this, graphOutputChannelCount, AudioInputChannelCount(), + AudioInputDevicePreference()); + Monitor2AutoLock mon(mMonitor); CurrentDriver()->SwitchAtNextIteration(driver); } } @@ -501,134 +523,97 @@ void MediaTrackGraphImpl::UpdateTrackOrder() { MOZ_ASSERT(orderedTrackCount == mFirstCycleBreaker); } -void MediaTrackGraphImpl::CreateOrDestroyAudioTracks(MediaTrack* aTrack) { - MOZ_ASSERT(OnGraphThread()); - MOZ_ASSERT(mRealtime, - "Should only attempt to create audio tracks in real-time mode"); - - if (aTrack->mAudioOutputs.IsEmpty()) { - aTrack->mAudioOutputStream = nullptr; - return; - } - - if (aTrack->mAudioOutputStream) { - return; - } - - LOG(LogLevel::Debug, - ("%p: Updating AudioOutputStream for MediaTrack %p", this, aTrack)); - - aTrack->mAudioOutputStream = MakeUnique(); - aTrack->mAudioOutputStream->mAudioPlaybackStartTime = mProcessedTime; - aTrack->mAudioOutputStream->mBlockedAudioTime = 0; - aTrack->mAudioOutputStream->mLastTickWritten = 0; - - bool switching = false; - { - Monitor2AutoLock lock(mMonitor); - switching = CurrentDriver()->Switching(); - } - - if (!CurrentDriver()->AsAudioCallbackDriver() && !switching) { - Monitor2AutoLock mon(mMonitor); - if (LifecycleStateRef() == LIFECYCLE_RUNNING) { - AudioCallbackDriver* driver = new AudioCallbackDriver( - this, AudioInputChannelCount(), AudioInputDevicePreference()); - CurrentDriver()->SwitchAtNextIteration(driver); - } - } -} - -TrackTime MediaTrackGraphImpl::PlayAudio(MediaTrack* aTrack) { +TrackTime MediaTrackGraphImpl::PlayAudio(const TrackKeyAndVolume& aTkv, + GraphTime aPlayedTime) { MOZ_ASSERT(OnGraphThread()); MOZ_ASSERT(mRealtime, "Should only attempt to play audio in realtime mode"); - float volume = 0.0f; - for (uint32_t i = 0; i < aTrack->mAudioOutputs.Length(); ++i) 
{ - volume += aTrack->mAudioOutputs[i].mVolume * mGlobalVolume; - } - TrackTime ticksWritten = 0; - if (aTrack->mAudioOutputStream) { - ticksWritten = 0; + ticksWritten = 0; + MediaTrack* track = aTkv.mTrack; + AudioSegment* audio = track->GetData(); + AudioSegment output; - MediaTrack::AudioOutputStream& audioOutput = *aTrack->mAudioOutputStream; - AudioSegment* audio = aTrack->GetData(); - AudioSegment output; + TrackTime offset = track->GraphTimeToTrackTime(aPlayedTime); - TrackTime offset = aTrack->GraphTimeToTrackTime(mProcessedTime); + // We don't update Track->mTracksStartTime here to account for time spent + // blocked. Instead, we'll update it in UpdateCurrentTimeForTracks after + // the blocked period has completed. But we do need to make sure we play + // from the right offsets in the track buffer, even if we've already + // written silence for some amount of blocked time after the current time. + GraphTime t = aPlayedTime; + while (t < mStateComputedTime) { + bool blocked = t >= track->mStartBlocking; + GraphTime end = blocked ? mStateComputedTime : track->mStartBlocking; + NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!"); - // We don't update aTrack->mTracksStartTime here to account for time spent - // blocked. Instead, we'll update it in UpdateCurrentTimeForTracks after - // the blocked period has completed. But we do need to make sure we play - // from the right offsets in the track buffer, even if we've already - // written silence for some amount of blocked time after the current time. - GraphTime t = mProcessedTime; - while (t < mStateComputedTime) { - bool blocked = t >= aTrack->mStartBlocking; - GraphTime end = blocked ? mStateComputedTime : aTrack->mStartBlocking; - NS_ASSERTION(end <= mStateComputedTime, "mStartBlocking is wrong!"); + // Check how many ticks of sound we can provide if we are blocked some + // time in the middle of this cycle. 
+ TrackTime toWrite = end - t; - // Check how many ticks of sound we can provide if we are blocked some - // time in the middle of this cycle. - TrackTime toWrite = end - t; + if (blocked) { + output.InsertNullDataAtStart(toWrite); + ticksWritten += toWrite; + LOG(LogLevel::Verbose, + ("%p: MediaTrack %p writing %" PRId64 " blocking-silence samples for " + "%f to %f (%" PRId64 " to %" PRId64 ")", + this, track, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end), + offset, offset + toWrite)); + } else { + TrackTime endTicksNeeded = offset + toWrite; + TrackTime endTicksAvailable = audio->GetDuration(); - if (blocked) { - output.InsertNullDataAtStart(toWrite); - ticksWritten += toWrite; + if (endTicksNeeded <= endTicksAvailable) { LOG(LogLevel::Verbose, - ("%p: MediaTrack %p writing %" PRId64 - " blocking-silence samples for " - "%f to %f (%" PRId64 " to %" PRId64 ")", - this, aTrack, toWrite, MediaTimeToSeconds(t), - MediaTimeToSeconds(end), offset, offset + toWrite)); + ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f " + "(samples %" PRId64 " to %" PRId64 ")", + this, track, toWrite, MediaTimeToSeconds(t), + MediaTimeToSeconds(end), offset, endTicksNeeded)); + output.AppendSlice(*audio, offset, endTicksNeeded); + ticksWritten += toWrite; + offset = endTicksNeeded; } else { - TrackTime endTicksNeeded = offset + toWrite; - TrackTime endTicksAvailable = audio->GetDuration(); + // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not + // ended."); If we are at the end of the track, maybe write the + // remaining samples, and pad with/output silence. 
+ if (endTicksNeeded > endTicksAvailable && offset < endTicksAvailable) { + output.AppendSlice(*audio, offset, endTicksAvailable); - if (endTicksNeeded <= endTicksAvailable) { LOG(LogLevel::Verbose, ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f " "(samples %" PRId64 " to %" PRId64 ")", - this, aTrack, toWrite, MediaTimeToSeconds(t), + this, track, toWrite, MediaTimeToSeconds(t), MediaTimeToSeconds(end), offset, endTicksNeeded)); - output.AppendSlice(*audio, offset, endTicksNeeded); - ticksWritten += toWrite; - offset = endTicksNeeded; - } else { - // MOZ_ASSERT(track->IsEnded(), "Not enough data, and track not - // ended."); If we are at the end of the track, maybe write the - // remaining samples, and pad with/output silence. - if (endTicksNeeded > endTicksAvailable && - offset < endTicksAvailable) { - output.AppendSlice(*audio, offset, endTicksAvailable); - LOG(LogLevel::Verbose, - ("%p: MediaTrack %p writing %" PRId64 " samples for %f to %f " - "(samples %" PRId64 " to %" PRId64 ")", - this, aTrack, toWrite, MediaTimeToSeconds(t), - MediaTimeToSeconds(end), offset, endTicksNeeded)); - uint32_t available = endTicksAvailable - offset; - ticksWritten += available; - toWrite -= available; - offset = endTicksAvailable; - } - output.AppendNullData(toWrite); - LOG(LogLevel::Verbose, - ("%p MediaTrack %p writing %" PRId64 - " padding slsamples for %f to " - "%f (samples %" PRId64 " to %" PRId64 ")", - this, aTrack, toWrite, MediaTimeToSeconds(t), - MediaTimeToSeconds(end), offset, endTicksNeeded)); - ticksWritten += toWrite; + uint32_t available = endTicksAvailable - offset; + ticksWritten += available; + toWrite -= available; + offset = endTicksAvailable; } - output.ApplyVolume(volume); + output.AppendNullData(toWrite); + LOG(LogLevel::Verbose, + ("%p MediaTrack %p writing %" PRId64 " padding slsamples for %f to " + "%f (samples %" PRId64 " to %" PRId64 ")", + this, track, toWrite, MediaTimeToSeconds(t), + MediaTimeToSeconds(end), offset, 
endTicksNeeded)); + ticksWritten += toWrite; } - t = end; + output.ApplyVolume(mGlobalVolume * aTkv.mVolume); } - audioOutput.mLastTickWritten = offset; + t = end; - output.WriteTo(mMixer, AudioOutputChannelCount(), mSampleRate); + uint32_t outputChannels; + // Use the number of channel the driver expects: this is the number of + // channel that can be output by the underlying system level audio stream. + // Fall back to something sensible if this graph is being driven by a normal + // thread (this can happen when there are no output devices, etc.). + if (CurrentDriver()->AsAudioCallbackDriver()) { + outputChannels = + CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount(); + } else { + outputChannels = AudioOutputChannelCount(); + } + output.WriteTo(mMixer, outputChannels, mSampleRate); } return ticksWritten; } @@ -656,7 +641,8 @@ void MediaTrackGraphImpl::OpenAudioInputImpl(CubebUtils::AudioDeviceID aID, Monitor2AutoLock mon(mMonitor); if (LifecycleStateRef() == LIFECYCLE_RUNNING) { AudioCallbackDriver* driver = new AudioCallbackDriver( - this, AudioInputChannelCount(), AudioInputDevicePreference()); + this, AudioOutputChannelCount(), AudioInputChannelCount(), + AudioInputDevicePreference()); LOG(LogLevel::Debug, ("%p OpenAudioInput: starting new AudioCallbackDriver(input) %p", this, driver)); @@ -732,7 +718,8 @@ void MediaTrackGraphImpl::CloseAudioInputImpl( LOG(LogLevel::Debug, ("%p: CloseInput: output present (AudioCallback)", this)); - driver = new AudioCallbackDriver(this, AudioInputChannelCount(), + driver = new AudioCallbackDriver(this, AudioOutputChannelCount(), + AudioInputChannelCount(), AudioInputDevicePreference()); CurrentDriver()->SwitchAtNextIteration(driver); } else if (CurrentDriver()->AsAudioCallbackDriver()) { @@ -745,6 +732,51 @@ void MediaTrackGraphImpl::CloseAudioInputImpl( } } +void MediaTrackGraphImpl::RegisterAudioOutput(MediaTrack* aTrack, void* aKey) { + MOZ_ASSERT(OnGraphThreadOrNotRunning()); + + TrackKeyAndVolume* tkv = 
mAudioOutputs.AppendElement(); + tkv->mTrack = aTrack; + tkv->mKey = aKey; + tkv->mVolume = 1.0; + + bool switching = false; + { + Monitor2AutoLock lock(mMonitor); + switching = CurrentDriver()->Switching(); + } + + if (!CurrentDriver()->AsAudioCallbackDriver() && !switching) { + Monitor2AutoLock mon(mMonitor); + if (LifecycleStateRef() == LIFECYCLE_RUNNING) { + AudioCallbackDriver* driver = new AudioCallbackDriver( + this, AudioOutputChannelCount(), AudioInputChannelCount(), + AudioInputDevicePreference()); + CurrentDriver()->SwitchAtNextIteration(driver); + } + } +} + +void MediaTrackGraphImpl::UnregisterAllAudioOutputs(MediaTrack* aTrack) { + MOZ_ASSERT(OnGraphThreadOrNotRunning()); + + for (int32_t i = mAudioOutputs.Length() - 1; i >= 0; i--) { + if (mAudioOutputs[i].mTrack == aTrack) { + mAudioOutputs.RemoveElementAt(i); + } + } +} + +void MediaTrackGraphImpl::UnregisterAudioOutput(MediaTrack* aTrack, + void* aKey) { + MOZ_ASSERT(OnGraphThreadOrNotRunning()); + + mAudioOutputs.RemoveElementsBy( + [&aKey, &aTrack](const TrackKeyAndVolume& aTkv) { + return aTkv.mKey == aKey && aTkv.mTrack == aTrack; + }); +} + void MediaTrackGraphImpl::CloseAudioInput(Maybe& aID, AudioDataListener* aListener) { MOZ_ASSERT(NS_IsMainThread()); @@ -907,7 +939,8 @@ void MediaTrackGraphImpl::ReevaluateInputDevice() { } if (needToSwitch) { AudioCallbackDriver* newDriver = new AudioCallbackDriver( - this, AudioInputChannelCount(), AudioInputDevicePreference()); + this, AudioOutputChannelCount(), AudioInputChannelCount(), + AudioInputDevicePreference()); { Monitor2AutoLock lock(mMonitor); CurrentDriver()->SwitchAtNextIteration(newDriver); @@ -1032,25 +1065,24 @@ void MediaTrackGraphImpl::ProduceDataForTracksBlockByBlock( MOZ_ASSERT(OnGraphThread()); MOZ_ASSERT(aTrackIndex <= mFirstCycleBreaker, "Cycle breaker is not AudioNodeTrack?"); - GraphTime t = mProcessedTime; - while (t < mStateComputedTime) { - GraphTime next = RoundUpToNextAudioBlock(t); + while (mProcessedTime < 
mStateComputedTime) { + GraphTime next = RoundUpToNextAudioBlock(mProcessedTime); for (uint32_t i = mFirstCycleBreaker; i < mTracks.Length(); ++i) { auto nt = static_cast(mTracks[i]); MOZ_ASSERT(nt->AsAudioNodeTrack()); - nt->ProduceOutputBeforeInput(t); + nt->ProduceOutputBeforeInput(mProcessedTime); } for (uint32_t i = aTrackIndex; i < mTracks.Length(); ++i) { ProcessedMediaTrack* pt = mTracks[i]->AsProcessedTrack(); if (pt) { pt->ProcessInput( - t, next, + mProcessedTime, next, (next == mStateComputedTime) ? ProcessedMediaTrack::ALLOW_END : 0); } } - t = next; + mProcessedTime = next; } - NS_ASSERTION(t == mStateComputedTime, + NS_ASSERTION(mProcessedTime == mStateComputedTime, "Something went wrong with rounding to block boundaries"); } @@ -1183,9 +1215,7 @@ void MediaTrackGraphImpl::Process() { bool allBlockedForever = true; // True when we've done ProcessInput for all processed tracks. bool doneAllProducing = false; - // This is the number of frame that are written to the AudioStreams, for - // this cycle. - TrackTime ticksPlayed = 0; + const GraphTime oldProcessedTime = mProcessedTime; mMixer.StartMixing(); @@ -1223,23 +1253,29 @@ void MediaTrackGraphImpl::Process() { } } } - // Only playback audio and video in real-time mode - if (mRealtime) { - CreateOrDestroyAudioTracks(track); - if (CurrentDriver()->AsAudioCallbackDriver()) { - TrackTime ticksPlayedForThisTrack = PlayAudio(track); - if (!ticksPlayed) { + if (track->mStartBlocking > oldProcessedTime) { + allBlockedForever = false; + } + } + mProcessedTime = mStateComputedTime; + + // This is the number of frames that are written to the output buffer, for + // this iteration. 
+ TrackTime ticksPlayed = 0; + // Only playback audio and video in real-time mode + if (mRealtime) { + if (CurrentDriver()->AsAudioCallbackDriver()) { + for (auto& t : mAudioOutputs) { + TrackTime ticksPlayedForThisTrack = PlayAudio(t, oldProcessedTime); + if (ticksPlayed == 0) { ticksPlayed = ticksPlayedForThisTrack; } else { MOZ_ASSERT(!ticksPlayedForThisTrack || ticksPlayedForThisTrack == ticksPlayed, - "Each track should have the same number of frame."); + "Each track should have the same number of frames."); } } } - if (track->mStartBlocking > mProcessedTime) { - allBlockedForever = false; - } } if (CurrentDriver()->AsAudioCallbackDriver()) { @@ -1249,7 +1285,7 @@ void MediaTrackGraphImpl::Process() { // been processed. (bug 1406027) mMixer.Mix(nullptr, CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount(), - mStateComputedTime - mProcessedTime, mSampleRate); + mStateComputedTime - oldProcessedTime, mSampleRate); } mMixer.FinishMixing(); } @@ -1307,15 +1343,20 @@ bool MediaTrackGraphImpl::OneIterationImpl(GraphTime aStateEnd) { // Process graph message from the main thread for this iteration. RunMessagesInQueue(); + // Process MessagePort events. + // These require a single thread, which has an nsThread with an event queue. + if (mGraphRunner || !mRealtime) { + NS_ProcessPendingEvents(nullptr); + } + GraphTime stateEnd = std::min(aStateEnd, GraphTime(mEndTime)); UpdateGraph(stateEnd); mStateComputedTime = stateEnd; - Process(); - GraphTime oldProcessedTime = mProcessedTime; - mProcessedTime = stateEnd; + Process(); + MOZ_ASSERT(mProcessedTime == stateEnd); UpdateCurrentTimeForTracks(oldProcessedTime); @@ -1431,7 +1472,9 @@ class MediaTrackGraphShutDownRunnable : public Runnable { public: explicit MediaTrackGraphShutDownRunnable(MediaTrackGraphImpl* aGraph) : Runnable("MediaTrackGraphShutDownRunnable"), mGraph(aGraph) {} - NS_IMETHOD Run() override { + // MOZ_CAN_RUN_SCRIPT_BOUNDARY until Runnable::Run is MOZ_CAN_RUN_SCRIPT. + // See bug 1535398. 
+ MOZ_CAN_RUN_SCRIPT_BOUNDARY NS_IMETHOD Run() override { MOZ_ASSERT(NS_IsMainThread()); MOZ_ASSERT(mGraph->mDetectedNotRunning && mGraph->mDriver, "We should know the graph thread control loop isn't running!"); @@ -1450,12 +1493,12 @@ class MediaTrackGraphShutDownRunnable : public Runnable { #endif if (mGraph->mGraphRunner) { - mGraph->mGraphRunner->Shutdown(); + RefPtr(mGraph->mGraphRunner)->Shutdown(); } - mGraph->mDriver - ->Shutdown(); // This will wait until it's shutdown since - // we'll start tearing down the graph after this + // This will wait until it's shutdown since + // we'll start tearing down the graph after this + RefPtr(mGraph->mDriver)->Shutdown(); // Release the driver now so that an AudioCallbackDriver will release its // SharedThreadPool reference. Each SharedThreadPool reference must be @@ -1798,13 +1841,10 @@ size_t MediaTrack::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const { // Future: // - mLastPlayedVideoFrame // - mTrackListeners - elements - // - mAudioOutputStream - elements - amount += mAudioOutputs.ShallowSizeOfExcludingThis(aMallocSizeOf); amount += mTrackListeners.ShallowSizeOfExcludingThis(aMallocSizeOf); amount += mMainThreadListeners.ShallowSizeOfExcludingThis(aMallocSizeOf); amount += mConsumers.ShallowSizeOfExcludingThis(aMallocSizeOf); - amount += aMallocSizeOf(mAudioOutputStream.get()); return amount; } @@ -1939,14 +1979,20 @@ void MediaTrack::AddAudioOutput(void* aKey) { GraphImpl()->AppendMessage(MakeUnique(this, aKey)); } -void MediaTrack::SetAudioOutputVolumeImpl(void* aKey, float aVolume) { - for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) { - if (mAudioOutputs[i].mKey == aKey) { - mAudioOutputs[i].mVolume = aVolume; +void MediaTrackGraphImpl::SetAudioOutputVolume(MediaTrack* aTrack, void* aKey, + float aVolume) { + for (auto& tkv : mAudioOutputs) { + if (tkv.mKey == aKey && aTrack == tkv.mTrack) { + tkv.mVolume = aVolume; return; } } - NS_ERROR("Audio output key not found"); + MOZ_CRASH("Audio stream key 
not found when setting the volume."); +} + +void MediaTrack::SetAudioOutputVolumeImpl(void* aKey, float aVolume) { + MOZ_ASSERT(GraphImpl()->OnGraphThread()); + GraphImpl()->SetAudioOutputVolume(this, aKey, aVolume); } void MediaTrack::SetAudioOutputVolume(void* aKey, float aVolume) { @@ -1965,27 +2011,19 @@ void MediaTrack::SetAudioOutputVolume(void* aKey, float aVolume) { } void MediaTrack::AddAudioOutputImpl(void* aKey) { - LOG(LogLevel::Info, - ("MediaTrack %p Adding AudioOutput for key %p", this, aKey)); - mAudioOutputs.AppendElement(AudioOutput(aKey)); + LOG(LogLevel::Info, ("MediaTrack %p adding AudioOutput", this)); + GraphImpl()->RegisterAudioOutput(this, aKey); } void MediaTrack::RemoveAudioOutputImpl(void* aKey) { - LOG(LogLevel::Info, - ("MediaTrack %p Removing AudioOutput for key %p", this, aKey)); - for (uint32_t i = 0; i < mAudioOutputs.Length(); ++i) { - if (mAudioOutputs[i].mKey == aKey) { - mAudioOutputs.RemoveElementAt(i); - return; - } - } - NS_ERROR("Audio output key not found"); + LOG(LogLevel::Info, ("MediaTrack %p removing AudioOutput", this)); + GraphImpl()->UnregisterAudioOutput(this, aKey); } void MediaTrack::RemoveAudioOutput(void* aKey) { class Message : public ControlMessage { public: - Message(MediaTrack* aTrack, void* aKey) + explicit Message(MediaTrack* aTrack, void* aKey) : ControlMessage(aTrack), mKey(aKey) {} void Run() override { mTrack->RemoveAudioOutputImpl(mKey); } void* mKey; @@ -2397,8 +2435,17 @@ static void MoveToSegment(SourceMediaTrack* aTrack, MediaSegment* aIn, TrackTime aDesiredUpToTime) { MOZ_ASSERT(aIn->GetType() == aOut->GetType()); MOZ_ASSERT(aOut->GetDuration() >= aCurrentTime); + MOZ_ASSERT(aDesiredUpToTime >= aCurrentTime); if (aIn->GetType() == MediaSegment::AUDIO) { - aOut->AppendFrom(aIn); + AudioSegment* in = static_cast(aIn); + AudioSegment* out = static_cast(aOut); + TrackTime desiredDurationToMove = aDesiredUpToTime - aCurrentTime; + TrackTime end = std::min(in->GetDuration(), desiredDurationToMove); + 
+ out->AppendSlice(*in, 0, end); + in->RemoveLeading(end); + + out->ApplyVolume(aTrack->GetVolumeLocked()); } else { VideoSegment* in = static_cast(aIn); VideoSegment* out = static_cast(aOut); @@ -2440,8 +2487,8 @@ static void MoveToSegment(SourceMediaTrack* aTrack, MediaSegment* aIn, out->ExtendLastFrameBy(aDesiredUpToTime - out->GetDuration()); } in->Clear(); + MOZ_ASSERT(aIn->GetDuration() == 0, "aIn must be consumed"); } - MOZ_ASSERT(aIn->GetDuration() == 0, "aIn must be consumed"); } void SourceMediaTrack::ExtractPendingInput(GraphTime aCurrentTime, @@ -2486,23 +2533,9 @@ void SourceMediaTrack::ResampleAudioToGraphSampleRate(MediaSegment* aSegment) { return; } AudioSegment* segment = static_cast(aSegment); - int channels = segment->ChannelCount(); - - // If this segment is just silence, we delay instanciating the resampler. We - // also need to recreate the resampler if the channel count or input rate - // changes. - if (channels && mUpdateTrack->mResamplerChannelCount != channels) { - SpeexResamplerState* state = speex_resampler_init( - channels, mUpdateTrack->mInputRate, GraphImpl()->GraphRate(), - SPEEX_RESAMPLER_QUALITY_MIN, nullptr); - if (!state) { - return; - } - mUpdateTrack->mResampler.own(state); - mUpdateTrack->mResamplerChannelCount = channels; - } - segment->ResampleChunks(mUpdateTrack->mResampler, mUpdateTrack->mInputRate, - GraphImpl()->GraphRate()); + segment->ResampleChunks(mUpdateTrack->mResampler, + &mUpdateTrack->mResamplerChannelCount, + mUpdateTrack->mInputRate, GraphImpl()->GraphRate()); } void SourceMediaTrack::AdvanceTimeVaryingValuesToCurrentTime( @@ -2700,6 +2733,16 @@ void SourceMediaTrack::RemoveAllDirectListenersImpl() { mDirectTrackListeners.Clear(); } +void SourceMediaTrack::SetVolume(float aVolume) { + MutexAutoLock lock(mMutex); + mVolume = aVolume; +} + +float SourceMediaTrack::GetVolumeLocked() { + mMutex.AssertCurrentThreadOwns(); + return mVolume; +} + SourceMediaTrack::~SourceMediaTrack() {} void MediaInputPort::Init() { 
@@ -2855,8 +2898,9 @@ MediaTrackGraphImpl::MediaTrackGraphImpl(GraphDriverType aDriverRequested, uint32_t aChannelCount, AbstractThread* aMainThread) : MediaTrackGraph(aSampleRate), - mGraphRunner(aRunTypeRequested == SINGLE_THREAD ? new GraphRunner(this) - : nullptr), + mGraphRunner(aRunTypeRequested == SINGLE_THREAD + ? GraphRunner::Create(this) + : already_AddRefed(nullptr)), mFirstCycleBreaker(0) // An offline graph is not initially processing. , @@ -2876,7 +2920,6 @@ MediaTrackGraphImpl::MediaTrackGraphImpl(GraphDriverType aDriverRequested, mTrackOrderDirty(false), mAbstractMainThread(aMainThread), mSelfRef(this), - mOutputChannels(aChannelCount), mGlobalVolume(CubebUtils::GetVolumeScale()) #ifdef DEBUG , @@ -2885,11 +2928,21 @@ MediaTrackGraphImpl::MediaTrackGraphImpl(GraphDriverType aDriverRequested, , mMainThreadGraphTime(0, "MediaTrackGraphImpl::mMainThreadGraphTime"), mAudioOutputLatency(0.0) { + if (aRunTypeRequested == SINGLE_THREAD && !mGraphRunner) { + // Failed to create thread. Jump to the last phase of the lifecycle. + mDetectedNotRunning = true; + mLifecycleState = LIFECYCLE_WAITING_FOR_TRACK_DESTRUCTION; +#ifdef DEBUG + mCanRunMessagesSynchronously = true; +#endif + return; + } if (mRealtime) { if (aDriverRequested == AUDIO_THREAD_DRIVER) { // Always start with zero input channels, and no particular preferences // for the input channel. 
- mDriver = new AudioCallbackDriver(this, 0, AudioInputType::Unknown); + mDriver = new AudioCallbackDriver(this, aChannelCount, 0, + AudioInputType::Unknown); } else { mDriver = new SystemClockDriver(this); } @@ -2902,6 +2955,10 @@ MediaTrackGraphImpl::MediaTrackGraphImpl(GraphDriverType aDriverRequested, StartAudioCallbackTracing(); RegisterWeakAsyncMemoryReporter(this); + + if (!IsNonRealtime()) { + AddShutdownBlocker(); + } } AbstractThread* MediaTrackGraph::AbstractMainThread() { @@ -2984,10 +3041,6 @@ MediaTrackGraph* MediaTrackGraph::GetInstance( graph = new MediaTrackGraphImpl(aGraphDriverRequested, runType, sampleRate, channelCount, mainThread); - if (!graph->IsNonRealtime()) { - graph->AddShutdownBlocker(); - } - uint32_t hashkey = WindowToHash(aWindow, sampleRate); gGraphs.Put(hashkey, graph); @@ -3380,7 +3433,8 @@ void MediaTrackGraphImpl::ApplyAudioContextOperationImpl( MOZ_ASSERT(nextDriver->AsAudioCallbackDriver()); driver = nextDriver->AsAudioCallbackDriver(); } else { - driver = new AudioCallbackDriver(this, AudioInputChannelCount(), + driver = new AudioCallbackDriver(this, AudioOutputChannelCount(), + AudioInputChannelCount(), AudioInputDevicePreference()); Monitor2AutoLock lock(mMonitor); CurrentDriver()->SwitchAtNextIteration(driver); @@ -3483,6 +3537,33 @@ void MediaTrackGraph::ApplyAudioContextOperation( aDestinationTrack, aTracks, aOperation, aPromise, aFlags)); } +uint32_t MediaTrackGraphImpl::AudioOutputChannelCount() const { + MOZ_ASSERT(OnGraphThread()); + // The audio output channel count for a graph is the maximum of the output + // channel count of all the tracks that are in mAudioOutputs. 
+ uint32_t channelCount = 0; + for (auto& tkv : mAudioOutputs) { + MediaTrack* t = tkv.mTrack; + // This is an AudioDestinationNode + if (t->AsAudioNodeTrack()) { + channelCount = std::max( + channelCount, t->AsAudioNodeTrack()->NumberOfChannels()); + } else if (t->GetData()) { + AudioSegment* segment = t->GetData(); + channelCount = + std::max(channelCount, segment->MaxChannelCount()); + } + } + if (channelCount) { + return channelCount; + } else { + if (CurrentDriver()->AsAudioCallbackDriver()) { + return CurrentDriver()->AsAudioCallbackDriver()->OutputChannelCount(); + } + return 2; + } +} + double MediaTrackGraph::AudioOutputLatency() { return static_cast(this)->AudioOutputLatency(); } @@ -3621,4 +3702,9 @@ Watchable& MediaTrackGraphImpl::CurrentTime() { return mMainThreadGraphTime; } +GraphTime MediaTrackGraph::ProcessedTime() const { + AssertOnGraphThreadOrNotRunning(); + return static_cast(this)->mProcessedTime; +} + } // namespace mozilla diff --git a/dom/media/MediaTrackGraph.h b/dom/media/MediaTrackGraph.h index 643ae2b1b5..2c2a8ea734 100644 --- a/dom/media/MediaTrackGraph.h +++ b/dom/media/MediaTrackGraph.h @@ -5,7 +5,8 @@ #ifndef MOZILLA_MEDIATRACKGRAPH_H_ #define MOZILLA_MEDIATRACKGRAPH_H_ -#include "AudioStream.h" +#include "AudioSampleFormat.h" +#include "CubebUtils.h" #include "MainThreadUtils.h" #include "MediaSegment.h" #include "mozilla/LinkedList.h" @@ -302,12 +303,6 @@ class MediaTrack : public mozilla::LinkedListElement { void SetGraphImpl(MediaTrackGraph* aGraph); // Control API. - // Since a track can be played multiple ways, we need to combine independent - // volume settings. The aKey parameter is used to keep volume settings - // separate. Since the track is always playing the same contents, only - // a single audio output track is used; the volumes are combined. - // Currently only the first enabled audio track is played. - // XXX change this so all enabled audio tracks are mixed and played. 
virtual void AddAudioOutput(void* aKey); virtual void SetAudioOutputVolume(void* aKey, float aVolume); virtual void RemoveAudioOutput(void* aKey); @@ -556,12 +551,6 @@ class MediaTrack : public mozilla::LinkedListElement { bool mNotifiedEnded; // Client-set volume of this track - struct AudioOutput { - explicit AudioOutput(void* aKey) : mKey(aKey), mVolume(1.0f) {} - void* mKey; - float mVolume; - }; - nsTArray mAudioOutputs; nsTArray> mTrackListeners; nsTArray mMainThreadListeners; // This track's associated disabled mode. It can either by disabled by frames @@ -577,20 +566,6 @@ class MediaTrack : public mozilla::LinkedListElement { // MediaInputPorts to which this is connected nsTArray mConsumers; - // Where audio output is going. There is one AudioOutputStream per - // Type::AUDIO MediaTrack. - struct AudioOutputStream { - // When we started audio playback for this track. - // Add mTrack->GetPosition() to find the current audio playback position. - GraphTime mAudioPlaybackStartTime; - // Amount of time that we've wanted to play silence because of the track - // blocking. - MediaTime mBlockedAudioTime; - // Last tick written to the audio output. - TrackTime mLastTickWritten; - }; - UniquePtr mAudioOutputStream; - /** * Number of outstanding suspend operations on this track. Track is * suspended when this is > 0. @@ -706,6 +681,11 @@ class SourceMediaTrack : public MediaTrack { void RemoveAllDirectListenersImpl() override; + // The value set here is applied in MoveToSegment so we can avoid the + // buffering delay in applying the change. See Bug 1443511. + void SetVolume(float aVolume); + float GetVolumeLocked(); + friend class MediaTrackGraphImpl; protected: @@ -722,7 +702,7 @@ class SourceMediaTrack : public MediaTrack { // Resampler if the rate of the input track does not match the // MediaTrackGraph's. 
nsAutoRef mResampler; - int mResamplerChannelCount; + uint32_t mResamplerChannelCount; // Each time the track updates are flushed to the media graph thread, // the segment buffer is emptied. UniquePtr mData; @@ -761,6 +741,7 @@ class SourceMediaTrack : public MediaTrack { // held together. Mutex mMutex; // protected by mMutex + float mVolume = 1.0; UniquePtr mUpdateTrack; nsTArray> mDirectTrackListeners; }; @@ -1058,6 +1039,7 @@ class MediaTrackGraph { AudioDataListener* aListener) = 0; virtual void CloseAudioInput(Maybe& aID, AudioDataListener* aListener) = 0; + // Control API. /** * Create a track that a media decoder (or some other source of @@ -1147,6 +1129,12 @@ class MediaTrackGraph { */ virtual Watchable& CurrentTime() = 0; + /** + * Graph thread function to return the time at which all processing has been + * completed. Some tracks may have performed processing beyond this time. + */ + GraphTime ProcessedTime() const; + protected: explicit MediaTrackGraph(TrackRate aSampleRate) : mSampleRate(aSampleRate) { MOZ_COUNT_CTOR(MediaTrackGraph); diff --git a/dom/media/MediaTrackGraphImpl.h b/dom/media/MediaTrackGraphImpl.h index 5a3ae5999f..aa4b67f61c 100644 --- a/dom/media/MediaTrackGraphImpl.h +++ b/dom/media/MediaTrackGraphImpl.h @@ -385,7 +385,13 @@ class MediaTrackGraphImpl : public MediaTrackGraph, * Queue audio (mix of track audio and silence for blocked intervals) * to the audio output track. Returns the number of frames played. */ - TrackTime PlayAudio(MediaTrack* aTrack); + + struct TrackKeyAndVolume { + MediaTrack* mTrack; + void* mKey; + float mVolume; + }; + TrackTime PlayAudio(const TrackKeyAndVolume& aTkv, GraphTime aPlayedTime); /* Runs off a message on the graph thread when something requests audio from * an input audio device of ID aID, and delivers the input audio frames to * aListener. */ @@ -405,6 +411,14 @@ class MediaTrackGraphImpl : public MediaTrackGraph, * audio from this audio input device. 
*/ virtual void CloseAudioInput(Maybe& aID, AudioDataListener* aListener) override; + + /* Add or remove an audio output for this track. All tracks that have an + * audio output are mixed and written to a single audio output stream. */ + void RegisterAudioOutput(MediaTrack* aTrack, void* aKey); + void UnregisterAudioOutput(MediaTrack* aTrack, void* aKey); + void UnregisterAllAudioOutputs(MediaTrack* aTrack); + void SetAudioOutputVolume(MediaTrack* aTrack, void* aKey, float aVolume); + /* Called on the graph thread when the input device settings should be * reevaluated, for example, if the channel count of the input track should * be changed. */ @@ -466,7 +480,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph, mTrackOrderDirty = true; } - uint32_t AudioOutputChannelCount() const { return mOutputChannels; } + uint32_t AudioOutputChannelCount() const; double AudioOutputLatency(); @@ -660,7 +674,7 @@ class MediaTrackGraphImpl : public MediaTrackGraph, * If set, the GraphRunner class handles handing over data from audio * callbacks to a common single thread, shared across GraphDrivers. */ - const UniquePtr mGraphRunner; + const RefPtr mGraphRunner; /** * Main-thread view of the number of tracks in this graph, for lifetime @@ -954,11 +968,11 @@ class MediaTrackGraphImpl : public MediaTrackGraph, * Track for window audio capture. */ nsTArray mWindowCaptureTracks; - /** - * Number of channels on output. + * Tracks that have their audio output mixed and written to an audio output + * device. */ - const uint32_t mOutputChannels; + nsTArray mAudioOutputs; /** * Global volume scale. 
Used when running tests so that the output is not too diff --git a/dom/media/MediaTrackListener.h b/dom/media/MediaTrackListener.h index 065db7310a..97e135839c 100644 --- a/dom/media/MediaTrackListener.h +++ b/dom/media/MediaTrackListener.h @@ -91,7 +91,7 @@ class MediaTrackListener { /** * Notify that this track listener has been removed from the graph, either - * after shutdown or RemoveTrackListener. + * after shutdown or through MediaTrack::RemoveListener(). */ virtual void NotifyRemoved(MediaTrackGraph* aGraph) {} diff --git a/dom/media/VideoFrameContainer.cpp b/dom/media/VideoFrameContainer.cpp index 277669579b..53f03bbba3 100644 --- a/dom/media/VideoFrameContainer.cpp +++ b/dom/media/VideoFrameContainer.cpp @@ -3,8 +3,13 @@ * You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "VideoFrameContainer.h" -#include "mozilla/Telemetry.h" + +#ifdef MOZ_WIDGET_ANDROID +#include "GLImages.h" // for SurfaceTextureImage +#endif #include "MediaDecoderOwner.h" +#include "mozilla/Telemetry.h" +#include "mozilla/AbstractThread.h" using namespace mozilla::layers; @@ -76,9 +81,27 @@ void VideoFrameContainer::UpdatePrincipalHandleForFrameIDLocked( mFrameIDForPendingPrincipalHandle = aFrameID; } +#ifdef MOZ_WIDGET_ANDROID +static void NotifySetCurrent(Image* aImage) { + if (aImage == nullptr) { + return; + } + + SurfaceTextureImage* image = aImage->AsSurfaceTextureImage(); + if (image == nullptr) { + return; + } + + image->OnSetCurrent(); +} +#endif + void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, Image* aImage, const TimeStamp& aTargetTime) { +#ifdef MOZ_WIDGET_ANDROID + NotifySetCurrent(aImage); +#endif if (aImage) { MutexAutoLock lock(mMutex); AutoTArray imageList; @@ -93,6 +116,15 @@ void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize, void VideoFrameContainer::SetCurrentFrames( const gfx::IntSize& aIntrinsicSize, const nsTArray& aImages) { +#ifdef MOZ_WIDGET_ANDROID + // When there are multiple frames, 
only the last one is effective + // (see bug 1299068 comment 4). Here I just count on VideoSink and VideoOutput + // to send one frame at a time and warn if not. + Unused << NS_WARN_IF(aImages.Length() > 1); + for (auto& image : aImages) { + NotifySetCurrent(image.mImage); + } +#endif MutexAutoLock lock(mMutex); SetCurrentFramesLocked(aIntrinsicSize, aImages); } diff --git a/dom/media/encoder/ContainerWriter.h b/dom/media/encoder/ContainerWriter.h index 8ec5f0305d..4a83e5060e 100644 --- a/dom/media/encoder/ContainerWriter.h +++ b/dom/media/encoder/ContainerWriter.h @@ -6,7 +6,7 @@ #define ContainerWriter_h_ #include "nsTArray.h" -#include "EncodedFrameContainer.h" +#include "EncodedFrame.h" #include "TrackMetadataBase.h" namespace mozilla { @@ -25,23 +25,26 @@ class ContainerWriter { enum { END_OF_STREAM = 1 << 0 }; /** - * Writes encoded track data from aBuffer to a packet, and insert this packet - * into the internal stream of container writer. aDuration is the playback - * duration of this packet in number of samples. aFlags is true with - * END_OF_STREAM if this is the last packet of track. - * Currently, WriteEncodedTrack doesn't support multiple tracks. + * Writes encoded track data from aData into the internal stream of container + * writer. aFlags is used to signal the impl of different conditions + * such as END_OF_STREAM. Each impl may handle different flags, and should be + * documented accordingly. Currently, WriteEncodedTrack doesn't support + * explicit track specification, though each impl may provide logic to + * allocate frames into different tracks. */ - virtual nsresult WriteEncodedTrack(const EncodedFrameContainer& aData, - uint32_t aFlags = 0) = 0; + virtual nsresult WriteEncodedTrack( + const nsTArray>& aData, uint32_t aFlags = 0) = 0; /** - * Set the meta data pointer into muxer - * This function will check the integrity of aMetadata. 
- * If the meta data isn't well format, this function will return - * NS_ERROR_FAILURE to caller, else save the pointer to mMetadata and return + * Stores the metadata for all given tracks to the muxer. + * + * This method checks the integrity of aMetadata. + * If the metadata isn't well formatted, this method returns NS_ERROR_FAILURE. + * If the metadata is well formatted, it stores the metadata and returns * NS_OK. */ - virtual nsresult SetMetadata(TrackMetadataBase* aMetadata) = 0; + virtual nsresult SetMetadata( + const nsTArray>& aMetadata) = 0; /** * Indicate if the writer has finished to output data @@ -58,7 +61,7 @@ class ContainerWriter { * even it is not full, and copy these container data to a buffer for * aOutputBufs to append. */ - virtual nsresult GetContainerData(nsTArray >* aOutputBufs, + virtual nsresult GetContainerData(nsTArray>* aOutputBufs, uint32_t aFlags = 0) = 0; protected: diff --git a/dom/media/encoder/EncodedFrame.h b/dom/media/encoder/EncodedFrame.h new file mode 100644 index 0000000000..7c5be03dd4 --- /dev/null +++ b/dom/media/encoder/EncodedFrame.h @@ -0,0 +1,70 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef EncodedFrame_h_ +#define EncodedFrame_h_ + +#include "nsISupportsImpl.h" +#include "VideoUtils.h" + +namespace mozilla { + +// Represent an encoded frame emitted by an encoder +class EncodedFrame final { + NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncodedFrame) + public: + EncodedFrame() : mTime(0), mDuration(0), mFrameType(UNKNOWN) {} + enum FrameType { + VP8_I_FRAME, // VP8 intraframe + VP8_P_FRAME, // VP8 predicted frame + OPUS_AUDIO_FRAME, // Opus audio frame + UNKNOWN // FrameType not set + }; + void SwapInFrameData(nsTArray& aData) { + mFrameData.SwapElements(aData); + } + nsresult SwapOutFrameData(nsTArray& aData) { + if (mFrameType != UNKNOWN) { + // Reset this frame type to UNKNOWN once the data is swapped out. + mFrameData.SwapElements(aData); + mFrameType = UNKNOWN; + return NS_OK; + } + return NS_ERROR_FAILURE; + } + const nsTArray& GetFrameData() const { return mFrameData; } + // Timestamp in microseconds + uint64_t mTime; + // The playback duration of this packet. The unit is determined by the use + // case. For VP8 the unit should be microseconds. For opus this is the number + // of samples. + uint64_t mDuration; + // Represent what is in the FrameData + FrameType mFrameType; + + uint64_t GetEndTime() const { + // Defend against untested types. This assert can be removed but we want + // to make sure other types are correctly accounted for. 
+ MOZ_ASSERT(mFrameType == OPUS_AUDIO_FRAME || mFrameType == VP8_I_FRAME || + mFrameType == VP8_P_FRAME); + if (mFrameType == OPUS_AUDIO_FRAME) { + // See bug 1356054 for discussion around standardization of time units + // (can remove videoutils import when this goes) + return mTime + FramesToUsecs(mDuration, 48000).value(); + } else { + return mTime + mDuration; + } + } + + private: + // Private destructor, to discourage deletion outside of Release(): + ~EncodedFrame() {} + + // Encoded data + nsTArray mFrameData; +}; + +} // namespace mozilla + +#endif // EncodedFrame_h_ diff --git a/dom/media/encoder/EncodedFrameContainer.h b/dom/media/encoder/EncodedFrameContainer.h deleted file mode 100644 index f7b7258a1b..0000000000 --- a/dom/media/encoder/EncodedFrameContainer.h +++ /dev/null @@ -1,96 +0,0 @@ -/* This Source Code Form is subject to the terms of the Mozilla Public - * License, v. 2.0. If a copy of the MPL was not distributed with this file, - * You can obtain one at http://mozilla.org/MPL/2.0/. */ - -#ifndef EncodedFrameContainer_H_ -#define EncodedFrameContainer_H_ - -#include "nsTArray.h" - -namespace mozilla { - -class EncodedFrame; - -/* - * This container is used to carry video or audio encoded data from encoder to - * muxer. The media data object is created by encoder and recycle by the - * destructor. Only allow to store audio or video encoded data in EncodedData. - */ -class EncodedFrameContainer { - public: - // Append encoded frame data - void AppendEncodedFrame(EncodedFrame* aEncodedFrame) { - mEncodedFrames.AppendElement(aEncodedFrame); - } - // Retrieve all of the encoded frames - const nsTArray >& GetEncodedFrames() const { - return mEncodedFrames; - } - - private: - // This container is used to store the video or audio encoded packets. - // Muxer should check mFrameType and get the encoded data type from - // mEncodedFrames. 
- nsTArray > mEncodedFrames; -}; - -// Represent one encoded frame -class EncodedFrame final { - NS_INLINE_DECL_THREADSAFE_REFCOUNTING(EncodedFrame) - public: - EncodedFrame() : mTimeStamp(0), mDuration(0), mFrameType(UNKNOWN) {} - enum FrameType { - VP8_I_FRAME, // VP8 intraframe - VP8_P_FRAME, // VP8 predicted frame - OPUS_AUDIO_FRAME, // Opus audio frame - VORBIS_AUDIO_FRAME, - AVC_I_FRAME, - AVC_P_FRAME, - AVC_B_FRAME, - AVC_CSD, // AVC codec specific data - AAC_AUDIO_FRAME, - AAC_CSD, // AAC codec specific data - AMR_AUDIO_CSD, - AMR_AUDIO_FRAME, - EVRC_AUDIO_CSD, - EVRC_AUDIO_FRAME, - UNKNOWN // FrameType not set - }; - void SwapInFrameData(nsTArray& aData) { - mFrameData.SwapElements(aData); - } - nsresult SwapOutFrameData(nsTArray& aData) { - if (mFrameType != UNKNOWN) { - // Reset this frame type to UNKNOWN once the data is swapped out. - mFrameData.SwapElements(aData); - mFrameType = UNKNOWN; - return NS_OK; - } - return NS_ERROR_FAILURE; - } - const nsTArray& GetFrameData() const { return mFrameData; } - uint64_t GetTimeStamp() const { return mTimeStamp; } - void SetTimeStamp(uint64_t aTimeStamp) { mTimeStamp = aTimeStamp; } - - uint64_t GetDuration() const { return mDuration; } - void SetDuration(uint64_t aDuration) { mDuration = aDuration; } - - FrameType GetFrameType() const { return mFrameType; } - void SetFrameType(FrameType aFrameType) { mFrameType = aFrameType; } - - private: - // Private destructor, to discourage deletion outside of Release(): - ~EncodedFrame() {} - - // Encoded data - nsTArray mFrameData; - uint64_t mTimeStamp; - // The playback duration of this packet in number of samples - uint64_t mDuration; - // Represent what is in the FrameData - FrameType mFrameType; -}; - -} // namespace mozilla - -#endif diff --git a/dom/media/encoder/MediaEncoder.cpp b/dom/media/encoder/MediaEncoder.cpp index 3ce199dad6..4ae898f178 100644 --- a/dom/media/encoder/MediaEncoder.cpp +++ b/dom/media/encoder/MediaEncoder.cpp @@ -18,12 +18,12 @@ #include 
"mozilla/dom/VideoStreamTrack.h" #include "mozilla/gfx/Point.h" // IntSize #include "mozilla/Logging.h" -#include "mozilla/media/MediaUtils.h" #include "mozilla/Preferences.h" #include "mozilla/StaticPrefs_media.h" #include "mozilla/StaticPtr.h" #include "mozilla/TaskQueue.h" #include "mozilla/Unused.h" +#include "Muxer.h" #include "nsMimeTypes.h" #include "nsThreadUtils.h" #include "OggWriter.h" @@ -36,10 +36,6 @@ # include "WebMWriter.h" #endif -#ifdef LOG -# undef LOG -#endif - mozilla::LazyLogModule gMediaEncoderLog("MediaEncoder"); #define LOG(type, msg) MOZ_LOG(gMediaEncoderLog, type, msg) @@ -57,13 +53,12 @@ class MediaEncoder::AudioTrackListener : public DirectMediaTrackListener { mRemoved(false), mDriftCompensator(aDriftCompensator), mEncoder(aEncoder), - mEncoderThread(aEncoderThread) { + mEncoderThread(aEncoderThread), + mShutdownPromise(mShutdownHolder.Ensure(__func__)) { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); } - void NotifyShutdown() { mShutdown = true; } - void NotifyDirectListenerInstalled(InstallationResult aResult) override { if (aResult == InstallationResult::SUCCESS) { LOG(LogLevel::Info, ("Audio track direct listener installed")); @@ -89,10 +84,6 @@ class MediaEncoder::AudioTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); - if (mShutdown) { - return; - } - if (!mInitialized) { mDriftCompensator->NotifyAudioStart(TimeStamp::Now()); mInitialized = true; @@ -117,10 +108,6 @@ class MediaEncoder::AudioTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); - if (mShutdown) { - return; - } - nsresult rv = mEncoderThread->Dispatch( NewRunnableMethod("mozilla::AudioTrackEncoder::NotifyEndOfStream", mEncoder, &AudioTrackEncoder::NotifyEndOfStream)); @@ -129,13 +116,11 @@ class MediaEncoder::AudioTrackListener : public DirectMediaTrackListener { } void NotifyRemoved(MediaTrackGraph* aGraph) override { - if (!mShutdown) { - nsresult rv = 
mEncoderThread->Dispatch( - NewRunnableMethod("mozilla::AudioTrackEncoder::NotifyEndOfStream", - mEncoder, &AudioTrackEncoder::NotifyEndOfStream)); - MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); - Unused << rv; - } + nsresult rv = mEncoderThread->Dispatch( + NewRunnableMethod("mozilla::AudioTrackEncoder::NotifyEndOfStream", + mEncoder, &AudioTrackEncoder::NotifyEndOfStream)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; mRemoved = true; @@ -143,17 +128,23 @@ class MediaEncoder::AudioTrackListener : public DirectMediaTrackListener { mEncoder = nullptr; mEncoderThread = nullptr; } + + mShutdownHolder.Resolve(true, __func__); + } + + const RefPtr& OnShutdown() const { + return mShutdownPromise; } private: - // True when MediaEncoder has shutdown and destroyed the TaskQueue. - Atomic mShutdown; bool mDirectConnected; bool mInitialized; bool mRemoved; const RefPtr mDriftCompensator; RefPtr mEncoder; RefPtr mEncoderThread; + MozPromiseHolder mShutdownHolder; + const RefPtr mShutdownPromise; }; class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { @@ -163,13 +154,12 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { mInitialized(false), mRemoved(false), mEncoder(aEncoder), - mEncoderThread(aEncoderThread) { + mEncoderThread(aEncoderThread), + mShutdownPromise(mShutdownHolder.Ensure(__func__)) { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); } - void NotifyShutdown() { mShutdown = true; } - void NotifyDirectListenerInstalled(InstallationResult aResult) override { if (aResult == InstallationResult::SUCCESS) { LOG(LogLevel::Info, ("Video track direct listener installed")); @@ -196,10 +186,6 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); - if (mShutdown) { - return; - } - const TimeStamp now = TimeStamp::Now(); if (!mInitialized) { nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod( @@ -224,10 +210,6 @@ class 
MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoderThread); MOZ_ASSERT(aMedia.GetType() == MediaSegment::VIDEO); - if (mShutdown) { - return; - } - const VideoSegment& video = static_cast(aMedia); VideoSegment copy; for (VideoSegment::ConstChunkIterator iter(video); !iter.IsEnded(); @@ -251,10 +233,6 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); - if (mShutdown) { - return; - } - nsresult rv; if (aEnabled) { rv = mEncoderThread->Dispatch(NewRunnableMethod( @@ -273,10 +251,6 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { MOZ_ASSERT(mEncoder); MOZ_ASSERT(mEncoderThread); - if (mShutdown) { - return; - } - nsresult rv = mEncoderThread->Dispatch( NewRunnableMethod("mozilla::VideoTrackEncoder::NotifyEndOfStream", mEncoder, &VideoTrackEncoder::NotifyEndOfStream)); @@ -285,13 +259,11 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { } void NotifyRemoved(MediaTrackGraph* aGraph) override { - if (!mShutdown) { - nsresult rv = mEncoderThread->Dispatch( - NewRunnableMethod("mozilla::VideoTrackEncoder::NotifyEndOfStream", - mEncoder, &VideoTrackEncoder::NotifyEndOfStream)); - MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); - Unused << rv; - } + nsresult rv = mEncoderThread->Dispatch( + NewRunnableMethod("mozilla::VideoTrackEncoder::NotifyEndOfStream", + mEncoder, &VideoTrackEncoder::NotifyEndOfStream)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; mRemoved = true; @@ -299,16 +271,22 @@ class MediaEncoder::VideoTrackListener : public DirectMediaTrackListener { mEncoder = nullptr; mEncoderThread = nullptr; } + + mShutdownHolder.Resolve(true, __func__); + } + + const RefPtr& OnShutdown() const { + return mShutdownPromise; } private: - // True when MediaEncoder has shutdown and destroyed the TaskQueue. 
- Atomic mShutdown; bool mDirectConnected; bool mInitialized; bool mRemoved; RefPtr mEncoder; RefPtr mEncoderThread; + MozPromiseHolder mShutdownHolder; + const RefPtr mShutdownPromise; }; class MediaEncoder::EncoderListener : public TrackEncoderListener { @@ -396,18 +374,15 @@ MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread, VideoTrackEncoder* aVideoEncoder, TrackRate aTrackRate, const nsAString& aMIMEType) : mEncoderThread(aEncoderThread), - mWriter(std::move(aWriter)), + mMuxer(MakeUnique(std::move(aWriter))), mAudioEncoder(aAudioEncoder), mVideoEncoder(aVideoEncoder), mEncoderListener(MakeAndAddRef(mEncoderThread, this)), mStartTime(TimeStamp::Now()), mMIMEType(aMIMEType), mInitialized(false), - mMetadataEncoded(false), mCompleted(false), - mError(false), - mCanceled(false), - mShutdown(false) { + mError(false) { if (mAudioEncoder) { mAudioListener = MakeAndAddRef( aDriftCompensator, mAudioEncoder, mEncoderThread); @@ -430,7 +405,14 @@ MediaEncoder::MediaEncoder(TaskQueue* aEncoderThread, } } -MediaEncoder::~MediaEncoder() { MOZ_ASSERT(mListeners.IsEmpty()); } +MediaEncoder::~MediaEncoder() { + MOZ_ASSERT(mListeners.IsEmpty()); + MOZ_ASSERT(!mAudioTrack); + MOZ_ASSERT(!mVideoTrack); + MOZ_ASSERT(!mAudioNode); + MOZ_ASSERT(!mInputPort); + MOZ_ASSERT(!mPipeStream); +} void MediaEncoder::EnsureGraphTrackFrom(MediaTrack* aTrack) { if (mGraphTrack) { @@ -543,18 +525,16 @@ void MediaEncoder::ConnectMediaStreamTrack(MediaStreamTrack* aTrack) { if (AudioStreamTrack* audio = aTrack->AsAudioStreamTrack()) { if (!mAudioEncoder) { - MOZ_ASSERT(false, "No audio encoder for this audio track"); - return; - } - if (mAudioTrack) { - MOZ_ASSERT(false, "Only one audio track supported."); - return; - } - if (!mAudioListener) { - MOZ_ASSERT(false, "No audio listener for this audio track"); + // No audio encoder for this audio track. It could be disabled. 
+ LOG(LogLevel::Warning, ("Cannot connect to audio track - no encoder")); return; } + MOZ_ASSERT(!mAudioTrack, "Only one audio track supported."); + MOZ_ASSERT(mAudioListener, "No audio listener for this audio track"); + + LOG(LogLevel::Info, ("Connected to audio track %p", aTrack)); + mAudioTrack = audio; // With full duplex we don't risk having audio come in late to the MTG // so we won't need a direct listener. @@ -566,18 +546,16 @@ void MediaEncoder::ConnectMediaStreamTrack(MediaStreamTrack* aTrack) { audio->AddListener(mAudioListener); } else if (VideoStreamTrack* video = aTrack->AsVideoStreamTrack()) { if (!mVideoEncoder) { - MOZ_ASSERT(false, "No video encoder for this video track"); - return; - } - if (mVideoTrack) { - MOZ_ASSERT(false, "Only one video track supported."); - return; - } - if (!mVideoListener) { - MOZ_ASSERT(false, "No video listener for this audio track"); + // No video encoder for this video track. It could be disabled. + LOG(LogLevel::Warning, ("Cannot connect to video track - no encoder")); return; } + MOZ_ASSERT(!mVideoTrack, "Only one video track supported."); + MOZ_ASSERT(mVideoListener, "No video listener for this video track"); + + LOG(LogLevel::Info, ("Connected to video track %p", aTrack)); + mVideoTrack = video; video->AddDirectListener(mVideoListener); video->AddListener(mVideoListener); @@ -629,21 +607,18 @@ already_AddRefed MediaEncoder::CreateEncoder( RefPtr videoEncoder; auto driftCompensator = MakeRefPtr(aEncoderThread, aTrackRate); - nsString mimeType; - if (!aTrackTypes) { - MOZ_ASSERT(false); - LOG(LogLevel::Error, ("No TrackTypes")); + Maybe mimeType = MakeMediaContainerType(aMIMEType); + if (!mimeType) { return nullptr; } -#ifdef MOZ_WEBM_ENCODER - else if (MediaEncoder::IsWebMEncoderEnabled() && - aMIMEType.EqualsLiteral(VIDEO_WEBM)) { - if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK && - MediaDecoder::IsOpusEnabled()) { + + for (const auto& codec : mimeType->ExtendedType().Codecs().Range()) { + if 
(codec.EqualsLiteral("opus")) { + MOZ_ASSERT(!audioEncoder); audioEncoder = MakeAndAddRef(aTrackRate); - } - if (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK) { + } else if (codec.EqualsLiteral("vp8") || codec.EqualsLiteral("vp8.0")) { + MOZ_ASSERT(!videoEncoder); if (Preferences::GetBool("media.recorder.video.frame_drops", true)) { videoEncoder = MakeAndAddRef( driftCompensator, aTrackRate, FrameDroppingMode::ALLOW); @@ -651,75 +626,31 @@ already_AddRefed MediaEncoder::CreateEncoder( videoEncoder = MakeAndAddRef( driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW); } - } - writer = MakeUnique(aTrackTypes); - mimeType = NS_LITERAL_STRING(VIDEO_WEBM); - } else if (MediaEncoder::IsWebMEncoderEnabled() && - aMIMEType.EqualsLiteral(AUDIO_WEBM) && - aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) { - if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK && - MediaDecoder::IsOpusEnabled()) { - audioEncoder = MakeAndAddRef(aTrackRate); - } - if (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK) { - if (Preferences::GetBool("media.recorder.video.frame_drops", true)) { - videoEncoder = MakeAndAddRef( - driftCompensator, aTrackRate, FrameDroppingMode::ALLOW); - } else { - videoEncoder = MakeAndAddRef( - driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW); - } - mimeType = NS_LITERAL_STRING(VIDEO_WEBM); } else { - mimeType = NS_LITERAL_STRING(AUDIO_WEBM); + MOZ_CRASH("Unknown codec"); } - writer = MakeUnique(aTrackTypes); - } -#endif // MOZ_WEBM_ENCODER - else if (MediaDecoder::IsOggEnabled() && MediaDecoder::IsOpusEnabled() && - aMIMEType.EqualsLiteral(AUDIO_OGG) && - aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) { - writer = MakeUnique(); - audioEncoder = MakeAndAddRef(aTrackRate); - mimeType = NS_LITERAL_STRING(AUDIO_OGG); } + + if (mimeType->Type() == MEDIAMIMETYPE(VIDEO_WEBM) || + mimeType->Type() == MEDIAMIMETYPE(AUDIO_WEBM)) { #ifdef MOZ_WEBM_ENCODER - else if (MediaEncoder::IsWebMEncoderEnabled() && - (aTrackTypes & 
ContainerWriter::CREATE_VIDEO_TRACK || - !MediaDecoder::IsOggEnabled())) { - if (aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK && - MediaDecoder::IsOpusEnabled()) { - audioEncoder = MakeAndAddRef(aTrackRate); - } - if (aTrackTypes & ContainerWriter::CREATE_VIDEO_TRACK) { - if (Preferences::GetBool("media.recorder.video.frame_drops", true)) { - videoEncoder = MakeAndAddRef( - driftCompensator, aTrackRate, FrameDroppingMode::ALLOW); - } else { - videoEncoder = MakeAndAddRef( - driftCompensator, aTrackRate, FrameDroppingMode::DISALLOW); - } - } - writer = MakeUnique(aTrackTypes); - mimeType = NS_LITERAL_STRING(VIDEO_WEBM); - } + MOZ_ASSERT_IF(mimeType->Type() == MEDIAMIMETYPE(AUDIO_WEBM), !videoEncoder); + writer = MakeUnique(); +#else + MOZ_CRASH("Webm cannot be selected if not supported"); #endif // MOZ_WEBM_ENCODER - else if (MediaDecoder::IsOggEnabled() && MediaDecoder::IsOpusEnabled() && - aTrackTypes & ContainerWriter::CREATE_AUDIO_TRACK) { + } else if (mimeType->Type() == MEDIAMIMETYPE(AUDIO_OGG)) { + MOZ_ASSERT(audioEncoder); + MOZ_ASSERT(!videoEncoder); writer = MakeUnique(); - audioEncoder = MakeAndAddRef(aTrackRate); - mimeType = NS_LITERAL_STRING(AUDIO_OGG); - } else { - LOG(LogLevel::Error, - ("Can not find any encoder to record this media stream")); - return nullptr; } + NS_ENSURE_TRUE(writer, nullptr); LOG(LogLevel::Info, ("Create encoder result:a[%p](%u bps) v[%p](%u bps) w[%p] mimeType = " "%s.", audioEncoder.get(), aAudioBitrate, videoEncoder.get(), aVideoBitrate, - writer.get(), NS_ConvertUTF16toUTF8(mimeType).get())); + writer.get(), NS_ConvertUTF16toUTF8(aMIMEType).get())); if (audioEncoder) { audioEncoder->SetWorkerThread(aEncoderThread); @@ -735,77 +666,7 @@ already_AddRefed MediaEncoder::CreateEncoder( } return MakeAndAddRef( aEncoderThread, std::move(driftCompensator), std::move(writer), - audioEncoder, videoEncoder, aTrackRate, mimeType); -} - -nsresult MediaEncoder::GetEncodedMetadata( - nsTArray>* aOutputBufs, nsAString& aMIMEType) { - 
AUTO_PROFILER_LABEL("MediaEncoder::GetEncodedMetadata", OTHER); - - MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - - if (mShutdown) { - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } - - if (!mInitialized) { - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } - - if (mMetadataEncoded) { - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } - - aMIMEType = mMIMEType; - - LOG(LogLevel::Verbose, - ("GetEncodedMetadata TimeStamp = %f", GetEncodeTimeStamp())); - - nsresult rv; - - if (mAudioEncoder) { - if (!mAudioEncoder->IsInitialized()) { - LOG(LogLevel::Error, - ("GetEncodedMetadata Audio encoder not initialized")); - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } - rv = CopyMetadataToMuxer(mAudioEncoder); - if (NS_FAILED(rv)) { - LOG(LogLevel::Error, ("Failed to Set Audio Metadata")); - SetError(); - return rv; - } - } - if (mVideoEncoder) { - if (!mVideoEncoder->IsInitialized()) { - LOG(LogLevel::Error, - ("GetEncodedMetadata Video encoder not initialized")); - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } - rv = CopyMetadataToMuxer(mVideoEncoder.get()); - if (NS_FAILED(rv)) { - LOG(LogLevel::Error, ("Failed to Set Video Metadata")); - SetError(); - return rv; - } - } - - rv = mWriter->GetContainerData(aOutputBufs, ContainerWriter::GET_HEADER); - if (NS_FAILED(rv)) { - LOG(LogLevel::Error, ("Writer fail to generate header!")); - SetError(); - return rv; - } - LOG(LogLevel::Verbose, - ("Finish GetEncodedMetadata TimeStamp = %f", GetEncodeTimeStamp())); - mMetadataEncoded = true; - - return NS_OK; + audioEncoder, videoEncoder, aTrackRate, aMIMEType); } nsresult MediaEncoder::GetEncodedData( @@ -813,47 +674,73 @@ nsresult MediaEncoder::GetEncodedData( AUTO_PROFILER_LABEL("MediaEncoder::GetEncodedData", OTHER); MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - - if (!mMetadataEncoded) { - MOZ_ASSERT(false); - return NS_ERROR_FAILURE; - } + MOZ_ASSERT(mInitialized); + MOZ_ASSERT_IF(mAudioEncoder, mAudioEncoder->IsInitialized()); + 
MOZ_ASSERT_IF(mVideoEncoder, mVideoEncoder->IsInitialized()); nsresult rv; LOG(LogLevel::Verbose, ("GetEncodedData TimeStamp = %f", GetEncodeTimeStamp())); - EncodedFrameContainer encodedData; - if (mVideoEncoder) { - // We're most likely to actually wait for a video frame, so do that first - // to minimize capture offset/lipsync issues. - rv = WriteEncodedDataToMuxer(mVideoEncoder); - LOG(LogLevel::Verbose, - ("Video encoded TimeStamp = %f", GetEncodeTimeStamp())); + if (mMuxer->NeedsMetadata()) { + nsTArray> meta; + if (mAudioEncoder && !*meta.AppendElement(mAudioEncoder->GetMetadata())) { + LOG(LogLevel::Error, ("Audio metadata is null")); + SetError(); + return NS_ERROR_ABORT; + } + if (mVideoEncoder && !*meta.AppendElement(mVideoEncoder->GetMetadata())) { + LOG(LogLevel::Error, ("Video metadata is null")); + SetError(); + return NS_ERROR_ABORT; + } + + rv = mMuxer->SetMetadata(meta); if (NS_FAILED(rv)) { - LOG(LogLevel::Warning, ("Failed to write encoded video data to muxer")); + LOG(LogLevel::Error, ("SetMetadata failed")); + SetError(); return rv; } } - if (mAudioEncoder) { - rv = WriteEncodedDataToMuxer(mAudioEncoder); - LOG(LogLevel::Verbose, - ("Audio encoded TimeStamp = %f", GetEncodeTimeStamp())); + // First, feed encoded data from encoders to muxer. + + if (mVideoEncoder && !mVideoEncoder->IsEncodingComplete()) { + nsTArray> videoFrames; + rv = mVideoEncoder->GetEncodedTrack(videoFrames); if (NS_FAILED(rv)) { - LOG(LogLevel::Warning, ("Failed to write encoded audio data to muxer")); + // Encoding might be canceled. + LOG(LogLevel::Error, ("Failed to get encoded data from video encoder.")); return rv; } + for (const RefPtr& frame : videoFrames) { + mMuxer->AddEncodedVideoFrame(frame); + } + if (mVideoEncoder->IsEncodingComplete()) { + mMuxer->VideoEndOfStream(); + } } - // In audio only or video only case, let unavailable track's flag to be - // true. 
- bool isAudioCompleted = !mAudioEncoder || mAudioEncoder->IsEncodingComplete(); - bool isVideoCompleted = !mVideoEncoder || mVideoEncoder->IsEncodingComplete(); - rv = mWriter->GetContainerData( - aOutputBufs, - isAudioCompleted && isVideoCompleted ? ContainerWriter::FLUSH_NEEDED : 0); - if (mWriter->IsWritingComplete()) { + if (mAudioEncoder && !mAudioEncoder->IsEncodingComplete()) { + nsTArray> audioFrames; + rv = mAudioEncoder->GetEncodedTrack(audioFrames); + if (NS_FAILED(rv)) { + // Encoding might be canceled. + LOG(LogLevel::Error, ("Failed to get encoded data from audio encoder.")); + return rv; + } + for (const RefPtr& frame : audioFrames) { + mMuxer->AddEncodedAudioFrame(frame); + } + if (mAudioEncoder->IsEncodingComplete()) { + mMuxer->AudioEndOfStream(); + } + } + + // Second, get data from muxer. This will do the actual muxing. + + rv = mMuxer->GetData(aOutputBufs); + if (mMuxer->IsFinished()) { mCompleted = true; Shutdown(); } @@ -861,38 +748,28 @@ nsresult MediaEncoder::GetEncodedData( LOG(LogLevel::Verbose, ("END GetEncodedData TimeStamp=%f " "mCompleted=%d, aComplete=%d, vComplete=%d", - GetEncodeTimeStamp(), mCompleted, isAudioCompleted, isVideoCompleted)); + GetEncodeTimeStamp(), mCompleted, + !mAudioEncoder || mAudioEncoder->IsEncodingComplete(), + !mVideoEncoder || mVideoEncoder->IsEncodingComplete())); return rv; } -void MediaEncoder::Shutdown() { +RefPtr MediaEncoder::Shutdown() { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - if (mShutdown) { - return; + if (mShutdownPromise) { + return mShutdownPromise; } - mShutdown = true; - LOG(LogLevel::Info, ("MediaEncoder has been shut down.")); + LOG(LogLevel::Info, ("MediaEncoder is shutting down.")); if (mAudioEncoder) { mAudioEncoder->UnregisterListener(mEncoderListener); } - if (mAudioListener) { - mAudioListener->NotifyShutdown(); - } if (mVideoEncoder) { mVideoEncoder->UnregisterListener(mEncoderListener); } - if (mVideoListener) { - mVideoListener->NotifyShutdown(); - } 
mEncoderListener->Forget(); - if (mCanceled) { - // Shutting down after being canceled. We cannot use the encoder thread. - return; - } - auto listeners(mListeners); for (auto& l : listeners) { // We dispatch here since this method is typically called from @@ -903,89 +780,34 @@ void MediaEncoder::Shutdown() { MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); Unused << rv; } + + AutoTArray, 2> shutdownPromises; + if (mAudioListener) { + shutdownPromises.AppendElement(mAudioListener->OnShutdown()); + } + if (mVideoListener) { + shutdownPromises.AppendElement(mVideoListener->OnShutdown()); + } + + return mShutdownPromise = + GenericNonExclusivePromise::All(mEncoderThread, shutdownPromises); } -nsresult MediaEncoder::WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder) { - AUTO_PROFILER_LABEL("MediaEncoder::WriteEncodedDataToMuxer", OTHER); - - MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - - if (!aTrackEncoder) { - NS_ERROR("No track encoder to get data from"); - return NS_ERROR_FAILURE; - } - - if (aTrackEncoder->IsEncodingComplete()) { - return NS_OK; - } - - EncodedFrameContainer encodedData; - nsresult rv = aTrackEncoder->GetEncodedTrack(encodedData); - if (NS_FAILED(rv)) { - // Encoding might be canceled. - LOG(LogLevel::Error, ("Failed to get encoded data from encoder.")); - SetError(); - return rv; - } - rv = mWriter->WriteEncodedTrack( - encodedData, - aTrackEncoder->IsEncodingComplete() ? 
ContainerWriter::END_OF_STREAM : 0); - if (NS_FAILED(rv)) { - LOG(LogLevel::Error, - ("Failed to write encoded track to the media container.")); - SetError(); - } - return rv; -} - -nsresult MediaEncoder::CopyMetadataToMuxer(TrackEncoder* aTrackEncoder) { - AUTO_PROFILER_LABEL("MediaEncoder::CopyMetadataToMuxer", OTHER); - - MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - - if (!aTrackEncoder) { - NS_ERROR("No track encoder to get metadata from"); - return NS_ERROR_FAILURE; - } - - RefPtr meta = aTrackEncoder->GetMetadata(); - if (meta == nullptr) { - LOG(LogLevel::Error, ("metadata == null")); - SetError(); - return NS_ERROR_ABORT; - } - - nsresult rv = mWriter->SetMetadata(meta); - if (NS_FAILED(rv)) { - LOG(LogLevel::Error, ("SetMetadata failed")); - SetError(); - } - return rv; -} - -bool MediaEncoder::IsShutdown() { - MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); - return mShutdown; -} - -void MediaEncoder::Cancel() { +RefPtr MediaEncoder::Cancel() { MOZ_ASSERT(NS_IsMainThread()); - RefPtr self = this; - nsresult rv = mEncoderThread->Dispatch(NewRunnableFrom([self]() mutable { - self->mCanceled = true; + Stop(); - if (self->mAudioEncoder) { - self->mAudioEncoder->Cancel(); - } - if (self->mVideoEncoder) { - self->mVideoEncoder->Cancel(); - } - self->Shutdown(); - return NS_OK; - })); - MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); - Unused << rv; + return InvokeAsync(mEncoderThread, __func__, + [self = RefPtr(this), this]() { + if (mAudioEncoder) { + mAudioEncoder->Cancel(); + } + if (mVideoEncoder) { + mVideoEncoder->Cancel(); + } + return Shutdown(); + }); } bool MediaEncoder::HasError() { @@ -1033,11 +855,18 @@ void MediaEncoder::Stop() { } } -#ifdef MOZ_WEBM_ENCODER bool MediaEncoder::IsWebMEncoderEnabled() { +#ifdef MOZ_WEBM_ENCODER return StaticPrefs::media_encoder_webm_enabled(); -} +#else + return false; #endif +} + +const nsString& MediaEncoder::MimeType() const { + MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); + return mMIMEType; +} void 
MediaEncoder::NotifyInitialized() { MOZ_ASSERT(mEncoderThread->IsCurrentThreadIn()); @@ -1105,13 +934,13 @@ size_t MediaEncoder::SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) { return size; } -void MediaEncoder::SetVideoKeyFrameInterval(int32_t aVideoKeyFrameInterval) { +void MediaEncoder::SetVideoKeyFrameInterval(uint32_t aVideoKeyFrameInterval) { if (!mVideoEncoder) { return; } MOZ_ASSERT(mEncoderThread); - nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod( + nsresult rv = mEncoderThread->Dispatch(NewRunnableMethod( "mozilla::VideoTrackEncoder::SetKeyFrameInterval", mVideoEncoder, &VideoTrackEncoder::SetKeyFrameInterval, aVideoKeyFrameInterval)); MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); @@ -1119,3 +948,5 @@ void MediaEncoder::SetVideoKeyFrameInterval(int32_t aVideoKeyFrameInterval) { } } // namespace mozilla + +#undef LOG diff --git a/dom/media/encoder/MediaEncoder.h b/dom/media/encoder/MediaEncoder.h index 7f19d701c7..563db59185 100644 --- a/dom/media/encoder/MediaEncoder.h +++ b/dom/media/encoder/MediaEncoder.h @@ -7,6 +7,7 @@ #include "ContainerWriter.h" #include "CubebUtils.h" +#include "MediaQueue.h" #include "MediaTrackGraph.h" #include "MediaTrackListener.h" #include "mozilla/DebugOnly.h" @@ -18,6 +19,7 @@ namespace mozilla { class DriftCompensator; +class Muxer; class Runnable; class TaskQueue; @@ -75,29 +77,21 @@ class MediaEncoderListener { * been initialized and when there's data available. * => encoder->RegisterListener(listener); * - * 3) Connect the MediaStreamTracks to be recorded. - * => encoder->ConnectMediaStreamTrack(track); - * This creates the corresponding TrackEncoder and connects the track and - * the TrackEncoder through a track listener. This also starts encoding. - * - * 4) When the MediaEncoderListener is notified that the MediaEncoder is - * initialized, we can encode metadata. 
- * => encoder->GetEncodedMetadata(...); - * - * 5) When the MediaEncoderListener is notified that the MediaEncoder has - * data available, we can encode data. + * 3) When the MediaEncoderListener is notified that the MediaEncoder has + * data available, we can encode data. This also encodes metadata on its + * first invocation. * => encoder->GetEncodedData(...); * - * 6) To stop encoding, there are multiple options: + * 4) To stop encoding, there are multiple options: * - * 6.1) Stop() for a graceful stop. + * 4.1) Stop() for a graceful stop. * => encoder->Stop(); * - * 6.2) Cancel() for an immediate stop, if you don't need the data currently + * 4.2) Cancel() for an immediate stop, if you don't need the data currently * buffered. * => encoder->Cancel(); * - * 6.3) When all input tracks end, the MediaEncoder will automatically stop + * 4.3) When all input tracks end, the MediaEncoder will automatically stop * and shut down. */ class MediaEncoder { @@ -156,44 +150,31 @@ class MediaEncoder { uint32_t aAudioBitrate, uint32_t aVideoBitrate, uint8_t aTrackTypes, TrackRate aTrackRate); - /** - * Encodes raw metadata for all tracks to aOutputBufs. aMIMEType is the valid - * mime-type for the returned container data. The buffer of container data is - * allocated in ContainerWriter::GetContainerData(). - * - * Should there be insufficient input data for either track encoder to infer - * the metadata, or if metadata has already been encoded, we return an error - * and the output arguments are undefined. Otherwise we return NS_OK. - */ - nsresult GetEncodedMetadata(nsTArray>* aOutputBufs, - nsAString& aMIMEType); /** * Encodes raw data for all tracks to aOutputBufs. The buffer of container * data is allocated in ContainerWriter::GetContainerData(). * - * This implies that metadata has already been encoded and that all track - * encoders are still active. Should either implication break, we return an - * error and the output argument is undefined. Otherwise we return NS_OK. 
+ * On its first call, metadata is also encoded. TrackEncoders must have been + * initialized before this is called. */ nsresult GetEncodedData(nsTArray>* aOutputBufs); /** - * Return true if MediaEncoder has been shutdown. Reasons are encoding + * Asserts that Shutdown() has been called. Reasons are encoding * complete, encounter an error, or being canceled by its caller. */ - bool IsShutdown(); + void AssertShutdownCalled() { MOZ_ASSERT(mShutdownPromise); } /** * Cancels the encoding and shuts down the encoder using Shutdown(). - * Listeners are not notified of the shutdown. */ - void Cancel(); + RefPtr Cancel(); bool HasError(); -#ifdef MOZ_WEBM_ENCODER static bool IsWebMEncoderEnabled(); -#endif + + const nsString& MimeType() const; /** * Notifies listeners that this MediaEncoder has been initialized. @@ -228,7 +209,7 @@ class MediaEncoder { /** * Set desired video keyframe interval defined in milliseconds. */ - void SetVideoKeyFrameInterval(int32_t aVideoKeyFrameInterval); + void SetVideoKeyFrameInterval(uint32_t aVideoKeyFrameInterval); protected: ~MediaEncoder(); @@ -250,7 +231,7 @@ class MediaEncoder { * Shuts down the MediaEncoder and cleans up track encoders. * Listeners will be notified of the shutdown unless we were Cancel()ed first. 
*/ - void Shutdown(); + RefPtr Shutdown(); /** * Sets mError to true, notifies listeners of the error if mError changed, @@ -258,15 +239,10 @@ class MediaEncoder { */ void SetError(); - // Get encoded data from trackEncoder and write to muxer - nsresult WriteEncodedDataToMuxer(TrackEncoder* aTrackEncoder); - // Get metadata from trackEncoder and copy to muxer - nsresult CopyMetadataToMuxer(TrackEncoder* aTrackEncoder); - const RefPtr mEncoderThread; const RefPtr mDriftCompensator; - UniquePtr mWriter; + UniquePtr mMuxer; RefPtr mAudioEncoder; RefPtr mAudioListener; RefPtr mVideoEncoder; @@ -294,13 +270,12 @@ class MediaEncoder { RefPtr mGraphTrack; TimeStamp mStartTime; - nsString mMIMEType; + const nsString mMIMEType; bool mInitialized; - bool mMetadataEncoded; bool mCompleted; bool mError; - bool mCanceled; - bool mShutdown; + // Set when shutdown starts. + RefPtr mShutdownPromise; // Get duration from create encoder, for logging purpose double GetEncodeTimeStamp() { TimeDuration decodeTime; diff --git a/dom/media/encoder/Muxer.cpp b/dom/media/encoder/Muxer.cpp new file mode 100644 index 0000000000..a6d8cd7f41 --- /dev/null +++ b/dom/media/encoder/Muxer.cpp @@ -0,0 +1,228 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "Muxer.h" + +#include "ContainerWriter.h" + +namespace mozilla { + +LazyLogModule gMuxerLog("Muxer"); +#define LOG(type, ...) 
MOZ_LOG(gMuxerLog, type, (__VA_ARGS__)) + +Muxer::Muxer(UniquePtr aWriter) + : mWriter(std::move(aWriter)) {} + +bool Muxer::IsFinished() { return mWriter->IsWritingComplete(); } + +nsresult Muxer::SetMetadata( + const nsTArray>& aMetadata) { + nsresult rv = mWriter->SetMetadata(aMetadata); + if (NS_FAILED(rv)) { + LOG(LogLevel::Error, "%p Setting metadata failed, tracks=%zu", this, + aMetadata.Length()); + return rv; + } + + for (const auto& track : aMetadata) { + switch (track->GetKind()) { + case TrackMetadataBase::METADATA_OPUS: { + // In the case of Opus we need to calculate the codec delay based on the + // pre-skip. For more information see: + // https://tools.ietf.org/html/rfc7845#section-4.2 + // Calculate offset in microseconds + OpusMetadata* opusMeta = static_cast(track.get()); + mAudioCodecDelay = static_cast( + LittleEndian::readUint16(opusMeta->mIdHeader.Elements() + 10) * + PR_USEC_PER_SEC / 48000); + [[fallthrough]]; + } + case TrackMetadataBase::METADATA_VORBIS: + case TrackMetadataBase::METADATA_AAC: + case TrackMetadataBase::METADATA_AMR: + case TrackMetadataBase::METADATA_EVRC: + MOZ_ASSERT(!mHasAudio, "Only one audio track supported"); + mHasAudio = true; + break; + case TrackMetadataBase::METADATA_VP8: + MOZ_ASSERT(!mHasVideo, "Only one video track supported"); + mHasVideo = true; + break; + default: + MOZ_CRASH("Unknown codec metadata"); + }; + } + mMetadataSet = true; + MOZ_ASSERT(mHasAudio || mHasVideo); + if (!mHasAudio) { + mEncodedAudioFrames.Finish(); + MOZ_ASSERT(mEncodedAudioFrames.AtEndOfStream()); + } + if (!mHasVideo) { + mEncodedVideoFrames.Finish(); + MOZ_ASSERT(mEncodedVideoFrames.AtEndOfStream()); + } + LOG(LogLevel::Info, "%p Metadata set; audio=%d, video=%d", this, mHasAudio, + mHasVideo); + return rv; +} + +void Muxer::AddEncodedAudioFrame(EncodedFrame* aFrame) { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasAudio); + if (aFrame->mFrameType == EncodedFrame::FrameType::OPUS_AUDIO_FRAME) { + aFrame->mTime += mAudioCodecDelay; 
+ } + mEncodedAudioFrames.Push(aFrame); + LOG(LogLevel::Verbose, + "%p Added audio frame of type %u, [start %" PRIu64 ", end %" PRIu64 ")", + this, aFrame->mFrameType, aFrame->mTime, + aFrame->mTime + aFrame->mDuration); +} + +void Muxer::AddEncodedVideoFrame(EncodedFrame* aFrame) { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasVideo); + mEncodedVideoFrames.Push(aFrame); + LOG(LogLevel::Verbose, + "%p Added video frame of type %u, [start %" PRIu64 ", end %" PRIu64 ")", + this, aFrame->mFrameType, aFrame->mTime, + aFrame->mTime + aFrame->mDuration); +} + +void Muxer::AudioEndOfStream() { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasAudio); + LOG(LogLevel::Info, "%p Reached audio EOS", this); + mEncodedAudioFrames.Finish(); +} + +void Muxer::VideoEndOfStream() { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasVideo); + LOG(LogLevel::Info, "%p Reached video EOS", this); + mEncodedVideoFrames.Finish(); +} + +nsresult Muxer::GetData(nsTArray>* aOutputBuffers) { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasAudio || mHasVideo); + + nsresult rv; + if (!mMetadataEncoded) { + rv = mWriter->GetContainerData(aOutputBuffers, ContainerWriter::GET_HEADER); + if (NS_FAILED(rv)) { + LOG(LogLevel::Error, "%p Failed getting metadata from writer", this); + return rv; + } + mMetadataEncoded = true; + } + + if (mEncodedAudioFrames.GetSize() == 0 && !mEncodedAudioFrames.IsFinished() && + mEncodedVideoFrames.GetSize() == 0 && !mEncodedVideoFrames.IsFinished()) { + // Nothing to mux. + return NS_OK; + } + + rv = Mux(); + if (NS_FAILED(rv)) { + LOG(LogLevel::Error, "%p Failed muxing data into writer", this); + return rv; + } + + MOZ_ASSERT_IF( + mEncodedAudioFrames.IsFinished() && mEncodedVideoFrames.IsFinished(), + mEncodedAudioFrames.AtEndOfStream()); + MOZ_ASSERT_IF( + mEncodedAudioFrames.IsFinished() && mEncodedVideoFrames.IsFinished(), + mEncodedVideoFrames.AtEndOfStream()); + uint32_t flags = + mEncodedAudioFrames.AtEndOfStream() && mEncodedVideoFrames.AtEndOfStream() + ? 
ContainerWriter::FLUSH_NEEDED + : 0; + + if (mEncodedAudioFrames.AtEndOfStream() && + mEncodedVideoFrames.AtEndOfStream()) { + LOG(LogLevel::Info, "%p All data written", this); + } + + return mWriter->GetContainerData(aOutputBuffers, flags); +} + +nsresult Muxer::Mux() { + MOZ_ASSERT(mMetadataSet); + MOZ_ASSERT(mHasAudio || mHasVideo); + + nsTArray> frames; + // The times at which we expect our next video and audio frames. These are + // based on the time + duration (GetEndTime()) of the last seen frames. + // Assumes that the encoders write the correct duration for frames.; + uint64_t expectedNextVideoTime = 0; + uint64_t expectedNextAudioTime = 0; + // Interleave frames until we're out of audio or video + while (mEncodedVideoFrames.GetSize() > 0 && + mEncodedAudioFrames.GetSize() > 0) { + RefPtr videoFrame = mEncodedVideoFrames.PeekFront(); + RefPtr audioFrame = mEncodedAudioFrames.PeekFront(); + // For any expected time our frames should occur at or after that time. + MOZ_ASSERT(videoFrame->mTime >= expectedNextVideoTime); + MOZ_ASSERT(audioFrame->mTime >= expectedNextAudioTime); + if (videoFrame->mTime <= audioFrame->mTime) { + expectedNextVideoTime = videoFrame->GetEndTime(); + RefPtr frame = mEncodedVideoFrames.PopFront(); + frames.AppendElement(frame); + } else { + expectedNextAudioTime = audioFrame->GetEndTime(); + RefPtr frame = mEncodedAudioFrames.PopFront(); + frames.AppendElement(frame); + } + } + + // If we're out of audio we still may be able to add more video... + if (mEncodedAudioFrames.GetSize() == 0) { + while (mEncodedVideoFrames.GetSize() > 0) { + if (!mEncodedAudioFrames.AtEndOfStream() && + mEncodedVideoFrames.PeekFront()->mTime > expectedNextAudioTime) { + // Audio encoding is not complete and since the video frame comes + // after our next audio frame we cannot safely add it. + break; + } + frames.AppendElement(mEncodedVideoFrames.PopFront()); + } + } + + // If we're out of video we still may be able to add more audio... 
+ if (mEncodedVideoFrames.GetSize() == 0) { + while (mEncodedAudioFrames.GetSize() > 0) { + if (!mEncodedVideoFrames.AtEndOfStream() && + mEncodedAudioFrames.PeekFront()->mTime > expectedNextVideoTime) { + // Video encoding is not complete and since the audio frame comes + // after our next video frame we cannot safely add it. + break; + } + frames.AppendElement(mEncodedAudioFrames.PopFront()); + } + } + + LOG(LogLevel::Debug, + "%p Muxed data, remaining-audio=%zu, remaining-video=%zu", this, + mEncodedAudioFrames.GetSize(), mEncodedVideoFrames.GetSize()); + + // If encoding is complete for both encoders we should signal end of stream, + // otherwise we keep going. + uint32_t flags = + mEncodedVideoFrames.AtEndOfStream() && mEncodedAudioFrames.AtEndOfStream() + ? ContainerWriter::END_OF_STREAM + : 0; + nsresult rv = mWriter->WriteEncodedTrack(frames, flags); + if (NS_FAILED(rv)) { + LOG(LogLevel::Error, "Error! Failed to write muxed data to the container"); + } + return rv; +} + +} // namespace mozilla + +#undef LOG diff --git a/dom/media/encoder/Muxer.h b/dom/media/encoder/Muxer.h new file mode 100644 index 0000000000..f2f93582d3 --- /dev/null +++ b/dom/media/encoder/Muxer.h @@ -0,0 +1,74 @@ +/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*-*/ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_ENCODER_MUXER_H_ +#define DOM_MEDIA_ENCODER_MUXER_H_ + +#include "MediaQueue.h" + +namespace mozilla { + +class ContainerWriter; + +// Generic Muxer class that helps pace the output from track encoders to the +// ContainerWriter, so time never appears to go backwards. +// Note that the entire class is written for single threaded access. 
+class Muxer { + public: + explicit Muxer(UniquePtr aWriter); + ~Muxer() = default; + + // Returns true when all tracks have ended, and all data has been muxed and + // fetched. + bool IsFinished(); + + // Returns true if this muxer has not been given metadata yet. + bool NeedsMetadata() const { return !mMetadataSet; } + + // Sets metadata for all tracks. This may only be called once. + nsresult SetMetadata(const nsTArray>& aMetadata); + + // Adds an encoded audio frame for muxing + void AddEncodedAudioFrame(EncodedFrame* aFrame); + + // Adds an encoded video frame for muxing + void AddEncodedVideoFrame(EncodedFrame* aFrame); + + // Marks the audio track as ended. Once all tracks for which we have metadata + // have ended, GetData() will drain and the muxer will be marked as finished. + void AudioEndOfStream(); + + // Marks the video track as ended. Once all tracks for which we have metadata + // have ended, GetData() will drain and the muxer will be marked as finished. + void VideoEndOfStream(); + + // Gets the data that has been muxed and written into the container so far. + nsresult GetData(nsTArray>* aOutputBuffers); + + private: + // Writes data in MediaQueues to the ContainerWriter. + nsresult Mux(); + + // Audio frames that have been encoded and are pending write to the muxer. + MediaQueue mEncodedAudioFrames; + // Video frames that have been encoded and are pending write to the muxer. + MediaQueue mEncodedVideoFrames; + // The writer for the specific container we're recording into. + UniquePtr mWriter; + // How much each audio time stamp should be delayed in microseconds. Used to + // adjust for opus codec delay. + uint64_t mAudioCodecDelay = 0; + // True once metadata has been set in the muxer. + bool mMetadataSet = false; + // True once metadata has been written to file. + bool mMetadataEncoded = false; + // True if metadata is set and contains an audio track. + bool mHasAudio = false; + // True if metadata is set and contains a video track. 
+ bool mHasVideo = false; +}; +} // namespace mozilla + +#endif diff --git a/dom/media/encoder/OpusTrackEncoder.cpp b/dom/media/encoder/OpusTrackEncoder.cpp index 0c58c8eeb8..86f6a146a2 100644 --- a/dom/media/encoder/OpusTrackEncoder.cpp +++ b/dom/media/encoder/OpusTrackEncoder.cpp @@ -9,7 +9,6 @@ #include -#undef LOG #define LOG(args, ...) namespace mozilla { @@ -227,7 +226,8 @@ already_AddRefed OpusTrackEncoder::GetMetadata() { return meta.forget(); } -nsresult OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { +nsresult OpusTrackEncoder::GetEncodedTrack( + nsTArray>& aData) { AUTO_PROFILER_LABEL("OpusTrackEncoder::GetEncodedTrack", OTHER); MOZ_ASSERT(mInitialized || mCanceled); @@ -324,7 +324,7 @@ nsresult OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { MOZ_ASSERT(frameCopied <= 3844, "frameCopied exceeded expected range"); RefPtr audiodata = new EncodedFrame(); - audiodata->SetFrameType(EncodedFrame::OPUS_AUDIO_FRAME); + audiodata->mFrameType = EncodedFrame::OPUS_AUDIO_FRAME; int framesInPCM = frameCopied; if (mResampler) { AutoTArray resamplingDest; @@ -366,10 +366,10 @@ nsresult OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { mResampledLeftover.Length()); // This is always at 48000Hz. framesInPCM = framesLeft + outframesToCopy; - audiodata->SetDuration(framesInPCM); + audiodata->mDuration = framesInPCM; } else { // The ogg time stamping and pre-skip is always timed at 48000. - audiodata->SetDuration(frameCopied * (kOpusSamplingRate / mSamplingRate)); + audiodata->mDuration = frameCopied * (kOpusSamplingRate / mSamplingRate); } // Remove the raw data which has been pulled to pcm buffer. 
@@ -421,14 +421,16 @@ nsresult OpusTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { audiodata->SwapInFrameData(frameData); // timestamp should be the time of the first sample - audiodata->SetTimeStamp(mOutputTimeStamp); + audiodata->mTime = mOutputTimeStamp; mOutputTimeStamp += FramesToUsecs(GetPacketDuration(), kOpusSamplingRate).value(); LOG("[Opus] mOutputTimeStamp %lld.", mOutputTimeStamp); - aData.AppendEncodedFrame(audiodata); + aData.AppendElement(audiodata); } return result >= 0 ? NS_OK : NS_ERROR_FAILURE; } } // namespace mozilla + +#undef LOG diff --git a/dom/media/encoder/OpusTrackEncoder.h b/dom/media/encoder/OpusTrackEncoder.h index 425f8c51d5..f124285f5b 100644 --- a/dom/media/encoder/OpusTrackEncoder.h +++ b/dom/media/encoder/OpusTrackEncoder.h @@ -32,7 +32,7 @@ class OpusTrackEncoder : public AudioTrackEncoder { already_AddRefed GetMetadata() override; - nsresult GetEncodedTrack(EncodedFrameContainer& aData) override; + nsresult GetEncodedTrack(nsTArray>& aData) override; protected: int GetPacketDuration() override; diff --git a/dom/media/encoder/TrackEncoder.cpp b/dom/media/encoder/TrackEncoder.cpp index 20c31f70ad..1ba3e7d661 100644 --- a/dom/media/encoder/TrackEncoder.cpp +++ b/dom/media/encoder/TrackEncoder.cpp @@ -28,7 +28,7 @@ static const int AUDIO_INIT_FAILED_DURATION = 1; static const int VIDEO_INIT_FAILED_DURATION = 30; // A maximal key frame interval allowed to set. // Longer values will be shorten to this value. 
-static const int DEFAULT_KEYFRAME_INTERVAL_MS = 1000; +static const unsigned int DEFAULT_KEYFRAME_INTERVAL_MS = 1000; TrackEncoder::TrackEncoder(TrackRate aTrackRate) : mEncodingComplete(false), @@ -752,9 +752,15 @@ size_t VideoTrackEncoder::SizeOfExcludingThis( mOutgoingBuffer.SizeOfExcludingThis(aMallocSizeOf); } -void VideoTrackEncoder::SetKeyFrameInterval(int32_t aKeyFrameInterval) { +void VideoTrackEncoder::SetKeyFrameInterval(uint32_t aKeyFrameInterval) { MOZ_ASSERT(!mWorkerThread || mWorkerThread->IsCurrentThreadIn()); + if (aKeyFrameInterval == 0) { + mKeyFrameInterval = DEFAULT_KEYFRAME_INTERVAL_MS; + return; + } mKeyFrameInterval = std::min(aKeyFrameInterval, DEFAULT_KEYFRAME_INTERVAL_MS); } } // namespace mozilla + +#undef TRACK_LOG diff --git a/dom/media/encoder/TrackEncoder.h b/dom/media/encoder/TrackEncoder.h index a8cfddca5a..fd6031a0cc 100644 --- a/dom/media/encoder/TrackEncoder.h +++ b/dom/media/encoder/TrackEncoder.h @@ -6,7 +6,7 @@ #define TrackEncoder_h_ #include "AudioSegment.h" -#include "EncodedFrameContainer.h" +#include "EncodedFrame.h" #include "MediaTrackGraph.h" #include "TrackMetadataBase.h" #include "VideoSegment.h" @@ -80,7 +80,7 @@ class TrackEncoder { * Encodes raw segments. Result data is returned in aData, and called on the * worker thread. */ - virtual nsresult GetEncodedTrack(EncodedFrameContainer& aData) = 0; + virtual nsresult GetEncodedTrack(nsTArray>& aData) = 0; /** * Returns true once this TrackEncoder is initialized. @@ -419,7 +419,7 @@ class VideoTrackEncoder : public TrackEncoder { /** * Set desired keyframe interval defined in milliseconds. */ - void SetKeyFrameInterval(int32_t aKeyFrameInterval); + void SetKeyFrameInterval(uint32_t aKeyFrameInterval); protected: /** @@ -517,7 +517,7 @@ class VideoTrackEncoder : public TrackEncoder { /** * The desired keyframe interval defined in milliseconds. 
*/ - int32_t mKeyFrameInterval; + uint32_t mKeyFrameInterval; /** * True if the video MediaTrackTrack this VideoTrackEncoder is attached to is diff --git a/dom/media/encoder/VP8TrackEncoder.cpp b/dom/media/encoder/VP8TrackEncoder.cpp index 74a805f4ca..ae12aa7d7d 100644 --- a/dom/media/encoder/VP8TrackEncoder.cpp +++ b/dom/media/encoder/VP8TrackEncoder.cpp @@ -219,7 +219,8 @@ already_AddRefed VP8TrackEncoder::GetMetadata() { return meta.forget(); } -nsresult VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData) { +nsresult VP8TrackEncoder::GetEncodedPartitions( + nsTArray>& aData) { vpx_codec_iter_t iter = nullptr; EncodedFrame::FrameType frameType = EncodedFrame::VP8_P_FRAME; nsTArray frameData; @@ -248,7 +249,7 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData) { if (!frameData.IsEmpty()) { // Copy the encoded data to aData. EncodedFrame* videoData = new EncodedFrame(); - videoData->SetFrameType(frameType); + videoData->mFrameType = frameType; // Convert the timestamp and duration to Usecs. 
CheckedInt64 timestamp = FramesToUsecs(pkt->data.frame.pts, mTrackRate); @@ -256,7 +257,7 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData) { NS_ERROR("Microsecond timestamp overflow"); return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } - videoData->SetTimeStamp((uint64_t)timestamp.value()); + videoData->mTime = (uint64_t)timestamp.value(); mExtractedDuration += pkt->data.frame.duration; if (!mExtractedDuration.isValid()) { @@ -278,14 +279,13 @@ nsresult VP8TrackEncoder::GetEncodedPartitions(EncodedFrameContainer& aData) { } mExtractedDurationUs = totalDuration; - videoData->SetDuration((uint64_t)duration.value()); + videoData->mDuration = (uint64_t)duration.value(); videoData->SwapInFrameData(frameData); VP8LOG(LogLevel::Verbose, "GetEncodedPartitions TimeStamp %" PRIu64 ", Duration %" PRIu64 ", FrameType %d", - videoData->GetTimeStamp(), videoData->GetDuration(), - videoData->GetFrameType()); - aData.AppendEncodedFrame(videoData); + videoData->mTime, videoData->mDuration, videoData->mFrameType); + aData.AppendElement(videoData); } return pkt ? NS_OK : NS_ERROR_NOT_AVAILABLE; @@ -440,7 +440,8 @@ VP8TrackEncoder::EncodeOperation VP8TrackEncoder::GetNextEncodeOperation( * encode it. * 4. Remove the encoded chunks in mSourceSegment after for-loop. */ -nsresult VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { +nsresult VP8TrackEncoder::GetEncodedTrack( + nsTArray>& aData) { AUTO_PROFILER_LABEL("VP8TrackEncoder::GetEncodedTrack", OTHER); MOZ_ASSERT(mInitialized || mCanceled); @@ -508,7 +509,7 @@ nsresult VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { // because this frame will be skipped. VP8LOG(LogLevel::Warning, "MediaRecorder lagging behind. 
Skipping a frame."); - RefPtr last = aData.GetEncodedFrames().LastElement(); + RefPtr last = aData.LastElement(); if (last) { mExtractedDuration += chunk.mDuration; if (!mExtractedDuration.isValid()) { @@ -524,8 +525,7 @@ nsresult VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { NS_ERROR("skipped duration overflow"); return NS_ERROR_DOM_MEDIA_OVERFLOW_ERR; } - last->SetDuration(last->GetDuration() + - (static_cast(skippedDuration.value()))); + last->mDuration += static_cast(skippedDuration.value()); } } @@ -569,3 +569,5 @@ nsresult VP8TrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData) { } } // namespace mozilla + +#undef VP8LOG diff --git a/dom/media/encoder/VP8TrackEncoder.h b/dom/media/encoder/VP8TrackEncoder.h index ae43bafe68..0c55cc5f5b 100644 --- a/dom/media/encoder/VP8TrackEncoder.h +++ b/dom/media/encoder/VP8TrackEncoder.h @@ -33,7 +33,7 @@ class VP8TrackEncoder : public VideoTrackEncoder { already_AddRefed GetMetadata() final; - nsresult GetEncodedTrack(EncodedFrameContainer& aData) final; + nsresult GetEncodedTrack(nsTArray>& aData) final; protected: nsresult Init(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth, @@ -49,7 +49,7 @@ class VP8TrackEncoder : public VideoTrackEncoder { // null for EOS detection. // NS_OK if some data was appended to aData. // An error nsresult otherwise. - nsresult GetEncodedPartitions(EncodedFrameContainer& aData); + nsresult GetEncodedPartitions(nsTArray>& aData); // Prepare the input data to the mVPXImageWrapper for encoding. 
nsresult PrepareRawFrame(VideoChunk& aChunk); diff --git a/dom/media/encoder/moz.build b/dom/media/encoder/moz.build index 8db22684df..d2a7c230df 100644 --- a/dom/media/encoder/moz.build +++ b/dom/media/encoder/moz.build @@ -7,7 +7,7 @@ with Files('*'): EXPORTS += [ 'ContainerWriter.h', - 'EncodedFrameContainer.h', + 'EncodedFrame.h', 'MediaEncoder.h', 'OpusTrackEncoder.h', 'TrackEncoder.h', @@ -16,6 +16,7 @@ EXPORTS += [ UNIFIED_SOURCES += [ 'MediaEncoder.cpp', + 'Muxer.cpp', 'OpusTrackEncoder.cpp', 'TrackEncoder.cpp', ] diff --git a/dom/media/gtest/AudioGenerator.cpp b/dom/media/gtest/AudioGenerator.cpp new file mode 100644 index 0000000000..c0ed779cfd --- /dev/null +++ b/dom/media/gtest/AudioGenerator.cpp @@ -0,0 +1,25 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#include "AudioGenerator.h" + +#include "AudioSegment.h" + +using namespace mozilla; + +AudioGenerator::AudioGenerator(int32_t aChannels, int32_t aSampleRate) + : mGenerator(aSampleRate, 1000), mChannels(aChannels) {} + +void AudioGenerator::Generate(AudioSegment& aSegment, const int32_t& aSamples) { + RefPtr buffer = + SharedBuffer::Create(aSamples * sizeof(int16_t)); + int16_t* dest = static_cast(buffer->Data()); + mGenerator.generate(dest, aSamples); + AutoTArray channels; + for (int32_t i = 0; i < mChannels; i++) { + channels.AppendElement(dest); + } + aSegment.AppendFrames(buffer.forget(), channels, aSamples, + PRINCIPAL_HANDLE_NONE); +} diff --git a/dom/media/gtest/AudioGenerator.h b/dom/media/gtest/AudioGenerator.h new file mode 100644 index 0000000000..83d0c4a429 --- /dev/null +++ b/dom/media/gtest/AudioGenerator.h @@ -0,0 +1,25 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_ +#define DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_ + +#include "prtime.h" +#include "SineWaveGenerator.h" + +namespace mozilla { +class AudioSegment; +} + +class AudioGenerator { + public: + AudioGenerator(int32_t aChannels, int32_t aSampleRate); + void Generate(mozilla::AudioSegment& aSegment, const int32_t& aSamples); + + private: + mozilla::SineWaveGenerator mGenerator; + const int32_t mChannels; +}; + +#endif // DOM_MEDIA_GTEST_AUDIO_GENERATOR_H_ diff --git a/dom/media/gtest/TestAudioCallbackDriver.cpp b/dom/media/gtest/TestAudioCallbackDriver.cpp index 04e0f1efff..89fc1de6de 100644 --- a/dom/media/gtest/TestAudioCallbackDriver.cpp +++ b/dom/media/gtest/TestAudioCallbackDriver.cpp @@ -2,7 +2,7 @@ * License, v. 2.0. If a copy of the MPL was not distributed with this file, * You can obtain one at http://mozilla.org/MPL/2.0/. */ -#define ENABLE_SET_CUBEB_BACKEND 1 +#include "CubebUtils.h" #include "GraphDriver.h" #include "MediaTrackGraphImpl.h" @@ -26,9 +26,9 @@ RefPtr MakeMTGImpl() { } TEST(TestAudioCallbackDriver, StartStop) -{ +MOZ_CAN_RUN_SCRIPT_FOR_DEFINITION { MockCubeb* mock = new MockCubeb(); - mozilla::CubebUtils::ForceSetCubebContext(mock->AsCubebContext()); + CubebUtils::ForceSetCubebContext(mock->AsCubebContext()); RefPtr graph = MakeMTGImpl(); EXPECT_TRUE(!!graph->mDriver) << "AudioCallbackDriver created."; @@ -44,7 +44,7 @@ TEST(TestAudioCallbackDriver, StartStop) EXPECT_TRUE(driver->IsStarted()) << "Verify thread is started"; // This will block untill all events has been executed. 
- driver->AsAudioCallbackDriver()->Shutdown(); + MOZ_KnownLive(driver->AsAudioCallbackDriver())->Shutdown(); EXPECT_FALSE(driver->ThreadRunning()) << "Verify thread is not running"; EXPECT_FALSE(driver->IsStarted()) << "Verify thread is not started"; @@ -53,5 +53,6 @@ TEST(TestAudioCallbackDriver, StartStop) // block for ever if it was not cleared. The same logic exists in // MediaTrackGraphShutDownRunnable graph->mDriver = nullptr; + + graph->RemoveShutdownBlocker(); } -#undef ENABLE_SET_CUBEB_BACKEND diff --git a/dom/media/gtest/TestAudioMixer.cpp b/dom/media/gtest/TestAudioMixer.cpp index e294d21de6..d3a5b1aa91 100644 --- a/dom/media/gtest/TestAudioMixer.cpp +++ b/dom/media/gtest/TestAudioMixer.cpp @@ -85,7 +85,7 @@ TEST(AudioMixer, Test) { int iterations = 2; mozilla::AudioMixer mixer; - mixer.AddCallback(&consumer); + mixer.AddCallback(WrapNotNull(&consumer)); fprintf(stderr, "Test AudioMixer constant buffer length.\n"); @@ -98,7 +98,7 @@ TEST(AudioMixer, Test) { mozilla::AudioMixer mixer; - mixer.AddCallback(&consumer); + mixer.AddCallback(WrapNotNull(&consumer)); fprintf(stderr, "Test AudioMixer variable buffer length.\n"); @@ -136,7 +136,7 @@ TEST(AudioMixer, Test) { mozilla::AudioMixer mixer; - mixer.AddCallback(&consumer); + mixer.AddCallback(WrapNotNull(&consumer)); fprintf(stderr, "Test AudioMixer variable channel count.\n"); @@ -153,7 +153,7 @@ TEST(AudioMixer, Test) { mozilla::AudioMixer mixer; - mixer.AddCallback(&consumer); + mixer.AddCallback(WrapNotNull(&consumer)); fprintf(stderr, "Test AudioMixer variable stream count.\n"); mixer.Mix(a, 2, CHANNEL_LENGTH, AUDIO_RATE); diff --git a/dom/media/gtest/TestAudioTrackEncoder.cpp b/dom/media/gtest/TestAudioTrackEncoder.cpp index 6aa344f664..d6583c51c7 100644 --- a/dom/media/gtest/TestAudioTrackEncoder.cpp +++ b/dom/media/gtest/TestAudioTrackEncoder.cpp @@ -4,33 +4,11 @@ #include "gtest/gtest.h" #include "OpusTrackEncoder.h" -#include "SineWaveGenerator.h" + +#include "AudioGenerator.h" using namespace 
mozilla; -class AudioGenerator { - public: - AudioGenerator(int32_t aChannels, int32_t aSampleRate) - : mGenerator(aSampleRate, 1000), mChannels(aChannels) {} - - void Generate(AudioSegment& aSegment, const int32_t& aSamples) { - RefPtr buffer = - SharedBuffer::Create(aSamples * sizeof(int16_t)); - int16_t* dest = static_cast(buffer->Data()); - mGenerator.generate(dest, aSamples); - AutoTArray channels; - for (int32_t i = 0; i < mChannels; i++) { - channels.AppendElement(dest); - } - aSegment.AppendFrames(buffer.forget(), channels, aSamples, - PRINCIPAL_HANDLE_NONE); - } - - private: - SineWaveGenerator mGenerator; - const int32_t mChannels; -}; - class TestOpusTrackEncoder : public OpusTrackEncoder { public: TestOpusTrackEncoder() : OpusTrackEncoder(90000) {} @@ -222,13 +200,13 @@ TEST(OpusAudioTrackEncoder, FrameEncode) encoder.AppendAudioSegment(std::move(segment)); - EncodedFrameContainer container; - EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); // Verify that encoded data is 5 seconds long. uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } // 44100 as used above gets resampled to 48000 for opus. 
const uint64_t five = 48000 * 5; diff --git a/dom/media/gtest/TestMediaDataEncoder.cpp b/dom/media/gtest/TestMediaDataEncoder.cpp index 9da06a0859..630e791fc4 100644 --- a/dom/media/gtest/TestMediaDataEncoder.cpp +++ b/dom/media/gtest/TestMediaDataEncoder.cpp @@ -25,57 +25,142 @@ } \ } while (0) +#define BLOCK_SIZE 64 +#define WIDTH 640 +#define HEIGHT 480 +#define NUM_FRAMES 150UL +#define FRAME_RATE 30 +#define FRAME_DURATION (1000000 / FRAME_RATE) +#define BIT_RATE (1000 * 1000) // 1Mbps +#define KEYFRAME_INTERVAL FRAME_RATE // 1 keyframe per second + using namespace mozilla; -static gfx::IntSize kImageSize(640, 480); +static gfx::IntSize kImageSize(WIDTH, HEIGHT); class MediaDataEncoderTest : public testing::Test { protected: - void SetUp() override { InitData(kImageSize); } + void SetUp() override { mData.Init(kImageSize); } - void TearDown() override { DeinitData(); } + void TearDown() override { mData.Deinit(); } - layers::PlanarYCbCrData mData; - UniquePtr mBackBuffer; + public: + struct FrameSource final { + layers::PlanarYCbCrData mYUV; + UniquePtr mBuffer; + RefPtr mRecycleBin; + int16_t mColorStep = 4; - private: - void InitData(const gfx::IntSize& aSize) { - mData.mPicSize = aSize; - mData.mYStride = aSize.width; - mData.mYSize = aSize; - mData.mCbCrStride = aSize.width / 2; - mData.mCbCrSize = gfx::IntSize(aSize.width / 2, aSize.height / 2); - size_t bufferSize = mData.mYStride * mData.mYSize.height + - mData.mCbCrStride * mData.mCbCrSize.height + - mData.mCbCrStride * mData.mCbCrSize.height; - mBackBuffer = MakeUnique(bufferSize); - std::fill_n(mBackBuffer.get(), bufferSize, 42); - mData.mYChannel = mBackBuffer.get(); - mData.mCbChannel = mData.mYChannel + mData.mYStride * mData.mYSize.height; - mData.mCrChannel = - mData.mCbChannel + mData.mCbCrStride * mData.mCbCrSize.height; - } + void Init(const gfx::IntSize& aSize) { + mYUV.mPicSize = aSize; + mYUV.mYStride = aSize.width; + mYUV.mYSize = aSize; + mYUV.mCbCrStride = aSize.width / 2; + 
mYUV.mCbCrSize = gfx::IntSize(aSize.width / 2, aSize.height / 2); + size_t bufferSize = mYUV.mYStride * mYUV.mYSize.height + + mYUV.mCbCrStride * mYUV.mCbCrSize.height + + mYUV.mCbCrStride * mYUV.mCbCrSize.height; + mBuffer = MakeUnique(bufferSize); + std::fill_n(mBuffer.get(), bufferSize, 0x7F); + mYUV.mYChannel = mBuffer.get(); + mYUV.mCbChannel = mYUV.mYChannel + mYUV.mYStride * mYUV.mYSize.height; + mYUV.mCrChannel = + mYUV.mCbChannel + mYUV.mCbCrStride * mYUV.mCbCrSize.height; + mRecycleBin = new layers::BufferRecycleBin(); + } - void DeinitData() { mBackBuffer.reset(); } + void Deinit() { + mBuffer.reset(); + mRecycleBin = nullptr; + } + + already_AddRefed GetFrame(const size_t aIndex) { + Draw(aIndex); + RefPtr img = + new layers::RecyclingPlanarYCbCrImage(mRecycleBin); + img->CopyData(mYUV); + RefPtr frame = VideoData::CreateFromImage( + kImageSize, 0, TimeUnit::FromMicroseconds(aIndex * FRAME_DURATION), + TimeUnit::FromMicroseconds(FRAME_DURATION), img, (aIndex & 0xF) == 0, + TimeUnit::FromMicroseconds(aIndex * FRAME_DURATION)); + return frame.forget(); + } + + void DrawChessboard(uint8_t* aAddr, const size_t aWidth, + const size_t aHeight, const size_t aOffset) { + uint8_t pixels[2][BLOCK_SIZE]; + size_t x = aOffset % BLOCK_SIZE; + if ((aOffset / BLOCK_SIZE) & 1) { + x = BLOCK_SIZE - x; + } + for (size_t i = 0; i < x; i++) { + pixels[0][i] = 0x00; + pixels[1][i] = 0xFF; + } + for (size_t i = x; i < BLOCK_SIZE; i++) { + pixels[0][i] = 0xFF; + pixels[1][i] = 0x00; + } + + uint8_t* p = aAddr; + for (size_t row = 0; row < aHeight; row++) { + for (size_t col = 0; col < aWidth; col += BLOCK_SIZE) { + memcpy(p, pixels[((row / BLOCK_SIZE) + (col / BLOCK_SIZE)) % 2], + BLOCK_SIZE); + p += BLOCK_SIZE; + } + } + } + + void Draw(const size_t aIndex) { + DrawChessboard(mYUV.mYChannel, mYUV.mYSize.width, mYUV.mYSize.height, + aIndex << 1); + int16_t color = mYUV.mCbChannel[0] + mColorStep; + if (color > 255 || color < 0) { + mColorStep = -mColorStep; + color = 
mYUV.mCbChannel[0] + mColorStep; + } + + size_t size = (mYUV.mCrChannel - mYUV.mCbChannel); + + std::fill_n(mYUV.mCbChannel, size, static_cast(color)); + std::fill_n(mYUV.mCrChannel, size, 0xFF - static_cast(color)); + } + }; + + public: + FrameSource mData; }; static already_AddRefed CreateH264Encoder( - MediaDataEncoder::Usage aUsage, - MediaDataEncoder::PixelFormat aPixelFormat) { + MediaDataEncoder::Usage aUsage, MediaDataEncoder::PixelFormat aPixelFormat, + const Maybe& aSpecific = + Some(MediaDataEncoder::H264Specific( + KEYFRAME_INTERVAL, + MediaDataEncoder::H264Specific::ProfileLevel::BaselineAutoLevel))) { RefPtr f(new PEMFactory()); if (!f->SupportsMimeType(NS_LITERAL_CSTRING(VIDEO_MP4))) { return nullptr; } - VideoInfo videoInfo(1280, 720); + VideoInfo videoInfo(WIDTH, HEIGHT); videoInfo.mMimeType = NS_LITERAL_CSTRING(VIDEO_MP4); const RefPtr taskQueue( new TaskQueue(GetMediaThreadPool(MediaThreadType::PLAYBACK))); - CreateEncoderParams c(videoInfo /* track info */, aUsage, taskQueue, - aPixelFormat, 30 /* FPS */, - 10 * 1024 * 1024 /* bitrate */); - return f->CreateEncoder(c); + + RefPtr e; + if (aSpecific) { + e = f->CreateEncoder(CreateEncoderParams( + videoInfo /* track info */, aUsage, taskQueue, aPixelFormat, + FRAME_RATE /* FPS */, BIT_RATE /* bitrate */, aSpecific.value())); + } else { + e = f->CreateEncoder(CreateEncoderParams( + videoInfo /* track info */, aUsage, taskQueue, aPixelFormat, + FRAME_RATE /* FPS */, BIT_RATE /* bitrate */)); + } + + return e.forget(); } void WaitForShutdown(RefPtr aEncoder) { @@ -125,6 +210,22 @@ static bool EnsureInit(RefPtr aEncoder) { return succeeded; } +TEST_F(MediaDataEncoderTest, H264InitWithoutSpecific) { + SKIP_IF_NOT_SUPPORTED(VIDEO_MP4); + + RefPtr e = + CreateH264Encoder(MediaDataEncoder::Usage::Realtime, + MediaDataEncoder::PixelFormat::YUV420P, Nothing()); + +#if defined(MOZ_WIDGET_ANDROID) // Android encoder requires I-frame interval + EXPECT_FALSE(EnsureInit(e)); +#else + EXPECT_TRUE(EnsureInit(e)); 
+#endif + + WaitForShutdown(e); +} + TEST_F(MediaDataEncoderTest, H264Init) { SKIP_IF_NOT_SUPPORTED(VIDEO_MP4); @@ -139,17 +240,11 @@ TEST_F(MediaDataEncoderTest, H264Init) { static MediaDataEncoder::EncodedData Encode( const RefPtr aEncoder, const size_t aNumFrames, - const layers::PlanarYCbCrData& aYCbCrData) { + MediaDataEncoderTest::FrameSource& aSource) { MediaDataEncoder::EncodedData output; bool succeeded; for (size_t i = 0; i < aNumFrames; i++) { - RefPtr img = - new layers::RecyclingPlanarYCbCrImage(new layers::BufferRecycleBin()); - img->AdoptData(aYCbCrData); - RefPtr frame = VideoData::CreateFromImage( - kImageSize, 0, TimeUnit::FromMicroseconds(i * 30000), - TimeUnit::FromMicroseconds(30000), img, (i & 0xF) == 0, - TimeUnit::FromMicroseconds(i * 30000)); + RefPtr frame = aSource.GetFrame(i); media::Await( GetMediaThreadPool(MediaThreadType::PLAYBACK), aEncoder->Encode(frame), [&output, &succeeded](MediaDataEncoder::EncodedData encoded) { @@ -164,29 +259,20 @@ static MediaDataEncoder::EncodedData Encode( } size_t pending = 0; - media::Await( - GetMediaThreadPool(MediaThreadType::PLAYBACK), aEncoder->Drain(), - [&pending, &output, &succeeded](MediaDataEncoder::EncodedData encoded) { - pending = encoded.Length(); - output.AppendElements(std::move(encoded)); - succeeded = true; - }, - [&succeeded](MediaResult r) { succeeded = false; }); - EXPECT_TRUE(succeeded); - if (!succeeded) { - return output; - } - - if (pending > 0) { + do { media::Await( GetMediaThreadPool(MediaThreadType::PLAYBACK), aEncoder->Drain(), - [&succeeded](MediaDataEncoder::EncodedData encoded) { - EXPECT_EQ(encoded.Length(), 0UL); + [&pending, &output, &succeeded](MediaDataEncoder::EncodedData encoded) { + pending = encoded.Length(); + output.AppendElements(std::move(encoded)); succeeded = true; }, [&succeeded](MediaResult r) { succeeded = false; }); EXPECT_TRUE(succeeded); - } + if (!succeeded) { + return output; + } + } while (pending > 0); return output; } @@ -214,8 +300,8 @@ 
TEST_F(MediaDataEncoderTest, EncodeMultipleFramesAsAnnexB) { MediaDataEncoder::PixelFormat::YUV420P); EnsureInit(e); - MediaDataEncoder::EncodedData output = Encode(e, 30UL, mData); - EXPECT_EQ(output.Length(), 30UL); + MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData); + EXPECT_EQ(output.Length(), NUM_FRAMES); for (auto frame : output) { EXPECT_TRUE(AnnexB::IsAnnexB(frame)); } @@ -230,12 +316,12 @@ TEST_F(MediaDataEncoderTest, EncodeMultipleFramesAsAVCC) { MediaDataEncoder::Usage::Record, MediaDataEncoder::PixelFormat::YUV420P); EnsureInit(e); - MediaDataEncoder::EncodedData output = Encode(e, 30UL, mData); - EXPECT_EQ(output.Length(), 30UL); + MediaDataEncoder::EncodedData output = Encode(e, NUM_FRAMES, mData); + EXPECT_EQ(output.Length(), NUM_FRAMES); AnnexB::IsAVCC(output[0]); // Only 1st frame has extra data. for (auto frame : output) { EXPECT_FALSE(AnnexB::IsAnnexB(frame)); } WaitForShutdown(e); -} \ No newline at end of file +} diff --git a/dom/media/gtest/TestMuxer.cpp b/dom/media/gtest/TestMuxer.cpp new file mode 100644 index 0000000000..2a4ed16129 --- /dev/null +++ b/dom/media/gtest/TestMuxer.cpp @@ -0,0 +1,212 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this file, + * You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include + +#include "ContainerWriter.h" +#include "EncodedFrame.h" +#include "gtest/gtest.h" +#include "gmock/gmock.h" +#include "Muxer.h" +#include "OpusTrackEncoder.h" +#include "WebMWriter.h" + +using namespace mozilla; +using testing::_; +using testing::ElementsAre; +using testing::Return; +using testing::StaticAssertTypeEq; + +static RefPtr CreateOpusMetadata(int32_t aChannels, + float aSamplingFrequency, + size_t aIdHeaderSize, + size_t aCommentHeaderSize) { + auto opusMetadata = MakeRefPtr(); + opusMetadata->mChannels = aChannels; + opusMetadata->mSamplingFrequency = aSamplingFrequency; + opusMetadata->mIdHeader.SetLength(aIdHeaderSize); + for (size_t i = 0; i < opusMetadata->mIdHeader.Length(); i++) { + opusMetadata->mIdHeader[i] = 0; + } + opusMetadata->mCommentHeader.SetLength(aCommentHeaderSize); + for (size_t i = 0; i < opusMetadata->mCommentHeader.Length(); i++) { + opusMetadata->mCommentHeader[i] = 0; + } + return opusMetadata; +} + +static RefPtr CreateVP8Metadata(int32_t aWidth, + int32_t aHeight) { + auto vp8Metadata = MakeRefPtr(); + vp8Metadata->mWidth = aWidth; + vp8Metadata->mDisplayWidth = aWidth; + vp8Metadata->mHeight = aHeight; + vp8Metadata->mDisplayHeight = aHeight; + return vp8Metadata; +} + +static RefPtr CreateFrame(EncodedFrame::FrameType aType, + uint64_t aTimeUs, uint64_t aDurationUs, + size_t aDataSize) { + auto frame = MakeRefPtr(); + frame->mTime = aTimeUs; + if (aType == EncodedFrame::OPUS_AUDIO_FRAME) { + // Opus duration is in samples, so figure out how many samples will put us + // closest to aDurationUs without going over. 
+ frame->mDuration = UsecsToFrames(aDurationUs, 48000).value(); + } else { + frame->mDuration = aDurationUs; + } + frame->mFrameType = aType; + + nsTArray data; + data.SetLength(aDataSize); + frame->SwapInFrameData(data); + return frame; +} + +namespace testing { +namespace internal { +// This makes the googletest framework treat nsTArray as an std::vector, so all +// the regular Matchers (like ElementsAre) work for it. +template +class StlContainerView> { + public: + typedef GTEST_REMOVE_CONST_(Element) RawElement; + typedef std::vector type; + typedef const type const_reference; + static const_reference ConstReference(const nsTArray& aContainer) { + StaticAssertTypeEq(); + return type(aContainer.begin(), aContainer.end()); + } + static type Copy(const nsTArray& aContainer) { + return type(aContainer.begin(), aContainer.end()); + } +}; +} // namespace internal +} // namespace testing + +class MockContainerWriter : public ContainerWriter { + public: + MOCK_METHOD2(WriteEncodedTrack, + nsresult(const nsTArray>&, uint32_t)); + MOCK_METHOD1(SetMetadata, + nsresult(const nsTArray>&)); + MOCK_METHOD0(IsWritingComplete, bool()); + MOCK_METHOD2(GetContainerData, + nsresult(nsTArray>*, uint32_t)); +}; + +TEST(MuxerTest, AudioOnly) +{ + MockContainerWriter* writer = new MockContainerWriter(); + Muxer muxer(WrapUnique(writer)); + + // Prepare data + + auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16); + auto audioFrame = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48000, 4096); + + // Expectations + + EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta))) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(audioFrame), + ContainerWriter::END_OF_STREAM)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, IsWritingComplete()).Times(0); + + 
// Test + + EXPECT_EQ(muxer.SetMetadata(nsTArray>({opusMeta})), + NS_OK); + muxer.AddEncodedAudioFrame(audioFrame); + muxer.AudioEndOfStream(); + nsTArray> buffers; + EXPECT_EQ(muxer.GetData(&buffers), NS_OK); +} + +TEST(MuxerTest, AudioVideo) +{ + MockContainerWriter* writer = new MockContainerWriter(); + Muxer muxer(WrapUnique(writer)); + + // Prepare data + + auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16); + auto vp8Meta = CreateVP8Metadata(640, 480); + auto audioFrame = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48000, 4096); + auto videoFrame = CreateFrame(EncodedFrame::VP8_I_FRAME, 0, 50000, 65536); + + // Expectations + + EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta))) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(videoFrame, audioFrame), + ContainerWriter::END_OF_STREAM)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, IsWritingComplete()).Times(0); + + // Test + + EXPECT_EQ(muxer.SetMetadata( + nsTArray>({opusMeta, vp8Meta})), + NS_OK); + muxer.AddEncodedAudioFrame(audioFrame); + muxer.AudioEndOfStream(); + muxer.AddEncodedVideoFrame(videoFrame); + muxer.VideoEndOfStream(); + nsTArray> buffers; + EXPECT_EQ(muxer.GetData(&buffers), NS_OK); +} + +TEST(MuxerTest, AudioVideoOutOfOrder) +{ + MockContainerWriter* writer = new MockContainerWriter(); + Muxer muxer(WrapUnique(writer)); + + // Prepare data + + auto opusMeta = CreateOpusMetadata(1, 48000, 16, 16); + auto vp8Meta = CreateVP8Metadata(640, 480); + auto a0 = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 0, 48, 4096); + auto v0 = CreateFrame(EncodedFrame::VP8_I_FRAME, 0, 50, 65536); + auto a48 = CreateFrame(EncodedFrame::OPUS_AUDIO_FRAME, 48, 48, 4096); + auto v50 = CreateFrame(EncodedFrame::VP8_I_FRAME, 50, 50, 65536); + + // Expectations + 
+ EXPECT_CALL(*writer, SetMetadata(ElementsAre(opusMeta, vp8Meta))) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, WriteEncodedTrack(ElementsAre(v0, a0, a48, v50), + ContainerWriter::END_OF_STREAM)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::GET_HEADER)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, GetContainerData(_, ContainerWriter::FLUSH_NEEDED)) + .WillOnce(Return(NS_OK)); + EXPECT_CALL(*writer, IsWritingComplete()).Times(0); + + // Test + + EXPECT_EQ(muxer.SetMetadata( + nsTArray>({opusMeta, vp8Meta})), + NS_OK); + muxer.AddEncodedAudioFrame(a0); + muxer.AddEncodedVideoFrame(v0); + muxer.AddEncodedVideoFrame(v50); + muxer.VideoEndOfStream(); + muxer.AddEncodedAudioFrame(a48); + muxer.AudioEndOfStream(); + nsTArray> buffers; + EXPECT_EQ(muxer.GetData(&buffers), NS_OK); +} diff --git a/dom/media/gtest/TestVideoTrackEncoder.cpp b/dom/media/gtest/TestVideoTrackEncoder.cpp index 965e477bb9..dac50e63c5 100644 --- a/dom/media/gtest/TestVideoTrackEncoder.cpp +++ b/dom/media/gtest/TestVideoTrackEncoder.cpp @@ -143,8 +143,8 @@ TEST(VP8VideoTrackEncoder, FrameEncode) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(images.Length())); // Pull Encoded Data back from encoder. - EncodedFrameContainer container; - EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); } // Test that encoding a single frame gives useful output. @@ -165,21 +165,20 @@ TEST(VP8VideoTrackEncoder, SingleFrameEncode) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Read out encoded data, and verify. 
- const nsTArray>& frames = container.GetEncodedFrames(); const size_t oneElement = 1; ASSERT_EQ(oneElement, frames.Length()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()) + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType) << "We only have one frame, so it should be a keyframe"; const uint64_t halfSecond = PR_USEC_PER_SEC / 2; - EXPECT_EQ(halfSecond, frames[0]->GetDuration()); + EXPECT_EQ(halfSecond, frames[0]->mDuration); } // Test that encoding a couple of identical images gives useful output. @@ -204,15 +203,15 @@ TEST(VP8VideoTrackEncoder, SameFrameEncode) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.5)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify total duration being 1.5s. uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t oneAndAHalf = (PR_USEC_PER_SEC / 2) * 3; EXPECT_EQ(oneAndAHalf, totalDuration); @@ -240,15 +239,15 @@ TEST(VP8VideoTrackEncoder, SkippedFrames) encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(100)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify total duration being 100 * 1ms = 100ms. 
uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t hundredMillis = PR_USEC_PER_SEC / 10; EXPECT_EQ(hundredMillis, totalDuration); @@ -282,15 +281,15 @@ TEST(VP8VideoTrackEncoder, RoundingErrorFramesEncode) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify total duration being 1s. uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t oneSecond = PR_USEC_PER_SEC; EXPECT_EQ(oneSecond, totalDuration); @@ -319,8 +318,8 @@ TEST(VP8VideoTrackEncoder, TimestampFrameEncode) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.3)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); @@ -331,9 +330,9 @@ TEST(VP8VideoTrackEncoder, TimestampFrameEncode) (PR_USEC_PER_SEC / 10)}; uint64_t totalDuration = 0; size_t i = 0; - for (auto& frame : container.GetEncodedFrames()) { - EXPECT_EQ(expectedDurations[i++], frame->GetDuration()); - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + EXPECT_EQ(expectedDurations[i++], frame->mDuration); + totalDuration += frame->mDuration; } const uint64_t pointThree = (PR_USEC_PER_SEC / 10) * 3; EXPECT_EQ(pointThree, totalDuration); @@ -368,8 +367,8 @@ TEST(VP8VideoTrackEncoder, DriftingFrameEncode) encoder.AdvanceCurrentTime(now + 
TimeDuration::FromSeconds(0.3)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); @@ -380,9 +379,9 @@ TEST(VP8VideoTrackEncoder, DriftingFrameEncode) (PR_USEC_PER_SEC / 10) * 2}; uint64_t totalDuration = 0; size_t i = 0; - for (auto& frame : container.GetEncodedFrames()) { - EXPECT_EQ(expectedDurations[i++], frame->GetDuration()); - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + EXPECT_EQ(expectedDurations[i++], frame->mDuration); + totalDuration += frame->mDuration; } const uint64_t pointSix = (PR_USEC_PER_SEC / 10) * 6; EXPECT_EQ(pointSix, totalDuration); @@ -433,18 +432,18 @@ TEST(VP8VideoTrackEncoder, Suspended) encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify that we have two encoded frames and a total duration of 0.2s. const uint64_t two = 2; - EXPECT_EQ(two, container.GetEncodedFrames().Length()); + EXPECT_EQ(two, frames.Length()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t pointTwo = (PR_USEC_PER_SEC / 10) * 2; EXPECT_EQ(pointTwo, totalDuration); @@ -483,18 +482,18 @@ TEST(VP8VideoTrackEncoder, SuspendedUntilEnd) encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify that we have one encoded frames and a total duration of 0.1s. 
const uint64_t one = 1; - EXPECT_EQ(one, container.GetEncodedFrames().Length()); + EXPECT_EQ(one, frames.Length()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t pointOne = PR_USEC_PER_SEC / 10; EXPECT_EQ(pointOne, totalDuration); @@ -522,14 +521,14 @@ TEST(VP8VideoTrackEncoder, AlwaysSuspended) encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify that we have no encoded frames. const uint64_t none = 0; - EXPECT_EQ(none, container.GetEncodedFrames().Length()); + EXPECT_EQ(none, frames.Length()); } // Test that encoding a track that is suspended in the beginning works. @@ -566,18 +565,18 @@ TEST(VP8VideoTrackEncoder, SuspendedBeginning) encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify that we have one encoded frames and a total duration of 0.1s. 
const uint64_t one = 1; - EXPECT_EQ(one, container.GetEncodedFrames().Length()); + EXPECT_EQ(one, frames.Length()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t half = PR_USEC_PER_SEC / 2; EXPECT_EQ(half, totalDuration); @@ -619,18 +618,18 @@ TEST(VP8VideoTrackEncoder, SuspendedOverlap) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); // Verify that we have two encoded frames and a total duration of 0.1s. const uint64_t two = 2; - ASSERT_EQ(two, container.GetEncodedFrames().Length()); + ASSERT_EQ(two, frames.Length()); const uint64_t pointFive = (PR_USEC_PER_SEC / 10) * 5; - EXPECT_EQ(pointFive, container.GetEncodedFrames()[0]->GetDuration()); + EXPECT_EQ(pointFive, frames[0]->mDuration); const uint64_t pointSeven = (PR_USEC_PER_SEC / 10) * 7; - EXPECT_EQ(pointSeven, container.GetEncodedFrames()[1]->GetDuration()); + EXPECT_EQ(pointSeven, frames[1]->mDuration); } // Test that ending a track in the middle of already pushed data works. 
@@ -651,14 +650,14 @@ TEST(VP8VideoTrackEncoder, PrematureEnding) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(0.5)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t half = PR_USEC_PER_SEC / 2; EXPECT_EQ(half, totalDuration); @@ -683,14 +682,14 @@ TEST(VP8VideoTrackEncoder, DelayedStart) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t half = PR_USEC_PER_SEC / 2; EXPECT_EQ(half, totalDuration); @@ -716,14 +715,14 @@ TEST(VP8VideoTrackEncoder, DelayedStartOtherEventOrder) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t half = PR_USEC_PER_SEC / 2; EXPECT_EQ(half, totalDuration); @@ -748,14 +747,14 @@ 
TEST(VP8VideoTrackEncoder, VeryDelayedStart) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(10.5)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t half = PR_USEC_PER_SEC / 2; EXPECT_EQ(half, totalDuration); @@ -785,34 +784,34 @@ TEST(VP8VideoTrackEncoder, LongFramesReEncoded) { encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.5)); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_FALSE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t oneSec = PR_USEC_PER_SEC; EXPECT_EQ(oneSec, totalDuration); - EXPECT_EQ(1U, container.GetEncodedFrames().Length()); + EXPECT_EQ(1U, frames.Length()); } { encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(11)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); uint64_t totalDuration = 0; - for (auto& frame : container.GetEncodedFrames()) { - totalDuration += frame->GetDuration(); + for (auto& frame : frames) { + totalDuration += frame->mDuration; } const uint64_t tenSec = PR_USEC_PER_SEC * 10; EXPECT_EQ(tenSec, totalDuration); - EXPECT_EQ(10U, 
container.GetEncodedFrames().Length()); + EXPECT_EQ(10U, frames.Length()); } } @@ -853,37 +852,36 @@ TEST(VP8VideoTrackEncoder, ShortKeyFrameInterval) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(1.2)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(6UL, frames.Length()); // [0, 400ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 400UL, frames[0]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 400UL, frames[0]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType); // [400ms, 600ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[1]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[1]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType); // [600ms, 750ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[2]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[2]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[2]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[2]->mFrameType); // [750ms, 900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[3]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[3]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->mFrameType); // [900ms, 1100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType); // 
[1100ms, 1200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType); } // Test that an encoding with a defined key frame interval encodes keyframes @@ -923,37 +921,36 @@ TEST(VP8VideoTrackEncoder, LongKeyFrameInterval) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2.2)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(6UL, frames.Length()); // [0, 600ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType); // [600ms, 900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType); // [900ms, 1100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType); // [1100ms, 1900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->mFrameType); // 
[1900ms, 2100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType); // [2100ms, 2200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType); } // Test that an encoding with no defined key frame interval encodes keyframes @@ -991,37 +988,36 @@ TEST(VP8VideoTrackEncoder, DefaultKeyFrameInterval) encoder.AdvanceCurrentTime(now + TimeDuration::FromSeconds(2.2)); encoder.NotifyEndOfStream(); - EncodedFrameContainer container; - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(6UL, frames.Length()); // [0, 600ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 600UL, frames[0]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType); // [600ms, 900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 300UL, frames[1]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[1]->mFrameType); // [900ms, 1100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[2]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType); // 
[1100ms, 1900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[3]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[3]->mFrameType); // [1900ms, 2100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[4]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[4]->mFrameType); // [2100ms, 2200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[5]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType); } // Test that an encoding where the key frame interval is updated dynamically @@ -1031,7 +1027,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); // Set keyframe interval to 100ms. @@ -1080,7 +1076,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges) // Advancing 501ms, so the first bit of the frame starting at 500ms is // included. 
encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(501)); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); { VideoSegment segment; @@ -1106,7 +1102,7 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges) // Advancing 2000ms from 501ms to 2501ms encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(2501)); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); { VideoSegment segment; @@ -1130,68 +1126,67 @@ TEST(VP8VideoTrackEncoder, DynamicKeyFrameIntervalChanges) encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(14UL, frames.Length()); // [0, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[0]->mFrameType); // [100ms, 120ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 20UL, frames[1]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[1]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 20UL, frames[1]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[1]->mFrameType); // [120ms, 130ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 10UL, frames[2]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 10UL, frames[2]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[2]->mFrameType); // [130ms, 200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 70UL, frames[3]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 70UL, 
frames[3]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[3]->mFrameType); // [200ms, 300ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[4]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[4]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[4]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[4]->mFrameType); // [300ms, 500ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[5]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[5]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[5]->mFrameType); // [500ms, 1300ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[6]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[6]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 800UL, frames[6]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[6]->mFrameType); // [1300ms, 1400ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[7]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[7]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[7]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[7]->mFrameType); // [1400ms, 2400ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frames[8]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[8]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 1000UL, frames[8]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[8]->mFrameType); // [2400ms, 2500ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[9]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[9]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[9]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[9]->mFrameType); // [2500ms, 2600ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[10]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[10]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC 
/ 1000 * 100UL, frames[10]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[10]->mFrameType); // [2600ms, 2800ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[11]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[11]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 200UL, frames[11]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[11]->mFrameType); // [2800ms, 2900ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[12]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[12]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[12]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_I_FRAME, frames[12]->mFrameType); // [2900ms, 3000ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[13]->GetDuration()); - EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[13]->GetFrameType()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[13]->mDuration); + EXPECT_EQ(EncodedFrame::VP8_P_FRAME, frames[13]->mFrameType); } // Test that an encoding which is disabled on a frame timestamp encodes @@ -1201,7 +1196,7 @@ TEST(VP8VideoTrackEncoder, DisableOnFrameTime) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); // Pass a frame in at t=0. 
@@ -1226,17 +1221,16 @@ TEST(VP8VideoTrackEncoder, DisableOnFrameTime) encoder.Disable(now + TimeDuration::FromMilliseconds(100)); encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(2UL, frames.Length()); // [0, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration); // [100ms, 200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration); } // Test that an encoding which is disabled between two frame timestamps encodes @@ -1246,7 +1240,7 @@ TEST(VP8VideoTrackEncoder, DisableBetweenFrames) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); // Pass a frame in at t=0. 
@@ -1268,20 +1262,19 @@ TEST(VP8VideoTrackEncoder, DisableBetweenFrames) encoder.Disable(now + TimeDuration::FromMilliseconds(50)); encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(3UL, frames.Length()); // [0, 50ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->mDuration); // [50ms, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration); // [100ms, 200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration); } // Test that an encoding which is enabled on a frame timestamp encodes @@ -1291,7 +1284,7 @@ TEST(VP8VideoTrackEncoder, EnableOnFrameTime) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); // Disable the track at t=0. 
@@ -1318,17 +1311,16 @@ TEST(VP8VideoTrackEncoder, EnableOnFrameTime) encoder.Enable(now + TimeDuration::FromMilliseconds(100)); encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(2UL, frames.Length()); // [0, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration); // [100ms, 200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[1]->mDuration); } // Test that an encoding which is enabled between two frame timestamps encodes @@ -1338,7 +1330,7 @@ TEST(VP8VideoTrackEncoder, EnableBetweenFrames) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); // Disable the track at t=0. 
@@ -1362,20 +1354,19 @@ TEST(VP8VideoTrackEncoder, EnableBetweenFrames) encoder.Enable(now + TimeDuration::FromMilliseconds(50)); encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(200)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(3UL, frames.Length()); // [0, 50ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[0]->mDuration); // [50ms, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration); // [100ms, 200ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration); } // Test that making time go backwards removes any future frames in the encoder. 
@@ -1384,7 +1375,7 @@ TEST(VP8VideoTrackEncoder, BackwardsTimeResets) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); encoder.SetStartOffset(now); @@ -1431,23 +1422,22 @@ TEST(VP8VideoTrackEncoder, BackwardsTimeResets) encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(4UL, frames.Length()); // [0, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration); // [100ms, 150ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[1]->mDuration); // [150ms, 250ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[2]->mDuration); // [250ms, 300ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[3]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[3]->mDuration); } // Test that trying to encode a null image removes any future frames in the @@ -1457,7 +1447,7 @@ TEST(VP8VideoTrackEncoder, NullImageResets) TestVP8TrackEncoder encoder; YUVBufferGenerator generator; generator.Init(mozilla::gfx::IntSize(640, 480)); - EncodedFrameContainer container; + nsTArray> frames; TimeStamp now = TimeStamp::Now(); encoder.SetStartOffset(now); @@ -1504,20 +1494,19 @@ TEST(VP8VideoTrackEncoder, NullImageResets) encoder.AdvanceCurrentTime(now + TimeDuration::FromMilliseconds(300)); encoder.NotifyEndOfStream(); - ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + 
ASSERT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); - const nsTArray>& frames = container.GetEncodedFrames(); ASSERT_EQ(3UL, frames.Length()); // [0, 100ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 100UL, frames[0]->mDuration); // [100ms, 250ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[1]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 150UL, frames[1]->mDuration); // [250ms, 300ms) - EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[2]->GetDuration()); + EXPECT_EQ(PR_USEC_PER_SEC / 1000 * 50UL, frames[2]->mDuration); } // EOS test @@ -1531,8 +1520,8 @@ TEST(VP8VideoTrackEncoder, EncodeComplete) // Pull Encoded Data back from encoder. Since we have sent // EOS to encoder, encoder.GetEncodedTrack should return // NS_OK immidiately. - EncodedFrameContainer container; - EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(container))); + nsTArray> frames; + EXPECT_TRUE(NS_SUCCEEDED(encoder.GetEncodedTrack(frames))); EXPECT_TRUE(encoder.IsEncodingComplete()); } diff --git a/dom/media/gtest/TestWebMWriter.cpp b/dom/media/gtest/TestWebMWriter.cpp index 1c230290a1..1333bfc1ed 100644 --- a/dom/media/gtest/TestWebMWriter.cpp +++ b/dom/media/gtest/TestWebMWriter.cpp @@ -39,28 +39,30 @@ class WebMVP8TrackEncoder : public VP8TrackEncoder { } }; +static void GetOpusMetadata(int aChannels, int aSampleRate, + TrackRate aTrackRate, + nsTArray>& aMeta) { + WebMOpusTrackEncoder opusEncoder(aTrackRate); + EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels, aSampleRate)); + aMeta.AppendElement(opusEncoder.GetMetadata()); +} + +static void GetVP8Metadata(int32_t aWidth, int32_t aHeight, + int32_t aDisplayWidth, int32_t aDisplayHeight, + TrackRate aTrackRate, + nsTArray>& aMeta) { + WebMVP8TrackEncoder vp8Encoder; + EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth, + aDisplayHeight)); + aMeta.AppendElement(vp8Encoder.GetMetadata()); +} + 
const uint64_t FIXED_DURATION = 1000000; const uint32_t FIXED_FRAMESIZE = 500; class TestWebMWriter : public WebMWriter { public: - explicit TestWebMWriter(int aTrackTypes) - : WebMWriter(aTrackTypes), mTimestamp(0) {} - - void SetOpusMetadata(int aChannels, int aSampleRate, TrackRate aTrackRate) { - WebMOpusTrackEncoder opusEncoder(aTrackRate); - EXPECT_TRUE(opusEncoder.TestOpusCreation(aChannels, aSampleRate)); - RefPtr opusMeta = opusEncoder.GetMetadata(); - SetMetadata(opusMeta); - } - void SetVP8Metadata(int32_t aWidth, int32_t aHeight, int32_t aDisplayWidth, - int32_t aDisplayHeight, TrackRate aTrackRate) { - WebMVP8TrackEncoder vp8Encoder; - EXPECT_TRUE(vp8Encoder.TestVP8Creation(aWidth, aHeight, aDisplayWidth, - aDisplayHeight)); - RefPtr vp8Meta = vp8Encoder.GetMetadata(); - SetMetadata(vp8Meta); - } + TestWebMWriter() : WebMWriter(), mTimestamp(0) {} // When we append an I-Frame into WebM muxer, the muxer will treat previous // data as "a cluster". @@ -68,22 +70,22 @@ class TestWebMWriter : public WebMWriter { // previous cluster so that we can retrieve data by |GetContainerData|. void AppendDummyFrame(EncodedFrame::FrameType aFrameType, uint64_t aDuration) { - EncodedFrameContainer encodedVideoData; + nsTArray> encodedVideoData; nsTArray frameData; RefPtr videoData = new EncodedFrame(); // Create dummy frame data. frameData.SetLength(FIXED_FRAMESIZE); - videoData->SetFrameType(aFrameType); - videoData->SetTimeStamp(mTimestamp); - videoData->SetDuration(aDuration); + videoData->mFrameType = aFrameType; + videoData->mTime = mTimestamp; + videoData->mDuration = aDuration; videoData->SwapInFrameData(frameData); - encodedVideoData.AppendEncodedFrame(videoData); + encodedVideoData.AppendElement(videoData); WriteEncodedTrack(encodedVideoData, 0); mTimestamp += aDuration; } bool HaveValidCluster() { - nsTArray > encodedBuf; + nsTArray> encodedBuf; GetContainerData(&encodedBuf, 0); return (encodedBuf.Length() > 0) ? 
true : false; } @@ -95,35 +97,32 @@ class TestWebMWriter : public WebMWriter { TEST(WebMWriter, Metadata) { - TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK | - ContainerWriter::CREATE_VIDEO_TRACK); + TestWebMWriter writer; // The output should be empty since we didn't set any metadata in writer. - nsTArray > encodedBuf; + nsTArray> encodedBuf; writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER); EXPECT_TRUE(encodedBuf.Length() == 0); writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED); EXPECT_TRUE(encodedBuf.Length() == 0); - // Set opus metadata. + nsTArray> meta; + + // Get opus metadata. int channel = 1; int sampleRate = 44100; TrackRate aTrackRate = 90000; - writer.SetOpusMetadata(channel, sampleRate, aTrackRate); + GetOpusMetadata(channel, sampleRate, aTrackRate, meta); - // No output data since we didn't set both audio/video - // metadata in writer. - writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER); - EXPECT_TRUE(encodedBuf.Length() == 0); - writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED); - EXPECT_TRUE(encodedBuf.Length() == 0); - - // Set vp8 metadata + // Get vp8 metadata int32_t width = 640; int32_t height = 480; int32_t displayWidth = 640; int32_t displayHeight = 480; - writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate); + GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta); + + // Set metadata + writer.SetMetadata(meta); writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER); EXPECT_TRUE(encodedBuf.Length() > 0); @@ -131,21 +130,22 @@ TEST(WebMWriter, Metadata) TEST(WebMWriter, Cluster) { - TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK | - ContainerWriter::CREATE_VIDEO_TRACK); - // Set opus metadata. + TestWebMWriter writer; + nsTArray> meta; + // Get opus metadata. 
int channel = 1; int sampleRate = 48000; TrackRate aTrackRate = 90000; - writer.SetOpusMetadata(channel, sampleRate, aTrackRate); - // Set vp8 metadata + GetOpusMetadata(channel, sampleRate, aTrackRate, meta); + // Get vp8 metadata int32_t width = 320; int32_t height = 240; int32_t displayWidth = 320; int32_t displayHeight = 240; - writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate); + GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta); + writer.SetMetadata(meta); - nsTArray > encodedBuf; + nsTArray> encodedBuf; writer.GetContainerData(&encodedBuf, ContainerWriter::GET_HEADER); EXPECT_TRUE(encodedBuf.Length() > 0); encodedBuf.Clear(); @@ -173,19 +173,20 @@ TEST(WebMWriter, Cluster) TEST(WebMWriter, FLUSH_NEEDED) { - TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK | - ContainerWriter::CREATE_VIDEO_TRACK); - // Set opus metadata. + TestWebMWriter writer; + nsTArray> meta; + // Get opus metadata. int channel = 2; int sampleRate = 44100; TrackRate aTrackRate = 100000; - writer.SetOpusMetadata(channel, sampleRate, aTrackRate); - // Set vp8 metadata + GetOpusMetadata(channel, sampleRate, aTrackRate, meta); + // Get vp8 metadata int32_t width = 176; int32_t height = 352; int32_t displayWidth = 176; int32_t displayHeight = 352; - writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate); + GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta); + writer.SetMetadata(meta); // write the first I-Frame. 
writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION); @@ -198,7 +199,7 @@ TEST(WebMWriter, FLUSH_NEEDED) // retrieved EXPECT_FALSE(writer.HaveValidCluster()); - nsTArray > encodedBuf; + nsTArray> encodedBuf; // Have data because the flag ContainerWriter::FLUSH_NEEDED writer.GetContainerData(&encodedBuf, ContainerWriter::FLUSH_NEEDED); EXPECT_TRUE(encodedBuf.Length() > 0); @@ -293,19 +294,20 @@ static int64_t webm_tell(void* aUserData) { TEST(WebMWriter, bug970774_aspect_ratio) { - TestWebMWriter writer(ContainerWriter::CREATE_AUDIO_TRACK | - ContainerWriter::CREATE_VIDEO_TRACK); - // Set opus metadata. + TestWebMWriter writer; + nsTArray> meta; + // Get opus metadata. int channel = 1; int sampleRate = 44100; TrackRate aTrackRate = 90000; - writer.SetOpusMetadata(channel, sampleRate, aTrackRate); + GetOpusMetadata(channel, sampleRate, aTrackRate, meta); // Set vp8 metadata int32_t width = 640; int32_t height = 480; int32_t displayWidth = 1280; int32_t displayHeight = 960; - writer.SetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate); + GetVP8Metadata(width, height, displayWidth, displayHeight, aTrackRate, meta); + writer.SetMetadata(meta); // write the first I-Frame. writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION); @@ -314,7 +316,7 @@ TEST(WebMWriter, bug970774_aspect_ratio) writer.AppendDummyFrame(EncodedFrame::VP8_I_FRAME, FIXED_DURATION); // Get the metadata and the first cluster. - nsTArray > encodedBuf; + nsTArray> encodedBuf; writer.GetContainerData(&encodedBuf, 0); // Flatten the encodedBuf. 
WebMioData ioData; diff --git a/dom/media/gtest/moz.build b/dom/media/gtest/moz.build index 8a019e27b9..5e997e720a 100644 --- a/dom/media/gtest/moz.build +++ b/dom/media/gtest/moz.build @@ -4,6 +4,7 @@ include('/media/webrtc/webrtc.mozbuild') +DEFINES['ENABLE_SET_CUBEB_BACKEND'] = True LOCAL_INCLUDES += [ '/media/webrtc/signaling/src/common', @@ -12,6 +13,7 @@ LOCAL_INCLUDES += [ ] UNIFIED_SOURCES += [ + 'AudioGenerator.cpp', 'MockMediaResource.cpp', 'TestAudioBuffers.cpp', 'TestAudioCallbackDriver.cpp', @@ -35,6 +37,7 @@ UNIFIED_SOURCES += [ 'TestMediaSpan.cpp', 'TestMP3Demuxer.cpp', 'TestMP4Demuxer.cpp', + 'TestMuxer.cpp', 'TestOpusParser.cpp', 'TestRust.cpp', 'TestTimeUnit.cpp', diff --git a/dom/media/imagecapture/CaptureTask.cpp b/dom/media/imagecapture/CaptureTask.cpp index 8ee73bb2cd..65f98a4643 100644 --- a/dom/media/imagecapture/CaptureTask.cpp +++ b/dom/media/imagecapture/CaptureTask.cpp @@ -34,19 +34,23 @@ CaptureTask::CaptureTask(dom::ImageCapture* aImageCapture) mImageGrabbedOrTrackEnd(false), mPrincipalChanged(false) {} -nsresult CaptureTask::TaskComplete(already_AddRefed aBlob, +nsresult CaptureTask::TaskComplete(already_AddRefed aBlobImpl, nsresult aRv) { MOZ_ASSERT(NS_IsMainThread()); DetachTrack(); nsresult rv; - RefPtr blob(aBlob); + RefPtr blobImpl(aBlobImpl); // We have to set the parent because the blob has been generated with a valid // one. 
- if (blob) { - blob = dom::Blob::Create(mImageCapture->GetParentObject(), blob->Impl()); + RefPtr blob; + if (blobImpl) { + blob = dom::Blob::Create(mImageCapture->GetOwnerGlobal(), blobImpl); + if (NS_WARN_IF(!blob)) { + return NS_ERROR_FAILURE; + } } if (mPrincipalChanged) { @@ -102,9 +106,10 @@ void CaptureTask::NotifyRealtimeTrackData(MediaTrackGraph* aGraph, public: explicit EncodeComplete(CaptureTask* aTask) : mTask(aTask) {} - nsresult ReceiveBlob(already_AddRefed aBlob) override { - RefPtr blob(aBlob); - mTask->TaskComplete(blob.forget(), NS_OK); + nsresult ReceiveBlobImpl( + already_AddRefed aBlobImpl) override { + RefPtr blobImpl(aBlobImpl); + mTask->TaskComplete(blobImpl.forget(), NS_OK); mTask = nullptr; return NS_OK; } diff --git a/dom/media/imagecapture/CaptureTask.h b/dom/media/imagecapture/CaptureTask.h index 701d6a722b..0831dfe8ee 100644 --- a/dom/media/imagecapture/CaptureTask.h +++ b/dom/media/imagecapture/CaptureTask.h @@ -12,7 +12,7 @@ namespace mozilla { namespace dom { -class Blob; +class BlobImpl; class ImageCapture; class MediaStreamTrack; } // namespace dom @@ -46,7 +46,8 @@ class CaptureTask : public DirectMediaTrackListener, // // Note: // this function should be called on main thread. - nsresult TaskComplete(already_AddRefed aBlob, nsresult aRv); + nsresult TaskComplete(already_AddRefed aBlobImpl, + nsresult aRv); // Add listeners into MediaStreamTrack and PrincipalChangeObserver. // It should be on main thread only. 
diff --git a/dom/media/mediasink/DecodedStream.cpp b/dom/media/mediasink/DecodedStream.cpp index 60609a9bd3..c3f7cc55f5 100644 --- a/dom/media/mediasink/DecodedStream.cpp +++ b/dom/media/mediasink/DecodedStream.cpp @@ -491,6 +491,9 @@ nsresult DecodedStream::Start(const TimeUnit& aStartTime, mVideoEndedPromise = mData->mVideoEndedPromise; mOutputListener = mData->OnOutput().Connect(mOwnerThread, this, &DecodedStream::NotifyOutput); + if (mData->mAudioTrack) { + mData->mAudioTrack->SetVolume(static_cast(mVolume)); + } SendData(); } return NS_OK; @@ -559,6 +562,9 @@ void DecodedStream::SetPlaying(bool aPlaying) { void DecodedStream::SetVolume(double aVolume) { AssertOwnerThread(); mVolume = aVolume; + if (mData && mData->mAudioTrack) { + mData->mAudioTrack->SetVolume(static_cast(aVolume)); + } } void DecodedStream::SetPlaybackRate(double aPlaybackRate) { @@ -623,8 +629,7 @@ static void SendStreamAudio(DecodedStreamData* aStream, aStream->mNextAudioTime = audio->GetEndTime(); } -void DecodedStream::SendAudio(double aVolume, - const PrincipalHandle& aPrincipalHandle) { +void DecodedStream::SendAudio(const PrincipalHandle& aPrincipalHandle) { AssertOwnerThread(); if (!mInfo.HasAudio()) { @@ -650,8 +655,6 @@ void DecodedStream::SendAudio(double aVolume, aPrincipalHandle); } - output.ApplyVolume(aVolume); - // |mNextAudioTime| is updated as we process each audio sample in // SendStreamAudio(). 
if (output.GetDuration() > 0) { @@ -896,7 +899,7 @@ void DecodedStream::SendData() { } LOG_DS(LogLevel::Verbose, "SendData()"); - SendAudio(mVolume, mPrincipalHandle); + SendAudio(mPrincipalHandle); SendVideo(mPrincipalHandle); } diff --git a/dom/media/mediasink/DecodedStream.h b/dom/media/mediasink/DecodedStream.h index 6c7f74e66d..e04042a123 100644 --- a/dom/media/mediasink/DecodedStream.h +++ b/dom/media/mediasink/DecodedStream.h @@ -67,7 +67,7 @@ class DecodedStream : public MediaSink { private: void DestroyData(UniquePtr&& aData); - void SendAudio(double aVolume, const PrincipalHandle& aPrincipalHandle); + void SendAudio(const PrincipalHandle& aPrincipalHandle); void SendVideo(const PrincipalHandle& aPrincipalHandle); void ResetAudio(); void ResetVideo(const PrincipalHandle& aPrincipalHandle); diff --git a/dom/media/mediasource/test/mochitest.ini b/dom/media/mediasource/test/mochitest.ini index 9a495dd93f..5c7b16799c 100644 --- a/dom/media/mediasource/test/mochitest.ini +++ b/dom/media/mediasource/test/mochitest.ini @@ -56,6 +56,8 @@ support-files = skip-if = toolkit == 'android' # Not supported on android [test_AudioChange_mp4.html] skip-if = toolkit == 'android' || (os == "win" && processor == "aarch64") # Not supported on android, aarch64 due to 1538331 +[test_AudioChange_mp4_WebAudio.html] +skip-if = toolkit == 'android' || (os == "win" && processor == "aarch64") # Not supported on android, aarch64 due to 1538331 [test_AutoRevocation.html] tags = firstpartyisolation [test_BufferedSeek.html] diff --git a/dom/media/mediasource/test/test_AudioChange_mp4_WebAudio.html b/dom/media/mediasource/test/test_AudioChange_mp4_WebAudio.html new file mode 100644 index 0000000000..c76342f793 --- /dev/null +++ b/dom/media/mediasource/test/test_AudioChange_mp4_WebAudio.html @@ -0,0 +1,55 @@ + + + + MSE: basic functionality + + + + + +
+
+
+ + diff --git a/dom/media/ogg/OggCodecState.cpp b/dom/media/ogg/OggCodecState.cpp index a5a22895e1..e576506eb4 100644 --- a/dom/media/ogg/OggCodecState.cpp +++ b/dom/media/ogg/OggCodecState.cpp @@ -1673,4 +1673,6 @@ bool SkeletonState::DecodeHeader(OggPacketPtr aPacket) { return true; } +#undef LOG + } // namespace mozilla diff --git a/dom/media/ogg/OggDemuxer.cpp b/dom/media/ogg/OggDemuxer.cpp index 0554da3480..c7586724af 100644 --- a/dom/media/ogg/OggDemuxer.cpp +++ b/dom/media/ogg/OggDemuxer.cpp @@ -1887,5 +1887,5 @@ nsresult OggDemuxer::SeekBisection(TrackInfo::TrackType aType, int64_t aTarget, } #undef OGG_DEBUG -#undef SEEK_DEBUG +#undef SEEK_LOG } // namespace mozilla diff --git a/dom/media/ogg/OggWriter.cpp b/dom/media/ogg/OggWriter.cpp index 826099460b..7a7b7c5a43 100644 --- a/dom/media/ogg/OggWriter.cpp +++ b/dom/media/ogg/OggWriter.cpp @@ -5,7 +5,6 @@ #include "prtime.h" #include "GeckoProfiler.h" -#undef LOG #define LOG(args, ...) namespace mozilla { @@ -45,22 +44,20 @@ nsresult OggWriter::Init() { return (rc == 0) ? NS_OK : NS_ERROR_NOT_INITIALIZED; } -nsresult OggWriter::WriteEncodedTrack(const EncodedFrameContainer& aData, - uint32_t aFlags) { +nsresult OggWriter::WriteEncodedTrack( + const nsTArray>& aData, uint32_t aFlags) { AUTO_PROFILER_LABEL("OggWriter::WriteEncodedTrack", OTHER); - uint32_t len = aData.GetEncodedFrames().Length(); + uint32_t len = aData.Length(); for (uint32_t i = 0; i < len; i++) { - if (aData.GetEncodedFrames()[i]->GetFrameType() != - EncodedFrame::OPUS_AUDIO_FRAME) { + if (aData[i]->mFrameType != EncodedFrame::OPUS_AUDIO_FRAME) { LOG("[OggWriter] wrong encoded data type!"); return NS_ERROR_FAILURE; } // only pass END_OF_STREAM on the last frame! nsresult rv = WriteEncodedData( - aData.GetEncodedFrames()[i]->GetFrameData(), - aData.GetEncodedFrames()[i]->GetDuration(), + aData[i]->GetFrameData(), aData[i]->mDuration, i < len - 1 ? 
(aFlags & ~ContainerWriter::END_OF_STREAM) : aFlags); if (NS_FAILED(rv)) { LOG("%p Failed to WriteEncodedTrack!", this); @@ -110,7 +107,7 @@ nsresult OggWriter::WriteEncodedData(const nsTArray& aBuffer, return NS_OK; } -void OggWriter::ProduceOggPage(nsTArray >* aOutputBufs) { +void OggWriter::ProduceOggPage(nsTArray>* aOutputBufs) { aOutputBufs->AppendElement(); aOutputBufs->LastElement().SetLength(mOggPage.header_len + mOggPage.body_len); memcpy(aOutputBufs->LastElement().Elements(), mOggPage.header, @@ -119,7 +116,7 @@ void OggWriter::ProduceOggPage(nsTArray >* aOutputBufs) { mOggPage.body, mOggPage.body_len); } -nsresult OggWriter::GetContainerData(nsTArray >* aOutputBufs, +nsresult OggWriter::GetContainerData(nsTArray>* aOutputBufs, uint32_t aFlags) { int rc = -1; AUTO_PROFILER_LABEL("OggWriter::GetContainerData", OTHER); @@ -143,12 +140,13 @@ nsresult OggWriter::GetContainerData(nsTArray >* aOutputBufs, rc = ogg_stream_flush(&mOggStreamState, &mOggPage); NS_ENSURE_TRUE(rc > 0, NS_ERROR_FAILURE); - ProduceOggPage(aOutputBufs); - return NS_OK; - // Force generate a page even if the amount of packet data is not enough. // Usually do so after a header packet. - } else if (aFlags & ContainerWriter::FLUSH_NEEDED) { + + ProduceOggPage(aOutputBufs); + } + + if (aFlags & ContainerWriter::FLUSH_NEEDED) { // rc = 0 means no packet to put into a page, or an internal error. rc = ogg_stream_flush(&mOggStreamState, &mOggPage); } else { @@ -163,20 +161,25 @@ nsresult OggWriter::GetContainerData(nsTArray >* aOutputBufs, if (aFlags & ContainerWriter::FLUSH_NEEDED) { mIsWritingComplete = true; } - return (rc > 0) ? NS_OK : NS_ERROR_FAILURE; + // We always return NS_OK here since it's OK to call this without having + // enough data to fill a page. It's the more common case compared to internal + // errors, and we cannot distinguish the two. 
+ return NS_OK; } -nsresult OggWriter::SetMetadata(TrackMetadataBase* aMetadata) { - MOZ_ASSERT(aMetadata); +nsresult OggWriter::SetMetadata( + const nsTArray>& aMetadata) { + MOZ_ASSERT(aMetadata.Length() == 1); + MOZ_ASSERT(aMetadata[0]); AUTO_PROFILER_LABEL("OggWriter::SetMetadata", OTHER); - if (aMetadata->GetKind() != TrackMetadataBase::METADATA_OPUS) { + if (aMetadata[0]->GetKind() != TrackMetadataBase::METADATA_OPUS) { LOG("wrong meta data type!"); return NS_ERROR_FAILURE; } // Validate each field of METADATA - mMetadata = static_cast(aMetadata); + mMetadata = static_cast(aMetadata[0].get()); if (mMetadata->mIdHeader.Length() == 0) { LOG("miss mIdHeader!"); return NS_ERROR_FAILURE; @@ -190,3 +193,5 @@ nsresult OggWriter::SetMetadata(TrackMetadataBase* aMetadata) { } } // namespace mozilla + +#undef LOG diff --git a/dom/media/ogg/OggWriter.h b/dom/media/ogg/OggWriter.h index 89c81f529f..fea7b23e9c 100644 --- a/dom/media/ogg/OggWriter.h +++ b/dom/media/ogg/OggWriter.h @@ -22,14 +22,17 @@ class OggWriter : public ContainerWriter { OggWriter(); ~OggWriter(); - nsresult WriteEncodedTrack(const EncodedFrameContainer& aData, + // Write frames into the ogg container. aFlags should be set to END_OF_STREAM + // for the final set of frames. + nsresult WriteEncodedTrack(const nsTArray>& aData, uint32_t aFlags = 0) override; - nsresult GetContainerData(nsTArray >* aOutputBufs, + nsresult GetContainerData(nsTArray>* aOutputBufs, uint32_t aFlags = 0) override; // Check metadata type integrity and reject unacceptable track encoder. 
- nsresult SetMetadata(TrackMetadataBase* aMetadata) override; + nsresult SetMetadata( + const nsTArray>& aMetadata) override; private: nsresult Init(); @@ -37,7 +40,7 @@ class OggWriter : public ContainerWriter { nsresult WriteEncodedData(const nsTArray& aBuffer, int aDuration, uint32_t aFlags = 0); - void ProduceOggPage(nsTArray >* aOutputBufs); + void ProduceOggPage(nsTArray>* aOutputBufs); // Store the Medatata from track encoder RefPtr mMetadata; diff --git a/dom/media/ogg/OpusParser.cpp b/dom/media/ogg/OpusParser.cpp index ea789d9e8c..3ea3200b8e 100644 --- a/dom/media/ogg/OpusParser.cpp +++ b/dom/media/ogg/OpusParser.cpp @@ -210,4 +210,6 @@ bool OpusParser::IsValidMapping2ChannelsCount(uint8_t aChannels) { return val == valInt || valInt * valInt + 2 == aChannels; } +#undef OPUS_LOG + } // namespace mozilla diff --git a/dom/media/platforms/PEMFactory.cpp b/dom/media/platforms/PEMFactory.cpp index 506a5c34dc..a478c75619 100644 --- a/dom/media/platforms/PEMFactory.cpp +++ b/dom/media/platforms/PEMFactory.cpp @@ -7,14 +7,22 @@ #ifdef MOZ_APPLEMEDIA # include "AppleEncoderModule.h" #endif +#ifdef MOZ_WIDGET_ANDROID +# include "AndroidEncoderModule.h" +#endif namespace mozilla { +LazyLogModule sPEMLog("PlatformEncoderModule"); + PEMFactory::PEMFactory() { #ifdef MOZ_APPLEMEDIA RefPtr m(new AppleEncoderModule()); mModules.AppendElement(m); #endif +#ifdef MOZ_WIDGET_ANDROID + mModules.AppendElement(new AndroidEncoderModule()); +#endif } bool PEMFactory::SupportsMimeType(const nsACString& aMimeType) const { diff --git a/dom/media/platforms/PlatformEncoderModule.h b/dom/media/platforms/PlatformEncoderModule.h index 3b36c81569..038cb31f1d 100644 --- a/dom/media/platforms/PlatformEncoderModule.h +++ b/dom/media/platforms/PlatformEncoderModule.h @@ -21,8 +21,6 @@ namespace mozilla { class MediaDataEncoder; struct CreateEncoderParams; -static LazyLogModule sPEMLog("PlatformEncoderModule"); - class PlatformEncoderModule { public: 
NS_INLINE_DECL_THREADSAFE_REFCOUNTING(PlatformEncoderModule) @@ -202,6 +200,9 @@ class MediaDataEncoder { }; virtual ~MediaDataEncoder() {} + + public: + using H264Config = VideoConfig; }; struct MOZ_STACK_CLASS CreateEncoderParams final { @@ -248,6 +249,20 @@ struct MOZ_STACK_CLASS CreateEncoderParams final { Set(std::forward(aCodecSpecific)...); } + const MediaDataEncoder::H264Config ToH264Config() const { + const VideoInfo* info = mConfig.GetAsVideoInfo(); + MOZ_ASSERT(info); + + auto config = MediaDataEncoder::H264Config( + MediaDataEncoder::CodecType::H264, mUsage, info->mImage, mPixelFormat, + mFramerate, mBitrate); + if (mCodecSpecific) { + config.SetCodecSpecific(mCodecSpecific.ref().mH264); + } + + return config; + } + const TrackInfo& mConfig; const MediaDataEncoder::Usage mUsage; const RefPtr mTaskQueue; diff --git a/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp b/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp index 028d950328..cf61941827 100644 --- a/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp +++ b/dom/media/platforms/agnostic/bytestreams/AnnexB.cpp @@ -255,7 +255,8 @@ static Result ParseNALUnits(ByteWriter& aBw, return Ok(); } -bool AnnexB::ConvertSampleToAVCC(mozilla::MediaRawData* aSample) { +bool AnnexB::ConvertSampleToAVCC(mozilla::MediaRawData* aSample, + const RefPtr& aAVCCHeader) { if (IsAVCC(aSample)) { return ConvertSampleTo4BytesAVCC(aSample).isOk(); } @@ -275,8 +276,14 @@ bool AnnexB::ConvertSampleToAVCC(mozilla::MediaRawData* aSample) { if (!samplewriter->Replace(nalu.Elements(), nalu.Length())) { return false; } + + if (aAVCCHeader) { + aSample->mExtraData = aAVCCHeader; + return true; + } + // Create the AVCC header. 
- RefPtr extradata = new mozilla::MediaByteBuffer; + auto extradata = MakeRefPtr(); static const uint8_t kFakeExtraData[] = { 1 /* version */, 0x64 /* profile (High) */, @@ -289,7 +296,7 @@ bool AnnexB::ConvertSampleToAVCC(mozilla::MediaRawData* aSample) { if (!extradata->AppendElements(kFakeExtraData, ArrayLength(kFakeExtraData))) { return false; } - aSample->mExtraData = extradata; + aSample->mExtraData = std::move(extradata); return true; } diff --git a/dom/media/platforms/agnostic/bytestreams/AnnexB.h b/dom/media/platforms/agnostic/bytestreams/AnnexB.h index 23f5e3f496..bb5e24baa1 100644 --- a/dom/media/platforms/agnostic/bytestreams/AnnexB.h +++ b/dom/media/platforms/agnostic/bytestreams/AnnexB.h @@ -34,7 +34,9 @@ class AnnexB { mozilla::MediaRawData* aSample, bool aAddSPS = true); // Convert a sample from Annex B to AVCC. // an AVCC extradata must not be set. - static bool ConvertSampleToAVCC(mozilla::MediaRawData* aSample); + static bool ConvertSampleToAVCC( + mozilla::MediaRawData* aSample, + const RefPtr& aAVCCHeader = nullptr); static mozilla::Result ConvertSampleTo4BytesAVCC( mozilla::MediaRawData* aSample); diff --git a/dom/media/platforms/agnostic/bytestreams/H264.cpp b/dom/media/platforms/agnostic/bytestreams/H264.cpp index a6e258876d..1d0a0fbd6a 100644 --- a/dom/media/platforms/agnostic/bytestreams/H264.cpp +++ b/dom/media/platforms/agnostic/bytestreams/H264.cpp @@ -1292,29 +1292,40 @@ bool H264::DecodeRecoverySEI(const mozilla::MediaByteBuffer* aSEI, RefPtr encodedSPS = EncodeNALUnit(sps->Elements(), sps->Length()); extraData->Clear(); - extraData->AppendElement(1); - extraData->AppendElement(aProfile); - extraData->AppendElement(aConstraints); - extraData->AppendElement(aLevel); - extraData->AppendElement(3); // nalLENSize-1 - extraData->AppendElement(1); // numPPS - uint8_t c[2]; - mozilla::BigEndian::writeUint16(&c[0], encodedSPS->Length() + 1); - extraData->AppendElements(c, 2); - extraData->AppendElement((0x00 << 7) | (0x3 << 5) | 
H264_NAL_SPS); - extraData->AppendElements(*encodedSPS); const uint8_t PPS[] = {0xeb, 0xef, 0x20}; - extraData->AppendElement(1); // numPPS - mozilla::BigEndian::writeUint16(&c[0], sizeof(PPS) + 1); - extraData->AppendElements(c, 2); - extraData->AppendElement((0x00 << 7) | (0x3 << 5) | H264_NAL_PPS); - extraData->AppendElements(PPS, sizeof(PPS)); + WriteExtraData( + extraData, aProfile, aConstraints, aLevel, + MakeSpan(encodedSPS->Elements(), encodedSPS->Length()), + MakeSpan(PPS, sizeof(PPS))); return extraData.forget(); } +void H264::WriteExtraData(MediaByteBuffer* aDestExtraData, + const uint8_t aProfile, const uint8_t aConstraints, + const uint8_t aLevel, const Span aSPS, + const Span aPPS) { + aDestExtraData->AppendElement(1); + aDestExtraData->AppendElement(aProfile); + aDestExtraData->AppendElement(aConstraints); + aDestExtraData->AppendElement(aLevel); + aDestExtraData->AppendElement(3); // nalLENSize-1 + aDestExtraData->AppendElement(1); // numPPS + uint8_t c[2]; + mozilla::BigEndian::writeUint16(&c[0], aSPS.Length() + 1); + aDestExtraData->AppendElements(c, 2); + aDestExtraData->AppendElement((0x00 << 7) | (0x3 << 5) | H264_NAL_SPS); + aDestExtraData->AppendElements(aSPS.Elements(), aSPS.Length()); + + aDestExtraData->AppendElement(1); // numPPS + mozilla::BigEndian::writeUint16(&c[0], aPPS.Length() + 1); + aDestExtraData->AppendElements(c, 2); + aDestExtraData->AppendElement((0x00 << 7) | (0x3 << 5) | H264_NAL_PPS); + aDestExtraData->AppendElements(aPPS.Elements(), aPPS.Length()); +} + #undef READUE #undef READSE diff --git a/dom/media/platforms/agnostic/bytestreams/H264.h b/dom/media/platforms/agnostic/bytestreams/H264.h index 6a771ce2df..8ecf966304 100644 --- a/dom/media/platforms/agnostic/bytestreams/H264.h +++ b/dom/media/platforms/agnostic/bytestreams/H264.h @@ -460,6 +460,8 @@ class H264 { static bool DecodeSPSFromExtraData(const mozilla::MediaByteBuffer* aExtraData, SPSData& aDest); + /* Decode SPS NAL RBSP and fill SPSData structure */ + static 
bool DecodeSPS(const mozilla::MediaByteBuffer* aSPS, SPSData& aDest); // If the given aExtraData is valid, return the aExtraData.max_num_ref_frames // clamped to be in the range of [4, 16]; otherwise return 4. @@ -480,6 +482,11 @@ class H264 { static already_AddRefed CreateExtraData( uint8_t aProfile, uint8_t aConstraints, uint8_t aLevel, const gfx::IntSize& aSize); + static void WriteExtraData(mozilla::MediaByteBuffer* aDestExtraData, + const uint8_t aProfile, const uint8_t aConstraints, + const uint8_t aLevel, + const Span aSPS, + const Span aPPS); private: friend class SPSNAL; @@ -491,8 +498,6 @@ class H264 { const uint8_t* aNAL, size_t aLength); static already_AddRefed EncodeNALUnit( const uint8_t* aNAL, size_t aLength); - /* Decode SPS NAL RBSP and fill SPSData structure */ - static bool DecodeSPS(const mozilla::MediaByteBuffer* aSPS, SPSData& aDest); static bool vui_parameters(mozilla::BitReader& aBr, SPSData& aDest); // Read HRD parameters, all data is ignored. static void hrd_parameters(mozilla::BitReader& aBr); diff --git a/dom/media/platforms/android/AndroidDataEncoder.cpp b/dom/media/platforms/android/AndroidDataEncoder.cpp new file mode 100644 index 0000000000..a8b6f64113 --- /dev/null +++ b/dom/media/platforms/android/AndroidDataEncoder.cpp @@ -0,0 +1,459 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "AndroidDataEncoder.h" + +#include "AnnexB.h" +#include "MediaData.h" +#include "MediaInfo.h" +#include "SimpleMap.h" + +#include "ImageContainer.h" +#include "mozilla/Logging.h" + +#include "nsMimeTypes.h" + +#include "libyuv.h" + +using namespace mozilla::java; +using namespace mozilla::java::sdk; + +namespace mozilla { +using media::TimeUnit; + +extern LazyLogModule sPEMLog; +#define AND_ENC_LOG(arg, ...) 
\ + MOZ_LOG(sPEMLog, mozilla::LogLevel::Debug, \ + ("AndroidDataEncoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__)) +#define AND_ENC_LOGE(arg, ...) \ + MOZ_LOG(sPEMLog, mozilla::LogLevel::Error, \ + ("AndroidDataEncoder(%p)::%s: " arg, this, __func__, ##__VA_ARGS__)) + +#define REJECT_IF_ERROR() \ + do { \ + if (mError) { \ + auto error = mError.value(); \ + mError.reset(); \ + return EncodePromise::CreateAndReject(std::move(error), __func__); \ + } \ + } while (0) + +RefPtr AndroidDataEncoder::Init() { + return InvokeAsync(mTaskQueue, this, __func__, + &AndroidDataEncoder::ProcessInit); +} + +static const char* MimeTypeOf(MediaDataEncoder::CodecType aCodec) { + switch (aCodec) { + case MediaDataEncoder::CodecType::H264: + return "video/avc"; + default: + return ""; + } +} + +using FormatResult = Result; + +FormatResult ToMediaFormat(const AndroidDataEncoder::Config& aConfig) { + if (!aConfig.mCodecSpecific) { + return FormatResult( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "Android video encoder requires I-frame inverval")); + } + + nsresult rv = NS_OK; + MediaFormat::LocalRef format; + rv = MediaFormat::CreateVideoFormat(MimeTypeOf(aConfig.mCodecType), + aConfig.mSize.width, aConfig.mSize.height, + &format); + NS_ENSURE_SUCCESS( + rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to create Java MediaFormat object"))); + + rv = format->SetInteger(MediaFormat::KEY_BITRATE_MODE, 2 /* CBR */); + NS_ENSURE_SUCCESS(rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to set bitrate mode"))); + + rv = format->SetInteger(MediaFormat::KEY_BIT_RATE, aConfig.mBitsPerSec); + NS_ENSURE_SUCCESS(rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to set bitrate"))); + + // COLOR_FormatYUV420SemiPlanar(NV12) is the most widely supported + // format. 
+ rv = format->SetInteger(MediaFormat::KEY_COLOR_FORMAT, 0x15); + NS_ENSURE_SUCCESS(rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to set color format"))); + + rv = format->SetInteger(MediaFormat::KEY_FRAME_RATE, aConfig.mFramerate); + NS_ENSURE_SUCCESS(rv, FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to set frame rate"))); + + // Ensure interval >= 1. A negative value means no key frames are + // requested after the first frame. A zero value means a stream + // containing all key frames is requested. + int32_t intervalInSec = std::max( + 1, aConfig.mCodecSpecific.value().mKeyframeInterval / aConfig.mFramerate); + rv = format->SetInteger(MediaFormat::KEY_I_FRAME_INTERVAL, intervalInSec); + NS_ENSURE_SUCCESS(rv, + FormatResult(MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to set I-frame interval"))); + + return format; +} + +RefPtr AndroidDataEncoder::ProcessInit() { + AssertOnTaskQueue(); + MOZ_ASSERT(!mJavaEncoder); + + BufferInfo::LocalRef bufferInfo; + if (NS_FAILED(BufferInfo::New(&bufferInfo)) || !bufferInfo) { + return InitPromise::CreateAndReject(NS_ERROR_OUT_OF_MEMORY, __func__); + } + mInputBufferInfo = bufferInfo; + + FormatResult result = ToMediaFormat(mConfig); + if (result.isErr()) { + return InitPromise::CreateAndReject(result.unwrapErr(), __func__); + } + mFormat = result.unwrap(); + + // Register native methods. 
+ JavaCallbacksSupport::Init(); + + mJavaCallbacks = CodecProxy::NativeCallbacks::New(); + if (!mJavaCallbacks) { + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "cannot create Java callback object"), + __func__); + } + JavaCallbacksSupport::AttachNative( + mJavaCallbacks, mozilla::MakeUnique(this)); + + mJavaEncoder = CodecProxy::Create(true /* encoder */, mFormat, nullptr, + mJavaCallbacks, EmptyString()); + if (!mJavaEncoder) { + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "cannot create Java encoder object"), + __func__); + } + + mIsHardwareAccelerated = mJavaEncoder->IsHardwareAccelerated(); + mDrainState = DrainState::DRAINABLE; + + return InitPromise::CreateAndResolve(TrackInfo::kVideoTrack, __func__); +} + +RefPtr AndroidDataEncoder::Encode( + const MediaData* aSample) { + RefPtr self = this; + MOZ_ASSERT(aSample != nullptr); + + RefPtr sample(aSample); + return InvokeAsync(mTaskQueue, __func__, [self, sample]() { + return self->ProcessEncode(std::move(sample)); + }); +} + +static jni::ByteBuffer::LocalRef ConvertI420ToNV12Buffer( + RefPtr aSample, RefPtr& aYUVBuffer) { + const PlanarYCbCrImage* image = aSample->mImage->AsPlanarYCbCrImage(); + MOZ_ASSERT(image); + const PlanarYCbCrData* yuv = image->GetData(); + size_t ySize = yuv->mYStride * yuv->mYSize.height; + size_t size = ySize + (yuv->mCbCrStride * yuv->mCbCrSize.height * 2); + if (!aYUVBuffer || aYUVBuffer->Capacity() < size) { + aYUVBuffer = MakeRefPtr(size); + aYUVBuffer->SetLength(size); + } else { + MOZ_ASSERT(aYUVBuffer->Length() >= size); + } + + if (libyuv::I420ToNV12(yuv->mYChannel, yuv->mYStride, yuv->mCbChannel, + yuv->mCbCrStride, yuv->mCrChannel, yuv->mCbCrStride, + aYUVBuffer->Elements(), yuv->mYStride, + aYUVBuffer->Elements() + ySize, yuv->mCbCrStride * 2, + yuv->mYSize.width, yuv->mYSize.height) != 0) { + return nullptr; + } + + return jni::ByteBuffer::New(aYUVBuffer->Elements(), aYUVBuffer->Length()); +} + 
+RefPtr AndroidDataEncoder::ProcessEncode( + RefPtr aSample) { + AssertOnTaskQueue(); + + REJECT_IF_ERROR(); + + RefPtr sample(aSample->As()); + MOZ_ASSERT(sample); + + jni::ByteBuffer::LocalRef buffer = + ConvertI420ToNV12Buffer(sample, mYUVBuffer); + if (!buffer) { + return EncodePromise::CreateAndReject(NS_ERROR_ILLEGAL_INPUT, __func__); + } + + if (aSample->mKeyframe) { + mInputBufferInfo->Set(0, mYUVBuffer->Length(), + aSample->mTime.ToMicroseconds(), + MediaCodec::BUFFER_FLAG_SYNC_FRAME); + } else { + mInputBufferInfo->Set(0, mYUVBuffer->Length(), + aSample->mTime.ToMicroseconds(), 0); + } + + mJavaEncoder->Input(buffer, mInputBufferInfo, nullptr); + + if (mEncodedData.Length() > 0) { + EncodedData pending; + pending.SwapElements(mEncodedData); + return EncodePromise::CreateAndResolve(std::move(pending), __func__); + } else { + return EncodePromise::CreateAndResolve(EncodedData(), __func__); + } +} + +class AutoRelease final { + public: + AutoRelease(CodecProxy::Param aEncoder, Sample::Param aSample) + : mEncoder(aEncoder), mSample(aSample) {} + + ~AutoRelease() { mEncoder->ReleaseOutput(mSample, false); } + + private: + CodecProxy::GlobalRef mEncoder; + Sample::GlobalRef mSample; +}; + +static RefPtr ExtractCodecConfig(SampleBuffer::Param aBuffer, + const int32_t aOffset, + const int32_t aSize, + const bool aAsAnnexB) { + auto annexB = MakeRefPtr(aSize); + annexB->SetLength(aSize); + jni::ByteBuffer::LocalRef dest = + jni::ByteBuffer::New(annexB->Elements(), aSize); + aBuffer->WriteToByteBuffer(dest, aOffset, aSize); + if (aAsAnnexB) { + return annexB; + } + // Convert to avcC. 
+ nsTArray paramSets; + AnnexB::ParseNALEntries( + MakeSpan(annexB->Elements(), annexB->Length()), paramSets); + + auto avcc = MakeRefPtr(); + AnnexB::NALEntry& sps = paramSets.ElementAt(0); + AnnexB::NALEntry& pps = paramSets.ElementAt(1); + const uint8_t* spsPtr = annexB->Elements() + sps.mOffset; + H264::WriteExtraData( + avcc, spsPtr[1], spsPtr[2], spsPtr[3], + MakeSpan(spsPtr, sps.mSize), + MakeSpan(annexB->Elements() + pps.mOffset, pps.mSize)); + return avcc; +} + +void AndroidDataEncoder::ProcessOutput(Sample::GlobalRef&& aSample, + SampleBuffer::GlobalRef&& aBuffer) { + if (!mTaskQueue->IsCurrentThreadIn()) { + nsresult rv = mTaskQueue->Dispatch( + NewRunnableMethod( + "AndroidDataEncoder::ProcessOutput", this, + &AndroidDataEncoder::ProcessOutput, std::move(aSample), + std::move(aBuffer))); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + return; + } + AssertOnTaskQueue(); + + AutoRelease releaseSample(mJavaEncoder, aSample); + + BufferInfo::LocalRef info = aSample->Info(); + MOZ_ASSERT(info); + + int32_t flags; + bool ok = NS_SUCCEEDED(info->Flags(&flags)); + bool isEOS = !!(flags & MediaCodec::BUFFER_FLAG_END_OF_STREAM); + + int32_t offset; + ok &= NS_SUCCEEDED(info->Offset(&offset)); + + int32_t size; + ok &= NS_SUCCEEDED(info->Size(&size)); + + int64_t presentationTimeUs; + ok &= NS_SUCCEEDED(info->PresentationTimeUs(&presentationTimeUs)); + + if (!ok) { + return; + } + + if (size > 0) { + if ((flags & MediaCodec::BUFFER_FLAG_CODEC_CONFIG) != 0) { + mConfigData = ExtractCodecConfig(aBuffer, offset, size, + mConfig.mUsage == Usage::Realtime); + return; + } + RefPtr output = GetOutputData( + aBuffer, offset, size, !!(flags & MediaCodec::BUFFER_FLAG_KEY_FRAME)); + output->mEOS = isEOS; + output->mTime = media::TimeUnit::FromMicroseconds(presentationTimeUs); + mEncodedData.AppendElement(std::move(output)); + } + + if (isEOS) { + mDrainState = DrainState::DRAINED; + } + if (!mDrainPromise.IsEmpty()) { + EncodedData pending; + 
pending.SwapElements(mEncodedData); + mDrainPromise.Resolve(std::move(pending), __func__); + } +} + +RefPtr AndroidDataEncoder::GetOutputData( + SampleBuffer::Param aBuffer, const int32_t aOffset, const int32_t aSize, + const bool aIsKeyFrame) { + auto output = MakeRefPtr(); + + size_t prependSize = 0; + RefPtr avccHeader; + if (aIsKeyFrame && mConfigData) { + if (mConfig.mUsage == Usage::Realtime) { + prependSize = mConfigData->Length(); + } else { + avccHeader = mConfigData; + } + } + + UniquePtr writer(output->CreateWriter()); + if (!writer->SetSize(prependSize + aSize)) { + AND_ENC_LOGE("fail to allocate output buffer"); + return nullptr; + } + + if (prependSize > 0) { + PodCopy(writer->Data(), mConfigData->Elements(), prependSize); + } + + jni::ByteBuffer::LocalRef buf = + jni::ByteBuffer::New(writer->Data() + prependSize, aSize); + aBuffer->WriteToByteBuffer(buf, aOffset, aSize); + + if (mConfig.mUsage != Usage::Realtime && + !AnnexB::ConvertSampleToAVCC(output, avccHeader)) { + AND_ENC_LOGE("fail to convert annex-b sample to AVCC"); + return nullptr; + } + + output->mKeyframe = aIsKeyFrame; + + return output; +} + +RefPtr AndroidDataEncoder::Drain() { + return InvokeAsync(mTaskQueue, this, __func__, + &AndroidDataEncoder::ProcessDrain); +} + +RefPtr AndroidDataEncoder::ProcessDrain() { + AssertOnTaskQueue(); + MOZ_ASSERT(mJavaEncoder); + MOZ_ASSERT(mDrainPromise.IsEmpty()); + + REJECT_IF_ERROR(); + + switch (mDrainState) { + case DrainState::DRAINABLE: + mInputBufferInfo->Set(0, 0, -1, MediaCodec::BUFFER_FLAG_END_OF_STREAM); + mJavaEncoder->Input(nullptr, mInputBufferInfo, nullptr); + mDrainState = DrainState::DRAINING; + [[fallthrough]]; + case DrainState::DRAINING: + if (mEncodedData.IsEmpty()) { + return mDrainPromise.Ensure(__func__); // Pending promise. 
+ } + [[fallthrough]]; + case DrainState::DRAINED: + if (mEncodedData.Length() > 0) { + EncodedData pending; + pending.SwapElements(mEncodedData); + return EncodePromise::CreateAndResolve(std::move(pending), __func__); + } else { + return EncodePromise::CreateAndResolve(EncodedData(), __func__); + } + } +} + +RefPtr AndroidDataEncoder::Shutdown() { + return InvokeAsync(mTaskQueue, this, __func__, + &AndroidDataEncoder::ProcessShutdown); +} + +RefPtr AndroidDataEncoder::ProcessShutdown() { + AssertOnTaskQueue(); + if (mJavaEncoder) { + mJavaEncoder->Release(); + mJavaEncoder = nullptr; + } + + if (mJavaCallbacks) { + JavaCallbacksSupport::GetNative(mJavaCallbacks)->Cancel(); + JavaCallbacksSupport::DisposeNative(mJavaCallbacks); + mJavaCallbacks = nullptr; + } + + mFormat = nullptr; + + return ShutdownPromise::CreateAndResolve(true, __func__); +} + +RefPtr AndroidDataEncoder::SetBitrate( + const MediaDataEncoder::Rate aBitsPerSec) { + RefPtr self(this); + return InvokeAsync(mTaskQueue, __func__, [self, aBitsPerSec]() { + self->mJavaEncoder->SetRates(aBitsPerSec); + return GenericPromise::CreateAndResolve(true, __func__); + }); + + return nullptr; +} + +void AndroidDataEncoder::Error(const MediaResult& aError) { + if (!mTaskQueue->IsCurrentThreadIn()) { + nsresult rv = mTaskQueue->Dispatch(NewRunnableMethod( + "AndroidDataEncoder::Error", this, &AndroidDataEncoder::Error, aError)); + MOZ_DIAGNOSTIC_ASSERT(NS_SUCCEEDED(rv)); + Unused << rv; + return; + } + AssertOnTaskQueue(); + + mError.emplace(aError); +} + +void AndroidDataEncoder::CallbacksSupport::HandleInput(int64_t aTimestamp, + bool aProcessed) {} + +void AndroidDataEncoder::CallbacksSupport::HandleOutput( + Sample::Param aSample, SampleBuffer::Param aBuffer) { + mEncoder->ProcessOutput(std::move(aSample), std::move(aBuffer)); +} + +void AndroidDataEncoder::CallbacksSupport::HandleOutputFormatChanged( + MediaFormat::Param aFormat) {} + +void AndroidDataEncoder::CallbacksSupport::HandleError( + const 
MediaResult& aError) { + mEncoder->Error(aError); +} + +} // namespace mozilla + +#undef AND_ENC_LOG +#undef AND_ENC_LOGE diff --git a/dom/media/platforms/android/AndroidDataEncoder.h b/dom/media/platforms/android/AndroidDataEncoder.h new file mode 100644 index 0000000000..2c181c89e2 --- /dev/null +++ b/dom/media/platforms/android/AndroidDataEncoder.h @@ -0,0 +1,101 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_PLATFORMS_ANDROID_ANDROIDDATAENCODER_H_ +#define DOM_MEDIA_PLATFORMS_ANDROID_ANDROIDDATAENCODER_H_ + +#include "MediaData.h" +#include "PlatformEncoderModule.h" +#include "TimeUnits.h" + +#include "FennecJNIWrappers.h" +#include "JavaCallbacksSupport.h" + +#include "mozilla/Maybe.h" +#include "mozilla/Monitor.h" + +namespace mozilla { + +class AndroidDataEncoder final : public MediaDataEncoder { + public: + using Config = H264Config; + + AndroidDataEncoder(const Config& aConfig, RefPtr aTaskQueue) + : mConfig(aConfig), mTaskQueue(aTaskQueue) {} + RefPtr Init() override; + RefPtr Encode(const MediaData* aSample) override; + RefPtr Drain() override; + RefPtr Shutdown() override; + RefPtr SetBitrate(const Rate aBitsPerSec) override; + + nsCString GetDescriptionName() const override { + return NS_LITERAL_CSTRING("Android Encoder"); + } + + private: + class CallbacksSupport final : public JavaCallbacksSupport { + public: + explicit CallbacksSupport(AndroidDataEncoder* aEncoder) + : mEncoder(aEncoder) {} + + void HandleInput(int64_t aTimestamp, bool aProcessed) override; + void HandleOutput(java::Sample::Param aSample, + java::SampleBuffer::Param aBuffer) override; + void HandleOutputFormatChanged( + java::sdk::MediaFormat::Param aFormat) override; + void HandleError(const MediaResult& aError) override; + + private: + AndroidDataEncoder* mEncoder; + }; + friend class 
CallbacksSupport; + + // Methods only called on mTaskQueue. + RefPtr ProcessInit(); + RefPtr ProcessEncode(RefPtr aSample); + RefPtr ProcessDrain(); + RefPtr ProcessShutdown(); + void ProcessInput(); + void ProcessOutput(java::Sample::GlobalRef&& aSample, + java::SampleBuffer::GlobalRef&& aBuffer); + RefPtr GetOutputData(java::SampleBuffer::Param aBuffer, + const int32_t aOffset, const int32_t aSize, + const bool aIsKeyFrame); + void Error(const MediaResult& aError); + + void AssertOnTaskQueue() const { + MOZ_ASSERT(mTaskQueue->IsCurrentThreadIn()); + } + + Config mConfig; + + RefPtr mTaskQueue; + + // Can be accessed on any thread, but only written on during init. + bool mIsHardwareAccelerated = false; + + java::CodecProxy::GlobalRef mJavaEncoder; + java::CodecProxy::NativeCallbacks::GlobalRef mJavaCallbacks; + java::sdk::MediaFormat::GlobalRef mFormat; + // Preallocated Java object used as a reusable storage for input buffer + // information. Contents must be changed only on mTaskQueue. + java::sdk::BufferInfo::GlobalRef mInputBufferInfo; + + MozPromiseHolder mDrainPromise; + + // Accessed on mTaskqueue only. + RefPtr mYUVBuffer; + EncodedData mEncodedData; + // SPS/PPS NALUs for realtime usage, avcC otherwise. + RefPtr mConfigData; + + enum class DrainState { DRAINED, DRAINABLE, DRAINING }; + DrainState mDrainState; + + Maybe mError; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/platforms/android/AndroidEncoderModule.cpp b/dom/media/platforms/android/AndroidEncoderModule.cpp new file mode 100644 index 0000000000..0ca3efbc14 --- /dev/null +++ b/dom/media/platforms/android/AndroidEncoderModule.cpp @@ -0,0 +1,32 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "AndroidEncoderModule.h" + +#include "AndroidDataEncoder.h" +#include "MP4Decoder.h" + +#include "mozilla/Logging.h" + +namespace mozilla { +extern LazyLogModule sPEMLog; +#define AND_PEM_LOG(arg, ...) \ + MOZ_LOG( \ + sPEMLog, mozilla::LogLevel::Debug, \ + ("AndroidEncoderModule(%p)::%s: " arg, this, __func__, ##__VA_ARGS__)) + +bool AndroidEncoderModule::SupportsMimeType(const nsACString& aMimeType) const { + return MP4Decoder::IsH264(aMimeType); +} + +already_AddRefed AndroidEncoderModule::CreateVideoEncoder( + const CreateEncoderParams& aParams) const { + RefPtr encoder = + new AndroidDataEncoder(aParams.ToH264Config(), aParams.mTaskQueue); + return encoder.forget(); +} + +} // namespace mozilla + +#undef AND_PEM_LOG diff --git a/dom/media/platforms/android/AndroidEncoderModule.h b/dom/media/platforms/android/AndroidEncoderModule.h new file mode 100644 index 0000000000..2593f75043 --- /dev/null +++ b/dom/media/platforms/android/AndroidEncoderModule.h @@ -0,0 +1,22 @@ +/* This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef DOM_MEDIA_PLATFORMS_ANDROID_ANDROIDENCODERMODULE_H_ +#define DOM_MEDIA_PLATFORMS_ANDROID_ANDROIDENCODERMODULE_H_ + +#include "PlatformEncoderModule.h" + +namespace mozilla { + +class AndroidEncoderModule final : public PlatformEncoderModule { + public: + bool SupportsMimeType(const nsACString& aMimeType) const override; + + already_AddRefed CreateVideoEncoder( + const CreateEncoderParams& aParams) const override; +}; + +} // namespace mozilla + +#endif diff --git a/dom/media/platforms/android/RemoteDataDecoder.cpp b/dom/media/platforms/android/RemoteDataDecoder.cpp index b524c6c93b..fa92189667 100644 --- a/dom/media/platforms/android/RemoteDataDecoder.cpp +++ b/dom/media/platforms/android/RemoteDataDecoder.cpp @@ -2,13 +2,16 @@ * License, v. 2.0. 
If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ +#include "RemoteDataDecoder.h" + #include "AndroidBridge.h" #include "AndroidDecoderModule.h" -#include "JavaCallbacksSupport.h" -#include "SimpleMap.h" +#include "EMEDecoderModule.h" #include "GLImages.h" +#include "JavaCallbacksSupport.h" #include "MediaData.h" #include "MediaInfo.h" +#include "SimpleMap.h" #include "VideoUtils.h" #include "VPXDecoder.h" @@ -58,13 +61,14 @@ class RemoteVideoDecoder : public RemoteDataDecoder { public: // Render the output to the surface when the frame is sent // to compositor, or release it if not presented. - class CompositeListener : private RenderOrReleaseOutput, - public VideoData::Listener { + class CompositeListener + : private RenderOrReleaseOutput, + public layers::SurfaceTextureImage::SetCurrentCallback { public: CompositeListener(CodecProxy::Param aCodec, Sample::Param aSample) : RenderOrReleaseOutput(aCodec, aSample) {} - void OnSentToCompositor() override { ReleaseOutput(true); } + void operator()(void) override { ReleaseOutput(true); } }; class InputInfo { @@ -251,7 +255,7 @@ class RemoteVideoDecoder : public RemoteDataDecoder { return; } - UniquePtr releaseSample( + UniquePtr releaseSample( new CompositeListener(mJavaDecoder, aSample)); BufferInfo::LocalRef info = aSample->Info(); @@ -284,18 +288,19 @@ class RemoteVideoDecoder : public RemoteDataDecoder { } if (ok && (size > 0 || presentationTimeUs >= 0)) { - RefPtr img = new SurfaceTextureImage( + RefPtr img = new layers::SurfaceTextureImage( mSurfaceHandle, inputInfo.mImageSize, false /* NOT continuous */, gl::OriginPos::BottomLeft, mConfig.HasAlpha()); + img->AsSurfaceTextureImage()->RegisterSetCurrentCallback( + std::move(releaseSample)); RefPtr v = VideoData::CreateFromImage( inputInfo.mDisplaySize, offset, TimeUnit::FromMicroseconds(presentationTimeUs), - TimeUnit::FromMicroseconds(inputInfo.mDurationUs), img, + 
TimeUnit::FromMicroseconds(inputInfo.mDurationUs), img.forget(), !!(flags & MediaCodec::BUFFER_FLAG_SYNC_FRAME), TimeUnit::FromMicroseconds(presentationTimeUs)); - v->SetListener(std::move(releaseSample)); RemoteDataDecoder::UpdateOutputStatus(std::move(v)); } @@ -855,3 +860,4 @@ void RemoteDataDecoder::Error(const MediaResult& aError) { } } // namespace mozilla +#undef LOG diff --git a/dom/media/platforms/apple/AppleEncoderModule.cpp b/dom/media/platforms/apple/AppleEncoderModule.cpp index 75252a2e57..4f0d18ee22 100644 --- a/dom/media/platforms/apple/AppleEncoderModule.cpp +++ b/dom/media/platforms/apple/AppleEncoderModule.cpp @@ -4,32 +4,19 @@ #include "AppleEncoderModule.h" -#include "nsMimeTypes.h" - #include "AppleVTEncoder.h" +#include "MP4Decoder.h" namespace mozilla { bool AppleEncoderModule::SupportsMimeType(const nsACString& aMimeType) const { - return aMimeType.EqualsLiteral(VIDEO_MP4) || - aMimeType.EqualsLiteral("video/avc"); + return MP4Decoder::IsH264(aMimeType); } already_AddRefed AppleEncoderModule::CreateVideoEncoder( const CreateEncoderParams& aParams) const { - const VideoInfo* info = aParams.mConfig.GetAsVideoInfo(); - MOZ_ASSERT(info); - - using Config = AppleVTEncoder::Config; - Config config = - Config(MediaDataEncoder::CodecType::H264, aParams.mUsage, info->mImage, - aParams.mPixelFormat, aParams.mFramerate, aParams.mBitrate); - if (aParams.mCodecSpecific) { - config.SetCodecSpecific(aParams.mCodecSpecific.ref().mH264); - } - RefPtr encoder( - new AppleVTEncoder(std::forward(config), aParams.mTaskQueue)); + new AppleVTEncoder(aParams.ToH264Config(), aParams.mTaskQueue)); return encoder.forget(); } diff --git a/dom/media/platforms/apple/AppleVTEncoder.cpp b/dom/media/platforms/apple/AppleVTEncoder.cpp index 3249f3820e..97dec10114 100644 --- a/dom/media/platforms/apple/AppleVTEncoder.cpp +++ b/dom/media/platforms/apple/AppleVTEncoder.cpp @@ -16,6 +16,8 @@ #include "AppleUtils.h" +namespace mozilla { +extern LazyLogModule sPEMLog; #define 
VTENC_LOGE(fmt, ...) \ MOZ_LOG(sPEMLog, mozilla::LogLevel::Error, \ ("[AppleVTEncoder] %s: " fmt, __func__, ##__VA_ARGS__)) @@ -23,8 +25,6 @@ MOZ_LOG(sPEMLog, mozilla::LogLevel::Debug, \ ("[AppleVTEncoder] %s: " fmt, __func__, ##__VA_ARGS__)) -namespace mozilla { - static CFDictionaryRef BuildEncoderSpec() { const void* keys[] = { kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder}; @@ -37,17 +37,36 @@ static CFDictionaryRef BuildEncoderSpec() { &kCFTypeDictionaryValueCallBacks); } -static void FrameCallback(void* aEncoder, void* aFrameParams, OSStatus aStatus, +static void FrameCallback(void* aEncoder, void* aFrameRefCon, OSStatus aStatus, VTEncodeInfoFlags aInfoFlags, CMSampleBufferRef aSampleBuffer) { if (aStatus != noErr || !aSampleBuffer) { - VTENC_LOGE("VideoToolbox encoder returned no data"); + VTENC_LOGE("VideoToolbox encoder returned no data status=%d sample=%p", + aStatus, aSampleBuffer); aSampleBuffer = nullptr; } else if (aInfoFlags & kVTEncodeInfo_FrameDropped) { - VTENC_LOGE(" ...frame tagged as dropped..."); + VTENC_LOGE("frame tagged as dropped"); + return; } + (static_cast(aEncoder))->OutputFrame(aSampleBuffer); +} - static_cast(aEncoder)->OutputFrame(aSampleBuffer); +static bool SetAverageBitrate(VTCompressionSessionRef& aSession, + MediaDataEncoder::Rate aBitsPerSec) { + int64_t bps(aBitsPerSec); + AutoCFRelease bitrate( + CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &bps)); + return VTSessionSetProperty(aSession, + kVTCompressionPropertyKey_AverageBitRate, + bitrate) == noErr; +} + +static bool SetRealtimeProperties(VTCompressionSessionRef& aSession) { + return VTSessionSetProperty(aSession, kVTCompressionPropertyKey_RealTime, + kCFBooleanTrue) == noErr && + VTSessionSetProperty(aSession, + kVTCompressionPropertyKey_AllowFrameReordering, + kCFBooleanFalse) == noErr; } static bool SetProfileLevel(VTCompressionSessionRef& aSession, @@ -75,37 +94,61 @@ RefPtr AppleVTEncoder::Init() { AutoCFRelease srcBufferAttr( 
BuildSourceImageBufferAttributes()); if (!srcBufferAttr) { - return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, - __func__); + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, + "fail to create source buffer attributes"), + __func__); } OSStatus status = VTCompressionSessionCreate( kCFAllocatorDefault, mConfig.mSize.width, mConfig.mSize.height, kCMVideoCodecType_H264, spec, srcBufferAttr, kCFAllocatorDefault, - &FrameCallback, this, &mSession); + &FrameCallback, this /* outputCallbackRefCon */, &mSession); + if (status != noErr) { - VTENC_LOGE("fail to create encoder session"); - // TODO: new error codes for encoder - return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_ABORT_ERR, __func__); + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to create encoder session"), + __func__); } - const Maybe& h264Config = mConfig.mCodecSpecific; - if (h264Config) { - if (!SetProfileLevel(mSession, h264Config.ref().mProfileLevel)) { - VTENC_LOGE("fail to configurate profile level"); - return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_ABORT_ERR, - __func__); + if (!SetAverageBitrate(mSession, mConfig.mBitsPerSec)) { + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to configurate average bitrate"), + __func__); + } + + if (mConfig.mUsage == Usage::Realtime && !SetRealtimeProperties(mSession)) { + VTENC_LOGE("fail to configurate realtime properties"); + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + "fail to configurate average bitrate"), + __func__); + } + + if (mConfig.mCodecSpecific) { + const H264Specific& specific = mConfig.mCodecSpecific.ref(); + if (!SetProfileLevel(mSession, specific.mProfileLevel)) { + return InitPromise::CreateAndReject( + MediaResult(NS_ERROR_DOM_MEDIA_FATAL_ERR, + nsPrintfCString("fail to configurate profile level:%d", + specific.mProfileLevel)), + __func__); } - 
int64_t interval = h264Config.ref().mKeyframeInterval; + int64_t interval = specific.mKeyframeInterval; AutoCFRelease cf( CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &interval)); if (VTSessionSetProperty(mSession, kVTCompressionPropertyKey_MaxKeyFrameInterval, cf) != noErr) { - VTENC_LOGE("fail to configurate keyframe interval"); - return InitPromise::CreateAndReject(NS_ERROR_DOM_MEDIA_ABORT_ERR, - __func__); + return InitPromise::CreateAndReject( + MediaResult( + NS_ERROR_DOM_MEDIA_FATAL_ERR, + nsPrintfCString("fail to configurate keyframe interval:%" PRId64, + interval)), + __func__); } } @@ -411,7 +454,7 @@ RefPtr AppleVTEncoder::ProcessEncode( mSession, buffer, CMTimeMake(aSample->mTime.ToMicroseconds(), USECS_PER_S), CMTimeMake(aSample->mDuration.ToMicroseconds(), USECS_PER_S), frameProps, - nullptr, &info); + nullptr /* sourceFrameRefcon */, &info); if (status != noErr) { return EncodePromise::CreateAndReject(NS_ERROR_DOM_MEDIA_FATAL_ERR, __func__); @@ -441,12 +484,17 @@ static size_t NumberOfPlanes(MediaDataEncoder::PixelFormat aPixelFormat) { using namespace layers; +static void ReleaseImage(void* aImageGrip, const void* aDataPtr, + size_t aDataSize, size_t aNumOfPlanes, + const void** aPlanes) { + (static_cast(aImageGrip))->Release(); +} + CVPixelBufferRef AppleVTEncoder::CreateCVPixelBuffer(const Image* aSource) { AssertOnTaskQueue(); // TODO: support types other than YUV - const PlanarYCbCrImage* image = - const_cast(aSource)->AsPlanarYCbCrImage(); + PlanarYCbCrImage* image = const_cast(aSource)->AsPlanarYCbCrImage(); if (!image || !image->GetData()) { return nullptr; } @@ -482,12 +530,19 @@ CVPixelBufferRef AppleVTEncoder::CreateCVPixelBuffer(const Image* aSource) { } CVPixelBufferRef buffer = nullptr; - return CVPixelBufferCreateWithPlanarBytes( - kCFAllocatorDefault, yuv->mPicSize.width, yuv->mPicSize.height, - format, nullptr, 0, numPlanes, addresses, widths, heights, strides, - nullptr, nullptr, nullptr, &buffer) == kCVReturnSuccess - ? 
buffer - : nullptr; + image->AddRef(); // Grip input buffers. + CVReturn rv = CVPixelBufferCreateWithPlanarBytes( + kCFAllocatorDefault, yuv->mPicSize.width, yuv->mPicSize.height, format, + nullptr /* dataPtr */, 0 /* dataSize */, numPlanes, addresses, widths, + heights, strides, ReleaseImage /* releaseCallback */, + image /* releaseRefCon */, nullptr /* pixelBufferAttributes */, &buffer); + if (rv == kCVReturnSuccess) { + return buffer; + // |image| will be released in |ReleaseImage()|. + } else { + image->Release(); + return nullptr; + } } RefPtr AppleVTEncoder::Drain() { @@ -541,11 +596,7 @@ RefPtr AppleVTEncoder::SetBitrate( RefPtr self = this; return InvokeAsync(mTaskQueue, __func__, [self, aBitsPerSec]() { MOZ_ASSERT(self->mSession); - AutoCFRelease bitrate( - CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &aBitsPerSec)); - return VTSessionSetProperty(self->mSession, - kVTCompressionPropertyKey_AverageBitRate, - bitrate) == noErr + return SetAverageBitrate(self->mSession, aBitsPerSec) ? 
GenericPromise::CreateAndResolve(true, __func__) : GenericPromise::CreateAndReject( NS_ERROR_DOM_MEDIA_NOT_SUPPORTED_ERR, __func__); diff --git a/dom/media/platforms/apple/AppleVTEncoder.h b/dom/media/platforms/apple/AppleVTEncoder.h index aef6902ee0..1665a957a1 100644 --- a/dom/media/platforms/apple/AppleVTEncoder.h +++ b/dom/media/platforms/apple/AppleVTEncoder.h @@ -19,7 +19,7 @@ class Image; class AppleVTEncoder final : public MediaDataEncoder { public: - using Config = VideoConfig; + using Config = H264Config; struct FrameParams { using TimeUnit = media::TimeUnit; diff --git a/dom/media/platforms/moz.build b/dom/media/platforms/moz.build index cdc19d8ac1..a20339bca7 100644 --- a/dom/media/platforms/moz.build +++ b/dom/media/platforms/moz.build @@ -113,12 +113,18 @@ include('/ipc/chromium/chromium-config.mozbuild') if CONFIG['MOZ_WIDGET_TOOLKIT'] == 'android': EXPORTS += [ 'android/AndroidDecoderModule.h', + 'android/AndroidEncoderModule.h', 'android/JavaCallbacksSupport.h', ] UNIFIED_SOURCES += [ + 'android/AndroidDataEncoder.cpp', 'android/AndroidDecoderModule.cpp', + 'android/AndroidEncoderModule.cpp', 'android/RemoteDataDecoder.cpp', ] + LOCAL_INCLUDES += [ + '/media/libyuv/libyuv/include', + ] FINAL_LIBRARY = 'xul' diff --git a/dom/media/test/crashtests/1547784.html b/dom/media/test/crashtests/1547784.html new file mode 100644 index 0000000000..ee270491f1 --- /dev/null +++ b/dom/media/test/crashtests/1547784.html @@ -0,0 +1,33 @@ + + + + + diff --git a/dom/media/test/crashtests/1569645.html b/dom/media/test/crashtests/1569645.html new file mode 100644 index 0000000000..b1f1247f26 --- /dev/null +++ b/dom/media/test/crashtests/1569645.html @@ -0,0 +1,23 @@ + + + + + + + + diff --git a/dom/media/test/crashtests/1594466.html b/dom/media/test/crashtests/1594466.html new file mode 100644 index 0000000000..3d3f58dbb8 --- /dev/null +++ b/dom/media/test/crashtests/1594466.html @@ -0,0 +1,21 @@ + + + + + diff --git a/dom/media/test/crashtests/crashtests.list 
b/dom/media/test/crashtests/crashtests.list index 8d2114cc92..3d666dd70a 100644 --- a/dom/media/test/crashtests/crashtests.list +++ b/dom/media/test/crashtests/crashtests.list @@ -126,7 +126,10 @@ load empty-samples.webm # Bug 1540580 test-pref(media.autoplay.block-webaudio,false) load 1545133.html load track-with-zero-dimensions.mp4 # Bug 1542539 load 1560215.html +skip-if(Android) load 1547784.html # Skip on Android as clearkey is not supported load 1547899.html +load 1569645.html load 1575271.html load 1577184.html pref(media.autoplay.default,0) load 1587248.html +load 1594466.html diff --git a/dom/media/test/mochitest.ini b/dom/media/test/mochitest.ini index 65eaa27c42..6f4e1aaf81 100644 --- a/dom/media/test/mochitest.ini +++ b/dom/media/test/mochitest.ini @@ -926,8 +926,8 @@ tags=mtg [test_mediarecorder_fires_start_event_once_when_erroring.html] skip-if = android_version == '17' # android(bug 1232305) tags=mtg -[test_mediarecorder_getencodeddata.html] -skip-if = android_version == '17' # android(bug 1232305) +[test_mediarecorder_onerror_pause.html] +scheme=https tags=mtg [test_mediarecorder_pause_resume_video.html] skip-if = toolkit == 'android' # android(bug 1232305) @@ -938,7 +938,7 @@ tags=mtg skip-if = toolkit == 'android' || (os == 'win' && os_version == '10.0' && webrender) # android(bug 1232305), Bug 1453375 tags=mtg [test_mediarecorder_record_4ch_audiocontext.html] -skip-if = android_version == '17' # android(bug 1232305) +skip-if = android_version == '17' || (os == 'win' && os_version == '10.0' && webrender) # android(bug 1232305), Bug 1598101 tags=mtg [test_mediarecorder_record_addtracked_stream.html] skip-if = toolkit == 'android' # Bug 1408241 @@ -995,9 +995,6 @@ tags=mtg capturestream [test_mediarecorder_state_event_order.html] skip-if = android_version == '17' # android(bug 1232305) tags=mtg capturestream -[test_mediarecorder_unsupported_src.html] -scheme=https -tags=mtg [test_mediarecorder_webm_support.html] skip-if = android_version == '17' # 
android(bug 1232305) tags=mtg diff --git a/dom/media/test/test_mediarecorder_creation_fail.html b/dom/media/test/test_mediarecorder_creation_fail.html index 42fc90ffe4..f411c2d3e1 100644 --- a/dom/media/test/test_mediarecorder_creation_fail.html +++ b/dom/media/test/test_mediarecorder_creation_fail.html @@ -9,65 +9,41 @@
 
 
diff --git a/dom/media/test/test_mediarecorder_getencodeddata.html b/dom/media/test/test_mediarecorder_getencodeddata.html deleted file mode 100644 index 48fa216b6b..0000000000 --- a/dom/media/test/test_mediarecorder_getencodeddata.html +++ /dev/null @@ -1,83 +0,0 @@ - - - - Bug 957452 Test GetEncodedData problem on asan build - - - - -
-
-
- - diff --git a/dom/media/test/test_mediarecorder_onerror_pause.html b/dom/media/test/test_mediarecorder_onerror_pause.html new file mode 100644 index 0000000000..e7e5ce38bc --- /dev/null +++ b/dom/media/test/test_mediarecorder_onerror_pause.html @@ -0,0 +1,106 @@ + + + Bug 957439 - Media Recording - Assertion fail at Pause if unsupported input stream. + + + + + + +Mozilla Bug 957439 +
+
+
+
diff --git a/dom/media/test/test_mediarecorder_record_4ch_audiocontext.html b/dom/media/test/test_mediarecorder_record_4ch_audiocontext.html
index d26da77970..19518af2cf 100644
--- a/dom/media/test/test_mediarecorder_record_4ch_audiocontext.html
+++ b/dom/media/test/test_mediarecorder_record_4ch_audiocontext.html
@@ -26,7 +26,7 @@ function startTest() {
   var stopTriggered = false;
   var onstopTriggered = false;
   dest.channelCount = 4;
-  var expectedMimeType = 'audio/ogg';
+  var expectedMimeType = 'audio/ogg; codecs=opus';
   var totalBlobSize = 0;
   source.channelCountMode = 'explicit';
   source.connect(dest);
@@ -51,14 +51,13 @@ function startTest() {
     SimpleTest.finish();
   };
   mMediaRecorder.ondataavailable = function (e) {
-    if (mMediaRecorder.state == 'recording') {
-      ok(e.data.size > 0, 'check blob has data');
-    }
+    ok(e.data.size > 0, 'check blob has data');
     totalBlobSize += e.data.size;
-    ok(totalBlobSize > 0, 'check the totalBlobSize');
-    is(mMediaRecorder.mimeType, expectedMimeType, 'blob should has mimetype, return ' + mMediaRecorder.mimeType);
+    is(e.data.type, expectedMimeType, 'blob should have expected mimetype');
     if (!stopTriggered) {
+      is(mMediaRecorder.mimeType, expectedMimeType, 'recorder should have expected mimetype');
       mMediaRecorder.stop();
+      is(mMediaRecorder.mimeType, '', 'recorder should have reset its mimetype');
       stopTriggered = true;
     } else if (onstopTriggered) {
       ok(false, 'ondataavailable should come before onstop event');
diff --git a/dom/media/test/test_mediarecorder_record_audiocontext.html b/dom/media/test/test_mediarecorder_record_audiocontext.html
index 8367b8674e..9b5fa46140 100644
--- a/dom/media/test/test_mediarecorder_record_audiocontext.html
+++ b/dom/media/test/test_mediarecorder_record_audiocontext.html
@@ -45,7 +45,8 @@ function startTest() {
   };
   mMediaRecorder.ondataavailable = function (e) {
     if (mMediaRecorder.state == 'recording') {
-      is('audio/ogg', mMediaRecorder.mimeType, "check the record mimetype return " + mMediaRecorder.mimeType);
+      is(mMediaRecorder.mimeType, 'audio/ogg; codecs=opus', 'Expected MediaRecorder mimetype');
+      is(e.data.type, 'audio/ogg; codecs=opus', 'Expected Blob mimetype');
       ok(e.data.size > 0, 'check blob has data');
       mMediaRecorder.stop();
     }
diff --git a/dom/media/test/test_mediarecorder_record_audionode.html b/dom/media/test/test_mediarecorder_record_audionode.html
index 731675b822..8a57437b81 100644
--- a/dom/media/test/test_mediarecorder_record_audionode.html
+++ b/dom/media/test/test_mediarecorder_record_audionode.html
@@ -53,6 +53,7 @@ async function testRecord(source, mimeType) {
   const isOffline = source.context instanceof OfflineAudioContext;
   const recorder = new MediaRecorder(source, 0, {mimeType});
   is(recorder.mimeType, mimeType, "Mime type is set");
+  const extendedMimeType = `${mimeType || "audio/ogg"}; codecs=opus`;
 
   recorder.onwarning = () => ok(false, "should not fire onwarning");
   recorder.onerror = () => ok(false, "should not fire onerror");
@@ -62,14 +63,17 @@ async function testRecord(source, mimeType) {
 
   recorder.start(1000);
   is("recording", recorder.state, "state should become recording after calling start()");
+  is(recorder.mimeType, mimeType, "Mime type is not changed by start()");
+
+  await new Promise(r => recorder.onstart = r);
+  is(recorder.mimeType, extendedMimeType, "Mime type is fully defined");
 
   const chunks = [];
   let {data} = await new Promise(r => recorder.ondataavailable = r);
-  is(recorder.state, "recording", "Expected to still be recording");
-  is(data.type, recorder.mimeType, "Blob has recorder mimetype");
-  if (mimeType != "") {
-    is(data.type, mimeType, "Blob has given mimetype");
+  if (!isOffline) {
+    is(recorder.state, "recording", "Expected to still be recording");
   }
+  is(data.type, extendedMimeType, "Blob has fully defined mimetype");
   isnot(data.size, 0, "should get data and its length should be > 0");
   chunks.push(data);
 
@@ -90,7 +94,7 @@ async function testRecord(source, mimeType) {
   return new Blob(chunks, {type: chunks[0].type});
 }
 
-addLoadEvent(async function() {
+addLoadEvent(async () => {
   const src = setUpSource();
   let didThrow = false;
   try {
@@ -100,7 +104,7 @@ addLoadEvent(async function() {
   }
   ok(didThrow, "MediaRecorder(AudioNode) should be hidden behind a pref");
 
-  await SpecialPowers.pushPrefEnv({"set": [
+  await SpecialPowers.pushPrefEnv({set: [
       ["media.recorder.audio_node.enabled", true],
     ]});
 
diff --git a/dom/media/test/test_mediarecorder_record_getdata_afterstart.html b/dom/media/test/test_mediarecorder_record_getdata_afterstart.html
index 5b7b9cabe9..3b181ed8db 100644
--- a/dom/media/test/test_mediarecorder_record_getdata_afterstart.html
+++ b/dom/media/test/test_mediarecorder_record_getdata_afterstart.html
@@ -26,6 +26,7 @@ function startTest(test, token) {
   element.stream = element.mozCaptureStream();
 
   mMediaRecorder = new MediaRecorder(element.stream);
+  is(mMediaRecorder.mimeType, '', 'Expected MediaRecorder mimetype');
   mMediaRecorder.onwarning = function() {
     ok(false, 'onwarning unexpectedly fired');
   };
@@ -37,14 +38,14 @@ function startTest(test, token) {
   mMediaRecorder.onstart = function() {
     info('onstart fired successfully');
     hasonstart = true;
-    // On audio only case, we produce audio/ogg as mimeType.
-    is('audio/ogg', mMediaRecorder.mimeType, "check the record mimetype return " + mMediaRecorder.mimeType);
+    is(mMediaRecorder.mimeType, 'audio/ogg; codecs=opus',
+      "MediaRecorder mimetype as expected");
     mMediaRecorder.requestData();
   };
 
   mMediaRecorder.onstop = function() {
     info('onstop fired successfully');
-    ok (hasondataavailable, "should have ondataavailable before onstop");
+    ok(hasondataavailable, "should have ondataavailable before onstop");
     is(mMediaRecorder.state, 'inactive', 'check recording status is inactive');
     SimpleTest.finish();
   };
@@ -53,8 +54,9 @@ function startTest(test, token) {
     info('ondataavailable fired successfully');
     if (mMediaRecorder.state == 'recording') {
       hasondataavailable = true;
-      ok(hasonstart, "should has onstart event first");
-      ok(e.data.size > 0, 'check blob has data');
+      ok(hasonstart, "should have had start event first");
+      is(e.data.type, mMediaRecorder.mimeType,
+        "blob's mimeType matches the recorder's");
       mMediaRecorder.stop();
     }
   };
diff --git a/dom/media/test/test_mediarecorder_record_gum_video_timeslice.html b/dom/media/test/test_mediarecorder_record_gum_video_timeslice.html
index afed950c1d..aed2fb7250 100644
--- a/dom/media/test/test_mediarecorder_record_gum_video_timeslice.html
+++ b/dom/media/test/test_mediarecorder_record_gum_video_timeslice.html
@@ -18,6 +18,7 @@ async function startTest() {
     let stream = await navigator.mediaDevices.getUserMedia({audio: true, video: true});
     let dataAvailableCount = 0;
     let onDataAvailableFirst = false;
+    const expectedMimeType = 'video/webm; codecs="vp8, opus"';
 
     mediaRecorder = new MediaRecorder(stream);
     is(mediaRecorder.stream, stream,
@@ -34,6 +35,10 @@ async function startTest() {
       ok(false, 'Unexpected onstop callback fired');
     };
 
+    mediaRecorder.onstart = function() {
+      is(mediaRecorder.mimeType, expectedMimeType, 'Expected mime type');
+    };
+
     mediaRecorder.ondataavailable = function (evt) {
       info('ondataavailable fired');
       dataAvailableCount++;
@@ -44,9 +49,7 @@ async function startTest() {
          'Event type should dataavailable');
       ok(evt.data.size >= 0,
          'Blob data size ' + evt.data.size + ' received is greater than or equal to zero');
-      is(mediaRecorder.mimeType, evt.data.type,
-         'Mime type in MediaRecorder and ondataavailable : '
-         + mediaRecorder.mimeType + ' == ' + evt.data.type);
+      is(evt.data.type, expectedMimeType, 'Expected blob mime type');
 
       // We'll stop recording upon the 1st blob being received
       if (dataAvailableCount === 1) {
@@ -76,6 +79,7 @@ async function startTest() {
 
     mediaRecorder.start(250);
     is(mediaRecorder.state, 'recording', 'Media recorder should be recording');
+    is(mediaRecorder.mimeType, '', 'Expected mime type');
   } catch (err) {
     ok(false, 'Unexpected error fired with: ' + err);
     SimpleTest.finish();
diff --git a/dom/media/test/test_mediarecorder_record_gum_video_timeslice_mixed.html b/dom/media/test/test_mediarecorder_record_gum_video_timeslice_mixed.html
index de8fc1baa7..7c6861c351 100644
--- a/dom/media/test/test_mediarecorder_record_gum_video_timeslice_mixed.html
+++ b/dom/media/test/test_mediarecorder_record_gum_video_timeslice_mixed.html
@@ -9,61 +9,64 @@
 
 
 
-  
-  
-  
-
-
-Mozilla Bug 957439
-
-
-
-
diff --git a/dom/media/test/test_mediarecorder_webm_support.html b/dom/media/test/test_mediarecorder_webm_support.html
index deae5af9f0..6b115ee33a 100644
--- a/dom/media/test/test_mediarecorder_webm_support.html
+++ b/dom/media/test/test_mediarecorder_webm_support.html
@@ -8,11 +8,49 @@
 
 
 
 
 
diff --git a/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStream.html b/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStream.html
index 2c0637a00d..df0add4204 100644
--- a/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStream.html
+++ b/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStream.html
@@ -18,6 +18,10 @@
     await pushPrefs(
       ['media.video_loopback_dev', ''],
       ['media.navigator.streams.fake', true]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
 
     const test = new PeerConnectionTest(options);
     addRenegotiation(test.chain,
diff --git a/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStreamNoBundle.html b/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStreamNoBundle.html
index 22e1820a46..9dcce7a46f 100644
--- a/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStreamNoBundle.html
+++ b/dom/media/tests/mochitest/test_peerConnection_addSecondVideoStreamNoBundle.html
@@ -18,6 +18,10 @@
     await pushPrefs(
       ['media.video_loopback_dev', ''],
       ['media.navigator.streams.fake', true]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
 
     options.bundle = false;
     const test = new PeerConnectionTest(options);
diff --git a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d.html b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d.html
index 17d1baff94..3336ec4127 100644
--- a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d.html
+++ b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d.html
@@ -13,7 +13,12 @@ createHTML({
   visible: true
 });
 
-runNetworkTest(() => {
+runNetworkTest(async () => {
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
+
   var test = new PeerConnectionTest();
   var mediaElement;
   var h = new CaptureStreamTestHelper2D();
diff --git a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d_noSSRC.html b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d_noSSRC.html
index 0cf8c2214b..5c2fc94fc7 100644
--- a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d_noSSRC.html
+++ b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_2d_noSSRC.html
@@ -13,7 +13,12 @@ createHTML({
 });
 
 var test;
-runNetworkTest((options) => {
+runNetworkTest(async (options) => {
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
+
   options = options || { };
   options.ssrc = false;
   test = new PeerConnectionTest(options);
diff --git a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_webgl.html b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_webgl.html
index 2bfb909f5b..db03a01cb5 100644
--- a/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_webgl.html
+++ b/dom/media/tests/mochitest/test_peerConnection_captureStream_canvas_webgl.html
@@ -24,7 +24,12 @@ createHTML({
   title: "Canvas(WebGL)::CaptureStream as video-only input to peerconnection"
 });
 
-runNetworkTest(() => {
+runNetworkTest(async () => {
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
+
   var test = new PeerConnectionTest();
   var vremote;
   var h = new CaptureStreamTestHelperWebGL();
diff --git a/dom/media/tests/mochitest/test_peerConnection_disabledVideoPreNegotiation.html b/dom/media/tests/mochitest/test_peerConnection_disabledVideoPreNegotiation.html
index 7e9424e1bb..3d1d65de86 100644
--- a/dom/media/tests/mochitest/test_peerConnection_disabledVideoPreNegotiation.html
+++ b/dom/media/tests/mochitest/test_peerConnection_disabledVideoPreNegotiation.html
@@ -14,7 +14,12 @@
   });
 
   var test;
-  runNetworkTest(function (options) {
+  runNetworkTest(async (options) => {
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
+
     test = new PeerConnectionTest(options);
     test.setMediaConstraints([{video: true}], []);
     test.chain.insertAfter("PC_LOCAL_GUM", function PC_LOCAL_DISABLE_VIDEO() {
diff --git a/dom/media/tests/mochitest/test_peerConnection_multiple_captureStream_canvas_2d.html b/dom/media/tests/mochitest/test_peerConnection_multiple_captureStream_canvas_2d.html
index a1cd1e09cc..8185fe60bd 100644
--- a/dom/media/tests/mochitest/test_peerConnection_multiple_captureStream_canvas_2d.html
+++ b/dom/media/tests/mochitest/test_peerConnection_multiple_captureStream_canvas_2d.html
@@ -16,7 +16,12 @@ createHTML({
 /**
  * Test to verify using multiple capture streams concurrently.
  */
-runNetworkTest(() => {
+runNetworkTest(async () => {
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
+
   var test = new PeerConnectionTest();
   var h = new CaptureStreamTestHelper2D(50, 50);
 
diff --git a/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrack.html b/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrack.html
index a6e32ad778..3cd12fa9bc 100644
--- a/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrack.html
+++ b/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrack.html
@@ -18,6 +18,10 @@
     await pushPrefs(
       ['media.video_loopback_dev', ''],
       ['media.navigator.streams.fake', true]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
 
     const test = new PeerConnectionTest(options);
     const helper = new VideoStreamHelper();
diff --git a/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrackNoBundle.html b/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrackNoBundle.html
index 1afcb018f9..fb45da23e9 100644
--- a/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrackNoBundle.html
+++ b/dom/media/tests/mochitest/test_peerConnection_removeThenAddVideoTrackNoBundle.html
@@ -18,6 +18,10 @@
     await pushPrefs(
       ['media.video_loopback_dev', ''],
       ['media.navigator.streams.fake', true]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
 
     options = options || { };
     options.bundle = false;
diff --git a/dom/media/tests/mochitest/test_peerConnection_removeVideoTrack.html b/dom/media/tests/mochitest/test_peerConnection_removeVideoTrack.html
index ca51bb7b1b..92e36e5831 100644
--- a/dom/media/tests/mochitest/test_peerConnection_removeVideoTrack.html
+++ b/dom/media/tests/mochitest/test_peerConnection_removeVideoTrack.html
@@ -12,7 +12,12 @@
     title: "Renegotiation: remove video track"
   });
 
-  runNetworkTest(function (options) {
+  runNetworkTest(async (options) => {
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
+
     const test = new PeerConnectionTest(options);
     let receivedTrack, element;
     addRenegotiation(test.chain,
diff --git a/dom/media/tests/mochitest/test_peerConnection_renderAfterRenegotiation.html b/dom/media/tests/mochitest/test_peerConnection_renderAfterRenegotiation.html
index 77e2e692c7..4533d50bda 100644
--- a/dom/media/tests/mochitest/test_peerConnection_renderAfterRenegotiation.html
+++ b/dom/media/tests/mochitest/test_peerConnection_renderAfterRenegotiation.html
@@ -35,7 +35,12 @@
     }
   });
 
-  runNetworkTest(function() {
+  runNetworkTest(async () => {
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
+
     v2 = createMediaElement('video', 'v2');
     is(v2.currentTime, 0, "v2.currentTime is zero at outset");
 
diff --git a/dom/media/tests/mochitest/test_peerConnection_replaceVideoThenRenegotiate.html b/dom/media/tests/mochitest/test_peerConnection_replaceVideoThenRenegotiate.html
index 49aaf210ad..dd72a1ba12 100644
--- a/dom/media/tests/mochitest/test_peerConnection_replaceVideoThenRenegotiate.html
+++ b/dom/media/tests/mochitest/test_peerConnection_replaceVideoThenRenegotiate.html
@@ -12,8 +12,13 @@
     title: "Renegotiation: replaceTrack followed by adding a second video stream"
   });
 
-  runNetworkTest(function (options) {
-   pushPrefs(['media.peerconnection.video.min_bitrate_estimate', 180*1000]).then(() => {
+  runNetworkTest(async (options) => {
+    await pushPrefs(['media.peerconnection.video.min_bitrate_estimate', 180*1000]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
+
     const test = new PeerConnectionTest(options);
     test.setMediaConstraints([{video:true}], [{video:true}]);
     const helper = new VideoStreamHelper();
@@ -60,7 +65,6 @@
     );
 
     test.run();
-   });
   });
 
 
diff --git a/dom/media/tests/mochitest/test_peerConnection_setParameters_scaleResolutionDownBy.html b/dom/media/tests/mochitest/test_peerConnection_setParameters_scaleResolutionDownBy.html
index e1c009098f..f78b4bbab1 100644
--- a/dom/media/tests/mochitest/test_peerConnection_setParameters_scaleResolutionDownBy.html
+++ b/dom/media/tests/mochitest/test_peerConnection_setParameters_scaleResolutionDownBy.html
@@ -32,6 +32,10 @@ async function checkScaleDownBy(scale) {
 
 runNetworkTest(async function (options) {
   await pushPrefs(['media.peerconnection.video.lock_scaling', true]);
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
 
   let test = new PeerConnectionTest(options);
   test.setMediaConstraints([{video: true}], []);
diff --git a/dom/media/tests/mochitest/test_peerConnection_trackDisabling_clones.html b/dom/media/tests/mochitest/test_peerConnection_trackDisabling_clones.html
index 627a04907b..4b682cf827 100644
--- a/dom/media/tests/mochitest/test_peerConnection_trackDisabling_clones.html
+++ b/dom/media/tests/mochitest/test_peerConnection_trackDisabling_clones.html
@@ -24,6 +24,10 @@ runNetworkTest(async () => {
     // component.
     ['media.audio_loopback_dev', ''],
     ['media.navigator.streams.fake', true]);
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
 
   var originalStream;
   var localVideoOriginal;
diff --git a/dom/media/tests/mochitest/test_peerConnection_verifyVideoAfterRenegotiation.html b/dom/media/tests/mochitest/test_peerConnection_verifyVideoAfterRenegotiation.html
index 64a2c3c529..fb8949f323 100644
--- a/dom/media/tests/mochitest/test_peerConnection_verifyVideoAfterRenegotiation.html
+++ b/dom/media/tests/mochitest/test_peerConnection_verifyVideoAfterRenegotiation.html
@@ -12,7 +12,12 @@
     title: "Renegotiation: verify video after renegotiation"
   });
 
-runNetworkTest(() => {
+runNetworkTest(async () => {
+  // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+  if (navigator.userAgent.includes("Android")) {
+    await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+  }
+
   var test = new PeerConnectionTest();
 
   var h1 = new CaptureStreamTestHelper2D(50, 50);
diff --git a/dom/media/tests/mochitest/test_peerConnection_videoRenegotiationInactiveAnswer.html b/dom/media/tests/mochitest/test_peerConnection_videoRenegotiationInactiveAnswer.html
index c8ab4b4026..80f4993c35 100644
--- a/dom/media/tests/mochitest/test_peerConnection_videoRenegotiationInactiveAnswer.html
+++ b/dom/media/tests/mochitest/test_peerConnection_videoRenegotiationInactiveAnswer.html
@@ -14,7 +14,12 @@
   });
 
   var test;
-  runNetworkTest(function (options) {
+  runNetworkTest(async (options) => {
+    // [TODO] re-enable HW decoder after bug 1526207 is fixed.
+    if (navigator.userAgent.includes("Android")) {
+      await pushPrefs(["media.navigator.mediadatadecoder_vpx_enabled", false]);
+    }
+
     const emitter = new VideoFrameEmitter();
     const helper = new VideoStreamHelper();
 
diff --git a/dom/media/webaudio/AudioContext.cpp b/dom/media/webaudio/AudioContext.cpp
index 811effc06c..dbdb787b08 100644
--- a/dom/media/webaudio/AudioContext.cpp
+++ b/dom/media/webaudio/AudioContext.cpp
@@ -142,7 +142,11 @@ static float GetSampleRateForAudioContext(bool aIsOffline, float aSampleRate) {
   if (aIsOffline || aSampleRate != 0.0) {
     return aSampleRate;
   } else {
-    return static_cast(CubebUtils::PreferredSampleRate());
+    float rate = static_cast(CubebUtils::PreferredSampleRate());
+    if (nsRFPService::IsResistFingerprintingEnabled()) {
+      return 44100.f;
+    }
+    return rate;
   }
 }
 
@@ -265,10 +269,8 @@ already_AddRefed AudioContext::Constructor(
   }
   sampleRate = aOptions.mSampleRate;
 
-  uint32_t maxChannelCount = std::min(
-      WebAudioUtils::MaxChannelCount, CubebUtils::MaxNumberOfChannels());
   RefPtr object =
-      new AudioContext(window, false, maxChannelCount, 0, sampleRate);
+      new AudioContext(window, false, 2, 0, sampleRate);
   aRv = object->Init();
   if (NS_WARN_IF(aRv.Failed())) {
     return nullptr;
@@ -642,6 +644,9 @@ void AudioContext::UnregisterActiveNode(AudioNode* aNode) {
 }
 
 uint32_t AudioContext::MaxChannelCount() const {
+  if (nsRFPService::IsResistFingerprintingEnabled()) {
+    return 2;
+  }
   return std::min(
       WebAudioUtils::MaxChannelCount,
       mIsOffline ? mNumberOfChannels : CubebUtils::MaxNumberOfChannels());
diff --git a/dom/media/webaudio/AudioDestinationNode.cpp b/dom/media/webaudio/AudioDestinationNode.cpp
index 8fc7856ca6..654b65ebc9 100644
--- a/dom/media/webaudio/AudioDestinationNode.cpp
+++ b/dom/media/webaudio/AudioDestinationNode.cpp
@@ -29,8 +29,6 @@ extern mozilla::LazyLogModule gAudioChannelLog;
 namespace mozilla {
 namespace dom {
 
-static uint8_t gWebAudioOutputKey;
-
 class OfflineDestinationNodeEngine final : public AudioNodeEngine {
  public:
   explicit OfflineDestinationNodeEngine(AudioDestinationNode* aNode)
@@ -338,7 +336,8 @@ AudioDestinationNode::AudioDestinationNode(AudioContext* aContext,
 
   mTrack = AudioNodeTrack::Create(aContext, engine, kTrackFlags, graph);
   mTrack->AddMainThreadListener(this);
-  mTrack->AddAudioOutput(&gWebAudioOutputKey);
+  // null key is fine: only one output per mTrack
+  mTrack->AddAudioOutput(nullptr);
 
   if (aAllowedToStart) {
     graph->NotifyWhenGraphStarted(mTrack);
@@ -447,6 +446,10 @@ void AudioDestinationNode::SetChannelCount(uint32_t aChannelCount,
     return;
   }
 
+  if (aChannelCount == ChannelCount()) {
+    return;
+  }
+
   AudioNode::SetChannelCount(aChannelCount, aRv);
 }
 
@@ -503,7 +506,7 @@ AudioDestinationNode::WindowVolumeChanged(float aVolume, bool aMuted) {
       this, aVolume, aMuted ? "true" : "false");
 
   float volume = aMuted ? 0.0 : aVolume;
-  mTrack->SetAudioOutputVolume(&gWebAudioOutputKey, volume);
+  mTrack->SetAudioOutputVolume(nullptr, volume);
 
   AudioChannelService::AudibleState audible =
       volume > 0.0 ? AudioChannelService::AudibleState::eAudible
diff --git a/dom/media/webaudio/AudioNodeEngine.h b/dom/media/webaudio/AudioNodeEngine.h
index 7fa2709bf7..e8e94d7179 100644
--- a/dom/media/webaudio/AudioNodeEngine.h
+++ b/dom/media/webaudio/AudioNodeEngine.h
@@ -313,18 +313,14 @@ class AudioNodeEngine {
 
   /**
    * Produce the next block of audio samples, given input samples in the aInput
-   * array.  There is one input sample per active port in aInput, in order.
+   * array.  There is one input sample per port in aInput, in order.
    * This is the multi-input/output version of ProcessBlock.  Only one kind
-   * of ProcessBlock is called on each node, depending on whether the
-   * number of inputs and outputs are both 1 or not.
+   * of ProcessBlock is called on each node.  ProcessBlocksOnPorts() is called
+   * instead of ProcessBlock() if either the number of inputs or the number of
+   * outputs is greater than 1.
    *
-   * aInput is always guaranteed to not contain more input AudioChunks than the
-   * maximum number of inputs for the node.  It is the responsibility of the
-   * overrides of this function to make sure they will only add a maximum number
-   * of AudioChunks to aOutput as advertized by the AudioNode implementation.
-   * An engine may choose to produce fewer inputs than advertizes by the
-   * corresponding AudioNode, in which case it will be interpreted as a channel
-   * of silence.
+   * The numbers of AudioBlocks in aInput and aOutput are always guaranteed to
+   * match the numbers of inputs and outputs for the node.
    */
   virtual void ProcessBlocksOnPorts(AudioNodeTrack* aTrack,
                                     Span aInput,
diff --git a/dom/media/webaudio/AudioNodeTrack.cpp b/dom/media/webaudio/AudioNodeTrack.cpp
index e9941228d8..249a100bb0 100644
--- a/dom/media/webaudio/AudioNodeTrack.cpp
+++ b/dom/media/webaudio/AudioNodeTrack.cpp
@@ -333,6 +333,12 @@ uint32_t AudioNodeTrack::ComputedNumberOfChannels(uint32_t aInputChannelCount) {
   }
 }
 
+uint32_t AudioNodeTrack::NumberOfChannels() const {
+  MOZ_ASSERT(GraphImpl()->OnGraphThread());
+
+  return mNumberOfInputChannels;
+}
+
 class AudioNodeTrack::AdvanceAndResumeMessage final : public ControlMessage {
  public:
   AdvanceAndResumeMessage(AudioNodeTrack* aTrack, TrackTime aAdvance)
@@ -515,8 +521,10 @@ void AudioNodeTrack::ProcessInput(GraphTime aFrom, GraphTime aTo,
         mEngine->ProcessBlock(this, aFrom, mInputChunks[0], &mLastChunks[0],
                               &finished);
       } else {
-        mEngine->ProcessBlocksOnPorts(this, mInputChunks, mLastChunks,
-                                      &finished);
+        mEngine->ProcessBlocksOnPorts(
+            this, MakeSpan(mInputChunks.Elements(), mEngine->InputCount()),
+            MakeSpan(mLastChunks.Elements(), mEngine->OutputCount()),
+            &finished);
       }
     }
     for (uint16_t i = 0; i < outputCount; ++i) {
diff --git a/dom/media/webaudio/AudioNodeTrack.h b/dom/media/webaudio/AudioNodeTrack.h
index 04a6ace91c..bba6587d71 100644
--- a/dom/media/webaudio/AudioNodeTrack.h
+++ b/dom/media/webaudio/AudioNodeTrack.h
@@ -118,6 +118,8 @@ class AudioNodeTrack : public ProcessedMediaTrack {
     MOZ_ASSERT(!mAudioParamTrack, "Can only do this once");
     mAudioParamTrack = true;
   }
+  // The value for channelCount on an AudioNode, but on the audio thread side.
+  uint32_t NumberOfChannels() const;
 
   /*
    * Resume track after updating its concept of current time by aAdvance.
diff --git a/dom/media/webaudio/AudioWorkletGlobalScope.cpp b/dom/media/webaudio/AudioWorkletGlobalScope.cpp
index 168c19a756..9faa03547c 100644
--- a/dom/media/webaudio/AudioWorkletGlobalScope.cpp
+++ b/dom/media/webaudio/AudioWorkletGlobalScope.cpp
@@ -11,6 +11,7 @@
 #include "js/Array.h"  // JS::GetArrayLength, JS::IsArrayObject
 #include "mozilla/dom/AudioWorkletGlobalScopeBinding.h"
 #include "mozilla/dom/AudioWorkletProcessor.h"
+#include "mozilla/dom/MessagePort.h"
 #include "mozilla/dom/StructuredCloneHolder.h"
 #include "mozilla/dom/WorkletPrincipals.h"
 #include "mozilla/dom/AudioParamDescriptorBinding.h"
@@ -30,7 +31,7 @@ NS_IMPL_ADDREF_INHERITED(AudioWorkletGlobalScope, WorkletGlobalScope)
 NS_IMPL_RELEASE_INHERITED(AudioWorkletGlobalScope, WorkletGlobalScope)
 
 AudioWorkletGlobalScope::AudioWorkletGlobalScope(AudioWorkletImpl* aImpl)
-    : mImpl(aImpl), mCurrentFrame(0), mCurrentTime(0), mSampleRate(0.0) {}
+    : mImpl(aImpl) {}
 
 bool AudioWorkletGlobalScope::WrapGlobalObject(
     JSContext* aCx, JS::MutableHandle aReflector) {
@@ -116,27 +117,8 @@ void AudioWorkletGlobalScope::RegisterProcessor(
         "processorCtor.prototype"));
     return;
   }
-
   /**
-   * 6. If the result of IsCallable(argument=Get(O=prototype, P="process"))
-   *    is false, throw a TypeError and abort these steps.
-   */
-  JS::Rooted<JS::Value> process(aCx);
-  JS::Rooted<JSObject*> prototypeObject(aCx, &prototype.toObject());
-  if (!JS_GetProperty(aCx, prototypeObject, "process", &process)) {
-    aRv.NoteJSContextException(aCx);
-    return;
-  }
-
-  if (!process.isObjectOrNull() || !JS::IsCallable(process.toObjectOrNull())) {
-    aRv.ThrowTypeError(NS_LITERAL_STRING(
-        "Argument 2 of AudioWorkletGlobalScope.registerProcessor "
-        "constructor.process"));
-    return;
-  }
-
-  /**
-   * 7. Let descriptors be the result of Get(O=processorCtor,
+   * 6. Let parameterDescriptorsValue be the result of Get(O=processorCtor,
    *    P="parameterDescriptors").
    */
   JS::Rooted<JS::Value> descriptors(aCx);
@@ -145,8 +127,12 @@ void AudioWorkletGlobalScope::RegisterProcessor(
     aRv.NoteJSContextException(aCx);
     return;
   }
-
-  /**
+  /** TODO https://bugzilla.mozilla.org/show_bug.cgi?id=1565464
+   * 7. Let parameterDescriptorSequence be the result of the conversion
+   *    from parameterDescriptorsValue to an IDL value of type
+   *    sequence<AudioParamDescriptor>.
+   *
+   * This is now obsolete:
    * 8. If descriptors is neither an array nor undefined, throw a
    *    TypeError and abort these steps.
    */
@@ -201,11 +187,19 @@ void AudioWorkletGlobalScope::RegisterProcessor(
 
 WorkletImpl* AudioWorkletGlobalScope::Impl() const { return mImpl; }
 
-uint64_t AudioWorkletGlobalScope::CurrentFrame() const { return mCurrentFrame; }
+uint64_t AudioWorkletGlobalScope::CurrentFrame() const {
+  AudioNodeTrack* destinationTrack = mImpl->DestinationTrack();
+  GraphTime processedTime = destinationTrack->Graph()->ProcessedTime();
+  return destinationTrack->GraphTimeToTrackTime(processedTime);
+}
 
-double AudioWorkletGlobalScope::CurrentTime() const { return mCurrentTime; }
+double AudioWorkletGlobalScope::CurrentTime() const {
+  return static_cast<double>(CurrentFrame()) / SampleRate();
+}
 
-float AudioWorkletGlobalScope::SampleRate() const { return mSampleRate; }
+float AudioWorkletGlobalScope::SampleRate() const {
+  return static_cast<float>(mImpl->DestinationTrack()->mSampleRate);
+}
 
 AudioParamDescriptorMap AudioWorkletGlobalScope::DescriptorsFromJS(
     JSContext* aCx, const JS::Rooted& aDescriptors,
@@ -286,12 +280,12 @@ AudioParamDescriptorMap AudioWorkletGlobalScope::DescriptorsFromJS(
 }
 
 bool AudioWorkletGlobalScope::ConstructProcessor(
-    const nsAString& aName,
-    NotNull<StructuredCloneHolder*> aOptionsSerialization,
+    const nsAString& aName, NotNull<StructuredCloneHolder*> aSerializedOptions,
+    UniqueMessagePortId& aPortIdentifier,
     JS::MutableHandle<JSObject*> aRetProcessor) {
   /**
-   * See the second algorithm at
-   * https://webaudio.github.io/web-audio-api/#instantiation-of-AudioWorkletNode-and-AudioWorkletProcessor
+   * See
+   * https://webaudio.github.io/web-audio-api/#AudioWorkletProcessor-instantiation
    */
   AutoJSAPI jsapi;
   if (NS_WARN_IF(!jsapi.Init(this))) {
@@ -299,52 +293,57 @@ bool AudioWorkletGlobalScope::ConstructProcessor(
   }
   JSContext* cx = jsapi.cx();
   ErrorResult rv;
-  /** TODO https://bugzilla.mozilla.org/show_bug.cgi?id=1565956
-   * 1. Let processorPort be
-   *    StructuredDeserializeWithTransfer(processorPortSerialization,
-   *                                      the current Realm).
-   */
   /**
-   * 2. Let options be StructuredDeserialize(optionsSerialization,
-   *                                         the current Realm).
+   * 4. Let deserializedPort be the result of
+   *    StructuredDeserialize(serializedPort, the current Realm).
    */
-  JS::Rooted<JS::Value> optionsVal(cx);
-  aOptionsSerialization->Read(this, cx, &optionsVal, rv);
+  RefPtr<MessagePort> deserializedPort =
+      MessagePort::Create(this, aPortIdentifier, rv);
+  if (NS_WARN_IF(rv.MaybeSetPendingException(cx))) {
+    return false;
+  }
+  /**
+   * 5. Let deserializedOptions be the result of
+   *    StructuredDeserialize(serializedOptions, the current Realm).
+   */
+  JS::Rooted<JS::Value> deserializedOptions(cx);
+  aSerializedOptions->Read(this, cx, &deserializedOptions, rv);
   if (rv.MaybeSetPendingException(cx)) {
     return false;
   }
   /**
-   * 3. Let processorConstructor be the result of looking up nodeName on the
+   * 6. Let processorCtor be the result of looking up processorName on the
    *    AudioWorkletGlobalScope's node name to processor definition map.
    */
-  RefPtr<AudioWorkletProcessorConstructor> processorConstructor =
+  RefPtr<AudioWorkletProcessorConstructor> processorCtor =
       mNameToProcessorMap.Get(aName);
   // AudioWorkletNode has already checked the definition exists.
   // See also https://github.com/WebAudio/web-audio-api/issues/1854
-  MOZ_ASSERT(processorConstructor);
+  MOZ_ASSERT(processorCtor);
   /**
-   * 4. Let processor be the result of Construct(processorConstructor,
-   *                                             « options »).
+   * 7. Store nodeReference and deserializedPort to node reference and
+   *    transferred port of this AudioWorkletGlobalScope's pending processor
+   *    construction data respectively.
+   */
+  // |nodeReference| is not required here because the "processorerror" event
+  // is thrown by WorkletNodeEngine::ConstructProcessor().
+  mPortForProcessor = std::move(deserializedPort);
+  /**
+   * 8. Construct a callback function from processorCtor with the argument
+   *    of deserializedOptions.
    */
   // The options were an object before serialization and so will be an object
   // if deserialization succeeded above.  toObject() asserts.
-  JS::Rooted<JSObject*> options(cx, &optionsVal.toObject());
-  // Using https://heycam.github.io/webidl/#construct-a-callback-function
-  // See
-  // https://github.com/WebAudio/web-audio-api/pull/1843#issuecomment-478590304
-  RefPtr<AudioWorkletProcessor> processor = processorConstructor->Construct(
+  JS::Rooted<JSObject*> options(cx, &deserializedOptions.toObject());
+  RefPtr<AudioWorkletProcessor> processor = processorCtor->Construct(
       options, rv, "AudioWorkletProcessor construction",
       CallbackFunction::eReportExceptions);
+  // https://github.com/WebAudio/web-audio-api/issues/2096
+  mPortForProcessor = nullptr;
   if (rv.Failed()) {
     rv.SuppressException();  // already reported
     return false;
   }
-  /** TODO https://bugzilla.mozilla.org/show_bug.cgi?id=1565956
-   * but see https://github.com/WebAudio/web-audio-api/issues/1973
-   *
-   * 5. Set processor’s port to processorPort.
-   */
-
   JS::Rooted<JS::Value> processorVal(cx);
   if (NS_WARN_IF(!ToJSValue(cx, processor, &processorVal))) {
     return false;
@@ -354,5 +353,9 @@ bool AudioWorkletGlobalScope::ConstructProcessor(
   return true;
 }
 
+RefPtr<MessagePort> AudioWorkletGlobalScope::TakePortForProcessorCtor() {
+  return std::move(mPortForProcessor);
+}
+
 }  // namespace dom
 }  // namespace mozilla
diff --git a/dom/media/webaudio/AudioWorkletGlobalScope.h b/dom/media/webaudio/AudioWorkletGlobalScope.h
index cf1f110451..51e19b2541 100644
--- a/dom/media/webaudio/AudioWorkletGlobalScope.h
+++ b/dom/media/webaudio/AudioWorkletGlobalScope.h
@@ -17,7 +17,9 @@ class AudioWorkletImpl;
 namespace dom {
 
 class AudioWorkletProcessorConstructor;
+class MessagePort;
 class StructuredCloneHolder;
+class UniqueMessagePortId;
 
 class AudioWorkletGlobalScope final : public WorkletGlobalScope {
  public:
@@ -46,9 +48,14 @@ class AudioWorkletGlobalScope final : public WorkletGlobalScope {
   // compartment for the realm of this global.  Returns false on failure.
   MOZ_CAN_RUN_SCRIPT
   bool ConstructProcessor(const nsAString& aName,
-                          NotNull<StructuredCloneHolder*> aOptionsSerialization,
+                          NotNull<StructuredCloneHolder*> aSerializedOptions,
+                          UniqueMessagePortId& aPortIdentifier,
                           JS::MutableHandle<JSObject*> aRetProcessor);
 
+  // Returns null if not called during ConstructProcessor() or if the port has
+  // already been taken.
+  RefPtr<MessagePort> TakePortForProcessorCtor();
+
  private:
   ~AudioWorkletGlobalScope() = default;
 
@@ -61,13 +68,13 @@ class AudioWorkletGlobalScope final : public WorkletGlobalScope {
 
   const RefPtr mImpl;
 
-  uint64_t mCurrentFrame;
-  double mCurrentTime;
-  float mSampleRate;
-
   typedef nsRefPtrHashtable<nsStringHashKey, AudioWorkletProcessorConstructor>
       NodeNameToProcessorDefinitionMap;
   NodeNameToProcessorDefinitionMap mNameToProcessorMap;
+  // https://webaudio.github.io/web-audio-api/#pending-processor-construction-data-transferred-port
+  // This does not need to be traversed during cycle-collection because it is
+  // only set while this AudioWorkletGlobalScope is on the stack.
+  RefPtr mPortForProcessor;
 };
 
 }  // namespace dom
diff --git a/dom/media/webaudio/AudioWorkletImpl.h b/dom/media/webaudio/AudioWorkletImpl.h
index 88d4167ade..7ae968b605 100644
--- a/dom/media/webaudio/AudioWorkletImpl.h
+++ b/dom/media/webaudio/AudioWorkletImpl.h
@@ -35,7 +35,7 @@ class AudioWorkletImpl final : public WorkletImpl {
   }
 
   // Any thread:
-  AudioNodeTrack* DestinationTrack() { return mDestinationTrack; }
+  AudioNodeTrack* DestinationTrack() const { return mDestinationTrack; }
 
  protected:
   // Execution thread only.
diff --git a/dom/media/webaudio/AudioWorkletNode.cpp b/dom/media/webaudio/AudioWorkletNode.cpp
index 83f656f2ce..d3fedd1b2a 100644
--- a/dom/media/webaudio/AudioWorkletNode.cpp
+++ b/dom/media/webaudio/AudioWorkletNode.cpp
@@ -7,12 +7,14 @@
 #include "AudioParamMap.h"
 #include "js/Array.h"
 #include "mozilla/dom/AudioWorkletNodeBinding.h"
+#include "mozilla/dom/MessageChannel.h"
 #include "mozilla/dom/MessagePort.h"
 
 namespace mozilla {
 namespace dom {
 
 NS_IMPL_ISUPPORTS_CYCLE_COLLECTION_INHERITED_0(AudioWorkletNode, AudioNode)
+NS_IMPL_CYCLE_COLLECTION_INHERITED(AudioWorkletNode, AudioNode, mPort)
 
 class WorkletNodeEngine final : public AudioNodeEngine {
  public:
@@ -25,9 +27,10 @@ class WorkletNodeEngine final : public AudioNodeEngine {
   }
 
   MOZ_CAN_RUN_SCRIPT
-  void ConstructProcessor(
-      AudioWorkletImpl* aWorkletImpl, const nsAString& aName,
-      NotNull<StructuredCloneHolder*> aOptionsSerialization);
+  void ConstructProcessor(AudioWorkletImpl* aWorkletImpl,
+                          const nsAString& aName,
+                          NotNull<StructuredCloneHolder*> aSerializedOptions,
+                          UniqueMessagePortId& aPortIdentifier);
 
   void ProcessBlock(AudioNodeTrack* aTrack, GraphTime aFrom,
                     const AudioBlock& aInput, AudioBlock* aOutput,
@@ -117,13 +120,15 @@ void WorkletNodeEngine::SendProcessorError() {
 
 void WorkletNodeEngine::ConstructProcessor(
     AudioWorkletImpl* aWorkletImpl, const nsAString& aName,
-    NotNull aOptionsSerialization) {
+    NotNull aSerializedOptions,
+    UniqueMessagePortId& aPortIdentifier) {
   MOZ_ASSERT(mInputs.mPorts.empty() && mOutputs.mPorts.empty());
   RefPtr<AudioWorkletGlobalScope> global = aWorkletImpl->GetGlobalScope();
   MOZ_ASSERT(global);  // global has already been used to register processor
   JS::RootingContext* cx = RootingCx();
   mProcessor.init(cx);
-  if (!global->ConstructProcessor(aName, aOptionsSerialization, &mProcessor) ||
+  if (!global->ConstructProcessor(aName, aSerializedOptions, aPortIdentifier,
+                                  &mProcessor) ||
       // mInputs and mOutputs outer arrays are fixed length and so much of the
       // initialization need only be performed once (i.e. here).
       NS_WARN_IF(!mInputs.mPorts.growBy(InputCount())) ||
@@ -369,27 +374,8 @@ already_AddRefed AudioWorkletNode::Constructor(
     const GlobalObject& aGlobal, AudioContext& aAudioContext,
     const nsAString& aName, const AudioWorkletNodeOptions& aOptions,
     ErrorResult& aRv) {
-  if (aOptions.mNumberOfInputs == 0 && aOptions.mNumberOfOutputs == 0) {
-    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-    return nullptr;
-  }
-
-  if (aOptions.mOutputChannelCount.WasPassed()) {
-    if (aOptions.mOutputChannelCount.Value().Length() !=
-        aOptions.mNumberOfOutputs) {
-      aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
-      return nullptr;
-    }
-
-    for (uint32_t channelCount : aOptions.mOutputChannelCount.Value()) {
-      if (channelCount == 0 || channelCount > WebAudioUtils::MaxChannelCount) {
-        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
-        return nullptr;
-      }
-    }
-  }
   /**
-   * 2. If nodeName does not exists as a key in the BaseAudioContext’s node
+   * 1. If nodeName does not exist as a key in the BaseAudioContext’s node
    *    name to parameter descriptor map, throw a NotSupportedError exception
    *    and abort these steps.
    */
@@ -400,6 +386,44 @@ already_AddRefed AudioWorkletNode::Constructor(
     return nullptr;
   }
 
+  // See https://github.com/WebAudio/web-audio-api/issues/2074 for ordering.
+  RefPtr<AudioWorkletNode> audioWorkletNode =
+      new AudioWorkletNode(&aAudioContext, aName, aOptions);
+  audioWorkletNode->Initialize(aOptions, aRv);
+  if (NS_WARN_IF(aRv.Failed())) {
+    return nullptr;
+  }
+
+  /**
+   * 3. Configure input, output and output channels of node with options.
+   */
+  if (aOptions.mNumberOfInputs == 0 && aOptions.mNumberOfOutputs == 0) {
+    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+    return nullptr;
+  }
+
+  if (aOptions.mOutputChannelCount.WasPassed()) {
+    /**
+     * 1. If any value in outputChannelCount is zero or greater than the
+     *    implementation’s maximum number of channels, throw a
+     *    NotSupportedError and abort the remaining steps.
+     */
+    for (uint32_t channelCount : aOptions.mOutputChannelCount.Value()) {
+      if (channelCount == 0 || channelCount > WebAudioUtils::MaxChannelCount) {
+        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
+        return nullptr;
+      }
+    }
+    /**
+     * 2. If the length of outputChannelCount does not equal numberOfOutputs,
+     *    throw an IndexSizeError and abort the remaining steps.
+     */
+    if (aOptions.mOutputChannelCount.Value().Length() !=
+        aOptions.mNumberOfOutputs) {
+      aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
+      return nullptr;
+    }
+  }
   // MTG does not support more than UINT16_MAX inputs or outputs.
   if (aOptions.mNumberOfInputs > UINT16_MAX) {
     aRv.ThrowRangeError(
@@ -412,16 +436,25 @@ already_AddRefed AudioWorkletNode::Constructor(
     return nullptr;
   }
 
-  RefPtr<AudioWorkletNode> audioWorkletNode =
-      new AudioWorkletNode(&aAudioContext, aName, aOptions);
-
-  audioWorkletNode->Initialize(aOptions, aRv);
+  /**
+   * 4. Let messageChannel be a new MessageChannel.
+   */
+  RefPtr<MessageChannel> messageChannel =
+      MessageChannel::Constructor(aGlobal, aRv);
   if (NS_WARN_IF(aRv.Failed())) {
     return nullptr;
   }
-
+  /* 5. Let nodePort be the value of messageChannel’s port1 attribute.
+   * 6. Let processorPortOnThisSide be the value of messageChannel’s port2
+   *    attribute.
+   * 7. Let serializedProcessorPort be the result of
+   *    StructuredSerializeWithTransfer(processorPortOnThisSide,
+   *                                    « processorPortOnThisSide »).
+   */
+  UniqueMessagePortId processorPortId;
+  messageChannel->Port2()->CloneAndDisentangle(processorPortId);
   /**
-   * 7. Let optionsSerialization be the result of StructuredSerialize(options).
+   * 8. Convert options dictionary to optionsObject.
    */
   JSContext* cx = aGlobal.Context();
   JS::Rooted<JS::Value> optionsVal(cx);
@@ -429,17 +462,25 @@ already_AddRefed AudioWorkletNode::Constructor(
     aRv.NoteJSContextException(cx);
     return nullptr;
   }
+  /**
+   * 9. Let serializedOptions be the result of
+   *    StructuredSerialize(optionsObject).
+   */
   // StructuredCloneHolder does not have a move constructor.  Instead allocate
   // memory so that the pointer can be passed to the rendering thread.
-  UniquePtr<StructuredCloneHolder> optionsSerialization =
+  UniquePtr<StructuredCloneHolder> serializedOptions =
       MakeUnique<StructuredCloneHolder>(
           StructuredCloneHolder::CloningSupported,
           StructuredCloneHolder::TransferringNotSupported,
           JS::StructuredCloneScope::SameProcessDifferentThread);
-  optionsSerialization->Write(cx, optionsVal, aRv);
+  serializedOptions->Write(cx, optionsVal, aRv);
   if (NS_WARN_IF(aRv.Failed())) {
     return nullptr;
   }
+  /**
+   * 10. Set node’s port to nodePort.
+   */
+  audioWorkletNode->mPort = messageChannel->Port1();
 
   auto engine =
       new WorkletNodeEngine(audioWorkletNode, aOptions.mOutputChannelCount);
@@ -448,8 +489,10 @@ already_AddRefed AudioWorkletNode::Constructor(
       aAudioContext.Graph());
 
   /**
-   * 10. Queue a control message to create an AudioWorkletProcessor, given
-   *     nodeName, processorPortSerialization, optionsSerialization, and node.
+   * 12. Queue a control message to invoke the constructor of the
+   *     corresponding AudioWorkletProcessor with the processor construction
+   *     data that consists of: nodeName, node, serializedOptions, and
+   *     serializedProcessorPort.
    */
   Worklet* worklet = aAudioContext.GetAudioWorklet(aRv);
   MOZ_ASSERT(worklet, "Worklet already existed and so getter shouldn't fail.");
@@ -460,11 +503,12 @@ already_AddRefed AudioWorkletNode::Constructor(
       // See bug 1535398.
       [track = audioWorkletNode->mTrack,
        workletImpl = RefPtr<AudioWorkletImpl>(workletImpl),
-       name = nsString(aName), options = std::move(optionsSerialization)]()
-          MOZ_CAN_RUN_SCRIPT_BOUNDARY {
+       name = nsString(aName), options = std::move(serializedOptions),
+       portId = std::move(processorPortId)]()
+          MOZ_CAN_RUN_SCRIPT_BOUNDARY mutable {
             auto engine = static_cast<WorkletNodeEngine*>(track->Engine());
             engine->ConstructProcessor(workletImpl, name,
-                                       WrapNotNull(options.get()));
+                                       WrapNotNull(options.get()), portId);
           }));
 
   return audioWorkletNode.forget();
@@ -475,11 +519,6 @@ AudioParamMap* AudioWorkletNode::GetParameters(ErrorResult& aRv) const {
   return nullptr;
 }
 
-MessagePort* AudioWorkletNode::GetPort(ErrorResult& aRv) const {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return nullptr;
-}
-
 JSObject* AudioWorkletNode::WrapObject(JSContext* aCx,
                                        JS::Handle aGivenProto) {
   return AudioWorkletNode_Binding::Wrap(aCx, this, aGivenProto);
diff --git a/dom/media/webaudio/AudioWorkletNode.h b/dom/media/webaudio/AudioWorkletNode.h
index 9b60ec422a..68163fec85 100644
--- a/dom/media/webaudio/AudioWorkletNode.h
+++ b/dom/media/webaudio/AudioWorkletNode.h
@@ -17,6 +17,7 @@ class MessagePort;
 class AudioWorkletNode : public AudioNode {
  public:
   NS_DECL_ISUPPORTS_INHERITED
+  NS_DECL_CYCLE_COLLECTION_CLASS_INHERITED(AudioWorkletNode, AudioNode)
 
   IMPL_EVENT_HANDLER(processorerror)
 
@@ -27,7 +28,7 @@ class AudioWorkletNode : public AudioNode {
 
   AudioParamMap* GetParameters(ErrorResult& aRv) const;
 
-  MessagePort* GetPort(ErrorResult& aRv) const;
+  MessagePort* Port() const { return mPort; };
 
   JSObject* WrapObject(JSContext* aCx,
                        JS::Handle aGivenProto) override;
@@ -46,6 +47,7 @@ class AudioWorkletNode : public AudioNode {
   ~AudioWorkletNode() = default;
 
   nsString mNodeName;
+  RefPtr mPort;
   uint16_t mInputCount;
   uint16_t mOutputCount;
 };
diff --git a/dom/media/webaudio/AudioWorkletProcessor.cpp b/dom/media/webaudio/AudioWorkletProcessor.cpp
index abfa421cb3..735ea8768e 100644
--- a/dom/media/webaudio/AudioWorkletProcessor.cpp
+++ b/dom/media/webaudio/AudioWorkletProcessor.cpp
@@ -12,22 +12,31 @@
 namespace mozilla {
 namespace dom {
 
-NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(AudioWorkletProcessor, mParent)
+NS_IMPL_CYCLE_COLLECTION_WRAPPERCACHE(AudioWorkletProcessor, mParent, mPort)
 
 NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(AudioWorkletProcessor, AddRef)
 NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(AudioWorkletProcessor, Release)
 
-AudioWorkletProcessor::AudioWorkletProcessor(nsIGlobalObject* aParent)
-    : mParent(aParent) {}
+AudioWorkletProcessor::AudioWorkletProcessor(nsIGlobalObject* aParent,
+                                             MessagePort* aPort)
+    : mParent(aParent), mPort(aPort) {}
+
+AudioWorkletProcessor::~AudioWorkletProcessor() = default;
 
 /* static */
 already_AddRefed<AudioWorkletProcessor> AudioWorkletProcessor::Constructor(
-    const GlobalObject& aGlobal, const AudioWorkletNodeOptions& aOptions) {
-  nsCOMPtr global = do_QueryInterface(aGlobal.GetAsSupports());
+    const GlobalObject& aGlobal, ErrorResult& aRv) {
+  nsCOMPtr<nsIGlobalObject> global =
+      do_QueryInterface(aGlobal.GetAsSupports());
   MOZ_ASSERT(global);
-
+  RefPtr<MessagePort> port = static_cast<AudioWorkletGlobalScope*>(global.get())
+                                 ->TakePortForProcessorCtor();
+  if (!port) {
+    aRv.ThrowTypeError();
+    return nullptr;
+  }
   RefPtr<AudioWorkletProcessor> audioWorkletProcessor =
-      new AudioWorkletProcessor(global);
+      new AudioWorkletProcessor(global, port);
 
   return audioWorkletProcessor.forget();
 }
@@ -37,10 +46,5 @@ JSObject* AudioWorkletProcessor::WrapObject(JSContext* aCx,
   return AudioWorkletProcessor_Binding::Wrap(aCx, this, aGivenProto);
 }
 
-MessagePort* AudioWorkletProcessor::GetPort(ErrorResult& aRv) const {
-  aRv.Throw(NS_ERROR_NOT_IMPLEMENTED);
-  return nullptr;
-}
-
 }  // namespace dom
 }  // namespace mozilla
diff --git a/dom/media/webaudio/AudioWorkletProcessor.h b/dom/media/webaudio/AudioWorkletProcessor.h
index 5efdfbe420..878b3146d8 100644
--- a/dom/media/webaudio/AudioWorkletProcessor.h
+++ b/dom/media/webaudio/AudioWorkletProcessor.h
@@ -26,19 +26,20 @@ class AudioWorkletProcessor final : public nsWrapperCache {
   NS_DECL_CYCLE_COLLECTION_SCRIPT_HOLDER_NATIVE_CLASS(AudioWorkletProcessor)
 
   static already_AddRefed<AudioWorkletProcessor> Constructor(
-      const GlobalObject& aGlobal, const AudioWorkletNodeOptions& aOptions);
+      const GlobalObject& aGlobal, ErrorResult& aRv);
 
   nsIGlobalObject* GetParentObject() const { return mParent; }
 
   JSObject* WrapObject(JSContext* aCx,
                        JS::Handle aGivenProto) override;
 
-  MessagePort* GetPort(ErrorResult& aRv) const;
+  MessagePort* Port() const { return mPort; };
 
  private:
-  explicit AudioWorkletProcessor(nsIGlobalObject* aParent);
-  ~AudioWorkletProcessor() = default;
+  explicit AudioWorkletProcessor(nsIGlobalObject* aParent, MessagePort* aPort);
+  ~AudioWorkletProcessor();
   nsCOMPtr mParent;
+  RefPtr mPort;
 };
 
 }  // namespace dom
diff --git a/dom/media/webm/EbmlComposer.cpp b/dom/media/webm/EbmlComposer.cpp
index 5884ec73db..ac4076c050 100644
--- a/dom/media/webm/EbmlComposer.cpp
+++ b/dom/media/webm/EbmlComposer.cpp
@@ -54,14 +54,15 @@ void EbmlComposer::GenerateHeader() {
           if (mCodecPrivateData.Length() > 0) {
             // Extract the pre-skip from mCodecPrivateData
             // then convert it to nanoseconds.
-            // Details in OpusTrackEncoder.cpp.
-            mCodecDelay = (uint64_t)LittleEndian::readUint16(
-                              mCodecPrivateData.Elements() + 10) *
-                          PR_NSEC_PER_SEC / 48000;
+            // For more details see
+            // https://tools.ietf.org/html/rfc7845#section-4.2
+            uint64_t codecDelay = (uint64_t)LittleEndian::readUint16(
+                                      mCodecPrivateData.Elements() + 10) *
+                                  PR_NSEC_PER_SEC / 48000;
             // Fixed 80ms, convert into nanoseconds.
             uint64_t seekPreRoll = 80 * PR_NSEC_PER_MSEC;
             writeAudioTrack(&ebml, 0x2, 0x0, "A_OPUS", mSampleFreq, mChannels,
-                            mCodecDelay, seekPreRoll,
+                            codecDelay, seekPreRoll,
                             mCodecPrivateData.Elements(),
                             mCodecPrivateData.Length());
           }
@@ -113,7 +114,7 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
   EbmlGlobal ebml;
   ebml.offset = 0;
 
-  auto frameType = aFrame->GetFrameType();
+  auto frameType = aFrame->mFrameType;
   const bool isVP8IFrame = (frameType == EncodedFrame::FrameType::VP8_I_FRAME);
   const bool isVP8PFrame = (frameType == EncodedFrame::FrameType::VP8_P_FRAME);
   const bool isOpus = (frameType == EncodedFrame::FrameType::OPUS_AUDIO_FRAME);
@@ -127,11 +128,7 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
     return;
   }
 
-  int64_t timeCode =
-      aFrame->GetTimeStamp() / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
-  if (isOpus) {
-    timeCode += mCodecDelay / PR_NSEC_PER_MSEC;
-  }
+  int64_t timeCode = aFrame->mTime / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
 
   if (!mHasVideo && timeCode >= FLUSH_AUDIO_ONLY_AFTER_MS) {
     MOZ_ASSERT(mHasAudio);
@@ -156,15 +153,11 @@ void EbmlComposer::WriteSimpleBlock(EncodedFrame* aFrame) {
     mClusterHeaderIndex = mClusters.Length() - 1;
     mClusterLengthLoc = ebmlLoc.offset;
     // if timeCode didn't under/overflow before, it shouldn't after this
-    mClusterTimecode = aFrame->GetTimeStamp() / PR_USEC_PER_MSEC;
+    mClusterTimecode = aFrame->mTime / PR_USEC_PER_MSEC;
     Ebml_SerializeUnsigned(&ebml, Timecode, mClusterTimecode);
 
     // Can't under-/overflow now
-    timeCode =
-        aFrame->GetTimeStamp() / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
-    if (isOpus) {
-      timeCode += mCodecDelay / PR_NSEC_PER_MSEC;
-    }
+    timeCode = aFrame->mTime / ((int)PR_USEC_PER_MSEC) - mClusterTimecode;
 
     mWritingCluster = true;
   }
diff --git a/dom/media/webm/EbmlComposer.h b/dom/media/webm/EbmlComposer.h
index d0a39142d0..057c1d60f3 100644
--- a/dom/media/webm/EbmlComposer.h
+++ b/dom/media/webm/EbmlComposer.h
@@ -37,7 +37,8 @@ class EbmlComposer {
   /*
    * Insert media encoded buffer into muxer and it would be package
    * into SimpleBlock. If no cluster is opened, new cluster will start for
-   * writing.
+   * writing. Frames passed to this function should already have any codec delay
+   * applied.
    */
   void WriteSimpleBlock(EncodedFrame* aFrame);
   /*
@@ -67,8 +68,6 @@ class EbmlComposer {
   uint64_t mClusterLengthLoc = 0;
   // Audio codec specific header data.
   nsTArray mCodecPrivateData;
-  // Codec delay in nanoseconds.
-  uint64_t mCodecDelay = 0;
 
   // The timecode of the cluster.
   uint64_t mClusterTimecode = 0;
diff --git a/dom/media/webm/WebMDemuxer.cpp b/dom/media/webm/WebMDemuxer.cpp
index b656a0bb8b..660769820a 100644
--- a/dom/media/webm/WebMDemuxer.cpp
+++ b/dom/media/webm/WebMDemuxer.cpp
@@ -1254,6 +1254,6 @@ int64_t WebMTrackDemuxer::GetEvictionOffset(const TimeUnit& aTime) {
 
   return offset;
 }
+}  // namespace mozilla
 
 #undef WEBM_DEBUG
-}  // namespace mozilla
diff --git a/dom/media/webm/WebMWriter.cpp b/dom/media/webm/WebMWriter.cpp
index db59bb3db0..e746b2e214 100644
--- a/dom/media/webm/WebMWriter.cpp
+++ b/dom/media/webm/WebMWriter.cpp
@@ -9,8 +9,7 @@
 
 namespace mozilla {
 
-WebMWriter::WebMWriter(uint32_t aTrackTypes) : ContainerWriter() {
-  mMetadataRequiredFlag = aTrackTypes;
+WebMWriter::WebMWriter() : ContainerWriter() {
   mEbmlComposer = new EbmlComposer();
 }
 
@@ -18,17 +17,16 @@ WebMWriter::~WebMWriter() {
   // Out-of-line dtor so mEbmlComposer nsAutoPtr can delete a complete type.
 }
 
-nsresult WebMWriter::WriteEncodedTrack(const EncodedFrameContainer& aData,
-                                       uint32_t aFlags) {
+nsresult WebMWriter::WriteEncodedTrack(
+    const nsTArray<RefPtr<EncodedFrame>>& aData, uint32_t aFlags) {
   AUTO_PROFILER_LABEL("WebMWriter::WriteEncodedTrack", OTHER);
-  for (uint32_t i = 0; i < aData.GetEncodedFrames().Length(); i++) {
-    mEbmlComposer->WriteSimpleBlock(
-        aData.GetEncodedFrames().ElementAt(i).get());
+  for (uint32_t i = 0; i < aData.Length(); i++) {
+    mEbmlComposer->WriteSimpleBlock(aData.ElementAt(i).get());
   }
   return NS_OK;
 }
 
-nsresult WebMWriter::GetContainerData(nsTArray<nsTArray<uint8_t> >* aOutputBufs,
+nsresult WebMWriter::GetContainerData(nsTArray<nsTArray<uint8_t>>* aOutputBufs,
                                       uint32_t aFlags) {
   AUTO_PROFILER_LABEL("WebMWriter::GetContainerData", OTHER);
   mEbmlComposer->ExtractBuffer(aOutputBufs, aFlags);
@@ -38,40 +36,75 @@ nsresult WebMWriter::GetContainerData(nsTArray >* aOutputBufs,
   return NS_OK;
 }
 
-nsresult WebMWriter::SetMetadata(TrackMetadataBase* aMetadata) {
-  MOZ_ASSERT(aMetadata);
+nsresult WebMWriter::SetMetadata(
+    const nsTArray<RefPtr<TrackMetadataBase>>& aMetadata) {
   AUTO_PROFILER_LABEL("WebMWriter::SetMetadata", OTHER);
+  MOZ_DIAGNOSTIC_ASSERT(!aMetadata.IsEmpty());
 
-  if (aMetadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
-    VP8Metadata* meta = static_cast<VP8Metadata*>(aMetadata);
-    MOZ_ASSERT(meta, "Cannot find vp8 encoder metadata");
-    mEbmlComposer->SetVideoConfig(meta->mWidth, meta->mHeight,
-                                  meta->mDisplayWidth, meta->mDisplayHeight);
-    mMetadataRequiredFlag =
-        mMetadataRequiredFlag & ~ContainerWriter::CREATE_VIDEO_TRACK;
+  // Integrity checks
+  bool bad = false;
+  for (const RefPtr<TrackMetadataBase>& metadata : aMetadata) {
+    MOZ_ASSERT(metadata);
+
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
+      VP8Metadata* meta = static_cast<VP8Metadata*>(metadata.get());
+      if (meta->mWidth == 0 || meta->mHeight == 0 || meta->mDisplayWidth == 0 ||
+          meta->mDisplayHeight == 0) {
+        bad = true;
+      }
+    }
+
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
+      VorbisMetadata* meta = static_cast<VorbisMetadata*>(metadata.get());
+      if (meta->mSamplingFrequency == 0 || meta->mChannels == 0 ||
+          meta->mData.IsEmpty()) {
+        bad = true;
+      }
+    }
+
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
+      OpusMetadata* meta = static_cast<OpusMetadata*>(metadata.get());
+      if (meta->mSamplingFrequency == 0 || meta->mChannels == 0 ||
+          meta->mIdHeader.IsEmpty()) {
+        bad = true;
+      }
+    }
+  }
+  if (bad) {
+    return NS_ERROR_FAILURE;
   }
 
-  if (aMetadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
-    VorbisMetadata* meta = static_cast<VorbisMetadata*>(aMetadata);
-    MOZ_ASSERT(meta, "Cannot find vorbis encoder metadata");
-    mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
-    mEbmlComposer->SetAudioCodecPrivateData(meta->mData);
-    mMetadataRequiredFlag =
-        mMetadataRequiredFlag & ~ContainerWriter::CREATE_AUDIO_TRACK;
-  }
+  // Storing
+  DebugOnly<bool> hasAudio = false;
+  DebugOnly<bool> hasVideo = false;
+  for (const RefPtr<TrackMetadataBase>& metadata : aMetadata) {
+    MOZ_ASSERT(metadata);
 
-  if (aMetadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
-    OpusMetadata* meta = static_cast<OpusMetadata*>(aMetadata);
-    MOZ_ASSERT(meta, "Cannot find Opus encoder metadata");
-    mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
-    mEbmlComposer->SetAudioCodecPrivateData(meta->mIdHeader);
-    mMetadataRequiredFlag =
-        mMetadataRequiredFlag & ~ContainerWriter::CREATE_AUDIO_TRACK;
-  }
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_VP8) {
+      MOZ_ASSERT(!hasVideo);
+      VP8Metadata* meta = static_cast<VP8Metadata*>(metadata.get());
+      mEbmlComposer->SetVideoConfig(meta->mWidth, meta->mHeight,
+                                    meta->mDisplayWidth, meta->mDisplayHeight);
+      hasVideo = true;
+    }
 
-  if (!mMetadataRequiredFlag) {
-    mEbmlComposer->GenerateHeader();
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_VORBIS) {
+      MOZ_ASSERT(!hasAudio);
+      VorbisMetadata* meta = static_cast<VorbisMetadata*>(metadata.get());
+      mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
+      mEbmlComposer->SetAudioCodecPrivateData(meta->mData);
+      hasAudio = true;
+    }
+
+    if (metadata->GetKind() == TrackMetadataBase::METADATA_OPUS) {
+      MOZ_ASSERT(!hasAudio);
+      OpusMetadata* meta = static_cast<OpusMetadata*>(metadata.get());
+      mEbmlComposer->SetAudioConfig(meta->mSamplingFrequency, meta->mChannels);
+      mEbmlComposer->SetAudioCodecPrivateData(meta->mIdHeader);
+      hasAudio = true;
+    }
   }
+  mEbmlComposer->GenerateHeader();
   return NS_OK;
 }
 
diff --git a/dom/media/webm/WebMWriter.h b/dom/media/webm/WebMWriter.h
index f3b72bc9dc..ccd5ca03ca 100644
--- a/dom/media/webm/WebMWriter.h
+++ b/dom/media/webm/WebMWriter.h
@@ -40,30 +40,28 @@ class VP8Metadata : public TrackMetadataBase {
  */
 class WebMWriter : public ContainerWriter {
  public:
-  // aTrackTypes indicate this muxer should multiplex into Video only or A/V
-  // foramt. Run in MediaRecorder thread
-  explicit WebMWriter(uint32_t aTrackTypes);
+  // Run in MediaRecorder thread
+  WebMWriter();
   virtual ~WebMWriter();
 
-  // WriteEncodedTrack inserts raw packets into WebM stream.
-  nsresult WriteEncodedTrack(const EncodedFrameContainer& aData,
+  // WriteEncodedTrack inserts raw packets into WebM stream. Does not accept
+  // any flags: any specified will be ignored. Writing is finalized by
+  // flushing via GetContainerData().
+  nsresult WriteEncodedTrack(const nsTArray>& aData,
                              uint32_t aFlags = 0) override;
 
   // GetContainerData outputs multiplexing data.
   // aFlags indicates the muxer should enter into finished stage and flush out
   // queue data.
-  nsresult GetContainerData(nsTArray >* aOutputBufs,
+  nsresult GetContainerData(nsTArray>* aOutputBufs,
                             uint32_t aFlags = 0) override;
 
   // Assign metadata into muxer
-  nsresult SetMetadata(TrackMetadataBase* aMetadata) override;
+  nsresult SetMetadata(
+      const nsTArray>& aMetadata) override;
 
  private:
   nsAutoPtr mEbmlComposer;
-
-  // Indicate what kind of meta data needed in the writer.
-  // If this value become 0, it means writer can start to generate header.
-  uint8_t mMetadataRequiredFlag;
 };
 
 }  // namespace mozilla
diff --git a/dom/messagechannel/MessagePort.cpp b/dom/messagechannel/MessagePort.cpp
index a3a99d9b16..c71e7cf441 100644
--- a/dom/messagechannel/MessagePort.cpp
+++ b/dom/messagechannel/MessagePort.cpp
@@ -41,6 +41,13 @@
 namespace mozilla {
 namespace dom {
 
+void UniqueMessagePortId::ForceClose() {
+  if (!mIdentifier.neutered()) {
+    MessagePort::ForceClose(mIdentifier);
+    mIdentifier.neutered() = true;
+  }
+}
+
 class PostMessageRunnable final : public CancelableRunnable {
   friend class MessagePort;
 
@@ -222,13 +229,14 @@ already_AddRefed MessagePort::Create(nsIGlobalObject* aGlobal,
 
 /* static */
 already_AddRefed MessagePort::Create(
-    nsIGlobalObject* aGlobal, const MessagePortIdentifier& aIdentifier,
+    nsIGlobalObject* aGlobal, UniqueMessagePortId& aIdentifier,
     ErrorResult& aRv) {
   MOZ_ASSERT(aGlobal);
 
   RefPtr mp = new MessagePort(aGlobal, eStateEntangling);
   mp->Initialize(aIdentifier.uuid(), aIdentifier.destinationUuid(),
                  aIdentifier.sequenceId(), aIdentifier.neutered(), aRv);
+  aIdentifier.neutered() = true;
   return mp.forget();
 }
 
@@ -266,12 +274,9 @@ void MessagePort::Initialize(const nsID& aUUID, const nsID& aDestinationUUID,
   // The port has to keep itself alive until it's entangled.
   UpdateMustKeepAlive();
 
-  if (!NS_IsMainThread()) {
+  if (WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate()) {
     RefPtr self = this;
 
-    WorkerPrivate* workerPrivate = GetCurrentThreadWorkerPrivate();
-    MOZ_ASSERT(workerPrivate);
-
     // When the callback is executed, we cannot process messages anymore because
     // we cannot dispatch new runnables. Let's force a Close().
     RefPtr strongWorkerRef = StrongWorkerRef::Create(
@@ -664,7 +669,7 @@ void MessagePort::Disentangle() {
   UpdateMustKeepAlive();
 }
 
-void MessagePort::CloneAndDisentangle(MessagePortIdentifier& aIdentifier) {
+void MessagePort::CloneAndDisentangle(UniqueMessagePortId& aIdentifier) {
   MOZ_ASSERT(mIdentifier);
   MOZ_ASSERT(!mHasBeenTransferredOrClosed);
 
diff --git a/dom/messagechannel/MessagePort.h b/dom/messagechannel/MessagePort.h
index f511744f75..26a3570001 100644
--- a/dom/messagechannel/MessagePort.h
+++ b/dom/messagechannel/MessagePort.h
@@ -7,6 +7,7 @@
 
 #include "mozilla/Attributes.h"
 #include "mozilla/DOMEventTargetHelper.h"
+#include "mozilla/dom/DOMTypes.h"
 #include "nsAutoPtr.h"
 #include "nsTArray.h"
 
@@ -19,14 +20,52 @@ class nsIGlobalObject;
 namespace mozilla {
 namespace dom {
 
-class ClonedMessageData;
 class MessagePortChild;
-class MessagePortIdentifier;
 struct PostMessageOptions;
 class PostMessageRunnable;
 class SharedMessagePortMessage;
 class StrongWorkerRef;
 
+// A class to hold a MessagePortIdentifier from
+// MessagePort::CloneAndDisentangle() and close if neither passed to
+// MessagePort::Create() nor release()ed to send via IPC.
+// When the `neutered` field of the MessagePortIdentifier is false, a close is
+// required.
+// This does not derive from MessagePortIdentifier because
+// MessagePortIdentifier is final and because use of UniqueMessagePortId as a
+// MessagePortIdentifier is intentionally prevented without release of
+// ownership.
+class UniqueMessagePortId final {
+ public:
+  UniqueMessagePortId() { mIdentifier.neutered() = true; }
+  explicit UniqueMessagePortId(const MessagePortIdentifier& aIdentifier)
+      : mIdentifier(aIdentifier) {}
+  UniqueMessagePortId(UniqueMessagePortId&& aOther) noexcept
+      : mIdentifier(aOther.mIdentifier) {
+    aOther.mIdentifier.neutered() = true;
+  }
+  ~UniqueMessagePortId() { ForceClose(); };
+  void ForceClose();
+
+  MOZ_MUST_USE MessagePortIdentifier release() {
+    MessagePortIdentifier id = mIdentifier;
+    mIdentifier.neutered() = true;
+    return id;
+  }
+  // const member accessors are not required because a const
+  // UniqueMessagePortId is not useful.
+  nsID& uuid() { return mIdentifier.uuid(); }
+  nsID& destinationUuid() { return mIdentifier.destinationUuid(); }
+  uint32_t& sequenceId() { return mIdentifier.sequenceId(); }
+  bool& neutered() { return mIdentifier.neutered(); }
+
+  UniqueMessagePortId(const UniqueMessagePortId& aOther) = delete;
+  void operator=(const UniqueMessagePortId& aOther) = delete;
+
+ private:
+  MessagePortIdentifier mIdentifier;
+};
+
 class MessagePort final : public DOMEventTargetHelper {
   friend class PostMessageRunnable;
 
@@ -39,9 +78,9 @@ class MessagePort final : public DOMEventTargetHelper {
                                               const nsID& aDestinationUUID,
                                               ErrorResult& aRv);
 
-  static already_AddRefed Create(
-      nsIGlobalObject* aGlobal, const MessagePortIdentifier& aIdentifier,
-      ErrorResult& aRv);
+  static already_AddRefed Create(nsIGlobalObject* aGlobal,
+                                              UniqueMessagePortId& aIdentifier,
+                                              ErrorResult& aRv);
 
   // For IPC.
   static void ForceClose(const MessagePortIdentifier& aIdentifier);
@@ -71,7 +110,7 @@ class MessagePort final : public DOMEventTargetHelper {
 
   bool CanBeCloned() const { return !mHasBeenTransferredOrClosed; }
 
-  void CloneAndDisentangle(MessagePortIdentifier& aIdentifier);
+  void CloneAndDisentangle(UniqueMessagePortId& aIdentifier);
 
   void CloseForced();
 
diff --git a/dom/presentation/PresentationConnection.cpp b/dom/presentation/PresentationConnection.cpp
index 9d257a89f2..7e56a9ba7b 100644
--- a/dom/presentation/PresentationConnection.cpp
+++ b/dom/presentation/PresentationConnection.cpp
@@ -510,8 +510,10 @@ nsresult PresentationConnection::DoReceiveMessage(const nsACString& aData,
   if (aIsBinary) {
     if (mBinaryType == PresentationConnectionBinaryType::Blob) {
       RefPtr blob =
-          Blob::CreateStringBlob(GetOwner(), aData, EmptyString());
-      MOZ_ASSERT(blob);
+          Blob::CreateStringBlob(GetOwnerGlobal(), aData, EmptyString());
+      if (NS_WARN_IF(!blob)) {
+        return NS_ERROR_FAILURE;
+      }
 
       if (!ToJSValue(cx, blob, &jsData)) {
         return NS_ERROR_FAILURE;
diff --git a/dom/serviceworkers/ServiceWorkerEvents.cpp b/dom/serviceworkers/ServiceWorkerEvents.cpp
index 285e5c0cb4..098c1251df 100644
--- a/dom/serviceworkers/ServiceWorkerEvents.cpp
+++ b/dom/serviceworkers/ServiceWorkerEvents.cpp
@@ -1017,7 +1017,7 @@ nsresult ExtractBytesFromData(
 }
 }  // namespace
 
-PushMessageData::PushMessageData(nsISupports* aOwner,
+PushMessageData::PushMessageData(nsIGlobalObject* aOwner,
                                  nsTArray&& aBytes)
     : mOwner(aOwner), mBytes(std::move(aBytes)) {}
 
@@ -1115,7 +1115,7 @@ already_AddRefed PushEvent::Constructor(
       aRv.Throw(rv);
       return nullptr;
     }
-    e->mData = new PushMessageData(aOwner, std::move(bytes));
+    e->mData = new PushMessageData(aOwner->GetOwnerGlobal(), std::move(bytes));
   }
   return e.forget();
 }
diff --git a/dom/serviceworkers/ServiceWorkerEvents.h b/dom/serviceworkers/ServiceWorkerEvents.h
index 530589918b..b807a026d0 100644
--- a/dom/serviceworkers/ServiceWorkerEvents.h
+++ b/dom/serviceworkers/ServiceWorkerEvents.h
@@ -171,7 +171,7 @@ class PushMessageData final : public nsISupports, public nsWrapperCache {
   virtual JSObject* WrapObject(JSContext* aCx,
                                JS::Handle aGivenProto) override;
 
-  nsISupports* GetParentObject() const { return mOwner; }
+  nsIGlobalObject* GetParentObject() const { return mOwner; }
 
   void Json(JSContext* cx, JS::MutableHandle aRetval,
             ErrorResult& aRv);
@@ -180,10 +180,10 @@ class PushMessageData final : public nsISupports, public nsWrapperCache {
                    ErrorResult& aRv);
   already_AddRefed Blob(ErrorResult& aRv);
 
-  PushMessageData(nsISupports* aOwner, nsTArray&& aBytes);
+  PushMessageData(nsIGlobalObject* aOwner, nsTArray&& aBytes);
 
  private:
-  nsCOMPtr mOwner;
+  nsCOMPtr mOwner;
   nsTArray mBytes;
   nsString mDecodedText;
   ~PushMessageData();
diff --git a/dom/webidl/AudioWorkletNode.webidl b/dom/webidl/AudioWorkletNode.webidl
index 0e64e61146..35c3da05c5 100644
--- a/dom/webidl/AudioWorkletNode.webidl
+++ b/dom/webidl/AudioWorkletNode.webidl
@@ -15,7 +15,7 @@ dictionary AudioWorkletNodeOptions : AudioNodeOptions {
              unsigned long             numberOfOutputs = 1;
              sequence   outputChannelCount;
              record parameterData;
-             object?                   processorOptions = null;
+             object                    processorOptions;
 };
 
 [SecureContext, Pref="dom.audioworklet.enabled",
@@ -27,7 +27,6 @@ interface AudioWorkletNode : AudioNode {
 
     [Throws]
     readonly        attribute AudioParamMap              parameters;
-    [Throws]
     readonly        attribute MessagePort                port;
                     attribute EventHandler               onprocessorerror;
 };
diff --git a/dom/webidl/AudioWorkletProcessor.webidl b/dom/webidl/AudioWorkletProcessor.webidl
index 1f0d38ea2f..7c1672df6b 100644
--- a/dom/webidl/AudioWorkletProcessor.webidl
+++ b/dom/webidl/AudioWorkletProcessor.webidl
@@ -11,8 +11,8 @@
 
 [Exposed=AudioWorklet]
 interface AudioWorkletProcessor {
-    constructor(optional AudioWorkletNodeOptions options = {});
+  [Throws]
+  constructor();
 
-    [Throws]
-    readonly attribute MessagePort port;
+  readonly attribute MessagePort port;
 };
diff --git a/dom/webidl/MediaRecorder.webidl b/dom/webidl/MediaRecorder.webidl
index 0087a4fa42..be79aee3da 100644
--- a/dom/webidl/MediaRecorder.webidl
+++ b/dom/webidl/MediaRecorder.webidl
@@ -18,39 +18,27 @@ interface MediaRecorder : EventTarget {
   [Throws]
   constructor(AudioNode node, optional unsigned long output = 0,
               optional MediaRecorderOptions options = {});
-
   readonly attribute MediaStream stream;
-
   readonly attribute DOMString mimeType;
-
   readonly attribute RecordingState state;
-
   attribute EventHandler onstart;
-
   attribute EventHandler onstop;
-
   attribute EventHandler ondataavailable;
-
   attribute EventHandler onpause;
-
   attribute EventHandler onresume;
-
   attribute EventHandler onerror;
+  readonly attribute unsigned long videoBitsPerSecond;
+  readonly attribute unsigned long audioBitsPerSecond;
 
-  attribute EventHandler onwarning;
 
   [Throws]
   void start(optional unsigned long timeslice);
-
   [Throws]
   void stop();
-
   [Throws]
   void pause();
-
   [Throws]
   void resume();
-
   [Throws]
   void requestData();
 
@@ -58,7 +46,7 @@ interface MediaRecorder : EventTarget {
 };
 
 dictionary MediaRecorderOptions {
-  DOMString mimeType = ""; // Default encoding mimeType.
+  DOMString mimeType = "";
   unsigned long audioBitsPerSecond;
   unsigned long videoBitsPerSecond;
   unsigned long bitsPerSecond;
diff --git a/dom/websocket/WebSocket.cpp b/dom/websocket/WebSocket.cpp
index bd1ab7d9eb..7ead426914 100644
--- a/dom/websocket/WebSocket.cpp
+++ b/dom/websocket/WebSocket.cpp
@@ -1871,7 +1871,9 @@ nsresult WebSocket::CreateAndDispatchMessageEvent(const nsACString& aData,
 
       RefPtr blob =
           Blob::CreateStringBlob(GetOwnerGlobal(), aData, EmptyString());
-      MOZ_ASSERT(blob);
+      if (NS_WARN_IF(!blob)) {
+        return NS_ERROR_FAILURE;
+      }
 
       if (!ToJSValue(cx, blob, &jsData)) {
         return NS_ERROR_FAILURE;
diff --git a/dom/workers/RuntimeService.cpp b/dom/workers/RuntimeService.cpp
index 863f137deb..de50373702 100644
--- a/dom/workers/RuntimeService.cpp
+++ b/dom/workers/RuntimeService.cpp
@@ -2421,7 +2421,9 @@ WorkerPrivate* GetWorkerPrivateFromContext(JSContext* aCx) {
 }
 
 WorkerPrivate* GetCurrentThreadWorkerPrivate() {
-  MOZ_ASSERT(!NS_IsMainThread());
+  if (NS_IsMainThread()) {
+    return nullptr;
+  }
 
   CycleCollectedJSContext* ccjscx = CycleCollectedJSContext::Get();
   if (!ccjscx) {
@@ -2429,7 +2431,7 @@ WorkerPrivate* GetCurrentThreadWorkerPrivate() {
   }
 
   WorkerJSContext* workerjscx = ccjscx->GetAsWorkerJSContext();
-  // Although GetCurrentThreadWorkerPrivate() is called only for worker
+  // Even when GetCurrentThreadWorkerPrivate() is called on worker
   // threads, the ccjscx will no longer be a WorkerJSContext if called from
   // stable state events during ~CycleCollectedJSContext().
   if (!workerjscx) {
@@ -2444,7 +2446,8 @@ bool IsCurrentThreadRunningWorker() {
 }
 
 bool IsCurrentThreadRunningChromeWorker() {
-  return GetCurrentThreadWorkerPrivate()->UsesSystemPrincipal();
+  WorkerPrivate* wp = GetCurrentThreadWorkerPrivate();
+  return wp && wp->UsesSystemPrincipal();
 }
 
 JSContext* GetCurrentWorkerThreadJSContext() {
diff --git a/dom/workers/WorkerPrivate.cpp b/dom/workers/WorkerPrivate.cpp
index dc426e8b87..8d4b322091 100644
--- a/dom/workers/WorkerPrivate.cpp
+++ b/dom/workers/WorkerPrivate.cpp
@@ -4720,8 +4720,8 @@ void WorkerPrivate::EndCTypesCall() {
   SetGCTimerMode(PeriodicTimer);
 }
 
-bool WorkerPrivate::ConnectMessagePort(
-    JSContext* aCx, const MessagePortIdentifier& aIdentifier) {
+bool WorkerPrivate::ConnectMessagePort(JSContext* aCx,
+                                       UniqueMessagePortId& aIdentifier) {
   AssertIsOnWorkerThread();
 
   WorkerGlobalScope* globalScope = GlobalScope();
@@ -4729,7 +4729,7 @@ bool WorkerPrivate::ConnectMessagePort(
   JS::Rooted jsGlobal(aCx, globalScope->GetWrapper());
   MOZ_ASSERT(jsGlobal);
 
-  // This MessagePortIdentifier is used to create a new port, still connected
+  // This UniqueMessagePortId is used to create a new port, still connected
   // with the other one, but in the worker thread.
   ErrorResult rv;
   RefPtr port = MessagePort::Create(globalScope, aIdentifier, rv);
diff --git a/dom/workers/WorkerPrivate.h b/dom/workers/WorkerPrivate.h
index 4a978a432a..ea9c62ce80 100644
--- a/dom/workers/WorkerPrivate.h
+++ b/dom/workers/WorkerPrivate.h
@@ -39,7 +39,7 @@ class ClientInfo;
 class ClientSource;
 class Function;
 class MessagePort;
-class MessagePortIdentifier;
+class UniqueMessagePortId;
 class PerformanceStorage;
 class RemoteWorkerChild;
 class TimeoutHandler;
@@ -357,8 +357,7 @@ class WorkerPrivate : public RelativeTimeline {
     BeginCTypesCall();
   }
 
-  bool ConnectMessagePort(JSContext* aCx,
-                          const MessagePortIdentifier& aIdentifier);
+  bool ConnectMessagePort(JSContext* aCx, UniqueMessagePortId& aIdentifier);
 
   WorkerGlobalScope* GetOrCreateGlobalScope(JSContext* aCx);
 
diff --git a/dom/workers/remoteworkers/RemoteWorkerChild.cpp b/dom/workers/remoteworkers/RemoteWorkerChild.cpp
index d84b32534e..31e4194c7f 100644
--- a/dom/workers/remoteworkers/RemoteWorkerChild.cpp
+++ b/dom/workers/remoteworkers/RemoteWorkerChild.cpp
@@ -86,11 +86,6 @@ class MessagePortIdentifierRunnable final : public WorkerRunnable {
     return true;
   }
 
-  nsresult Cancel() override {
-    MessagePort::ForceClose(mPortIdentifier);
-    return WorkerRunnable::Cancel();
-  }
-
   virtual bool PreDispatch(WorkerPrivate* aWorkerPrivate) override {
     // Silence bad assertions.
     return true;
@@ -113,7 +108,7 @@ class MessagePortIdentifierRunnable final : public WorkerRunnable {
   }
 
   RefPtr mActor;
-  MessagePortIdentifier mPortIdentifier;
+  UniqueMessagePortId mPortIdentifier;
 };
 
 }  // namespace
@@ -570,7 +565,7 @@ void RemoteWorkerChild::RecvExecOpOnMainThread(const RemoteWorkerOp& aOp) {
 
 void RemoteWorkerChild::AddPortIdentifier(
     JSContext* aCx, WorkerPrivate* aWorkerPrivate,
-    const MessagePortIdentifier& aPortIdentifier) {
+    UniqueMessagePortId& aPortIdentifier) {
   if (NS_WARN_IF(!aWorkerPrivate->ConnectMessagePort(aCx, aPortIdentifier))) {
     ErrorPropagationDispatch(NS_ERROR_FAILURE);
   }
diff --git a/dom/workers/remoteworkers/RemoteWorkerChild.h b/dom/workers/remoteworkers/RemoteWorkerChild.h
index 7a68d1bf14..fa29b23393 100644
--- a/dom/workers/remoteworkers/RemoteWorkerChild.h
+++ b/dom/workers/remoteworkers/RemoteWorkerChild.h
@@ -17,6 +17,7 @@ namespace mozilla {
 namespace dom {
 
 class RemoteWorkerData;
+class UniqueMessagePortId;
 class WeakWorkerRef;
 class WorkerErrorReport;
 class WorkerPrivate;
@@ -37,7 +38,7 @@ class RemoteWorkerChild final : public PRemoteWorkerChild {
   void ShutdownOnWorker();
 
   void AddPortIdentifier(JSContext* aCx, WorkerPrivate* aWorkerPrivate,
-                         const MessagePortIdentifier& aPortIdentifier);
+                         UniqueMessagePortId& aPortIdentifier);
 
   void ErrorPropagationOnMainThread(const WorkerErrorReport* aReport,
                                     bool aIsErrorEvent);
diff --git a/dom/workers/sharedworkers/SharedWorker.cpp b/dom/workers/sharedworkers/SharedWorker.cpp
index 8034edb631..63b8bbc3ea 100644
--- a/dom/workers/sharedworkers/SharedWorker.cpp
+++ b/dom/workers/sharedworkers/SharedWorker.cpp
@@ -169,7 +169,7 @@ already_AddRefed SharedWorker::Constructor(
     return nullptr;
   }
 
-  MessagePortIdentifier portIdentifier;
+  UniqueMessagePortId portIdentifier;
   channel->Port1()->CloneAndDisentangle(portIdentifier);
 
   URIParams resolvedScriptURL;
@@ -196,7 +196,7 @@ already_AddRefed SharedWorker::Constructor(
       storageAllowed, true /* sharedWorker */);
 
   PSharedWorkerChild* pActor = actorChild->SendPSharedWorkerConstructor(
-      remoteWorkerData, loadInfo.mWindowID, portIdentifier);
+      remoteWorkerData, loadInfo.mWindowID, portIdentifier.release());
 
   RefPtr actor = static_cast(pActor);
   MOZ_ASSERT(actor);
diff --git a/dom/workers/sharedworkers/SharedWorkerManager.cpp b/dom/workers/sharedworkers/SharedWorkerManager.cpp
index 4ed1840c32..7fc471bc79 100644
--- a/dom/workers/sharedworkers/SharedWorkerManager.cpp
+++ b/dom/workers/sharedworkers/SharedWorkerManager.cpp
@@ -61,7 +61,7 @@ SharedWorkerManager::~SharedWorkerManager() {
 
 bool SharedWorkerManager::MaybeCreateRemoteWorker(
     const RemoteWorkerData& aData, uint64_t aWindowID,
-    const MessagePortIdentifier& aPortIdentifier, base::ProcessId aProcessId) {
+    UniqueMessagePortId& aPortIdentifier, base::ProcessId aProcessId) {
   AssertIsOnBackgroundThread();
 
   if (!mRemoteWorkerController) {
@@ -76,7 +76,7 @@ bool SharedWorkerManager::MaybeCreateRemoteWorker(
     mRemoteWorkerController->AddWindowID(aWindowID);
   }
 
-  mRemoteWorkerController->AddPortIdentifier(aPortIdentifier);
+  mRemoteWorkerController->AddPortIdentifier(aPortIdentifier.release());
   return true;
 }
 
diff --git a/dom/workers/sharedworkers/SharedWorkerManager.h b/dom/workers/sharedworkers/SharedWorkerManager.h
index a1a1d3ff75..48bb6ad5df 100644
--- a/dom/workers/sharedworkers/SharedWorkerManager.h
+++ b/dom/workers/sharedworkers/SharedWorkerManager.h
@@ -14,7 +14,7 @@ class nsIPrincipal;
 namespace mozilla {
 namespace dom {
 
-class MessagePortIdentifier;
+class UniqueMessagePortId;
 class RemoteWorkerData;
 class SharedWorkerManager;
 class SharedWorkerService;
@@ -90,7 +90,7 @@ class SharedWorkerManager final : public RemoteWorkerObserver {
 
   bool MaybeCreateRemoteWorker(const RemoteWorkerData& aData,
                                uint64_t aWindowID,
-                               const MessagePortIdentifier& aPortIdentifier,
+                               UniqueMessagePortId& aPortIdentifier,
                                base::ProcessId aProcessId);
 
   void AddActor(SharedWorkerParent* aParent);
diff --git a/dom/workers/sharedworkers/SharedWorkerService.cpp b/dom/workers/sharedworkers/SharedWorkerService.cpp
index 0c24a34afd..d8c7f07338 100644
--- a/dom/workers/sharedworkers/SharedWorkerService.cpp
+++ b/dom/workers/sharedworkers/SharedWorkerService.cpp
@@ -51,7 +51,7 @@ class GetOrCreateWorkerManagerRunnable final : public Runnable {
   RefPtr mActor;
   RemoteWorkerData mData;
   uint64_t mWindowID;
-  MessagePortIdentifier mPortIdentifier;
+  UniqueMessagePortId mPortIdentifier;
 };
 
 class WorkerManagerCreatedRunnable final : public Runnable {
@@ -59,13 +59,13 @@ class WorkerManagerCreatedRunnable final : public Runnable {
   WorkerManagerCreatedRunnable(
       already_AddRefed aManagerWrapper,
       SharedWorkerParent* aActor, const RemoteWorkerData& aData,
-      uint64_t aWindowID, const MessagePortIdentifier& aPortIdentifier)
+      uint64_t aWindowID, UniqueMessagePortId& aPortIdentifier)
       : Runnable("WorkerManagerCreatedRunnable"),
         mManagerWrapper(aManagerWrapper),
         mActor(aActor),
         mData(aData),
         mWindowID(aWindowID),
-        mPortIdentifier(aPortIdentifier) {}
+        mPortIdentifier(std::move(aPortIdentifier)) {}
 
   NS_IMETHOD
   Run() {
@@ -87,7 +87,7 @@ class WorkerManagerCreatedRunnable final : public Runnable {
   RefPtr mActor;
   RemoteWorkerData mData;
   uint64_t mWindowID;
-  MessagePortIdentifier mPortIdentifier;
+  UniqueMessagePortId mPortIdentifier;
 };
 
 class ErrorPropagationRunnable final : public Runnable {
@@ -160,14 +160,11 @@ void SharedWorkerService::GetOrCreateWorkerManager(
 void SharedWorkerService::GetOrCreateWorkerManagerOnMainThread(
     nsIEventTarget* aBackgroundEventTarget, SharedWorkerParent* aActor,
     const RemoteWorkerData& aData, uint64_t aWindowID,
-    const MessagePortIdentifier& aPortIdentifier) {
+    UniqueMessagePortId& aPortIdentifier) {
   MOZ_ASSERT(NS_IsMainThread());
   MOZ_ASSERT(aBackgroundEventTarget);
   MOZ_ASSERT(aActor);
 
-  auto closeMessagePortIdentifier =
-      MakeScopeExit([&] { MessagePort::ForceClose(aPortIdentifier); });
-
   nsresult rv = NS_OK;
   nsCOMPtr storagePrincipal =
       PrincipalInfoToPrincipal(aData.storagePrincipalInfo(), &rv);
@@ -220,8 +217,6 @@ void SharedWorkerService::GetOrCreateWorkerManagerOnMainThread(
   RefPtr r = new WorkerManagerCreatedRunnable(
       wrapper.forget(), aActor, aData, aWindowID, aPortIdentifier);
   aBackgroundEventTarget->Dispatch(r.forget(), NS_DISPATCH_NORMAL);
-
-  closeMessagePortIdentifier.release();
 }
 
 void SharedWorkerService::ErrorPropagationOnMainThread(
diff --git a/dom/workers/sharedworkers/SharedWorkerService.h b/dom/workers/sharedworkers/SharedWorkerService.h
index b382302fbb..067d768706 100644
--- a/dom/workers/sharedworkers/SharedWorkerService.h
+++ b/dom/workers/sharedworkers/SharedWorkerService.h
@@ -20,6 +20,7 @@ class MessagePortIdentifier;
 class RemoteWorkerData;
 class SharedWorkerManager;
 class SharedWorkerParent;
+class UniqueMessagePortId;
 
 class SharedWorkerService final {
  public:
@@ -41,7 +42,7 @@ class SharedWorkerService final {
   void GetOrCreateWorkerManagerOnMainThread(
       nsIEventTarget* aBackgroundEventTarget, SharedWorkerParent* aActor,
       const RemoteWorkerData& aData, uint64_t aWindowID,
-      const MessagePortIdentifier& aPortIdentifier);
+      UniqueMessagePortId& aPortIdentifier);
 
   void RemoveWorkerManagerOnMainThread(SharedWorkerManager* aManager);
 
diff --git a/dom/worklet/WorkletThread.cpp b/dom/worklet/WorkletThread.cpp
index 794d2a254e..df1329281b 100644
--- a/dom/worklet/WorkletThread.cpp
+++ b/dom/worklet/WorkletThread.cpp
@@ -7,6 +7,7 @@
 #include "nsContentUtils.h"
 #include "nsCycleCollector.h"
 #include "mozilla/dom/AtomList.h"
+#include "mozilla/ipc/BackgroundChild.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/EventQueue.h"
 #include "mozilla/ThreadEventQueue.h"
@@ -383,6 +384,9 @@ void WorkletThread::DeleteCycleCollectedJSContext() {
     return;
   }
 
+  // Release any MessagePort kept alive by its ipc actor.
+  mozilla::ipc::BackgroundChild::CloseForCurrentThread();
+
   WorkletJSContext* workletjscx = ccjscx->GetAsWorkletJSContext();
   MOZ_ASSERT(workletjscx);
   delete workletjscx;
diff --git a/dom/worklet/tests/test_audioWorkletGlobalScopeRegisterProcessor.html b/dom/worklet/tests/test_audioWorkletGlobalScopeRegisterProcessor.html
index ecd2afbb0a..a251e4a9bc 100644
--- a/dom/worklet/tests/test_audioWorkletGlobalScopeRegisterProcessor.html
+++ b/dom/worklet/tests/test_audioWorkletGlobalScopeRegisterProcessor.html
@@ -16,8 +16,6 @@ function configureTest() {
     "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor is not a constructor.",
     "NotSupportedError: Argument 1 of AudioWorkletGlobalScope.registerProcessor should not be an empty string.",
     "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor is not an object.",
-    "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.process is not callable.",
-    "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.process is not callable.",
     "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.parameterDescriptors is neither an array nor undefined.",
     "NotSupportedError: Argument 1 of AudioWorkletGlobalScope.registerProcessor is invalid: a class with the same name is already registered.",
     "TypeError: Missing required 'name' member of AudioParamDescriptor.",
diff --git a/dom/worklet/tests/worklet_test_audioWorkletGlobalScopeRegisterProcessor.js b/dom/worklet/tests/worklet_test_audioWorkletGlobalScopeRegisterProcessor.js
index b57169276e..2e25c9ec1d 100644
--- a/dom/worklet/tests/worklet_test_audioWorkletGlobalScopeRegisterProcessor.js
+++ b/dom/worklet/tests/worklet_test_audioWorkletGlobalScopeRegisterProcessor.js
@@ -1,5 +1,4 @@
 // Define several classes.
-// Only the last ones are valid.
 class EmptyWorkletProcessor extends AudioWorkletProcessor {}
 
 class NoProcessWorkletProcessor extends AudioWorkletProcessor {
@@ -249,20 +248,10 @@ try {
 }
 
 // Test Empty class definition
-// "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.process is not callable."
-try {
-  registerProcessor("empty-worklet-processor", EmptyWorkletProcessor);
-} catch (e) {
-  console.log(e);
-}
+registerProcessor("empty-worklet-processor", EmptyWorkletProcessor);
 
 // Test class with constructor but not process function
-// "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.process is not callable."
-try {
-  registerProcessor("no-worklet-processor", NoProcessWorkletProcessor);
-} catch (e) {
-  console.log(e);
-}
+registerProcessor("no-worklet-processor", NoProcessWorkletProcessor);
 
 // Test class with parameterDescriptors not being array nor undefined
 // "TypeError: Argument 2 of AudioWorkletGlobalScope.registerProcessor constructor.parameterDescriptors is neither an array nor undefined."
diff --git a/dom/xhr/XMLHttpRequestMainThread.cpp b/dom/xhr/XMLHttpRequestMainThread.cpp
index 15a6195ade..f4c012602e 100644
--- a/dom/xhr/XMLHttpRequestMainThread.cpp
+++ b/dom/xhr/XMLHttpRequestMainThread.cpp
@@ -1692,7 +1692,7 @@ XMLHttpRequestMainThread::OnDataAvailable(nsIRequest* request,
       rv = NS_GetBlobForBlobURI(blobURI, getter_AddRefs(blobImpl));
       if (NS_SUCCEEDED(rv)) {
         if (blobImpl) {
-          mResponseBlob = Blob::Create(GetOwner(), blobImpl);
+          mResponseBlob = Blob::Create(GetOwnerGlobal(), blobImpl);
         }
         if (!mResponseBlob) {
           rv = NS_ERROR_FILE_NOT_FOUND;
@@ -2134,7 +2134,7 @@ XMLHttpRequestMainThread::OnStopRequest(nsIRequest* request, nsresult status) {
     // mBlobStorage can be null if the channel is non-file non-cacheable
     // and if the response length is zero.
     MaybeCreateBlobStorage();
-    mBlobStorage->GetBlobWhenReady(GetOwner(), contentType, this);
+    mBlobStorage->GetBlobImplWhenReady(contentType, this);
     waitingForBlobCreation = true;
 
     NS_ASSERTION(mResponseBody.IsEmpty(), "mResponseBody should be empty");
@@ -3560,7 +3560,7 @@ void XMLHttpRequestMainThread::MaybeCreateBlobStorage() {
 }
 
 void XMLHttpRequestMainThread::BlobStoreCompleted(
-    MutableBlobStorage* aBlobStorage, Blob* aBlob, nsresult aRv) {
+    MutableBlobStorage* aBlobStorage, BlobImpl* aBlobImpl, nsresult aRv) {
   // Ok, the state is changed...
   if (mBlobStorage != aBlobStorage || NS_FAILED(aRv)) {
     return;
@@ -3568,7 +3568,7 @@ void XMLHttpRequestMainThread::BlobStoreCompleted(
 
   MOZ_ASSERT(mState != XMLHttpRequest_Binding::DONE);
 
-  mResponseBlob = aBlob;
+  mResponseBlob = Blob::Create(GetOwnerGlobal(), aBlobImpl);
   mBlobStorage = nullptr;
 
   ChangeStateToDone(mFlagSyncLooping);
diff --git a/dom/xhr/XMLHttpRequestMainThread.h b/dom/xhr/XMLHttpRequestMainThread.h
index 537627d85f..fc19ae7a15 100644
--- a/dom/xhr/XMLHttpRequestMainThread.h
+++ b/dom/xhr/XMLHttpRequestMainThread.h
@@ -428,7 +428,7 @@ class XMLHttpRequestMainThread final : public XMLHttpRequest,
   virtual void SetOriginAttributes(
       const mozilla::dom::OriginAttributesDictionary& aAttrs) override;
 
-  void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, Blob* aBlob,
+  void BlobStoreCompleted(MutableBlobStorage* aBlobStorage, BlobImpl* aBlobImpl,
                           nsresult aResult) override;
 
   void LocalFileToBlobCompleted(Blob* aBlob);
diff --git a/gfx/gl/ScopedGLHelpers.cpp b/gfx/gl/ScopedGLHelpers.cpp
index 6694026618..ec48605c74 100644
--- a/gfx/gl/ScopedGLHelpers.cpp
+++ b/gfx/gl/ScopedGLHelpers.cpp
@@ -14,7 +14,7 @@ namespace gl {
 
 // Use |newState = true| to enable, |false| to disable.
 ScopedGLState::ScopedGLState(GLContext* aGL, GLenum aCapability, bool aNewState)
-    : ScopedGLWrapper(aGL), mCapability(aCapability) {
+    : mGL(aGL), mCapability(aCapability) {
   mOldState = mGL->fIsEnabled(mCapability);
 
   // Early out if we're already in the right state.
@@ -28,11 +28,11 @@ ScopedGLState::ScopedGLState(GLContext* aGL, GLenum aCapability, bool aNewState)
 }
 
 ScopedGLState::ScopedGLState(GLContext* aGL, GLenum aCapability)
-    : ScopedGLWrapper(aGL), mCapability(aCapability) {
+    : mGL(aGL), mCapability(aCapability) {
   mOldState = mGL->fIsEnabled(mCapability);
 }
 
-void ScopedGLState::UnwrapImpl() {
+ScopedGLState::~ScopedGLState() {
   if (mOldState) {
     mGL->fEnable(mCapability);
   } else {
@@ -52,18 +52,17 @@ void ScopedBindFramebuffer::Init() {
   }
 }
 
-ScopedBindFramebuffer::ScopedBindFramebuffer(GLContext* aGL)
-    : ScopedGLWrapper(aGL) {
+ScopedBindFramebuffer::ScopedBindFramebuffer(GLContext* aGL) : mGL(aGL) {
   Init();
 }
 
 ScopedBindFramebuffer::ScopedBindFramebuffer(GLContext* aGL, GLuint aNewFB)
-    : ScopedGLWrapper(aGL) {
+    : mGL(aGL) {
   Init();
   mGL->BindFB(aNewFB);
 }
 
-void ScopedBindFramebuffer::UnwrapImpl() {
+ScopedBindFramebuffer::~ScopedBindFramebuffer() {
   if (mOldReadFB == mOldDrawFB) {
     mGL->BindFB(mOldDrawFB);
   } else {
@@ -75,42 +74,43 @@ void ScopedBindFramebuffer::UnwrapImpl() {
 /* ScopedBindTextureUnit ******************************************************/
 
 ScopedBindTextureUnit::ScopedBindTextureUnit(GLContext* aGL, GLenum aTexUnit)
-    : ScopedGLWrapper<ScopedBindTextureUnit>(aGL), mOldTexUnit(0) {
+    : mGL(aGL), mOldTexUnit(0) {
   MOZ_ASSERT(aTexUnit >= LOCAL_GL_TEXTURE0);
   mGL->GetUIntegerv(LOCAL_GL_ACTIVE_TEXTURE, &mOldTexUnit);
   mGL->fActiveTexture(aTexUnit);
 }
 
-void ScopedBindTextureUnit::UnwrapImpl() { mGL->fActiveTexture(mOldTexUnit); }
+ScopedBindTextureUnit::~ScopedBindTextureUnit() {
+  mGL->fActiveTexture(mOldTexUnit);
+}
 
 /* ScopedTexture **************************************************************/
 
-ScopedTexture::ScopedTexture(GLContext* aGL)
-    : ScopedGLWrapper<ScopedTexture>(aGL), mTexture(0) {
+ScopedTexture::ScopedTexture(GLContext* aGL) : mGL(aGL), mTexture(0) {
   mGL->fGenTextures(1, &mTexture);
 }
 
-void ScopedTexture::UnwrapImpl() { mGL->fDeleteTextures(1, &mTexture); }
+ScopedTexture::~ScopedTexture() { mGL->fDeleteTextures(1, &mTexture); }
 
 /* ScopedFramebuffer
  * **************************************************************/
 
-ScopedFramebuffer::ScopedFramebuffer(GLContext* aGL)
-    : ScopedGLWrapper<ScopedFramebuffer>(aGL), mFB(0) {
+ScopedFramebuffer::ScopedFramebuffer(GLContext* aGL) : mGL(aGL), mFB(0) {
   mGL->fGenFramebuffers(1, &mFB);
 }
 
-void ScopedFramebuffer::UnwrapImpl() { mGL->fDeleteFramebuffers(1, &mFB); }
+ScopedFramebuffer::~ScopedFramebuffer() { mGL->fDeleteFramebuffers(1, &mFB); }
 
 /* ScopedRenderbuffer
  * **************************************************************/
 
-ScopedRenderbuffer::ScopedRenderbuffer(GLContext* aGL)
-    : ScopedGLWrapper<ScopedRenderbuffer>(aGL), mRB(0) {
+ScopedRenderbuffer::ScopedRenderbuffer(GLContext* aGL) : mGL(aGL), mRB(0) {
   mGL->fGenRenderbuffers(1, &mRB);
 }
 
-void ScopedRenderbuffer::UnwrapImpl() { mGL->fDeleteRenderbuffers(1, &mRB); }
+ScopedRenderbuffer::~ScopedRenderbuffer() {
+  mGL->fDeleteRenderbuffers(1, &mRB);
+}
 
 /* ScopedBindTexture **********************************************************/
 
@@ -152,13 +152,11 @@ static GLuint GetBoundTexture(GLContext* gl, GLenum texTarget) {
 
 ScopedBindTexture::ScopedBindTexture(GLContext* aGL, GLuint aNewTex,
                                      GLenum aTarget)
-    : ScopedGLWrapper<ScopedBindTexture>(aGL),
-      mTarget(aTarget),
-      mOldTex(GetBoundTexture(aGL, aTarget)) {
+    : mGL(aGL), mTarget(aTarget), mOldTex(GetBoundTexture(aGL, aTarget)) {
   mGL->fBindTexture(mTarget, aNewTex);
 }
 
-void ScopedBindTexture::UnwrapImpl() { mGL->fBindTexture(mTarget, mOldTex); }
+ScopedBindTexture::~ScopedBindTexture() { mGL->fBindTexture(mTarget, mOldTex); }
 
 /* ScopedBindRenderbuffer *****************************************************/
 
@@ -167,18 +165,17 @@ void ScopedBindRenderbuffer::Init() {
   mGL->GetUIntegerv(LOCAL_GL_RENDERBUFFER_BINDING, &mOldRB);
 }
 
-ScopedBindRenderbuffer::ScopedBindRenderbuffer(GLContext* aGL)
-    : ScopedGLWrapper<ScopedBindRenderbuffer>(aGL) {
+ScopedBindRenderbuffer::ScopedBindRenderbuffer(GLContext* aGL) : mGL(aGL) {
   Init();
 }
 
 ScopedBindRenderbuffer::ScopedBindRenderbuffer(GLContext* aGL, GLuint aNewRB)
-    : ScopedGLWrapper<ScopedBindRenderbuffer>(aGL) {
+    : mGL(aGL) {
   Init();
   mGL->fBindRenderbuffer(LOCAL_GL_RENDERBUFFER, aNewRB);
 }
 
-void ScopedBindRenderbuffer::UnwrapImpl() {
+ScopedBindRenderbuffer::~ScopedBindRenderbuffer() {
   mGL->fBindRenderbuffer(LOCAL_GL_RENDERBUFFER, mOldRB);
 }
 
@@ -186,9 +183,7 @@ void ScopedBindRenderbuffer::UnwrapImpl() {
 ScopedFramebufferForTexture::ScopedFramebufferForTexture(GLContext* aGL,
                                                          GLuint aTexture,
                                                          GLenum aTarget)
-    : ScopedGLWrapper<ScopedFramebufferForTexture>(aGL),
-      mComplete(false),
-      mFB(0) {
+    : mGL(aGL), mComplete(false), mFB(0) {
   mGL->fGenFramebuffers(1, &mFB);
   ScopedBindFramebuffer autoFB(aGL, mFB);
   mGL->fFramebufferTexture2D(LOCAL_GL_FRAMEBUFFER, LOCAL_GL_COLOR_ATTACHMENT0,
@@ -203,7 +198,7 @@ ScopedFramebufferForTexture::ScopedFramebufferForTexture(GLContext* aGL,
   }
 }
 
-void ScopedFramebufferForTexture::UnwrapImpl() {
+ScopedFramebufferForTexture::~ScopedFramebufferForTexture() {
   if (!mFB) return;
 
   mGL->fDeleteFramebuffers(1, &mFB);
@@ -214,9 +209,7 @@ void ScopedFramebufferForTexture::UnwrapImpl() {
 
 ScopedFramebufferForRenderbuffer::ScopedFramebufferForRenderbuffer(
     GLContext* aGL, GLuint aRB)
-    : ScopedGLWrapper<ScopedFramebufferForRenderbuffer>(aGL),
-      mComplete(false),
-      mFB(0) {
+    : mGL(aGL), mComplete(false), mFB(0) {
   mGL->fGenFramebuffers(1, &mFB);
   ScopedBindFramebuffer autoFB(aGL, mFB);
   mGL->fFramebufferRenderbuffer(LOCAL_GL_FRAMEBUFFER,
@@ -232,7 +225,7 @@ ScopedFramebufferForRenderbuffer::ScopedFramebufferForRenderbuffer(
   }
 }
 
-void ScopedFramebufferForRenderbuffer::UnwrapImpl() {
+ScopedFramebufferForRenderbuffer::~ScopedFramebufferForRenderbuffer() {
   if (!mFB) return;
 
   mGL->fDeleteFramebuffers(1, &mFB);
@@ -243,12 +236,12 @@ void ScopedFramebufferForRenderbuffer::UnwrapImpl() {
 
 ScopedViewportRect::ScopedViewportRect(GLContext* aGL, GLint x, GLint y,
                                        GLsizei width, GLsizei height)
-    : ScopedGLWrapper<ScopedViewportRect>(aGL) {
+    : mGL(aGL) {
   mGL->fGetIntegerv(LOCAL_GL_VIEWPORT, mSavedViewportRect);
   mGL->fViewport(x, y, width, height);
 }
 
-void ScopedViewportRect::UnwrapImpl() {
+ScopedViewportRect::~ScopedViewportRect() {
   mGL->fViewport(mSavedViewportRect[0], mSavedViewportRect[1],
                  mSavedViewportRect[2], mSavedViewportRect[3]);
 }
@@ -257,17 +250,16 @@ void ScopedViewportRect::UnwrapImpl() {
 
 ScopedScissorRect::ScopedScissorRect(GLContext* aGL, GLint x, GLint y,
                                      GLsizei width, GLsizei height)
-    : ScopedGLWrapper<ScopedScissorRect>(aGL) {
+    : mGL(aGL) {
   mGL->fGetIntegerv(LOCAL_GL_SCISSOR_BOX, mSavedScissorRect);
   mGL->fScissor(x, y, width, height);
 }
 
-ScopedScissorRect::ScopedScissorRect(GLContext* aGL)
-    : ScopedGLWrapper<ScopedScissorRect>(aGL) {
+ScopedScissorRect::ScopedScissorRect(GLContext* aGL) : mGL(aGL) {
   mGL->fGetIntegerv(LOCAL_GL_SCISSOR_BOX, mSavedScissorRect);
 }
 
-void ScopedScissorRect::UnwrapImpl() {
+ScopedScissorRect::~ScopedScissorRect() {
   mGL->fScissor(mSavedScissorRect[0], mSavedScissorRect[1],
                 mSavedScissorRect[2], mSavedScissorRect[3]);
 }
@@ -278,7 +270,7 @@ ScopedVertexAttribPointer::ScopedVertexAttribPointer(
     GLContext* aGL, GLuint index, GLint size, GLenum type,
     realGLboolean normalized, GLsizei stride, GLuint buffer,
     const GLvoid* pointer)
-    : ScopedGLWrapper<ScopedVertexAttribPointer>(aGL),
+    : mGL(aGL),
       mAttribEnabled(0),
       mAttribSize(0),
       mAttribStride(0),
@@ -295,7 +287,7 @@ ScopedVertexAttribPointer::ScopedVertexAttribPointer(
 
 ScopedVertexAttribPointer::ScopedVertexAttribPointer(GLContext* aGL,
                                                      GLuint index)
-    : ScopedGLWrapper<ScopedVertexAttribPointer>(aGL),
+    : mGL(aGL),
       mAttribEnabled(0),
       mAttribSize(0),
       mAttribStride(0),
@@ -348,7 +340,7 @@ void ScopedVertexAttribPointer::WrapImpl(GLuint index) {
   mGL->GetUIntegerv(LOCAL_GL_ARRAY_BUFFER_BINDING, &mBoundBuffer);
 }
 
-void ScopedVertexAttribPointer::UnwrapImpl() {
+ScopedVertexAttribPointer::~ScopedVertexAttribPointer() {
   mGL->fBindBuffer(LOCAL_GL_ARRAY_BUFFER, mAttribBufferBinding);
   mGL->fVertexAttribPointer(mAttribIndex, mAttribSize, mAttribType,
                             mAttribNormalized, mAttribStride, mAttribPointer);
@@ -363,7 +355,7 @@ void ScopedVertexAttribPointer::UnwrapImpl() {
 // ScopedPackState
 
 ScopedPackState::ScopedPackState(GLContext* gl)
-    : ScopedGLWrapper<ScopedPackState>(gl),
+    : mGL(gl),
       mAlignment(0),
       mPixelBuffer(0),
       mRowLength(0),
@@ -402,7 +394,7 @@ bool ScopedPackState::SetForWidthAndStrideRGBA(GLsizei aWidth,
   return false;
 }
 
-void ScopedPackState::UnwrapImpl() {
+ScopedPackState::~ScopedPackState() {
   mGL->fPixelStorei(LOCAL_GL_PACK_ALIGNMENT, mAlignment);
 
   if (!mGL->HasPBOState()) return;
@@ -417,7 +409,7 @@ void ScopedPackState::UnwrapImpl() {
 // ResetUnpackState
 
 ResetUnpackState::ResetUnpackState(GLContext* gl)
-    : ScopedGLWrapper<ResetUnpackState>(gl),
+    : mGL(gl),
       mAlignment(0),
       mPBO(0),
       mRowLength(0),
@@ -447,7 +439,7 @@ ResetUnpackState::ResetUnpackState(GLContext* gl)
   fnReset(LOCAL_GL_UNPACK_SKIP_IMAGES, 0, &mSkipImages);
 }
 
-void ResetUnpackState::UnwrapImpl() {
+ResetUnpackState::~ResetUnpackState() {
   mGL->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, mAlignment);
 
   if (!mGL->HasPBOState()) return;
@@ -485,11 +477,9 @@ static GLuint GetPBOBinding(GLContext* gl, GLenum target) {
 }
 
 ScopedBindPBO::ScopedBindPBO(GLContext* gl, GLenum target)
-    : ScopedGLWrapper<ScopedBindPBO>(gl),
-      mTarget(target),
-      mPBO(GetPBOBinding(mGL, mTarget)) {}
+    : mGL(gl), mTarget(target), mPBO(GetPBOBinding(mGL, mTarget)) {}
 
-void ScopedBindPBO::UnwrapImpl() {
+ScopedBindPBO::~ScopedBindPBO() {
   if (!mGL->HasPBOState()) return;
 
   mGL->fBindBuffer(mTarget, mPBO);
diff --git a/gfx/gl/ScopedGLHelpers.h b/gfx/gl/ScopedGLHelpers.h
index 6c5d6d441d..001755dde3 100644
--- a/gfx/gl/ScopedGLHelpers.h
+++ b/gfx/gl/ScopedGLHelpers.h
@@ -17,40 +17,10 @@ class GLContext;
 bool IsContextCurrent(GLContext* gl);
 #endif
 
-// RAII via CRTP!
-template <typename Derived>
-struct ScopedGLWrapper {
- private:
-  bool mIsUnwrapped;
-
- protected:
-  GLContext* const mGL;
-
-  explicit ScopedGLWrapper(GLContext* gl) : mIsUnwrapped(false), mGL(gl) {
-    MOZ_ASSERT(&ScopedGLWrapper::Unwrap == &Derived::Unwrap);
-    MOZ_ASSERT(&Derived::UnwrapImpl);
-  }
-
-  virtual ~ScopedGLWrapper() {
-    if (!mIsUnwrapped) Unwrap();
-  }
-
- public:
-  void Unwrap() {
-    MOZ_ASSERT(!mIsUnwrapped);
-
-    Derived* derived = static_cast<Derived*>(this);
-    derived->UnwrapImpl();
-
-    mIsUnwrapped = true;
-  }
-};
-
 // Wraps glEnable/Disable.
-struct ScopedGLState : public ScopedGLWrapper<ScopedGLState> {
-  friend struct ScopedGLWrapper<ScopedGLState>;
-
- protected:
+struct ScopedGLState final {
+ private:
+  GLContext* const mGL;
   const GLenum mCapability;
   bool mOldState;
 
@@ -60,106 +30,88 @@ struct ScopedGLState : public ScopedGLWrapper {
   // variant that doesn't change state; simply records existing state to be
   // restored by the destructor
   ScopedGLState(GLContext* aGL, GLenum aCapability);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedGLState();
 };
 
 // Saves and restores with GetUserBoundFB and BindUserFB.
-struct ScopedBindFramebuffer : public ScopedGLWrapper<ScopedBindFramebuffer> {
-  friend struct ScopedGLWrapper<ScopedBindFramebuffer>;
-
- protected:
+struct ScopedBindFramebuffer final {
+ private:
+  GLContext* const mGL;
   GLuint mOldReadFB;
   GLuint mOldDrawFB;
 
- private:
   void Init();
 
  public:
   explicit ScopedBindFramebuffer(GLContext* aGL);
   ScopedBindFramebuffer(GLContext* aGL, GLuint aNewFB);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedBindFramebuffer();
 };
 
-struct ScopedBindTextureUnit : public ScopedGLWrapper<ScopedBindTextureUnit> {
-  friend struct ScopedGLWrapper<ScopedBindTextureUnit>;
-
- protected:
+struct ScopedBindTextureUnit final {
+ private:
+  GLContext* const mGL;
   GLenum mOldTexUnit;
 
  public:
   ScopedBindTextureUnit(GLContext* aGL, GLenum aTexUnit);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedBindTextureUnit();
 };
 
-struct ScopedTexture : public ScopedGLWrapper<ScopedTexture> {
-  friend struct ScopedGLWrapper<ScopedTexture>;
-
- protected:
+struct ScopedTexture final {
+ private:
+  GLContext* const mGL;
   GLuint mTexture;
 
  public:
   explicit ScopedTexture(GLContext* aGL);
+  ~ScopedTexture();
 
   GLuint Texture() const { return mTexture; }
   operator GLuint() const { return mTexture; }
-
- protected:
-  void UnwrapImpl();
 };
 
-struct ScopedFramebuffer : public ScopedGLWrapper<ScopedFramebuffer> {
-  friend struct ScopedGLWrapper<ScopedFramebuffer>;
-
- protected:
+struct ScopedFramebuffer final {
+ private:
+  GLContext* const mGL;
   GLuint mFB;
 
  public:
   explicit ScopedFramebuffer(GLContext* aGL);
-  GLuint FB() { return mFB; }
+  ~ScopedFramebuffer();
 
- protected:
-  void UnwrapImpl();
+  GLuint FB() { return mFB; }
+  operator GLuint() const { return mFB; }
 };
 
-struct ScopedRenderbuffer : public ScopedGLWrapper<ScopedRenderbuffer> {
-  friend struct ScopedGLWrapper<ScopedRenderbuffer>;
-
- protected:
+struct ScopedRenderbuffer final {
+ private:
+  GLContext* const mGL;
   GLuint mRB;
 
  public:
   explicit ScopedRenderbuffer(GLContext* aGL);
-  GLuint RB() { return mRB; }
+  ~ScopedRenderbuffer();
 
- protected:
-  void UnwrapImpl();
+  GLuint RB() { return mRB; }
+  operator GLuint() const { return mRB; }
 };
 
-struct ScopedBindTexture : public ScopedGLWrapper<ScopedBindTexture> {
-  friend struct ScopedGLWrapper<ScopedBindTexture>;
-
- protected:
+struct ScopedBindTexture final {
+ private:
+  GLContext* const mGL;
   const GLenum mTarget;
   const GLuint mOldTex;
 
  public:
   ScopedBindTexture(GLContext* aGL, GLuint aNewTex,
                     GLenum aTarget = LOCAL_GL_TEXTURE_2D);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedBindTexture();
 };
 
-struct ScopedBindRenderbuffer : public ScopedGLWrapper<ScopedBindRenderbuffer> {
-  friend struct ScopedGLWrapper<ScopedBindRenderbuffer>;
-
- protected:
+struct ScopedBindRenderbuffer final {
+ private:
+  GLContext* const mGL;
   GLuint mOldRB;
 
  private:
@@ -167,24 +119,20 @@ struct ScopedBindRenderbuffer : public ScopedGLWrapper {
 
  public:
   explicit ScopedBindRenderbuffer(GLContext* aGL);
-
   ScopedBindRenderbuffer(GLContext* aGL, GLuint aNewRB);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedBindRenderbuffer();
 };
 
-struct ScopedFramebufferForTexture
-    : public ScopedGLWrapper<ScopedFramebufferForTexture> {
-  friend struct ScopedGLWrapper<ScopedFramebufferForTexture>;
-
- protected:
+struct ScopedFramebufferForTexture final {
+ private:
+  GLContext* const mGL;
   bool mComplete;  // True if the framebuffer we create is complete.
   GLuint mFB;
 
  public:
   ScopedFramebufferForTexture(GLContext* aGL, GLuint aTexture,
                               GLenum aTarget = LOCAL_GL_TEXTURE_2D);
+  ~ScopedFramebufferForTexture();
 
   bool IsComplete() const { return mComplete; }
 
@@ -192,64 +140,51 @@ struct ScopedFramebufferForTexture
     MOZ_GL_ASSERT(mGL, IsComplete());
     return mFB;
   }
-
- protected:
-  void UnwrapImpl();
 };
 
-struct ScopedFramebufferForRenderbuffer
-    : public ScopedGLWrapper<ScopedFramebufferForRenderbuffer> {
-  friend struct ScopedGLWrapper<ScopedFramebufferForRenderbuffer>;
-
- protected:
+struct ScopedFramebufferForRenderbuffer final {
+ private:
+  GLContext* const mGL;
   bool mComplete;  // True if the framebuffer we create is complete.
   GLuint mFB;
 
  public:
   ScopedFramebufferForRenderbuffer(GLContext* aGL, GLuint aRB);
+  ~ScopedFramebufferForRenderbuffer();
 
   bool IsComplete() const { return mComplete; }
-
-  GLuint FB() const { return mFB; }
-
- protected:
-  void UnwrapImpl();
+  GLuint FB() const {
+    MOZ_GL_ASSERT(mGL, IsComplete());
+    return mFB;
+  }
 };
 
-struct ScopedViewportRect : public ScopedGLWrapper<ScopedViewportRect> {
-  friend struct ScopedGLWrapper<ScopedViewportRect>;
-
- protected:
+struct ScopedViewportRect final {
+ private:
+  GLContext* const mGL;
   GLint mSavedViewportRect[4];
 
  public:
   ScopedViewportRect(GLContext* aGL, GLint x, GLint y, GLsizei width,
                      GLsizei height);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedViewportRect();
 };
 
-struct ScopedScissorRect : public ScopedGLWrapper<ScopedScissorRect> {
-  friend struct ScopedGLWrapper<ScopedScissorRect>;
-
- protected:
+struct ScopedScissorRect final {
+ private:
+  GLContext* const mGL;
   GLint mSavedScissorRect[4];
 
  public:
   ScopedScissorRect(GLContext* aGL, GLint x, GLint y, GLsizei width,
                     GLsizei height);
   explicit ScopedScissorRect(GLContext* aGL);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedScissorRect();
 };
 
-struct ScopedVertexAttribPointer
-    : public ScopedGLWrapper<ScopedVertexAttribPointer> {
-  friend struct ScopedGLWrapper<ScopedVertexAttribPointer>;
-
- protected:
+struct ScopedVertexAttribPointer final {
+ private:
+  GLContext* const mGL;
   GLuint mAttribIndex;
   GLint mAttribEnabled;
   GLint mAttribSize;
@@ -266,16 +201,15 @@ struct ScopedVertexAttribPointer
                             GLsizei stride, GLuint buffer,
                             const GLvoid* pointer);
   explicit ScopedVertexAttribPointer(GLContext* aGL, GLuint index);
+  ~ScopedVertexAttribPointer();
 
- protected:
+ private:
   void WrapImpl(GLuint index);
-  void UnwrapImpl();
 };
 
-struct ScopedPackState : public ScopedGLWrapper<ScopedPackState> {
-  friend struct ScopedGLWrapper<ScopedPackState>;
-
- protected:
+struct ScopedPackState final {
+ private:
+  GLContext* const mGL;
   GLint mAlignment;
 
   GLuint mPixelBuffer;
@@ -285,18 +219,15 @@ struct ScopedPackState : public ScopedGLWrapper {
 
  public:
   explicit ScopedPackState(GLContext* gl);
+  ~ScopedPackState();
 
   // Returns whether the stride was handled successfully.
   bool SetForWidthAndStrideRGBA(GLsizei aWidth, GLsizei aStride);
-
- protected:
-  void UnwrapImpl();
 };
 
-struct ResetUnpackState : public ScopedGLWrapper<ResetUnpackState> {
-  friend struct ScopedGLWrapper<ResetUnpackState>;
-
- protected:
+struct ResetUnpackState final {
+ private:
+  GLContext* const mGL;
   GLuint mAlignment;
 
   GLuint mPBO;
@@ -308,23 +239,18 @@ struct ResetUnpackState : public ScopedGLWrapper {
 
  public:
   explicit ResetUnpackState(GLContext* gl);
-
- protected:
-  void UnwrapImpl();
+  ~ResetUnpackState();
 };
 
-struct ScopedBindPBO final : public ScopedGLWrapper<ScopedBindPBO> {
-  friend struct ScopedGLWrapper<ScopedBindPBO>;
-
- protected:
+struct ScopedBindPBO final {
+ private:
+  GLContext* const mGL;
   const GLenum mTarget;
   const GLuint mPBO;
 
  public:
   ScopedBindPBO(GLContext* gl, GLenum target);
-
- protected:
-  void UnwrapImpl();
+  ~ScopedBindPBO();
 };
 
 } /* namespace gl */
diff --git a/gfx/layers/GLImages.h b/gfx/layers/GLImages.h
index bc8b74361a..183631e932 100644
--- a/gfx/layers/GLImages.h
+++ b/gfx/layers/GLImages.h
@@ -29,6 +29,12 @@ class GLImage : public Image {
 
 class SurfaceTextureImage : public GLImage {
  public:
+  class SetCurrentCallback {
+   public:
+    virtual void operator()(void) = 0;
+    virtual ~SetCurrentCallback() {}
+  };
+
   SurfaceTextureImage(AndroidSurfaceTextureHandle aHandle,
                       const gfx::IntSize& aSize, bool aContinuous,
                       gl::OriginPos aOriginPos, bool aHasAlpha = true);
@@ -48,12 +54,24 @@ class SurfaceTextureImage : public GLImage {
 
   SurfaceTextureImage* AsSurfaceTextureImage() override { return this; }
 
+  void RegisterSetCurrentCallback(UniquePtr<SetCurrentCallback> aCallback) {
+    mSetCurrentCallback = std::move(aCallback);
+  }
+
+  void OnSetCurrent() {
+    if (mSetCurrentCallback) {
+      (*mSetCurrentCallback)();
+      mSetCurrentCallback.reset();
+    }
+  }
+
  private:
   AndroidSurfaceTextureHandle mHandle;
   gfx::IntSize mSize;
   bool mContinuous;
   gl::OriginPos mOriginPos;
   const bool mHasAlpha;
+  UniquePtr<SetCurrentCallback> mSetCurrentCallback;
 };
 
 #endif  // MOZ_WIDGET_ANDROID
diff --git a/gfx/layers/ProfilerScreenshots.cpp b/gfx/layers/ProfilerScreenshots.cpp
index cbe0c5494e..2c906dcb61 100644
--- a/gfx/layers/ProfilerScreenshots.cpp
+++ b/gfx/layers/ProfilerScreenshots.cpp
@@ -60,7 +60,7 @@ void ProfilerScreenshots::SubmitScreenshot(
 
   RefPtr<ProfilerScreenshots> self = this;
 
-  NS_DispatchToBackgroundThread(NS_NewRunnableFunction(
+  NS_DispatchBackgroundTask(NS_NewRunnableFunction(
       "ProfilerScreenshots::SubmitScreenshot",
       [self{std::move(self)}, backingSurface, sourceThread, windowIdentifier,
        originalSize, scaledSize, timeStamp]() {
diff --git a/js/xpconnect/src/ExportHelpers.cpp b/js/xpconnect/src/ExportHelpers.cpp
index 72ad053d5a..3cc620a97f 100644
--- a/js/xpconnect/src/ExportHelpers.cpp
+++ b/js/xpconnect/src/ExportHelpers.cpp
@@ -104,6 +104,10 @@ class MOZ_STACK_CLASS StackScopedCloneData : public StructuredCloneHolderBase {
       JS::Rooted<JS::Value> val(aCx);
       {
         RefPtr<Blob> blob = Blob::Create(global, mBlobImpls[idx]);
+        if (NS_WARN_IF(!blob)) {
+          return nullptr;
+        }
+
         if (!ToJSValue(aCx, blob, &val)) {
           return nullptr;
         }
diff --git a/layout/build/nsLayoutStatics.cpp b/layout/build/nsLayoutStatics.cpp
index b8efd5ce43..846e53dbbc 100644
--- a/layout/build/nsLayoutStatics.cpp
+++ b/layout/build/nsLayoutStatics.cpp
@@ -68,7 +68,6 @@
 
 #include "AudioChannelService.h"
 #include "mozilla/dom/PromiseDebugging.h"
-#include "mozilla/dom/WebCryptoThreadPool.h"
 
 #ifdef MOZ_XUL
 #  include "nsXULPopupManager.h"
@@ -275,8 +274,6 @@ nsresult nsLayoutStatics::Initialize() {
 
   PromiseDebugging::Init();
 
-  mozilla::dom::WebCryptoThreadPool::Initialize();
-
   if (XRE_IsParentProcess() || XRE_IsContentProcess()) {
     InitializeServo();
   }
diff --git a/layout/forms/nsFileControlFrame.cpp b/layout/forms/nsFileControlFrame.cpp
index bf399f4a52..1812e3574f 100644
--- a/layout/forms/nsFileControlFrame.cpp
+++ b/layout/forms/nsFileControlFrame.cpp
@@ -329,7 +329,7 @@ static void AppendBlobImplAsDirectory(nsTArray& aArray,
     return;
   }
 
-  RefPtr<Directory> directory = Directory::Create(inner, file);
+  RefPtr<Directory> directory = Directory::Create(inner->AsGlobal(), file);
   MOZ_ASSERT(directory);
 
   OwningFileOrDirectory* element = aArray.AppendElement();
diff --git a/mfbt/RefPtr.h b/mfbt/RefPtr.h
index 822327f12d..b95ec08d64 100644
--- a/mfbt/RefPtr.h
+++ b/mfbt/RefPtr.h
@@ -10,6 +10,8 @@
 #include "mozilla/Attributes.h"
 #include "mozilla/DbgMacro.h"
 
+#include <type_traits>
+
 /*****************************************************************************/
 
 // template <class T> class RefPtrGetterAddRefs;
@@ -108,19 +110,22 @@ class MOZ_IS_REFPTR RefPtr {
 
   MOZ_IMPLICIT RefPtr(decltype(nullptr)) : mRawPtr(nullptr) {}
 
-  template <typename I>
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
   MOZ_IMPLICIT RefPtr(already_AddRefed<I>& aSmartPtr)
       : mRawPtr(aSmartPtr.take())
   // construct from |already_AddRefed|
   {}
 
-  template <typename I>
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
   MOZ_IMPLICIT RefPtr(already_AddRefed<I>&& aSmartPtr)
       : mRawPtr(aSmartPtr.take())
   // construct from |otherRefPtr.forget()|
   {}
 
-  template <typename I>
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
   MOZ_IMPLICIT RefPtr(const RefPtr<I>& aSmartPtr)
       : mRawPtr(aSmartPtr.get())
   // copy-construct from a smart pointer with a related pointer type
@@ -130,7 +135,8 @@ class MOZ_IS_REFPTR RefPtr {
     }
   }
 
-  template <typename I>
+  template <typename I,
+            typename = std::enable_if_t<std::is_convertible_v<I*, T*>>>
   MOZ_IMPLICIT RefPtr(RefPtr<I>&& aSmartPtr)
       : mRawPtr(aSmartPtr.forget().take())
   // construct from |Move(RefPtr)|.
diff --git a/mfbt/tests/TestRefPtr.cpp b/mfbt/tests/TestRefPtr.cpp
index 9fb98d347e..97bd933a94 100644
--- a/mfbt/tests/TestRefPtr.cpp
+++ b/mfbt/tests/TestRefPtr.cpp
@@ -112,5 +112,13 @@ int main() {
   }
   MOZ_RELEASE_ASSERT(11 == Foo::sNumDestroyed);
 
+  {
+    bool condition = true;
+    const auto f =
+        condition ? mozilla::MakeRefPtr<Bar>() : mozilla::MakeRefPtr<Foo>();
+
+    MOZ_RELEASE_ASSERT(f);
+  }
+
   return 0;
 }
diff --git a/modules/libpref/init/StaticPrefList.yaml b/modules/libpref/init/StaticPrefList.yaml
index 36a906083a..5e20f443e9 100644
--- a/modules/libpref/init/StaticPrefList.yaml
+++ b/modules/libpref/init/StaticPrefList.yaml
@@ -5807,10 +5807,9 @@
 
   # Use MediaDataDecoder API for VP8/VP9 in WebRTC. This includes hardware
   # acceleration for decoding.
-  # disable on android bug 1509316
 -   name: media.navigator.mediadatadecoder_vpx_enabled
     type: RelaxedAtomicBool
-  #if defined(NIGHTLY_BUILD) && !defined(ANDROID)
+  #if defined(NIGHTLY_BUILD)
     value: true
   #else
     value: false
@@ -5818,12 +5817,10 @@
     mirror: always
 
   # Use MediaDataDecoder API for H264 in WebRTC. This includes hardware
-  # acceleration for decoding. False on Android due to bug 1509316.
+  # acceleration for decoding.
 -   name: media.navigator.mediadatadecoder_h264_enabled
     type: RelaxedAtomicBool
-  #if defined(ANDROID)
-    value: false
-  #elif defined(_ARM64_) && defined(XP_WIN)
+  #if defined(_ARM64_) && defined(XP_WIN)
     value: false
   #else
     value: true
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-creation.https.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-creation.https.html.ini
deleted file mode 100644
index 983b8299ee..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-creation.https.html.ini
+++ /dev/null
@@ -1,10 +0,0 @@
-[MediaRecorder-creation.https.html]
-  [Video+Audio MediaRecorder]
-    expected: FAIL
-
-  [Video-only MediaRecorder]
-    expected: FAIL
-
-  [Audio-only MediaRecorder]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-destroy-script-execution.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-destroy-script-execution.html.ini
deleted file mode 100644
index df4f35fac4..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-destroy-script-execution.html.ini
+++ /dev/null
@@ -1,5 +0,0 @@
-[MediaRecorder-destroy-script-execution.html]
-  expected: TIMEOUT
-  [MediaRecorder will not fire the stop event when all tracks are ended and the script execution context is going away]
-    expected: NOTRUN
-
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-disabled-tracks.https.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-disabled-tracks.https.html.ini
deleted file mode 100644
index be5a714951..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-disabled-tracks.https.html.ini
+++ /dev/null
@@ -1,10 +0,0 @@
-[MediaRecorder-disabled-tracks.https.html]
-  [audio-video]
-    expected: FAIL
-
-  [video-only]
-    expected: FAIL
-
-  [audio-only]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-error.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-error.html.ini
deleted file mode 100644
index c52bb7b6c8..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-error.html.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-[MediaRecorder-error.html]
-  [MediaRecorder will stop recording when any of track is removed and error event will be fired]
-    expected: FAIL
-
-  [MediaRecorder will stop recording when any of track is added and error event will be fired]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-events-and-exceptions.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-events-and-exceptions.html.ini
deleted file mode 100644
index e81b7344a5..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-events-and-exceptions.html.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[MediaRecorder-events-and-exceptions.html]
-  [MediaRecorder events and exceptions]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/mediacapture-record/MediaRecorder-stop.html.ini b/testing/web-platform/meta/mediacapture-record/MediaRecorder-stop.html.ini
deleted file mode 100644
index 7e18bee2bf..0000000000
--- a/testing/web-platform/meta/mediacapture-record/MediaRecorder-stop.html.ini
+++ /dev/null
@@ -1,8 +0,0 @@
-[MediaRecorder-stop.html]
-  expected: TIMEOUT
-  [MediaRecorder will stop recording and fire a stop event when stop() is called]
-    expected: NOTRUN
-
-  [MediaRecorder will stop recording and fire a stop event when all tracks are ended]
-    expected: TIMEOUT
-
diff --git a/testing/web-platform/meta/mediacapture-record/__dir__.ini b/testing/web-platform/meta/mediacapture-record/__dir__.ini
new file mode 100644
index 0000000000..fcfb216d29
--- /dev/null
+++ b/testing/web-platform/meta/mediacapture-record/__dir__.ini
@@ -0,0 +1 @@
+prefs: [media.navigator.permission.disabled:true, media.navigator.streams.fake:true, media.devices.insecure.enabled:false, dom.security.featurePolicy.enabled:true, dom.security.featurePolicy.header.enabled:true, dom.security.featurePolicy.webidl.enabled:true]
diff --git a/testing/web-platform/meta/mediacapture-record/idlharness.window.js.ini b/testing/web-platform/meta/mediacapture-record/idlharness.window.js.ini
index bb6b454b39..1b5314119b 100644
--- a/testing/web-platform/meta/mediacapture-record/idlharness.window.js.ini
+++ b/testing/web-platform/meta/mediacapture-record/idlharness.window.js.ini
@@ -1,16 +1,4 @@
 [idlharness.window.html]
-  [MediaRecorder interface: attribute videoBitsPerSecond]
-    expected: FAIL
-
-  [MediaRecorder interface: attribute audioBitsPerSecond]
-    expected: FAIL
-
-  [MediaRecorder interface: [object MediaRecorder\] must inherit property "videoBitsPerSecond" with the proper type]
-    expected: FAIL
-
-  [MediaRecorder interface: [object MediaRecorder\] must inherit property "audioBitsPerSecond" with the proper type]
-    expected: FAIL
-
   [BlobEvent interface object length]
     expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html.ini
new file mode 100644
index 0000000000..69cdb7f118
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html.ini
@@ -0,0 +1,21 @@
+[audiobuffer-copy-channel.html]
+  [X 4: buffer.copyToChannel(x, 0, -1) incorrectly threw IndexSizeError: "Index or size is negative or greater than the allowed amount".]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 2 out of 5 tasks were failed.]
+    expected: FAIL
+
+  [X 5: buffer.copyFromChannel(x, 0, -1) incorrectly threw IndexSizeError: "Index or size is negative or greater than the allowed amount".]
+    expected: FAIL
+
+  [< [copyFrom-exceptions\] 1 out of 9 assertions were failed.]
+    expected: FAIL
+
+  [< [copyTo-exceptions\] 1 out of 8 assertions were failed.]
+    expected: FAIL
+
+  [Executing "copyFrom-exceptions"]
+    expected: FAIL
+
+  [Executing "copyTo-exceptions"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html.ini
new file mode 100644
index 0000000000..22d83ce6e2
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffer-interface/ctor-audiobuffer.html.ini
@@ -0,0 +1,3 @@
+[ctor-audiobuffer.html]
+  disabled:
+    if processor == "x86": wpt-sync Bug 1559261
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html.ini
index 94a0458195..c49269ce1c 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/active-processing.https.html.ini
@@ -1,6 +1,9 @@
 [active-processing.https.html]
-  expected:
-    if release_or_beta: ERROR
+  [Executing "Setup graph"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+        if release_or_beta: FAIL
+
   [X Number of channels in input[-1:\]: Expected 1 for all values but found 1 unexpected values: \n\tIndex\tActual\n\t[0\]\t0]
     expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html.ini
index b1b1fdea57..5495074dac 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/audiosource-time-limits.html.ini
@@ -1,2 +1,6 @@
 [audiosource-time-limits.html]
-  expected: ERROR
+  [Executing "buffersource: huge stop time"]
+    expected: FAIL
+
+  [Executing "oscillator: huge stop time"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html.ini
index 2f6d9df290..ec359fc17e 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiobuffersourcenode-interface/sub-sample-buffer-stitching.html.ini
@@ -70,3 +70,5 @@
   [X Stitched sine-wave buffers at sample rate 44100 does not equal [0,0.06264832615852356,0.12505052983760834,0.18696144223213196,0.24813786149024963,0.308339387178421,0.36732956767082214,0.4248766303062439,0.4807544946670532,0.5347436666488647,0.5866319537162781,0.6362155675888062,0.683299720287323,0.7276993989944458,0.7692402005195618,0.8077588677406311...\] with an element-wise tolerance of {"absoluteThreshold":0.000090957,"relativeThreshold":0}.\n\tIndex\tActual\t\t\tExpected\t\tAbsError\t\tRelError\t\tTest threshold\n\t[2003\]\t-9.6732087433338165e-2\t-9.6823699772357941e-2\t9.1612339019775391e-5\t9.4617680624852212e-4\t9.0957000000000003e-5\n\t[2004\]\t-3.4187544137239456e-2\t-3.4279607236385345e-2\t9.2063099145889282e-5\t2.6856520995424621e-3\t9.0957000000000003e-5\n\t[2005\]\t2.8491314500570297e-2\t2.8398986905813217e-2\t9.2327594757080078e-5\t3.2510876202481997e-3\t9.0957000000000003e-5\n\t[2006\]\t9.1058239340782166e-2\t9.0966261923313141e-2\t9.1977417469024658e-5\t1.0111157205356415e-3\t9.0957000000000003e-5\n\t[2007\]\t1.5326742827892303e-1\t1.5317615866661072e-1\t9.1269612312316895e-5\t5.9584737668585898e-4\t9.0957000000000003e-5\n\t...and 38030 more errors.\n\tMax AbsError of 9.2362356169980164e-1 at index of 43530.\n\t[43530\]\t1.5932920260919303e-10\t9.2362356185913086e-1\t9.2362356169980164e-1\t9.9999999982749554e-1\t9.0957000000000003e-5\n\tMax RelError of 5.5714977262789269e+1 at index of 30419.\n\t[30419\]\t-1.4247581129893661e-3\t-2.5121373255387880e-5\t1.3996367397339782e-3\t5.5714977262789269e+1\t9.0957000000000003e-5\n]
     expected: FAIL
 
+  [X Stitched sine-wave buffers at sample rate 44100 does not equal [0,0.06264832615852356,0.12505052983760834,0.18696144223213196,0.24813786149024963,0.308339387178421,0.36732956767082214,0.4248766303062439,0.4807544946670532,0.5347436666488647,0.5866319537162781,0.6362155675888062,0.683299720287323,0.7276993989944458,0.7692402005195618,0.8077588677406311...\] with an element-wise tolerance of {"absoluteThreshold":0.000090957,"relativeThreshold":0}.\n\tIndex\tActual\t\t\tExpected\t\tAbsError\t\tRelError\t\tTest threshold\n\t[2003\]\t-9.6732087433338165e-2\t-9.6823699772357941e-2\t9.1612339019775391e-5\t9.4617680624852212e-4\t9.0957000000000003e-5\n\t[2004\]\t-3.4187544137239456e-2\t-3.4279607236385345e-2\t9.2063099145889282e-5\t2.6856520995424621e-3\t9.0957000000000003e-5\n\t[2005\]\t2.8491314500570297e-2\t2.8398986905813217e-2\t9.2327594757080078e-5\t3.2510876202481997e-3\t9.0957000000000003e-5\n\t[2006\]\t9.1058239340782166e-2\t9.0966261923313141e-2\t9.1977417469024658e-5\t1.0111157205356415e-3\t9.0957000000000003e-5\n\t[2007\]\t1.5326742827892303e-1\t1.5317615866661072e-1\t9.1269612312316895e-5\t5.9584737668585898e-4\t9.0957000000000003e-5\n\t...and 38032 more errors.\n\tMax AbsError of 9.9982368946136246e-1 at index of 40868.\n\t[40868\]\t6.0811311498529252e-13\t-9.9982368946075439e-1\t9.9982368946136246e-1\t1.0000000000006082e+0\t9.0957000000000003e-5\n\tMax RelError of 5.5714977262789269e+1 at index of 30419.\n\t[30419\]\t-1.4247581129893661e-3\t-2.5121373255387880e-5\t1.3996367397339782e-3\t5.5714977262789269e+1\t9.0957000000000003e-5\n]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html.ini
index ea96d4b24a..c57f0ac655 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audiocontext-interface/audiocontext-suspend-resume.html.ini
@@ -6,3 +6,17 @@
   [X p1 instanceof Promise is not true. Got false.]
     expected: FAIL
 
+  [Executing "test-suspend"]
+    expected: FAIL
+
+  [< [test-suspend\] 2 out of 4 assertions were failed.]
+    expected: FAIL
+
+  [Executing "test-after-close"]
+    expected: TIMEOUT
+
+  [Audit report]
+    expected: NOTRUN
+
+  [Executing "resume-running-context"]
+    expected: NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html.ini
index cad8cb42b1..102e4f6151 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audionode-interface/audionode-disconnect-audioparam.html.ini
@@ -1,2 +1,8 @@
 [audionode-disconnect-audioparam.html]
-  expected: ERROR
+  [Executing "disconnect(AudioParam)"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "disconnect(AudioParam, output)"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/adding-events.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/adding-events.html.ini
index 56096461d8..b32ab3cd6d 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/adding-events.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/adding-events.html.ini
@@ -1,2 +1,8 @@
 [adding-events.html]
-  expected: ERROR
+  [Executing "linearRamp"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "expoRamp"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html.ini
index eacbf1d55b..6d2c44c45a 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-large-endtime.html.ini
@@ -1,2 +1,6 @@
 [audioparam-large-endtime.html]
-  expected: ERROR
+  [Executing "linearRamp"]
+    expected: FAIL
+
+  [Executing "exponentialRamp"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html.ini
deleted file mode 100644
index 8fd28847a7..0000000000
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/audioparam-method-chaining.html.ini
+++ /dev/null
@@ -1,16 +0,0 @@
-[audioparam-method-chaining.html]
-  [X The gain value of the second gain node is not equal to 0.5. Got 1.]
-    expected: FAIL
-
-  [< [invalid-operation\] 1 out of 4 assertions were failed.]
-    expected: FAIL
-
-  [# AUDIT TASK RUNNER FINISHED: 1 out of 3 tasks were failed.]
-    expected: FAIL
-
-  [X Calling exponentialRampToValueAtTime() with a zero target value threw "SyntaxError" instead of EcmaScript error RangeError.]
-    expected: FAIL
-
-  [X Calling setValueAtTime() with a negative end time threw "NotSupportedError" instead of EcmaScript error RangeError.]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html.ini
index 4f8b38ef21..a5f7fb4b97 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/automation-rate.html.ini
@@ -1,5 +1,4 @@
 [automation-rate.html]
-  expected: ERROR
   [X Default AudioBufferSourceNode.detune.automationRate is not equal to k-rate. Got undefined.]
     expected: FAIL
 
@@ -117,3 +116,8 @@
   [< [StereoPannerNode\] 1 out of 3 assertions were failed.]
     expected: FAIL
 
+  [Executing "AudioListener"]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 9 out of 10 tasks were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html.ini
index 61a3eca9fc..d89a871ea3 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-audioworklet.https.html.ini
@@ -1,3 +1,9 @@
 [k-rate-audioworklet.https.html]
-  expected: ERROR
-  bug: AudioWorketNode::GetParameters is not implemented
+  [Executing "Create Test Worklet"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+        if release_or_beta: FAIL
+
+  [Executing "AudioWorklet k-rate AudioParam"]
+    expected: FAIL
+    bug: AudioWorkletNode::GetParameters is not implemented
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html.ini
index da6cbb076a..6be80d8032 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/k-rate-panner.html.ini
@@ -1,5 +1,4 @@
 [k-rate-panner.html]
-  expected: ERROR
   [X k-rate positionX: Output of k-rate PannerNode expected to be equal to the array [0,0.0033637583255767822,0.0062364861369132996,0.008294522762298584,0.009320056065917015,0.009223895147442818,0.00805223174393177,0.005976978689432144,0.0032713967375457287,0.00027436629170551896,-0.0026520458050072193,-0.005166400223970413,-0.006987776607275009,-0.00792707223445177,-0.00790623389184475,-0.006963531486690044...\] but differs in 1 places:\n\tIndex\tActual\t\t\tExpected\n\t[80\]\tNaN\tNaN]
     expected: FAIL
 
@@ -279,3 +278,32 @@
   [X k-rate orientationZ k-rate output [384: 511\]: Expected 0.10211683064699173 for all values but found 127 unexpected values: \n\tIndex\tActual\n\t[1\]\t0.10214255005121231\n\t[2\]\t0.10216815769672394\n\t[3\]\t0.10219366103410721\n\t[4\]\t0.10221906751394272\n\t...and 123 more errors.]
     expected: FAIL
 
+  [Executing "Listener k-rate positionX"]
+    expected: FAIL
+
+  [Executing "Listener k-rate positionY"]
+    expected: FAIL
+
+  [Executing "Listener k-rate positionZ"]
+    expected: FAIL
+
+  [Executing "Listener k-rate forwardX"]
+    expected: FAIL
+
+  [Executing "Listener k-rate forwardY"]
+    expected: FAIL
+
+  [Executing "Listener k-rate forwardZ"]
+    expected: FAIL
+
+  [Executing "Listener k-rate upX"]
+    expected: FAIL
+
+  [Executing "Listener k-rate upY"]
+    expected: FAIL
+
+  [Executing "Listener k-rate upZ"]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 5 out of 14 tasks were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html.ini
index 40b8609176..ddaf63e872 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-exponentialRampToValueAtTime.html.ini
@@ -1,2 +1,4 @@
 [retrospective-exponentialRampToValueAtTime.html]
-  expected: ERROR
+  [Executing "test"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html.ini
index b1b685eaef..dec2b8f570 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-linearRampToValueAtTime.html.ini
@@ -1,2 +1,4 @@
 [retrospective-linearRampToValueAtTime.html]
-  expected: ERROR
+  [Executing "test"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html.ini
index b100363750..1fa1e5a599 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setTargetAtTime.html.ini
@@ -1,2 +1,4 @@
 [retrospective-setTargetAtTime.html]
-  expected: ERROR
+  [Executing "test"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html.ini
index c9647f410f..7316424cfa 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueAtTime.html.ini
@@ -1,2 +1,4 @@
 [retrospective-setValueAtTime.html]
-  expected: ERROR
+  [Executing "test"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html.ini
index 28e1d12c23..be9dd3d2dc 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioparam-interface/retrospective-setValueCurveAtTime.html.ini
@@ -1,2 +1,4 @@
 [retrospective-setValueCurveAtTime.html]
-  expected: ERROR
+  [Executing "test"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html.ini
deleted file mode 100644
index 7aed531655..0000000000
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-addmodule-resolution.https.html.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[audioworklet-addmodule-resolution.https.html]
-  [\n      Test the invocation order of AudioWorklet.addModule() and BaseAudioContext\n    ]
-    expected:
-        if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html.ini
index 96c7333c9a..124a77fea8 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam-size.https.html.ini
@@ -1,2 +1,8 @@
 [audioworklet-audioparam-size.https.html]
-  expected: ERROR
+  [Executing "Initializing AudioWorklet and Context"]
+    expected:
+      if release_or_beta: FAIL
+    bug: AudioWorklet not enabled on release_or_beta
+
+  [Executing "Verify Size of AudioParam Arrays"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html.ini
index 5ae63f521c..9612de0bac 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-audioparam.https.html.ini
@@ -1,2 +1,8 @@
 [audioworklet-audioparam.https.html]
-  expected: ERROR
+  [Executing "Initializing AudioWorklet and Context"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "Verifying AudioParam in AudioWorkletNode"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html.ini
index 1e2702cab4..5a5d7730c6 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-messageport.https.html.ini
@@ -1,6 +1,11 @@
 [audioworklet-messageport.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if not release_or_beta: ERROR
-  [\n      Test MessagePort in AudioWorkletNode and AudioWorkletProcessor\n    ]
-    expected: FAIL
+    if release_or_beta: ERROR
+  [Executing "Test postMessage from AudioWorkletProcessor to AudioWorkletNode"]
+    expected:
+      if release_or_beta: FAIL
 
+  [Executing "Test postMessage from AudioWorkletNode to AudioWorkletProcessor"]
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.ini
index 63dd94b9f0..07ade99478 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.ini
@@ -1,4 +1,10 @@
 [audioworklet-postmessage-sharedarraybuffer.https.html]
-  [\n      Test passing SharedArrayBuffer to an AudioWorklet\n    ]
-    expected:
-        if release_or_beta: FAIL
+  bug: AudioWorklet not enabled on release_or_beta
+  expected:
+    if release_or_beta: ERROR
+    TIMEOUT
+  [Executing "Test postMessage from AudioWorkletProcessor to AudioWorkletNode"]
+    expected: TIMEOUT
+
+  [Audit report]
+    expected: NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html.ini
new file mode 100644
index 0000000000..ae500b6021
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html.ini
@@ -0,0 +1,5 @@
+[audioworklet-suspend.https.html]
+  [Executing "load-worklet-and-suspend"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html.ini
index 60a8810ace..0ba1a47a8e 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html.ini
@@ -1,9 +1,8 @@
 [audioworkletglobalscope-sample-rate.https.html]
-  bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1572627
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if release_or_beta: OK
-    ERROR
-  [\n      Test sampleRate in AudioWorkletGlobalScope\n    ]
-    expected:
-      if release_or_beta: FAIL
+    if release_or_beta: ERROR
+  [Executing "Query |sampleRate| upon AudioWorkletGlobalScope construction"]
+    bug: AudioWorkletNode.parameters https://bugzilla.mozilla.org/show_bug.cgi?id=1598114
+    expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html.ini
index c8b168398e..6a16c34491 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html.ini
@@ -1,6 +1,8 @@
 [audioworkletglobalscope-timing-info.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if not release_or_beta: ERROR
-  [\n      Test currentTime and currentFrame in AudioWorkletGlobalScope\n    ]
+    if release_or_beta: ERROR
+  [Executing "Check the timing information from AudioWorkletProcessor"]
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
     expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html.ini
index a9dede8200..5b43354d51 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-automatic-pull.https.html.ini
@@ -1,2 +1,14 @@
 [audioworkletnode-automatic-pull.https.html]
-  expected: ERROR
+  expected:
+    if release_or_beta: OK
+    TIMEOUT
+  [Executing "setup-worklet"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+      TIMEOUT
+
+  [Audit report]
+    expected:
+      if release_or_beta: PASS
+      NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html.ini
index 96ee512383..821554e3a9 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-channel-count.https.html.ini
@@ -1,2 +1,19 @@
 [audioworkletnode-channel-count.https.html]
-  expected: ERROR
+  expected:
+    if release_or_beta: OK
+    ERROR
+  [Executing "setup-buffer-and-worklet"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+      TIMEOUT
+
+  [Executing "verify-rendered-buffer"]
+    expected:
+      if release_or_beta: PASS
+      NOTRUN
+
+  [Audit report]
+    expected:
+      if release_or_beta: PASS
+      NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html.ini
index f525a730d9..577626f62c 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-construction.https.html.ini
@@ -1,10 +1,20 @@
 [audioworkletnode-construction.https.html]
-  expected:
-    if release_or_beta: ERROR
   [X Creating a node before loading a module should throw. threw "ReferenceError" instead of InvalidStateError.]
+    bug: 1585946
     expected:
       if release_or_beta: FAIL
 
   [< [construction-before-module-loading\] 1 out of 1 assertions were failed.]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "construction-after-module-loading"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    bug: 1585946
     expected:
       if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html.ini
index 5ddb3c0f7d..75400c6103 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-constructor-options.https.html.ini
@@ -1,3 +1,55 @@
 [audioworkletnode-constructor-options.https.html]
-  expected:
-    if release_or_beta: ERROR
+  [Executing "setup"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [X Creating AudioWOrkletNode without options incorrectly threw ReferenceError: "AudioWorkletNode is not defined".]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "without-audio-node-options"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [< [without-audio-node-options\] 1 out of 1 assertions were failed.]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [X Creating AudioWOrkletNode with options: {"numberOfInputs":7,"numberOfOutputs":18,"channelCount":4,"channelCountMode":"clamped-max","channelInterpretation":"discrete"} incorrectly threw ReferenceError: "AudioWorkletNode is not defined".]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "audio-node-options"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [< [audio-node-options\] 1 out of 1 assertions were failed.]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "channel-count"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "channel-count-mode"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "channel-interpretation"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 2 out of 6 tasks were failed.]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html.ini
index 2949be5b50..fc7cf5ab47 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-disconnected-input.https.html.ini
@@ -1,2 +1,14 @@
 [audioworkletnode-disconnected-input.https.html]
-  expected: ERROR
+  expected:
+    if release_or_beta: OK
+    ERROR
+  [Executing "test"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+      TIMEOUT
+
+  [Audit report]
+    expected:
+      if release_or_beta: PASS
+      NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html.ini
index ce0799cdc0..39b655924e 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html.ini
@@ -1,6 +1,13 @@
 [audioworkletnode-onerror.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if not release_or_beta: TIMEOUT
-  [\n      Test onprocessorerror handler in AudioWorkletNode\n    ]
-    expected: FAIL
+    if release_or_beta: ERROR
+    TIMEOUT
+  [Audit report]
+    expected: NOTRUN
 
+  [Executing "constructor-error"]
+    expected: TIMEOUT
+
+  [Executing "process-error"]
+    expected: NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html.ini
index 35f2cb6f89..2570fdf84b 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html.ini
@@ -1,8 +1,13 @@
 [audioworkletnode-output-channel-count.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if release_or_beta: OK
-    ERROR
-  [\n      Test the construction of AudioWorkletNode with real-time context\n    ]
-    expected:
-      if release_or_beta: FAIL
+    if release_or_beta: ERROR
+  [X The expected output channel count is not equal to 17. Got 1.]
+    expected: FAIL
+
+  [< [Dynamically change the channel count to if unspecified.\] 1 out of 1 assertions were failed.]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html.ini
index a07e70774d..3ca63be2b3 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/audioworkletprocessor-options.https.html.ini
@@ -1,2 +1,19 @@
 [audioworkletprocessor-options.https.html]
-  expected: ERROR
+  [Executing "valid-processor-data"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "empty-option"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [X Number of properties in data from processor is not equal to 2. Got 3.]
+    expected: FAIL
+
+  [< [empty-option\] 1 out of 3 assertions were failed.]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html.ini
index 3adb91e822..b60de51811 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/baseaudiocontext-audioworklet.https.html.ini
@@ -1,3 +1,5 @@
 [baseaudiocontext-audioworklet.https.html]
-  expected:
-    if release_or_beta: ERROR
+  [Executing "Test if AudioWorklet exists"]
+    bug: 1585946
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html.ini
new file mode 100644
index 0000000000..0a68a18451
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html.ini
@@ -0,0 +1,9 @@
+[process-getter.https.html]
+  ['process' getter on instance]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  ['process' getter on prototype]
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html.ini
new file mode 100644
index 0000000000..78697c3f12
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html.ini
@@ -0,0 +1,11 @@
+[process-parameters.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
+  expected:
+    if release_or_beta: ERROR
+  [0 inputs; 3 outputs]
+    expected:
+      if release_or_beta: NOTRUN
+
+  [3 inputs; 0 outputs]
+    expected:
+      if release_or_beta: NOTRUN
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html.ini
new file mode 100644
index 0000000000..313e3a790d
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html.ini
@@ -0,0 +1,17 @@
+[processor-construction-port.https.html]
+  [Singleton AudioWorkletProcessor]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [new AudioWorkletProcessor() after super()]
+    expected:
+      if release_or_beta: FAIL
+
+  [new AudioWorkletProcessor() after new AudioWorkletProcessor()]
+    expected:
+      if release_or_beta: FAIL
+
+  [super() after new AudioWorkletProcessor()]
+    expected:
+      if release_or_beta: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html.ini
index cbd197ccf5..dd309a6969 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-audioworklet-interface/simple-input-output.https.html.ini
@@ -1,6 +1,14 @@
 [simple-input-output.https.html]
-  expected:
-    if release_or_beta: ERROR
+  [Executing "Initialize worklet"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "test"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
   [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
     expected: FAIL
 
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html.ini
index a09d9921c4..63063dff2d 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-biquadfilternode-interface/no-dezippering.html.ini
@@ -1,2 +1,24 @@
 [no-dezippering.html]
-  expected: ERROR
+  [Executing "Test 0"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "Test 1"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "Test 2"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "Test 3"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "Test 4"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "Test 5"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html.ini
index 116273a1a3..4cbbc935cd 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/active-processing.https.html.ini
@@ -1,2 +1,18 @@
 [active-processing.https.html]
-  expected: ERROR
+  [Executing "initialize"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "test"]
+    expected:
+      if release_or_beta: FAIL
+
+  [X Test 1: Number of convolver output channels is not equal to 1. Got 0.]
+    expected: FAIL
+
+  [< [test\] 1 out of 3 assertions were failed.]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html.ini
index c85f6c075d..a2ffb5b0ba 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-channelmergernode-interface/audiochannelmerger-disconnect.html.ini
@@ -1,2 +1,4 @@
 [audiochannelmerger-disconnect.html]
-  expected: ERROR
+  [Executing "silent-disconnect"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html.ini
new file mode 100644
index 0000000000..b543339665
--- /dev/null
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html.ini
@@ -0,0 +1,18 @@
+[active-processing.https.html]
+  [Executing "initialize"]
+    bug: AudioWorklet not enabled on release_or_beta
+    expected:
+      if release_or_beta: FAIL
+
+  [Executing "test"]
+    expected:
+      if release_or_beta: FAIL
+
+  [X Number of distinct values is not equal to 2. Got 0.]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    expected: FAIL
+
+  [< [test\] 1 out of 1 assertions were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html.ini
index 5335e1c802..5b7fc5dbb1 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-1-chan.html.ini
@@ -62,18 +62,3 @@
   [X 5.1: Channel 1 expected to be equal to the array [0,0,2.400212526321411,1.8464009761810303,1.2422339916229248,0.5788586139678955,0.3615038990974426,0.16441935300827026,-0.742911696434021,-1.530196189880371,-1.8989348411560059,-0.7277186512947083,0.010559797286987305,0.7105643153190613,1.748615026473999,0.26711004972457886...\] but differs in 1050 places:\n\tIndex\tActual\t\t\tExpected\n\t[0\]\t-5.9604644775390625e-8\t0.0000000000000000e+0\n\t[1\]\t7.0780515670776367e-8\t0.0000000000000000e+0\n\t[2\]\t2.4002122879028320e+0\t2.4002125263214111e+0\n\t[3\]\t1.8464007377624512e+0\t1.8464009761810303e+0\n\t...and 1046 more errors.]
     expected: FAIL
 
-  [# AUDIT TASK RUNNER FINISHED: 5 out of 6 tasks were failed.]
-    expected: FAIL
-
-  [< [1-channel input\] 1 out of 2 assertions were failed.]
-    expected: FAIL
-
-  [X 1: Channel 1: Expected 0 for all values but found 1279 unexpected values: \n\tIndex\tActual\n\t[1\]\t-2.9802322387695312e-8\n\t[2\]\t0.33110618591308594\n\t[3\]\t0.6248594522476196\n\t[4\]\t0.8481202721595764\n\t...and 1275 more errors.]
-    expected: FAIL
-
-  [# AUDIT TASK RUNNER FINISHED: 1 out of 6 tasks were failed.]
-    expected: FAIL
-
-  [X Convolver output does not equal [0,0,0.3311063051223755,0.6248595118522644,0.8481203317642212,0.9757021069526672,0.9932119250297546,0.8986744284629822,0.7027547359466553,0.42755505442619324,0.1041216030716896,-0.23105813562870026,-0.5401715040206909,-0.7883464694023132,-0.9475855827331543,-0.9999247193336487...\] with an element-wise tolerance of {"absoluteThreshold":4.1724e-7,"relativeThreshold":0}.\n\tIndex\tActual\t\t\tExpected\t\tAbsError\t\tRelError\t\tTest threshold\n\t[2\]\t0.0000000000000000e+0\t3.3110630512237549e-1\t3.3110630512237549e-1\t1.0000000000000000e+0\t4.1724000000000000e-7\n\t[3\]\t0.0000000000000000e+0\t6.2485951185226440e-1\t6.2485951185226440e-1\t1.0000000000000000e+0\t4.1724000000000000e-7\n\t[4\]\t0.0000000000000000e+0\t8.4812033176422119e-1\t8.4812033176422119e-1\t1.0000000000000000e+0\t4.1724000000000000e-7\n\t[5\]\t0.0000000000000000e+0\t9.7570210695266724e-1\t9.7570210695266724e-1\t1.0000000000000000e+0\t4.1724000000000000e-7\n\t[6\]\t0.0000000000000000e+0\t9.9321192502975464e-1\t9.9321192502975464e-1\t1.0000000000000000e+0\t4.1724000000000000e-7\n\t...and 1273 more errors.\n\tMax AbsError of 1.0000000000000000e+0 at index of 257.\n\t[257\]\t0.0000000000000000e+0\t-1.0000000000000000e+0\t1.0000000000000000e+0\t1.0000000000000000e+0\t4.1724000000000000e-7\n\tMax RelError of 1.0000000000000000e+0 at index of 2.\n]
-    expected: FAIL
-
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html.ini
index 1e1cd02d1c..24cec94bfc 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-convolvernode-interface/convolver-response-4-chan.html.ini
@@ -1,5 +1,4 @@
 [convolver-response-4-chan.html]
-  expected: ERROR
   [X 1: Channel 0 expected to be equal to the array [0,0,0.9458407163619995,0.844833254814148,1.7668657302856445,1.706931710243225,1.6640565395355225,1.7177008390426636,1.6363749504089355,1.8421846628189087,1.19059157371521,0.20796966552734375,-0.5251069068908691,-1.5682597160339355,-1.7950977087020874,-1.6221750974655151...\] but differs in 985 places:\n\tIndex\tActual\t\t\tExpected\n\t[0\]\t-3.5762786865234375e-7\t0.0000000000000000e+0\n\t[1\]\t-1.4901161193847656e-7\t0.0000000000000000e+0\n\t[2\]\t9.4584035873413086e-1\t9.4584071636199951e-1\n\t[3\]\t8.4483319520950317e-1\t8.4483325481414795e-1\n\t...and 981 more errors.]
     expected: FAIL
 
@@ -75,3 +74,9 @@
   [X 5.1: Channel 1 expected to be equal to the array [0,0,0,2.2955899238586426,2.0720272064208984,3.9322750568389893,2.6799845695495605,1.0305213928222656,0.573580801486969,1.1953470706939697,1.1813762187957764,-0.4176445007324219,-2.2066144943237305,-2.9535818099975586,-1.275363564491272,0.26151078939437866...\] but differs in 1039 places:\n\tIndex\tActual\t\t\tExpected\n\t[0\]\t2.9802322387695313e-8\t0.0000000000000000e+0\n\t[2\]\t1.1175870895385742e-8\t0.0000000000000000e+0\n\t[3\]\t2.2955901622772217e+0\t2.2955899238586426e+0\n\t[4\]\t2.0720274448394775e+0\t2.0720272064208984e+0\n\t...and 1035 more errors.]
     expected: FAIL
 
+  [Executing "delayed buffer set"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [# AUDIT TASK RUNNER FINISHED: 5 out of 7 tasks were failed.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html.ini
index a09d9921c4..1989016088 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-delaynode-interface/no-dezippering.html.ini
@@ -1,2 +1,12 @@
 [no-dezippering.html]
-  expected: ERROR
+  [Executing "test0"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test1"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test2"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html.ini
index a09d9921c4..dcd8f3aefc 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-gainnode-interface/no-dezippering.html.ini
@@ -1,2 +1,12 @@
 [no-dezippering.html]
-  expected: ERROR
+  [Executing "test0"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test2"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test3"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html.ini
index 434bc7dbb8..02cf1055f9 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html.ini
@@ -1,6 +1,21 @@
 [cors-check.https.html]
+  bug: AudioWorklet not enabled on release_or_beta
   expected:
-    if not release_or_beta: ERROR
-  [\n      Test if MediaElementAudioSourceNode works for cross-origin redirects with\n      "cors" request mode.\n    ]
+    if release_or_beta: ERROR
+  [X Recorded channel #0 should have contain at least one value different from 0.]
     expected: FAIL
 
+  [X Recorded channel #3 should have contain at least one value different from 0.]
+    expected: FAIL
+
+  [< [start-playback-and-capture\] 4 out of 4 assertions were failed.]
+    expected: FAIL
+
+  [# AUDIT TASK RUNNER FINISHED: 1 out of 2 tasks were failed.]
+    expected: FAIL
+
+  [X Recorded channel #2 should have contain at least one value different from 0.]
+    expected: FAIL
+
+  [X Recorded channel #1 should have contain at least one value different from 0.]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html.ini
index 1ec5f86b17..158872d889 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html.ini
@@ -1,10 +1,3 @@
 [no-cors.https.html]
-  disabled:
-    if (os == "android") and e10s: bug 1550895 (frequently fails on geckoview)
-  bug: AudioWorkletNode::GetPort is not implemented
-  expected:
-    if release_or_beta: OK
-    ERROR
-  [\n      Test if MediaElementAudioSourceNode works for cross-origin redirects with\n      "no-cors" request mode.\n    ]
-    expected:
-      if release_or_beta: FAIL
+  bug: redirect not tainted https://bugzilla.mozilla.org/show_bug.cgi?id=1599950
+  disabled: unstable failing test names https://bugzilla.mozilla.org/show_bug.cgi?id=1599954
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html.ini
index a20719e01f..4650139ba3 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/automation-changes.html.ini
@@ -1,2 +1,9 @@
 [automation-changes.html]
-  expected: ERROR
+  [Executing "Set Listener.positionX.value"]
+    expected: FAIL
+
+  [Executing "Listener.positionX.setValue"]
+    expected: FAIL
+
+  [Executing "Listener.setPosition"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html.ini
index b84095dd0b..e21b4a0338 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html.ini
@@ -1,2 +1,3 @@
 [ctor-panner.html]
-  expected: ERROR
+  [Executing "default constructor"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html.ini
index a1c827555e..5023b3d1d1 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-basic.html.ini
@@ -1,2 +1,48 @@
 [panner-automation-basic.html]
-  expected: ERROR
+  [Executing "Stereo panner.positionX"]
+    expected: FAIL
+
+  [Executing "Mono panner.positionX"]
+    expected: FAIL
+
+  [Executing "Stereo panner.positionY"]
+    expected: FAIL
+
+  [Executing "Mono panner.positionY"]
+    expected: FAIL
+
+  [Executing "Stereo panner.positionZ"]
+    expected: FAIL
+
+  [Executing "Mono panner.positionZ"]
+    expected: FAIL
+
+  [Executing "Stereo listener.positionX"]
+    expected: FAIL
+
+  [Executing "Mono listener.positionX"]
+    expected: FAIL
+
+  [Executing "Stereo listener.positionY"]
+    expected: FAIL
+
+  [Executing "Mono listener.positionY"]
+    expected: FAIL
+
+  [Executing "Stereo listener.positionZ"]
+    expected: FAIL
+
+  [Executing "Mono listener.positionZ"]
+    expected: FAIL
+
+  [Executing "setPosition"]
+    expected: FAIL
+
+  [Executing "orientation setter"]
+    expected: FAIL
+
+  [Executing "forward setter"]
+    expected: FAIL
+
+  [Executing "up setter"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html.ini
index 2c76c302d1..28b5e4653a 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/panner-automation-position.html.ini
@@ -1,2 +1,28 @@
 [panner-automation-position.html]
   expected: ERROR
+  [Executing "1: 1-channel inverse rolloff: 1"]
+    expected: NOTRUN
+
+  [Executing "3: 1-channel linear rolloff: 1"]
+    expected: NOTRUN
+
+  [Executing "3: 2-channel linear rolloff: 1"]
+    expected: NOTRUN
+
+  [Audit report]
+    expected: NOTRUN
+
+  [Executing "2: 1-channel exponential rolloff: 1.5"]
+    expected: NOTRUN
+
+  [Executing "1: 2-channel inverse rolloff: 1"]
+    expected: NOTRUN
+
+  [Executing "2: 2-channel exponential rolloff: 1.5"]
+    expected: NOTRUN
+
+  [Executing "0: 2-channel inverse rolloff: 1"]
+    expected: NOTRUN
+
+  [Executing "0: 1-channel inverse rolloff: 1"]
+    expected: TIMEOUT
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.html.ini
index a36f3d39b3..f6cb362c97 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-pannernode-interface/pannernode-basic.html.ini
@@ -1,2 +1,3 @@
 [pannernode-basic.html]
-  expected: ERROR
+  [Executing "listener"]
+    expected: FAIL
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html.ini
index a09d9921c4..a17dd33b18 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-stereopanner-interface/no-dezippering.html.ini
@@ -1,2 +1,20 @@
 [no-dezippering.html]
-  expected: ERROR
+  [Executing "test mono input"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test stereo input"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test mono input setValue"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test stereo input setValue"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
+
+  [Executing "test mono input automation"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html.ini b/testing/web-platform/meta/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html.ini
index e89b8bae3d..2bb57db257 100644
--- a/testing/web-platform/meta/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html.ini
+++ b/testing/web-platform/meta/webaudio/the-audio-api/the-waveshapernode-interface/waveshaper-copy-curve.html.ini
@@ -1,2 +1,4 @@
 [waveshaper-copy-curve.html]
-  expected: ERROR
+  [Executing "test copying"]
+    expected: FAIL
+    bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1265406
diff --git a/testing/web-platform/meta/webauthn/createcredential-pubkeycredparams.https.html.ini b/testing/web-platform/meta/webauthn/createcredential-pubkeycredparams.https.html.ini
new file mode 100644
index 0000000000..08480840a4
--- /dev/null
+++ b/testing/web-platform/meta/webauthn/createcredential-pubkeycredparams.https.html.ini
@@ -0,0 +1,10 @@
+[createcredential-pubkeycredparams.https.html]
+  expected:
+    if processor == "aarch64": ["OK", "TIMEOUT"]
+  [Bad pubKeyCredParams: first param has bad alg (0)]
+    expected:
+      if processor == "aarch64": ["PASS", "NOTRUN"]
+
+  [Bad pubKeyCredParams: first param has bad alg (42)]
+    expected:
+      if processor == "aarch64": ["PASS", "TIMEOUT"]
diff --git a/testing/web-platform/tests/mediacapture-record/MediaRecorder-bitrate.https.html b/testing/web-platform/tests/mediacapture-record/MediaRecorder-bitrate.https.html
new file mode 100644
index 0000000000..472c0661cb
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-record/MediaRecorder-bitrate.https.html
@@ -0,0 +1,226 @@
+
+
+
+MediaRecorder {audio|video}bitsPerSecond attributes
+
+
+
+
+
+
diff --git a/testing/web-platform/tests/mediacapture-record/MediaRecorder-creation.https.html b/testing/web-platform/tests/mediacapture-record/MediaRecorder-creation.https.html
index 57e636c698..b724ca1450 100644
--- a/testing/web-platform/tests/mediacapture-record/MediaRecorder-creation.https.html
+++ b/testing/web-platform/tests/mediacapture-record/MediaRecorder-creation.https.html
@@ -20,13 +20,13 @@
 
         var recorder = new MediaRecorder(stream);
         assert_equals(recorder.state, "inactive");
-        assert_equals(recorder.videoBitsPerSecond, 0);
-        assert_equals(recorder.audioBitsPerSecond, 0);
+        assert_not_equals(recorder.videoBitsPerSecond, 0);
+        assert_not_equals(recorder.audioBitsPerSecond, 0);
         test.done();
       });
 
       const onError = test.unreached_func('Error creating MediaStream.');
-      navigator.getUserMedia(constraints, gotStream, onError);
+      navigator.mediaDevices.getUserMedia(constraints).then(gotStream, onError);
     }, message);
   }
 
diff --git a/testing/web-platform/tests/mediacapture-record/MediaRecorder-disabled-tracks.https.html b/testing/web-platform/tests/mediacapture-record/MediaRecorder-disabled-tracks.https.html
index fe26e0c961..52e0010cfe 100644
--- a/testing/web-platform/tests/mediacapture-record/MediaRecorder-disabled-tracks.https.html
+++ b/testing/web-platform/tests/mediacapture-record/MediaRecorder-disabled-tracks.https.html
@@ -46,7 +46,7 @@
       });
 
       const onError = test.unreached_func('Error creating MediaStream.');
-      navigator.getUserMedia(args[1], gotStream, onError);
+      navigator.mediaDevices.getUserMedia(args[1]).then(gotStream, onError);
     }, args[0]);
   });
 
diff --git a/testing/web-platform/tests/mediacapture-record/MediaRecorder-events-and-exceptions.html b/testing/web-platform/tests/mediacapture-record/MediaRecorder-events-and-exceptions.html
index 58b9c08eca..cddc07e507 100644
--- a/testing/web-platform/tests/mediacapture-record/MediaRecorder-events-and-exceptions.html
+++ b/testing/web-platform/tests/mediacapture-record/MediaRecorder-events-and-exceptions.html
@@ -36,23 +36,21 @@
     });
 
     recorderOnDataAvailable = test.step_func(event => {
-      // TODO(mcasas): ondataavailable might never be pinged with an empty Blob
-      // data on recorder.stop(), see http://crbug.com/54428
       assert_equals(recorder.state, "inactive");
-      assert_equals(event.data.size, 0, 'We should have gotten an empty Blob');
+      assert_not_equals(event.data.size, 0, 'We should get a Blob with data');
     });
 
     recorderOnStop = test.step_func(function() {
       assert_equals(recorder.state, "inactive");
-      assert_throws("InvalidStateError", function() { recorder.stop() },
-                    "recorder cannot be stop()ped in |inactive| state");
+      recorder.onstop = recorderOnUnexpectedEvent;
+      recorder.stop();
+      assert_equals(recorder.state, "inactive", "stop() is idempotent");
       assert_throws("InvalidStateError", function() { recorder.pause() },
                     "recorder cannot be pause()ed in |inactive| state");
       assert_throws("InvalidStateError", function() { recorder.resume() },
                     "recorder cannot be resume()d in |inactive| state");
       assert_throws("InvalidStateError", function() { recorder.requestData() },
                     "cannot requestData() if recorder is in |inactive| state");
-      recorder.onstop = recorderOnUnexpectedEvent;
       test.done();
     });
 
@@ -84,15 +82,15 @@
 
     assert_throws("NotSupportedError",
                   function() {
-                    recorder =
-                        new MediaRecorder(stream, {mimeType : "video/invalid"});
+                    recorder = new MediaRecorder(
+                      new MediaStream(), {mimeType : "video/invalid"});
                   },
                   "recorder should throw() with unsupported mimeType");
-    let recorder = new MediaRecorder(stream);
+    let recorder = new MediaRecorder(new MediaStream());
     assert_equals(recorder.state, "inactive");
 
-    assert_throws("InvalidStateError", function(){recorder.stop()},
-                  "recorder cannot be stop()ped in |inactive| state");
+    recorder.stop();
+    assert_equals(recorder.state, "inactive", "stop() is idempotent");
     assert_throws("InvalidStateError", function(){recorder.pause()},
                   "recorder cannot be pause()ed in |inactive| state");
     assert_throws("InvalidStateError", function(){recorder.resume()},
@@ -100,6 +98,14 @@
     assert_throws("InvalidStateError", function(){recorder.requestData()},
                   "cannot requestData() if recorder is in |inactive| state");
 
+    assert_throws("NotSupportedError",
+                  function() {
+                    recorder.start();
+                  },
+                  "recorder should throw() when starting with inactive stream");
+
+    recorder.stream.addTrack(stream.getTracks()[0]);
+
     drawSomethingOnCanvas();
 
     recorder.onstop = recorderOnUnexpectedEvent;
diff --git a/testing/web-platform/tests/mediacapture-record/MediaRecorder-mimetype.html b/testing/web-platform/tests/mediacapture-record/MediaRecorder-mimetype.html
new file mode 100644
index 0000000000..e90bbcced9
--- /dev/null
+++ b/testing/web-platform/tests/mediacapture-record/MediaRecorder-mimetype.html
@@ -0,0 +1,169 @@
+
+
+
+  MediaRecorder MIMEType
+  
+  
+  
+
+
+
+
+
+
+
diff --git a/testing/web-platform/tests/webaudio/resources/audit.js b/testing/web-platform/tests/webaudio/resources/audit.js
index b7ca020161..ac97b66249 100644
--- a/testing/web-platform/tests/webaudio/resources/audit.js
+++ b/testing/web-platform/tests/webaudio/resources/audit.js
@@ -788,21 +788,31 @@ window.Audit = (function() {
       }
 
       // Compare against the expected sequence.
-      for (let j = 0; j < this._expected.length; j++) {
-        if (this._expected[j] !== indexedActual[j].value) {
-          firstErrorIndex = indexedActual[j].index;
-          passed = false;
-          break;
+      let failMessage =
+          '${actual} expected to have the value sequence of ${expected} but ' +
+          'got ';
+      if (this._expected.length === indexedActual.length) {
+        for (let j = 0; j < this._expected.length; j++) {
+          if (this._expected[j] !== indexedActual[j].value) {
+            firstErrorIndex = indexedActual[j].index;
+            passed = false;
+            failMessage += this._actual[firstErrorIndex] + ' at index ' +
+                firstErrorIndex + '.';
+            break;
+          }
         }
+      } else {
+        passed = false;
+        let indexedValues = indexedActual.map(x => x.value);
+        failMessage += `${indexedActual.length} values, [${
+            indexedValues}], instead of ${this._expected.length}.`;
       }
 
       return this._assert(
           passed,
           '${actual} contains all the expected values in the correct order: ' +
               '${expected}.',
-          '${actual} expected to have the value sequence of ${expected} but ' +
-              'got ' + this._actual[firstErrorIndex] + ' at index ' +
-              firstErrorIndex + '.');
+          failMessage);
     }
 
     /**
@@ -1169,15 +1179,22 @@ window.Audit = (function() {
 
     // Run this task. |this| task will be passed into the user-supplied test
     // task function.
-    run() {
+    run(harnessTest) {
       this._state = TaskState.STARTED;
-
+      this._harnessTest = harnessTest;
       // Print out the task entry with label and description.
       _logPassed(
           '> [' + this._label + '] ' +
           (this._description ? this._description : ''));
 
-      this._taskFunction(this, this.should.bind(this));
+      return new Promise((resolve, reject) => {
+        this._resolve = resolve;
+        this._reject = reject;
+        let result = this._taskFunction(this, this.should.bind(this));
+        if (result && typeof result.then === "function") {
+          result.then(() => this.done()).catch(reject);
+        }
+      });
     }
 
     // Update the task success based on the individual assertion/test inside.
@@ -1193,6 +1210,7 @@ window.Audit = (function() {
 
     // Finish the current task and start the next one if available.
     done() {
+      assert_equals(this._state, TaskState.STARTED);
       this._state = TaskState.FINISHED;
 
       let message = '< [' + this._label + '] ';
@@ -1207,7 +1225,26 @@ window.Audit = (function() {
         _logFailed(message);
       }
 
-      this._taskRunner._runNextTask();
+      this._resolve();
+    }
+
+    // Runs |subTask| |time| milliseconds later. |setTimeout| is not allowed in
+    // WPT linter, so a thin wrapper around the harness's |step_timeout| is
+    // used here.  Returns a Promise which is resolved after |subTask| runs.
+    timeout(subTask, time) {
+      return new Promise(resolve => {
+        this._harnessTest.step_timeout(() => {
+          let result = subTask();
+          if (result && typeof result.then === "function") {
+            // Chain rejection directly to the harness test Promise, to report
+            // the rejection against the subtest even when the caller of
+            // timeout does not handle the rejection.
+            result.then(resolve, this._reject);
+          } else {
+            resolve();
+          }
+        }, time);
+      });
     }
 
     isPassed() {
@@ -1228,20 +1265,11 @@ window.Audit = (function() {
     constructor() {
       this._tasks = {};
       this._taskSequence = [];
-      this._currentTaskIndex = -1;
 
       // Configure testharness.js for the async operation.
       setup(new Function(), {explicit_done: true});
     }
 
-    _runNextTask() {
-      if (this._currentTaskIndex < this._taskSequence.length) {
-        this._tasks[this._taskSequence[this._currentTaskIndex++]].run();
-      } else {
-        this._finish();
-      }
-    }
-
     _finish() {
       let numberOfFailures = 0;
       for (let taskIndex in this._taskSequence) {
@@ -1259,13 +1287,13 @@ window.Audit = (function() {
             prefix + this._taskSequence.length + ' tasks ran successfully.');
       }
 
-      // From testharness.js, report back to the test infrastructure that
-      // the task runner completed all the tasks.
-      _testharnessDone();
+      return Promise.resolve();
     }
 
     // |taskLabel| can be either a string or a dictionary. See Task constructor
-    // for the detail.
+    // for the detail.  If |taskFunction| returns a thenable, then the task
+    // is considered complete when the thenable is fulfilled; otherwise the
+    // task must be completed with an explicit call to |task.done()|.
     define(taskLabel, taskFunction) {
       let task = new Task(this, taskLabel, taskFunction);
       if (this._tasks.hasOwnProperty(task.label)) {
@@ -1304,9 +1332,19 @@ window.Audit = (function() {
         return;
       }
 
-      // Start the first task.
-      this._currentTaskIndex = 0;
-      this._runNextTask();
+      for (let taskIndex in this._taskSequence) {
+        let task = this._tasks[this._taskSequence[taskIndex]];
+        // Some tests assume that tasks run in sequence, which is provided by
+        // promise_test().
+        promise_test((t) => task.run(t), `Executing "${task.label}"`);
+      }
+
+      // Schedule a summary report on completion.
+      promise_test(() => this._finish(), "Audit report");
+
+      // From testharness.js. The harness now need not wait for more subtests
+      // to be added.
+      _testharnessDone();
     }
   }
 
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html
index a1a5f3fce5..20780d94b7 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audiobuffer-interface/audiobuffer-copy-channel.html
@@ -137,7 +137,7 @@
         ;
         should(() => {
           buffer.copyFromChannel(x, 0, -1);
-        }, '5: buffer.copyFromChannel(x, 0, -1)').throw(DOMException, 'IndexSizeError');
+        }, '5: buffer.copyFromChannel(x, 0, -1)').notThrow();
         should(
             () => {
               buffer.copyFromChannel(x, 0, bufferLength);
@@ -149,22 +149,20 @@
           buffer.copyFromChannel(x, 3);
         }, '7: buffer.copyFromChannel(x, 3)').throw(DOMException, 'IndexSizeError');
 
-        if (window.SharedArrayBuffer) {
-          let shared_buffer = new Float32Array(new SharedArrayBuffer(32));
-          should(
-              () => {
-                buffer.copyFromChannel(shared_buffer, 0);
-              },
-              '8: buffer.copyFromChannel(SharedArrayBuffer view, 0)')
-              .throw(TypeError);
+        let shared_buffer = new Float32Array(new SharedArrayBuffer(32));
+        should(
+            () => {
+              buffer.copyFromChannel(shared_buffer, 0);
+            },
+            '8: buffer.copyFromChannel(SharedArrayBuffer view, 0)')
+            .throw(TypeError);
 
-          should(
-              () => {
-                buffer.copyFromChannel(shared_buffer, 0, 0);
-              },
-              '9: buffer.copyFromChannel(SharedArrayBuffer view, 0, 0)')
-              .throw(TypeError);
-        }
+        should(
+            () => {
+              buffer.copyFromChannel(shared_buffer, 0, 0);
+            },
+            '9: buffer.copyFromChannel(SharedArrayBuffer view, 0, 0)')
+            .throw(TypeError);
 
         task.done();
       });
@@ -192,7 +190,7 @@
             .throw(DOMException, 'IndexSizeError');
         should(() => {
           buffer.copyToChannel(x, 0, -1);
-        }, '4: buffer.copyToChannel(x, 0, -1)').throw(DOMException, 'IndexSizeError');
+        }, '4: buffer.copyToChannel(x, 0, -1)').notThrow();
         should(
             () => {
               buffer.copyToChannel(x, 0, bufferLength);
@@ -204,22 +202,20 @@
           buffer.copyToChannel(x, 3);
         }, '6: buffer.copyToChannel(x, 3)').throw(DOMException, 'IndexSizeError');
 
-        if (window.SharedArrayBuffer) {
-          let shared_buffer = new Float32Array(new SharedArrayBuffer(32));
-          should(
-              () => {
-                buffer.copyToChannel(shared_buffer, 0);
-              },
-              '7: buffer.copyToChannel(SharedArrayBuffer view, 0)')
-              .throw(TypeError);
+        let shared_buffer = new Float32Array(new SharedArrayBuffer(32));
+        should(
+            () => {
+              buffer.copyToChannel(shared_buffer, 0);
+            },
+            '7: buffer.copyToChannel(SharedArrayBuffer view, 0)')
+            .throw(TypeError);
 
-          should(
-              () => {
-                buffer.copyToChannel(shared_buffer, 0, 0);
-              },
-              '8: buffer.copyToChannel(SharedArrayBuffer view, 0, 0)')
-              .throw(TypeError);
-        }
+        should(
+            () => {
+              buffer.copyToChannel(shared_buffer, 0, 0);
+            },
+            '8: buffer.copyToChannel(SharedArrayBuffer view, 0, 0)')
+            .throw(TypeError);
 
         task.done();
       });
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
index 8194d1977a..a5dd004981 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html
@@ -16,65 +16,56 @@
 
       let filePath = 'processors/sharedarraybuffer-processor.js';
 
-      if (window.SharedArrayBuffer) {
-        audit.define(
-            'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
-            (task, should) => {
-              let workletNode =
-                  new AudioWorkletNode(context, 'sharedarraybuffer-processor');
-
-              // After it is created, the worklet will send a new
-              // SharedArrayBuffer to the main thread.
-              //
-              // The worklet will then wait to receive a message from the main
-              // thread.
-              //
-              // When it receives the message, it will check whether it is a
-              // SharedArrayBuffer, and send this information back to the main
-              // thread.
-
-              workletNode.port.onmessage = (event) => {
-                let data = event.data;
-                switch (data.state) {
-                  case 'created':
-                    should(
-                        data.sab instanceof SharedArrayBuffer,
-                        'event.data.sab from worklet is an instance of SharedArrayBuffer')
-                        .beTrue();
-
-                    // Send a SharedArrayBuffer back to the worklet.
-                    let sab = new SharedArrayBuffer(8);
-                    workletNode.port.postMessage(sab);
-                    break;
-
-                  case 'received message':
-                    should(data.isSab, 'event.data from main thread is an instance of SharedArrayBuffer')
-                        .beTrue();
-                    task.done();
-                    break;
-
-                  default:
-                    should(false,
-                           `Got unexpected message from worklet: ${data.state}`)
-                        .beTrue();
-                    task.done();
-                    break;
-                }
-              };
-
-              workletNode.port.onmessageerror = (event) => {
-                should(false, 'Got messageerror from worklet').beTrue();
-                task.done();
-              };
-            });
-      } else {
-        // NOTE(binji): SharedArrayBuffer is only enabled where we have site
-        // isolation.
-        audit.define('Skipping test because SharedArrayBuffer is not defined',
+      audit.define(
+          'Test postMessage from AudioWorkletProcessor to AudioWorkletNode',
           (task, should) => {
-            task.done();
+            let workletNode =
+                new AudioWorkletNode(context, 'sharedarraybuffer-processor');
+
+            // After it is created, the worklet will send a new
+            // SharedArrayBuffer to the main thread.
+            //
+            // The worklet will then wait to receive a message from the main
+            // thread.
+            //
+            // When it receives the message, it will check whether it is a
+            // SharedArrayBuffer, and send this information back to the main
+            // thread.
+
+            workletNode.port.onmessage = (event) => {
+              let data = event.data;
+              switch (data.state) {
+                case 'created':
+                  should(
+                      data.sab instanceof SharedArrayBuffer,
+                      'event.data.sab from worklet is an instance of SharedArrayBuffer')
+                      .beTrue();
+
+                  // Send a SharedArrayBuffer back to the worklet.
+                  let sab = new SharedArrayBuffer(8);
+                  workletNode.port.postMessage(sab);
+                  break;
+
+                case 'received message':
+                  should(data.isSab, 'event.data from main thread is an instance of SharedArrayBuffer')
+                      .beTrue();
+                  task.done();
+                  break;
+
+                default:
+                  should(false,
+                         `Got unexpected message from worklet: ${data.state}`)
+                      .beTrue();
+                  task.done();
+                  break;
+              }
+            };
+
+            workletNode.port.onmessageerror = (event) => {
+              should(false, 'Got messageerror from worklet').beTrue();
+              task.done();
+            };
           });
-      }
 
       context.audioWorklet.addModule(filePath).then(() => {
         audit.run();
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
new file mode 100644
index 0000000000..63b60e490f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-postmessage-sharedarraybuffer.https.html.headers
@@ -0,0 +1,2 @@
+Cross-Origin-Opener-Policy: same-origin
+Cross-Origin-Embedder-Policy: require-corp
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
new file mode 100644
index 0000000000..685546aeb5
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworklet-suspend.https.html
@@ -0,0 +1,39 @@
+
+
+  
+    
+      Test if activation of worklet thread does not resume context rendering.
+    
+    
+    
+    
+  
+  
+    
+  
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
index d87e35b571..84458d0aaa 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-sample-rate.https.html
@@ -12,30 +12,32 @@
     
   
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
index 79d402c518..5f4bee7c53 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletglobalscope-timing-info.https.html
@@ -12,45 +12,47 @@
     
   
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
index 0a9966add8..0914edbb3b 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-onerror.https.html
@@ -12,43 +12,45 @@
     
   
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
index 9d65d872b9..a8a7f5ed1d 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/audioworkletnode-output-channel-count.https.html
@@ -13,7 +13,7 @@
       const audit = Audit.createTaskRunner();
       const context = new AudioContext();
 
-      (async function () {
+      setup(async function () {
         await context.audioWorklet.addModule(
             'processors/channel-count-processor.js');
 
@@ -76,7 +76,7 @@
             });
 
         audit.run();
-      })();
+      });
     
   
 
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
new file mode 100644
index 0000000000..a4c59123a1
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-getter.https.html
@@ -0,0 +1,23 @@
+
+Test use of 'process' getter for AudioWorkletProcessor callback
+
+
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
new file mode 100644
index 0000000000..4c6a10dfab
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/process-parameters.https.html
@@ -0,0 +1,87 @@
+
+Test parameters of process() AudioWorkletProcessor callback
+
+
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
new file mode 100644
index 0000000000..6f1aa59225
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processor-construction-port.https.html
@@ -0,0 +1,61 @@
+
+Test processor port assignment on processor callback function construction
+
+
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
new file mode 100644
index 0000000000..d4c63f7775
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-new.js
@@ -0,0 +1,16 @@
+class NewAfterNew extends AudioWorkletProcessor {
+  constructor() {
+    const processor = new AudioWorkletProcessor()
+    let message = {threw: false};
+    try {
+      new AudioWorkletProcessor();
+    } catch (e) {
+      message.threw = true;
+      message.errorName = e.name;
+      message.isTypeError = e instanceof TypeError;
+    }
+    processor.port.postMessage(message);
+    return processor;
+  }
+}
+registerProcessor("new-after-new", NewAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
new file mode 100644
index 0000000000..a6d4f0e2e8
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-new-after-super.js
@@ -0,0 +1,15 @@
+class NewAfterSuper extends AudioWorkletProcessor {
+  constructor() {
+    super()
+    let message = {threw: false};
+    try {
+      new AudioWorkletProcessor()
+    } catch (e) {
+      message.threw = true;
+      message.errorName = e.name;
+      message.isTypeError = e instanceof TypeError;
+    }
+    this.port.postMessage(message);
+  }
+}
+registerProcessor("new-after-super", NewAfterSuper);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
new file mode 100644
index 0000000000..c40b5a7179
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-singleton.js
@@ -0,0 +1,16 @@
+let singleton;
+class Singleton extends AudioWorkletProcessor {
+  constructor() {
+    if (!singleton) {
+      singleton = new AudioWorkletProcessor();
+      singleton.process = function() {
+        this.port.postMessage({message: "process called"});
+        // This function will be called at most once for each AudioWorkletNode
+        // if the node has no input connections.
+        return false;
+      }
+    }
+    return singleton;
+  }
+}
+registerProcessor("singleton", Singleton);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
new file mode 100644
index 0000000000..e447830c5f
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/construction-port-super-after-new.js
@@ -0,0 +1,16 @@
+class SuperAfterNew extends AudioWorkletProcessor {
+  constructor() {
+    const processor = new AudioWorkletProcessor()
+    let message = {threw: false};
+    try {
+      super();
+    } catch (e) {
+      message.threw = true;
+      message.errorName = e.name;
+      message.isTypeError = e instanceof TypeError;
+    }
+    processor.port.postMessage(message);
+    return processor;
+  }
+}
+registerProcessor("super-after-new", SuperAfterNew);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
new file mode 100644
index 0000000000..b1434f54ba
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-instance-processor.js
@@ -0,0 +1,44 @@
+/**
+ * @class ProcessGetterTestInstanceProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on an
+ * AudioWorkletProcessorConstructor instance is called at the right times.
+ */
+
+class ProcessGetterTestInstanceProcessor extends AudioWorkletProcessor {
+  constructor() {
+    super();
+    this.getterCallCount = 0;
+    this.totalProcessCallCount = 0;
+    Object.defineProperty(this, 'process', { get: function() {
+      if (!(this instanceof ProcessGetterTestInstanceProcessor)) {
+        throw new Error('`process` getter called with bad `this`.');
+      }
+      ++this.getterCallCount;
+      let functionCallCount = 0;
+      return () => {
+        if (++functionCallCount > 1) {
+          const message = 'Closure of function returned from `process` getter' +
+              ' should be used for only one call.'
+          this.port.postMessage({message: message});
+          throw new Error(message);
+        }
+        if (++this.totalProcessCallCount < 2) {
+          return true; // Expect another getter call.
+        }
+        if (this.totalProcessCallCount != this.getterCallCount) {
+          const message =
+              'Getter should be called only once for each process() call.'
+          this.port.postMessage({message: message});
+          throw new Error(message);
+        }
+        this.port.postMessage({message: 'done'});
+        return false; // No more calls required.
+      };
+    }});
+  }
+}
+
+registerProcessor('process-getter-test-instance',
+                  ProcessGetterTestInstanceProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
new file mode 100644
index 0000000000..cef5fa8b52
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-getter-test-prototype-processor.js
@@ -0,0 +1,55 @@
+/**
+ * @class ProcessGetterTestPrototypeProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class tests that a 'process' getter on
+ * AudioWorkletProcessorConstructor is called at the right times.
+ */
+
+// Reporting errors during registerProcessor() is awkward.
+// The occurrence of an error is flagged, so that a trial registration can be
+// performed and registration against the expected AudioWorkletNode name is
+// performed only if no errors are flagged during the trial registration.
+let error_flag = false;
+
+class ProcessGetterTestPrototypeProcessor extends AudioWorkletProcessor {
+  constructor() {
+    super();
+    this.getterCallCount = 0;
+    this.totalProcessCallCount = 0;
+  }
+  get process() {
+    if (!(this instanceof ProcessGetterTestPrototypeProcessor)) {
+      error_flag = true;
+      throw new Error('`process` getter called with bad `this`.');
+    }
+    ++this.getterCallCount;
+    let functionCallCount = 0;
+    return () => {
+      if (++functionCallCount > 1) {
+        const message = 'Closure of function returned from `process` getter' +
+            ' should be used for only one call.'
+        this.port.postMessage({message: message});
+        throw new Error(message);
+      }
+      if (++this.totalProcessCallCount < 2) {
+        return true; // Expect another getter call.
+      }
+      if (this.totalProcessCallCount != this.getterCallCount) {
+        const message =
+            'Getter should be called only once for each process() call.'
+        this.port.postMessage({message: message});
+        throw new Error(message);
+      }
+      this.port.postMessage({message: 'done'});
+      return false; // No more calls required.
+    };
+  }
+}
+
+registerProcessor('trial-process-getter-test-prototype',
+                  ProcessGetterTestPrototypeProcessor);
+if (!error_flag) {
+  registerProcessor('process-getter-test-prototype',
+                    ProcessGetterTestPrototypeProcessor);
+}
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
new file mode 100644
index 0000000000..a300d3cdec
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-audioworklet-interface/processors/process-parameter-test-processor.js
@@ -0,0 +1,18 @@
+/**
+ * @class ProcessParameterTestProcessor
+ * @extends AudioWorkletProcessor
+ *
+ * This processor class forwards input and output parameters to its
+ * AudioWorkletNode.
+ */
+class ProcessParameterTestProcessor extends AudioWorkletProcessor {
+  process(inputs, outputs) {
+    this.port.postMessage({
+      inputs: inputs,
+      outputs: outputs
+    });
+    return false;
+  }
+}
+
+registerProcessor('process-parameter-test', ProcessParameterTestProcessor);
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html
new file mode 100644
index 0000000000..f0f9f771bb
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-convolvernode-interface/active-processing.https.html
@@ -0,0 +1,93 @@
+
+
+  
+    
+      Test Active Processing for ConvolverNode
+    
+    
+    
+    
+    
+  
+
+  
+    
+  
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html b/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html
new file mode 100644
index 0000000000..cda5668a09
--- /dev/null
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-destinationnode-interface/destination.html
@@ -0,0 +1,51 @@
+
+
+  
+    
+      AudioDestinationNode
+    
+    
+    
+  
+  
+    
+  
+
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html
index a2fa8040b2..38bd94a037 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/cors-check.https.html
@@ -14,60 +14,62 @@
     
   
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html
index 38324a9f67..de2f0b7dd3 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-mediaelementaudiosourcenode-interface/no-cors.https.html
@@ -14,59 +14,61 @@
     
   
diff --git a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html
index d330c9c3de..c434aa8c6a 100644
--- a/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html
+++ b/testing/web-platform/tests/webaudio/the-audio-api/the-pannernode-interface/ctor-panner.html
@@ -108,6 +108,16 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'NotSupportedError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.channelCount = options.channelCount;
+            },
+            `node.channelCount = ${options.channelCount}`)
+            .throw(DOMException, "NotSupportedError");
+        should(node.channelCount,
+               `node.channelCount after setting to ${options.channelCount}`)
+            .beEqualTo(2);
 
         options = {channelCount: 3};
         should(
@@ -116,6 +126,16 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'NotSupportedError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.channelCount = options.channelCount;
+            },
+            `node.channelCount = ${options.channelCount}`)
+            .throw(DOMException, "NotSupportedError");
+        should(node.channelCount,
+               `node.channelCount after setting to ${options.channelCount}`)
+            .beEqualTo(2);
 
         options = {channelCount: 99};
         should(
@@ -124,6 +144,16 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'NotSupportedError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.channelCount = options.channelCount;
+            },
+            `node.channelCount = ${options.channelCount}`)
+            .throw(DOMException, "NotSupportedError");
+        should(node.channelCount,
+               `node.channelCount after setting to ${options.channelCount}`)
+            .beEqualTo(2);
 
         // Test channelCountMode.  A mode of "max" is illegal, but others are
         // ok.
@@ -154,6 +184,16 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'NotSupportedError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.channelCountMode = options.channelCountMode;
+            },
+            `node.channelCountMode = ${options.channelCountMode}`)
+            .throw(DOMException, "NotSupportedError");
+        should(node.channelCountMode,
+               `node.channelCountMode after setting to ${options.channelCountMode}`)
+            .beEqualTo("clamped-max");
 
         options = {channelCountMode: 'foobar'};
         should(
@@ -162,6 +202,16 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(TypeError);
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.channelCountMode = options.channelCountMode;
+            },
+            `node.channelCountMode = ${options.channelCountMode}`)
+            .notThrow(); // Invalid assignment to enum-valued attrs does not throw.
+        should(node.channelCountMode,
+               `node.channelCountMode after setting to ${options.channelCountMode}`)
+            .beEqualTo("clamped-max");
 
         // Test channelInterpretation.
         options = {channelInterpretation: 'speakers'};
@@ -200,6 +250,17 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(RangeError);
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.maxDistance = options.maxDistance;
+            },
+            `node.maxDistance = ${options.maxDistance}`)
+            .throw(RangeError);
+        should(node.maxDistance,
+               `node.maxDistance after setting to ${options.maxDistance}`)
+            .beEqualTo(10000);
+
         options = {maxDistance: 100};
         should(
             () => {
@@ -218,6 +279,17 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(RangeError);
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.rolloffFactor = options.rolloffFactor;
+            },
+            `node.rolloffFactor = ${options.rolloffFactor}`)
+            .throw(RangeError);
+        should(node.rolloffFactor,
+               `node.rolloffFactor after setting to ${options.rolloffFactor}`)
+            .beEqualTo(1);
+
         options = {rolloffFactor: 0};
         should(
             () => {
@@ -256,6 +328,17 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'InvalidStateError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.coneOuterGain = options.coneOuterGain;
+            },
+            `node.coneOuterGain = ${options.coneOuterGain}`)
+            .throw(DOMException, 'InvalidStateError');
+        should(node.coneOuterGain,
+               `node.coneOuterGain after setting to ${options.coneOuterGain}`)
+            .beEqualTo(0);
+
         options = {coneOuterGain: 1.1};
         should(
             () => {
@@ -263,6 +346,17 @@
             },
             'new PannerNode(c, ' + JSON.stringify(options) + ')')
             .throw(DOMException, 'InvalidStateError');
+        should(
+            () => {
+              node = new PannerNode(context);
+              node.coneOuterGain = options.coneOuterGain;
+            },
+            `node.coneOuterGain = ${options.coneOuterGain}`)
+            .throw(DOMException, 'InvalidStateError');
+        should(node.coneOuterGain,
+               `node.coneOuterGain after setting to ${options.coneOuterGain}`)
+            .beEqualTo(0);
+
         options = {coneOuterGain: 0.0};
         should(
             () => {
diff --git a/widget/nsBaseFilePicker.cpp b/widget/nsBaseFilePicker.cpp
index 105c5ef96b..d68e13c04a 100644
--- a/widget/nsBaseFilePicker.cpp
+++ b/widget/nsBaseFilePicker.cpp
@@ -32,6 +32,8 @@ namespace {
 nsresult LocalFileToDirectoryOrBlob(nsPIDOMWindowInner* aWindow,
                                     bool aIsDirectory, nsIFile* aFile,
                                     nsISupports** aResult) {
+  MOZ_ASSERT(aWindow);
+
   if (aIsDirectory) {
 #ifdef DEBUG
     bool isDir;
@@ -39,14 +41,18 @@ nsresult LocalFileToDirectoryOrBlob(nsPIDOMWindowInner* aWindow,
     MOZ_ASSERT(isDir);
 #endif
 
-    RefPtr<Directory> directory = Directory::Create(aWindow, aFile);
+    RefPtr<Directory> directory = Directory::Create(aWindow->AsGlobal(), aFile);
     MOZ_ASSERT(directory);
 
     directory.forget(aResult);
     return NS_OK;
   }
 
-  RefPtr<File> file = File::CreateFromFile(aWindow, aFile);
+  RefPtr<File> file = File::CreateFromFile(aWindow->AsGlobal(), aFile);
+  if (NS_WARN_IF(!file)) {
+    return NS_ERROR_FAILURE;
+  }
+
   file.forget(aResult);
   return NS_OK;
 }
@@ -114,6 +120,10 @@ class nsBaseFilePickerEnumerator : public nsSimpleEnumerator {
       return NS_ERROR_FAILURE;
     }
 
+    if (!mParent) {
+      return NS_ERROR_FAILURE;
+    }
+
     return LocalFileToDirectoryOrBlob(
         mParent, mMode == nsIFilePicker::modeGetFolder, localFile, aResult);
   }
@@ -366,6 +376,10 @@ nsBaseFilePicker::GetDomFileOrDirectory(nsISupports** aValue) {
 
   auto* innerParent = mParent ? mParent->GetCurrentInnerWindow() : nullptr;
 
+  if (!innerParent) {
+    return NS_ERROR_FAILURE;
+  }
+
   return LocalFileToDirectoryOrBlob(
       innerParent, mMode == nsIFilePicker::modeGetFolder, localFile, aValue);
 }
diff --git a/widget/nsFilePickerProxy.cpp b/widget/nsFilePickerProxy.cpp
index c3afc0c508..0d9cfc14c9 100644
--- a/widget/nsFilePickerProxy.cpp
+++ b/widget/nsFilePickerProxy.cpp
@@ -131,6 +131,13 @@ nsFilePickerProxy::Open(nsIFilePickerShownCallback* aCallback) {
 
 mozilla::ipc::IPCResult nsFilePickerProxy::Recv__delete__(
     const MaybeInputData& aData, const int16_t& aResult) {
+  nsPIDOMWindowInner* inner =
+      mParent ? mParent->GetCurrentInnerWindow() : nullptr;
+
+  if (NS_WARN_IF(!inner)) {
+    return IPC_OK();
+  }
+
   if (aData.type() == MaybeInputData::TInputBlobs) {
     const nsTArray<IPCBlob>& blobs = aData.get_InputBlobs().blobs();
     for (uint32_t i = 0; i < blobs.Length(); ++i) {
@@ -141,10 +148,10 @@ mozilla::ipc::IPCResult nsFilePickerProxy::Recv__delete__(
         return IPC_OK();
       }
 
-      nsPIDOMWindowInner* inner =
-          mParent ? mParent->GetCurrentInnerWindow() : nullptr;
-      RefPtr<File> file = File::Create(inner, blobImpl);
-      MOZ_ASSERT(file);
+      RefPtr<File> file = File::Create(inner->AsGlobal(), blobImpl);
+      if (NS_WARN_IF(!file)) {
+        return IPC_OK();
+      }
 
       OwningFileOrDirectory* element = mFilesOrDirectories.AppendElement();
       element->SetAsFile() = file;
@@ -157,8 +164,7 @@ mozilla::ipc::IPCResult nsFilePickerProxy::Recv__delete__(
       return IPC_OK();
     }
 
-    RefPtr<Directory> directory =
-        Directory::Create(mParent->GetCurrentInnerWindow(), file);
+    RefPtr<Directory> directory = Directory::Create(inner->AsGlobal(), file);
     MOZ_ASSERT(directory);
 
     OwningFileOrDirectory* element = mFilesOrDirectories.AppendElement();
diff --git a/xpcom/threads/nsThreadUtils.cpp b/xpcom/threads/nsThreadUtils.cpp
index 2380d984d2..eca4bf64e6 100644
--- a/xpcom/threads/nsThreadUtils.cpp
+++ b/xpcom/threads/nsThreadUtils.cpp
@@ -517,15 +517,17 @@ nsCString nsThreadPoolNaming::GetNextThreadName(const nsACString& aPoolName) {
   return name;
 }
 
-nsresult NS_DispatchToBackgroundThread(already_AddRefed<nsIRunnable> aEvent,
+nsresult NS_DispatchBackgroundTask(already_AddRefed<nsIRunnable> aEvent,
                                        uint32_t aDispatchFlags) {
   nsCOMPtr<nsIRunnable> event(aEvent);
-  return nsThreadManager::get().DispatchToBackgroundThread(event, aDispatchFlags);
+  return nsThreadManager::get().DispatchToBackgroundThread(event,
+                                                           aDispatchFlags);
 }
 
-nsresult NS_DispatchToBackgroundThread(nsIRunnable* aEvent,
+nsresult NS_DispatchBackgroundTask(nsIRunnable* aEvent,
                                        uint32_t aDispatchFlags) {
-  return nsThreadManager::get().DispatchToBackgroundThread(aEvent, aDispatchFlags);
+  return nsThreadManager::get().DispatchToBackgroundThread(aEvent,
+                                                           aDispatchFlags);
 }
 
 // nsAutoLowPriorityIO
diff --git a/xpcom/threads/nsThreadUtils.h b/xpcom/threads/nsThreadUtils.h
index 5c8655045c..885999864a 100644
--- a/xpcom/threads/nsThreadUtils.h
+++ b/xpcom/threads/nsThreadUtils.h
@@ -1695,10 +1695,10 @@ extern mozilla::TimeStamp NS_GetTimerDeadlineHintOnCurrentThread(
  * means less resource usage, as the underlying implementation here can manage
  * spinning up and shutting down threads appropriately.
  */
-extern nsresult NS_DispatchToBackgroundThread(
+extern nsresult NS_DispatchBackgroundTask(
     already_AddRefed<nsIRunnable> aEvent,
     uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
-extern nsresult NS_DispatchToBackgroundThread(
+extern nsresult NS_DispatchBackgroundTask(
     nsIRunnable* aEvent, uint32_t aDispatchFlags = NS_DISPATCH_NORMAL);
 
 namespace mozilla {