already_AddRefed<Promise> ScreenOrientation::LockInternal(ScreenOrientationInternal aOrientation, ErrorResult& aRv) { // Steps to apply an orientation lock as defined in spec. nsIDocument* doc = GetResponsibleDocument(); if (NS_WARN_IF(!doc)) { aRv.Throw(NS_ERROR_UNEXPECTED); return nullptr; } nsCOMPtr<nsPIDOMWindowInner> owner = GetOwner(); if (NS_WARN_IF(!owner)) { aRv.Throw(NS_ERROR_UNEXPECTED); return nullptr; } nsCOMPtr<nsIDocShell> docShell = owner->GetDocShell(); if (NS_WARN_IF(!docShell)) { aRv.Throw(NS_ERROR_UNEXPECTED); return nullptr; } nsCOMPtr<nsIGlobalObject> go = do_QueryInterface(owner); MOZ_ASSERT(go); RefPtr<Promise> p = Promise::Create(go, aRv); if (NS_WARN_IF(aRv.Failed())) { return nullptr; } #if !defined(MOZ_WIDGET_ANDROID) && !defined(MOZ_WIDGET_GONK) // User agent does not support locking the screen orientation. p->MaybeReject(NS_ERROR_DOM_NOT_SUPPORTED_ERR); return p.forget(); #else LockPermission perm = GetLockOrientationPermission(true); if (perm == LOCK_DENIED) { p->MaybeReject(NS_ERROR_DOM_SECURITY_ERR); return p.forget(); } nsCOMPtr<nsIDocShellTreeItem> root; docShell->GetSameTypeRootTreeItem(getter_AddRefs(root)); nsCOMPtr<nsIDocShell> rootShell(do_QueryInterface(root)); if (!rootShell) { aRv.Throw(NS_ERROR_UNEXPECTED); return nullptr; } rootShell->SetOrientationLock(aOrientation); AbortOrientationPromises(rootShell); doc->SetOrientationPendingPromise(p); nsCOMPtr<nsIRunnable> lockOrientationTask = new LockOrientationTask(this, p, aOrientation, doc, perm == FULLSCREEN_LOCK_ALLOWED); aRv = NS_DispatchToMainThread(lockOrientationTask); if (NS_WARN_IF(aRv.Failed())) { return nullptr; } return p.forget(); #endif }
MediaEventSource<int64_t>& DecodedStreamData::OnOutput() { return mListener->OnOutput(); }
PassRefPtr<HistoryItem> HistoryItem::decodeBackForwardTree(const String& topURLString, const String& topTitle, const String& topOriginalURLString, Decoder& decoder) { // Since the data stream is not trusted, the decode has to be non-recursive. // We don't want bad data to cause a stack overflow. uint32_t version; if (!decoder.decodeUInt32(version)) return 0; if (version != backForwardTreeEncodingVersion) return 0; String urlString = topURLString; String title = topTitle; String originalURLString = topOriginalURLString; Vector<DecodeRecursionStackElement, 16> recursionStack; recurse: RefPtr<HistoryItem> node = create(urlString, title, 0); node->setOriginalURLString(originalURLString); title = String(); uint64_t size; if (!decoder.decodeUInt64(size)) return 0; size_t i; RefPtr<HistoryItem> child; for (i = 0; i < size; ++i) { if (!decoder.decodeString(originalURLString)) return 0; if (!decoder.decodeString(urlString)) return 0; recursionStack.append(DecodeRecursionStackElement(node.release(), i, size)); goto recurse; resume: node->m_children.append(child.release()); } if (!decoder.decodeInt64(node->m_documentSequenceNumber)) return 0; if (!decoder.decodeUInt64(size)) return 0; for (i = 0; i < size; ++i) { String state; if (!decoder.decodeString(state)) return 0; node->m_documentState.append(state); } if (!decoder.decodeString(node->m_formContentType)) return 0; bool hasFormData; if (!decoder.decodeBool(hasFormData)) return 0; if (hasFormData) { node->m_formData = FormData::decode(decoder); if (!node->m_formData) return 0; } if (!decoder.decodeInt64(node->m_itemSequenceNumber)) return 0; if (!decoder.decodeString(node->m_referrer)) return 0; int32_t x; if (!decoder.decodeInt32(x)) return 0; int32_t y; if (!decoder.decodeInt32(y)) return 0; node->m_scrollPoint = IntPoint(x, y); if (!decoder.decodeFloat(node->m_pageScaleFactor)) return 0; bool hasStateObject; if (!decoder.decodeBool(hasStateObject)) return 0; if (hasStateObject) { Vector<uint8_t> bytes; if (!decoder.decodeBytes(bytes)) return 0; node->m_stateObject = SerializedScriptValue::adopt(bytes); } if (!decoder.decodeString(node->m_target)) return 0; // Simulate recursion with our own stack. if (!recursionStack.isEmpty()) { DecodeRecursionStackElement& element = recursionStack.last(); child = node.release(); node = element.node.release(); i = element.i; size = element.size; recursionStack.removeLast(); goto resume; } return node.release(); }
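The goto/stack technique above is easy to miss on a single read: because the serialized back/forward tree is untrusted, recursion depth must not depend on the input, so the decoder keeps its own stack of partially built parents and "resumes" into them. A minimal, self-contained sketch of the same pattern, using hypothetical Node/Decoder stand-ins rather than the WebCore types:

#include <cstdint>
#include <memory>
#include <vector>

// Hypothetical stand-ins for the real Decoder/HistoryItem types.
struct Node {
    uint32_t value = 0;
    std::vector<std::unique_ptr<Node>> children;
};

struct Decoder {
    const std::vector<uint32_t>* data;
    size_t pos = 0;
    bool decodeUInt32(uint32_t& out) {
        if (pos >= data->size())
            return false;
        out = (*data)[pos++];
        return true;
    }
};

// Non-recursive decode: the input controls the tree depth, so it must not be
// allowed to control the C++ call-stack depth. Each frame records the parent
// being built and how many of its children remain to be decoded.
std::unique_ptr<Node> decodeTree(Decoder& decoder)
{
    struct Frame {
        std::unique_ptr<Node> node;
        uint32_t remainingChildren;
    };
    std::vector<Frame> stack;

    std::unique_ptr<Node> current;
    for (;;) {
        current = std::make_unique<Node>();
        uint32_t childCount;
        if (!decoder.decodeUInt32(current->value) || !decoder.decodeUInt32(childCount))
            return nullptr;

        if (childCount) {
            // "Recurse" into the first child by pushing the parent.
            stack.push_back({ std::move(current), childCount });
            continue;
        }

        // Leaf decoded; attach finished nodes to their parents until some parent
        // still has children left to decode (the explicit "resume" step).
        while (!stack.empty()) {
            Frame& top = stack.back();
            top.node->children.push_back(std::move(current));
            if (--top.remainingChildren)
                break; // Decode the parent's next child on the next loop turn.
            current = std::move(top.node);
            stack.pop_back();
        }
        if (stack.empty())
            return current; // The root has been fully assembled.
    }
}

// Usage: { value, childCount, ... } e.g. { 7, 2, 1, 0, 2, 0 } decodes to a root
// with value 7 and two leaf children:
//   std::vector<uint32_t> bytes = { 7, 2, 1, 0, 2, 0 };
//   Decoder d{ &bytes }; auto root = decodeTree(d);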
HRESULT D3D11DXVA2Manager::ConfigureForSize(uint32_t aWidth, uint32_t aHeight) { mWidth = aWidth; mHeight = aHeight; RefPtr<IMFMediaType> inputType; HRESULT hr = wmf::MFCreateMediaType(getter_AddRefs(inputType)); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = inputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = inputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = inputType->SetUINT32(MF_MT_INTERLACE_MODE, MFVideoInterlace_Progressive); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = inputType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, TRUE); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); RefPtr<IMFAttributes> attr = mTransform->GetAttributes(); hr = attr->SetUINT32(MF_XVP_PLAYBACK_MODE, TRUE); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = attr->SetUINT32(MF_LOW_LATENCY, FALSE); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = MFSetAttributeSize(inputType, MF_MT_FRAME_SIZE, aWidth, aHeight); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); RefPtr<IMFMediaType> outputType; hr = wmf::MFCreateMediaType(getter_AddRefs(outputType)); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = outputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = outputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); gfx::IntSize size(mWidth, mHeight); hr = mTransform->SetMediaTypes(inputType, outputType, ConfigureOutput, &size); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); return S_OK; }
void FEComposite::platformApplySoftware() { FilterEffect* in = inputEffect(0); FilterEffect* in2 = inputEffect(1); if (m_type == FECOMPOSITE_OPERATOR_ARITHMETIC) { Uint8ClampedArray* dstPixelArray = createPremultipliedImageResult(); if (!dstPixelArray) return; IntRect effectADrawingRect = requestedRegionOfInputImageData(in->absolutePaintRect()); RefPtr<Uint8ClampedArray> srcPixelArray = in->asPremultipliedImage(effectADrawingRect); IntRect effectBDrawingRect = requestedRegionOfInputImageData(in2->absolutePaintRect()); in2->copyPremultipliedImage(dstPixelArray, effectBDrawingRect); platformArithmeticSoftware(srcPixelArray.get(), dstPixelArray, m_k1, m_k2, m_k3, m_k4); return; } ImageBuffer* resultImage = createImageBufferResult(); if (!resultImage) return; GraphicsContext* filterContext = resultImage->context(); ImageBuffer* imageBuffer = in->asImageBuffer(); ImageBuffer* imageBuffer2 = in2->asImageBuffer(); ASSERT(imageBuffer); ASSERT(imageBuffer2); switch (m_type) { case FECOMPOSITE_OPERATOR_OVER: filterContext->drawImageBuffer(imageBuffer2, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in2->absolutePaintRect())); filterContext->drawImageBuffer(imageBuffer, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in->absolutePaintRect())); break; case FECOMPOSITE_OPERATOR_IN: { // Applies only to the intersected region. IntRect destinationRect = in->absolutePaintRect(); destinationRect.intersect(in2->absolutePaintRect()); destinationRect.intersect(absolutePaintRect()); if (destinationRect.isEmpty()) break; IntPoint destinationPoint(destinationRect.x() - absolutePaintRect().x(), destinationRect.y() - absolutePaintRect().y()); IntRect sourceRect(IntPoint(destinationRect.x() - in->absolutePaintRect().x(), destinationRect.y() - in->absolutePaintRect().y()), destinationRect.size()); IntRect source2Rect(IntPoint(destinationRect.x() - in2->absolutePaintRect().x(), destinationRect.y() - in2->absolutePaintRect().y()), destinationRect.size()); filterContext->drawImageBuffer(imageBuffer2, ColorSpaceDeviceRGB, IntRect(destinationPoint, source2Rect.size()), source2Rect); filterContext->drawImageBuffer(imageBuffer, ColorSpaceDeviceRGB, IntRect(destinationPoint, sourceRect.size()), sourceRect, CompositeSourceIn); break; } case FECOMPOSITE_OPERATOR_OUT: filterContext->drawImageBuffer(imageBuffer, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in->absolutePaintRect())); filterContext->drawImageBuffer(imageBuffer2, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in2->absolutePaintRect()), IntRect(IntPoint(), imageBuffer2->logicalSize()), CompositeDestinationOut); break; case FECOMPOSITE_OPERATOR_ATOP: filterContext->drawImageBuffer(imageBuffer2, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in2->absolutePaintRect())); filterContext->drawImageBuffer(imageBuffer, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in->absolutePaintRect()), IntRect(IntPoint(), imageBuffer->logicalSize()), CompositeSourceAtop); break; case FECOMPOSITE_OPERATOR_XOR: filterContext->drawImageBuffer(imageBuffer2, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in2->absolutePaintRect())); filterContext->drawImageBuffer(imageBuffer, ColorSpaceDeviceRGB, drawingRegionOfInputImage(in->absolutePaintRect()), IntRect(IntPoint(), imageBuffer->logicalSize()), CompositeXOR); break; default: break; } }
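For the arithmetic branch above, the per-pixel math lives in platformArithmeticSoftware(). Per the SVG feComposite definition, each premultiplied channel is combined as result = k1·i1·i2 + k2·i1 + k3·i2 + k4 with i1 and i2 normalized to [0, 1], then clamped. A standalone sketch of that formula (not the WebCore implementation, which is optimized and operates on Uint8ClampedArray):

#include <algorithm>
#include <cstdint>
#include <vector>

// Applies the SVG feComposite "arithmetic" operator to two premultiplied
// RGBA buffers of equal size: result = k1*i1*i2 + k2*i1 + k3*i2 + k4,
// computed per channel with pixel values normalized to [0, 1].
static void arithmeticComposite(const std::vector<uint8_t>& source,
                                std::vector<uint8_t>& destination,
                                float k1, float k2, float k3, float k4)
{
    for (size_t i = 0; i < destination.size() && i < source.size(); ++i) {
        float i1 = source[i] / 255.0f;
        float i2 = destination[i] / 255.0f;
        float result = k1 * i1 * i2 + k2 * i1 + k3 * i2 + k4;
        destination[i] = static_cast<uint8_t>(std::clamp(result, 0.0f, 1.0f) * 255.0f + 0.5f);
    }
}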
static PassRefPtr<RenderStyle> createFullScreenStyle() { RefPtr<RenderStyle> fullscreenStyle = RenderStyle::createDefaultStyle(); // Create a stacking context: fullscreenStyle->setZIndex(INT_MAX); fullscreenStyle->setFontDescription(FontDescription()); fullscreenStyle->font().update(0); fullscreenStyle->setDisplay(FLEX); fullscreenStyle->setJustifyContent(JustifyCenter); fullscreenStyle->setAlignItems(AlignCenter); fullscreenStyle->setFlexDirection(FlowColumn); fullscreenStyle->setPosition(FixedPosition); fullscreenStyle->setWidth(Length(100.0, Percent)); fullscreenStyle->setHeight(Length(100.0, Percent)); fullscreenStyle->setLeft(Length(0, WebCore::Fixed)); fullscreenStyle->setTop(Length(0, WebCore::Fixed)); fullscreenStyle->setBackgroundColor(Color::black); return fullscreenStyle.release(); }
HRESULT D3D11DXVA2Manager::Init(nsACString& aFailureReason) { HRESULT hr; ScopedGfxFeatureReporter reporter("DXVA2D3D11"); gfx::D3D11VideoCrashGuard crashGuard; if (crashGuard.Crashed()) { NS_WARNING("DXVA2D3D11 crash detected"); aFailureReason.AssignLiteral("DXVA2D3D11 crashes detected in the past"); return E_FAIL; } mDevice = gfx::DeviceManagerDx::Get()->CreateDecoderDevice(); if (!mDevice) { aFailureReason.AssignLiteral("Failed to create D3D11 device for decoder"); return E_FAIL; } mDevice->GetImmediateContext(getter_AddRefs(mContext)); if (!mContext) { aFailureReason.AssignLiteral("Failed to get immediate context for d3d11 device"); return E_FAIL; } hr = wmf::MFCreateDXGIDeviceManager(&mDeviceManagerToken, getter_AddRefs(mDXGIDeviceManager)); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("MFCreateDXGIDeviceManager failed with code %X", hr); return hr; } hr = mDXGIDeviceManager->ResetDevice(mDevice, mDeviceManagerToken); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IMFDXGIDeviceManager::ResetDevice failed with code %X", hr); return hr; } mTransform = new MFTDecoder(); hr = mTransform->Create(CLSID_VideoProcessorMFT); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("MFTDecoder::Create(CLSID_VideoProcessorMFT) failed with code %X", hr); return hr; } hr = mTransform->SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER, ULONG_PTR(mDXGIDeviceManager.get())); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("MFTDecoder::SendMFTMessage(MFT_MESSAGE_SET_D3D_MANAGER) failed with code %X", hr); return hr; } RefPtr<ID3D11VideoDevice> videoDevice; hr = mDevice->QueryInterface(static_cast<ID3D11VideoDevice**>(getter_AddRefs(videoDevice))); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("QI to ID3D11VideoDevice failed with code %X", hr); return hr; } bool found = false; UINT profileCount = videoDevice->GetVideoDecoderProfileCount(); for (UINT i = 0; i < profileCount; i++) { GUID id; hr = videoDevice->GetVideoDecoderProfile(i, &id); if (SUCCEEDED(hr) && (id == DXVA2_ModeH264_E || id == DXVA2_Intel_ModeH264_E)) { mDecoderGUID = id; found = true; break; } } if (!found) { aFailureReason.AssignLiteral("Failed to find an appropriate decoder GUID"); return E_FAIL; } BOOL nv12Support = false; hr = videoDevice->CheckVideoDecoderFormat(&mDecoderGUID, DXGI_FORMAT_NV12, &nv12Support); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("CheckVideoDecoderFormat failed with code %X", hr); return hr; } if (!nv12Support) { aFailureReason.AssignLiteral("Decoder doesn't support NV12 surfaces"); return E_FAIL; } RefPtr<IDXGIDevice> dxgiDevice; hr = mDevice->QueryInterface(static_cast<IDXGIDevice**>(getter_AddRefs(dxgiDevice))); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("QI to IDXGIDevice failed with code %X", hr); return hr; } RefPtr<IDXGIAdapter> adapter; hr = dxgiDevice->GetAdapter(adapter.StartAssignment()); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDXGIDevice::GetAdapter failed with code %X", hr); return hr; } DXGI_ADAPTER_DESC adapterDesc; hr = adapter->GetDesc(&adapterDesc); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDXGIAdapter::GetDesc failed with code %X", hr); return hr; } if (adapterDesc.VendorId == 0x1022 && !MediaPrefs::PDMWMFSkipBlacklist()) { for (size_t i = 0; i < MOZ_ARRAY_LENGTH(sAMDPreUVD4); i++) { if (adapterDesc.DeviceId == sAMDPreUVD4[i]) { mIsAMDPreUVD4 = true; break; } } } D3D11_TEXTURE2D_DESC desc; desc.Width = kSyncSurfaceSize; desc.Height = kSyncSurfaceSize; desc.MipLevels = 1; desc.ArraySize = 1; desc.Format = 
DXGI_FORMAT_B8G8R8A8_UNORM; desc.SampleDesc.Count = 1; desc.SampleDesc.Quality = 0; desc.Usage = D3D11_USAGE_STAGING; desc.BindFlags = 0; desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ; desc.MiscFlags = 0; hr = mDevice->CreateTexture2D(&desc, NULL, getter_AddRefs(mSyncSurface)); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); if (layers::ImageBridgeChild::GetSingleton()) { mTextureClientAllocator = new D3D11RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(), mDevice); } else { mTextureClientAllocator = new D3D11RecycleAllocator(layers::VideoBridgeChild::GetSingleton(), mDevice); } mTextureClientAllocator->SetMaxPoolSize(5); Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED, uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D11)); reporter.SetSuccessful(); return S_OK; }
void InsertLineBreakCommand::doApply() { deleteSelection(); VisibleSelection selection = endingSelection(); if (!selection.isNonOrphanedCaretOrRange()) return; VisiblePosition caret(selection.visibleStart()); // FIXME: If the node is hidden, we should still be able to insert text. // For now, we return to avoid a crash. https://bugs.webkit.org/show_bug.cgi?id=40342 if (caret.isNull()) return; Position pos(caret.deepEquivalent()); pos = positionAvoidingSpecialElementBoundary(pos); pos = positionOutsideTabSpan(pos); RefPtr<Node> nodeToInsert; if (shouldUseBreakElement(pos)) nodeToInsert = createBreakElement(document()); else nodeToInsert = document().createTextNode("\n"); // FIXME: Need to merge text nodes when inserting just after or before text. if (isEndOfParagraph(caret) && !lineBreakExistsAtVisiblePosition(caret)) { bool needExtraLineBreak = !pos.deprecatedNode()->hasTagName(hrTag) && !isHTMLTableElement(pos.deprecatedNode()); insertNodeAt(nodeToInsert.get(), pos); if (needExtraLineBreak) insertNodeBefore(nodeToInsert->cloneNode(false), nodeToInsert); VisiblePosition endingPosition(positionBeforeNode(nodeToInsert.get())); setEndingSelection(VisibleSelection(endingPosition, endingSelection().isDirectional())); } else if (pos.deprecatedEditingOffset() <= caretMinOffset(pos.deprecatedNode())) { insertNodeAt(nodeToInsert.get(), pos); // Insert an extra br or '\n' if the just inserted one collapsed. if (!isStartOfParagraph(positionBeforeNode(nodeToInsert.get()))) insertNodeBefore(nodeToInsert->cloneNode(false).get(), nodeToInsert.get()); setEndingSelection(VisibleSelection(positionInParentAfterNode(nodeToInsert.get()), DOWNSTREAM, endingSelection().isDirectional())); // If we're inserting after all of the rendered text in a text node, or into a non-text node, // a simple insertion is sufficient. } else if (pos.deprecatedEditingOffset() >= caretMaxOffset(pos.deprecatedNode()) || !pos.deprecatedNode()->isTextNode()) { insertNodeAt(nodeToInsert.get(), pos); setEndingSelection(VisibleSelection(positionInParentAfterNode(nodeToInsert.get()), DOWNSTREAM, endingSelection().isDirectional())); } else if (pos.deprecatedNode()->isTextNode()) { // Split a text node Text* textNode = toText(pos.deprecatedNode()); splitTextNode(textNode, pos.deprecatedEditingOffset()); insertNodeBefore(nodeToInsert, textNode); Position endingPosition = firstPositionInNode(textNode); // Handle whitespace that occurs after the split document().updateLayoutIgnorePendingStylesheets(); if (!endingPosition.isRenderedCharacter()) { Position positionBeforeTextNode(positionInParentBeforeNode(textNode)); // Clear out all whitespace and insert one non-breaking space deleteInsignificantTextDownstream(endingPosition); ASSERT(!textNode->renderer() || textNode->renderer()->style().collapseWhiteSpace()); // Deleting insignificant whitespace will remove textNode if it contains nothing but insignificant whitespace. if (textNode->inDocument()) insertTextIntoNode(textNode, 0, nonBreakingSpaceString()); else { RefPtr<Text> nbspNode = document().createTextNode(nonBreakingSpaceString()); insertNodeAt(nbspNode.get(), positionBeforeTextNode); endingPosition = firstPositionInNode(nbspNode.get()); } } setEndingSelection(VisibleSelection(endingPosition, DOWNSTREAM, endingSelection().isDirectional())); } // Handle the case where there is a typing style. 
RefPtr<EditingStyle> typingStyle = frame().selection().typingStyle(); if (typingStyle && !typingStyle->isEmpty()) { // Apply the typing style to the inserted line break, so that if the selection // leaves and then comes back, new input will have the right style. // FIXME: We shouldn't always apply the typing style to the line break here, // see <rdar://problem/5794462>. applyStyle(typingStyle.get(), firstPositionInOrBeforeNode(nodeToInsert.get()), lastPositionInOrAfterNode(nodeToInsert.get())); // Even though this applyStyle operates on a Range, it still sets an endingSelection(). // It tries to set a VisibleSelection around the content it operated on. So, that VisibleSelection // will either (a) select the line break we inserted, or it will (b) be a caret just // before the line break (if the line break is at the end of a block it isn't selectable). // So, this next call sets the endingSelection() to a caret just after the line break // that we inserted, or just before it if it's at the end of a block. setEndingSelection(endingSelection().visibleEnd()); } rebalanceWhitespace(); }
void NetworkConnectionToWebProcess::registerFileBlobURL(const URL& url, const String& path, const SandboxExtension::Handle& extensionHandle, const String& contentType) { RefPtr<SandboxExtension> extension = SandboxExtension::create(extensionHandle); NetworkBlobRegistry::singleton().registerFileBlobURL(this, url, path, extension.release(), contentType); }
PassRefPtr<FormData> FormData::create(const CString& string) { RefPtr<FormData> result = create(); result->appendData(string.data(), string.length()); return result.release(); }
PassRefPtr<FormData> FormData::create(const Vector<char>& vector) { RefPtr<FormData> result = create(); result->appendData(vector.data(), vector.size()); return result.release(); }
PassRefPtr<FormData> FormData::create(const void* data, size_t size) { RefPtr<FormData> result = create(); result->appendData(data, size); return result.release(); }
PassRefPtr<FormData> FormData::createMultiPart(const FormDataList& list, const TextEncoding& encoding, Document* document) { RefPtr<FormData> result = create(); result->appendKeyValuePairItems(list, encoding, true, document); return result.release(); }
PassRefPtr<FormData> FormData::create(const FormDataList& list, const TextEncoding& encoding, EncodingType encodingType) { RefPtr<FormData> result = create(); result->appendKeyValuePairItems(list, encoding, false, 0, encodingType); return result.release(); }
PassRefPtr<RTCSessionDescriptionRequestImpl> RTCSessionDescriptionRequestImpl::create(ScriptExecutionContext* context, PassRefPtr<RTCSessionDescriptionCallback> successCallback, PassRefPtr<RTCErrorCallback> errorCallback) { RefPtr<RTCSessionDescriptionRequestImpl> request = adoptRef(new RTCSessionDescriptionRequestImpl(context, successCallback, errorCallback)); request->suspendIfNeeded(); return request.release(); }
String CSSMutableStyleDeclaration::getPropertyValue(int propertyID) const { RefPtr<CSSValue> value = getPropertyCSSValue(propertyID); if (value) return value->cssText(); // Shorthand and 4-values properties switch (propertyID) { case CSSPropertyBackgroundPosition: { // FIXME: Is this correct? The code in cssparser.cpp is confusing const int properties[2] = { CSSPropertyBackgroundPositionX, CSSPropertyBackgroundPositionY }; return getLayeredShorthandValue(properties, 2); } case CSSPropertyBackgroundRepeat: { const int properties[2] = { CSSPropertyBackgroundRepeatX, CSSPropertyBackgroundRepeatY }; return getLayeredShorthandValue(properties, 2); } case CSSPropertyBackground: { const int properties[9] = { CSSPropertyBackgroundColor, CSSPropertyBackgroundImage, CSSPropertyBackgroundRepeatX, CSSPropertyBackgroundRepeatY, CSSPropertyBackgroundAttachment, CSSPropertyBackgroundPositionX, CSSPropertyBackgroundPositionY, CSSPropertyBackgroundClip, CSSPropertyBackgroundOrigin }; return getLayeredShorthandValue(properties, 9); } case CSSPropertyBorder: { const int properties[3][4] = {{ CSSPropertyBorderTopWidth, CSSPropertyBorderRightWidth, CSSPropertyBorderBottomWidth, CSSPropertyBorderLeftWidth }, { CSSPropertyBorderTopStyle, CSSPropertyBorderRightStyle, CSSPropertyBorderBottomStyle, CSSPropertyBorderLeftStyle }, { CSSPropertyBorderTopColor, CSSPropertyBorderRightColor, CSSPropertyBorderBottomColor, CSSPropertyBorderLeftColor }}; String res; for (size_t i = 0; i < WTF_ARRAY_LENGTH(properties); ++i) { String value = getCommonValue(properties[i], 4); if (!value.isNull()) { if (!res.isNull()) res += " "; res += value; } } return res; } case CSSPropertyBorderTop: { const int properties[3] = { CSSPropertyBorderTopWidth, CSSPropertyBorderTopStyle, CSSPropertyBorderTopColor}; return getShorthandValue(properties, 3); } case CSSPropertyBorderRight: { const int properties[3] = { CSSPropertyBorderRightWidth, CSSPropertyBorderRightStyle, CSSPropertyBorderRightColor}; return getShorthandValue(properties, 3); } case CSSPropertyBorderBottom: { const int properties[3] = { CSSPropertyBorderBottomWidth, CSSPropertyBorderBottomStyle, CSSPropertyBorderBottomColor}; return getShorthandValue(properties, 3); } case CSSPropertyBorderLeft: { const int properties[3] = { CSSPropertyBorderLeftWidth, CSSPropertyBorderLeftStyle, CSSPropertyBorderLeftColor}; return getShorthandValue(properties, 3); } case CSSPropertyOutline: { const int properties[3] = { CSSPropertyOutlineWidth, CSSPropertyOutlineStyle, CSSPropertyOutlineColor }; return getShorthandValue(properties, 3); } case CSSPropertyBorderColor: { const int properties[4] = { CSSPropertyBorderTopColor, CSSPropertyBorderRightColor, CSSPropertyBorderBottomColor, CSSPropertyBorderLeftColor }; return get4Values(properties); } case CSSPropertyBorderWidth: { const int properties[4] = { CSSPropertyBorderTopWidth, CSSPropertyBorderRightWidth, CSSPropertyBorderBottomWidth, CSSPropertyBorderLeftWidth }; return get4Values(properties); } case CSSPropertyBorderStyle: { const int properties[4] = { CSSPropertyBorderTopStyle, CSSPropertyBorderRightStyle, CSSPropertyBorderBottomStyle, CSSPropertyBorderLeftStyle }; return get4Values(properties); } case CSSPropertyMargin: { const int properties[4] = { CSSPropertyMarginTop, CSSPropertyMarginRight, CSSPropertyMarginBottom, CSSPropertyMarginLeft }; return get4Values(properties); } case CSSPropertyOverflow: { const int properties[2] = { CSSPropertyOverflowX, CSSPropertyOverflowY }; return getCommonValue(properties, 2); } case CSSPropertyPadding: { 
const int properties[4] = { CSSPropertyPaddingTop, CSSPropertyPaddingRight, CSSPropertyPaddingBottom, CSSPropertyPaddingLeft }; return get4Values(properties); } case CSSPropertyListStyle: { const int properties[3] = { CSSPropertyListStyleType, CSSPropertyListStylePosition, CSSPropertyListStyleImage }; return getShorthandValue(properties, 3); } case CSSPropertyWebkitMaskPosition: { // FIXME: Is this correct? The code in cssparser.cpp is confusing const int properties[2] = { CSSPropertyWebkitMaskPositionX, CSSPropertyWebkitMaskPositionY }; return getLayeredShorthandValue(properties, 2); } case CSSPropertyWebkitMaskRepeat: { const int properties[2] = { CSSPropertyWebkitMaskRepeatX, CSSPropertyWebkitMaskRepeatY }; return getLayeredShorthandValue(properties, 2); } case CSSPropertyWebkitMask: { const int properties[] = { CSSPropertyWebkitMaskImage, CSSPropertyWebkitMaskRepeat, CSSPropertyWebkitMaskAttachment, CSSPropertyWebkitMaskPosition, CSSPropertyWebkitMaskClip, CSSPropertyWebkitMaskOrigin }; return getLayeredShorthandValue(properties, 6); } case CSSPropertyWebkitTransformOrigin: { const int properties[3] = { CSSPropertyWebkitTransformOriginX, CSSPropertyWebkitTransformOriginY, CSSPropertyWebkitTransformOriginZ }; return getShorthandValue(properties, 3); } case CSSPropertyWebkitTransition: { const int properties[4] = { CSSPropertyWebkitTransitionProperty, CSSPropertyWebkitTransitionDuration, CSSPropertyWebkitTransitionTimingFunction, CSSPropertyWebkitTransitionDelay }; return getLayeredShorthandValue(properties, 4); } case CSSPropertyWebkitAnimation: { const int properties[7] = { CSSPropertyWebkitAnimationName, CSSPropertyWebkitAnimationDuration, CSSPropertyWebkitAnimationTimingFunction, CSSPropertyWebkitAnimationDelay, CSSPropertyWebkitAnimationIterationCount, CSSPropertyWebkitAnimationDirection, CSSPropertyWebkitAnimationFillMode }; return getLayeredShorthandValue(properties, 7); } #if ENABLE(SVG) case CSSPropertyMarker: { RefPtr<CSSValue> value = getPropertyCSSValue(CSSPropertyMarkerStart); if (value) return value->cssText(); } #endif } return String(); }
void HTMLLinkElement::process() { if (!inDocument() || m_isInShadowTree) { ASSERT(!m_sheet); return; } String type = m_type.lower(); URL url = getNonEmptyURLAttribute(hrefAttr); if (!m_linkLoader.loadLink(m_relAttribute, type, m_sizes->toString(), url, &document())) return; bool acceptIfTypeContainsTextCSS = document().page() && document().page()->settings().treatsAnyTextCSSLinkAsStylesheet(); if (m_disabledState != Disabled && (m_relAttribute.m_isStyleSheet || (acceptIfTypeContainsTextCSS && type.contains("text/css"))) && document().frame() && url.isValid()) { String charset = getAttribute(charsetAttr); if (charset.isEmpty() && document().frame()) charset = document().charset(); if (m_cachedSheet) { removePendingSheet(); m_cachedSheet->removeClient(this); m_cachedSheet = 0; } if (!shouldLoadLink()) return; m_loading = true; bool mediaQueryMatches = true; if (!m_media.isEmpty()) { RefPtr<RenderStyle> documentStyle = Style::resolveForDocument(document()); RefPtr<MediaQuerySet> media = MediaQuerySet::createAllowingDescriptionSyntax(m_media); MediaQueryEvaluator evaluator(document().frame()->view()->mediaType(), document().frame(), documentStyle.get()); mediaQueryMatches = evaluator.eval(media.get()); } // Don't hold up render tree construction and script execution on stylesheets // that are not needed for the rendering at the moment. bool isActive = mediaQueryMatches && !isAlternate(); addPendingSheet(isActive ? ActiveSheet : InactiveSheet); // Load stylesheets that are not needed for the rendering immediately with low priority. ResourceLoadPriority priority = isActive ? ResourceLoadPriorityUnresolved : ResourceLoadPriorityVeryLow; CachedResourceRequest request(ResourceRequest(document().completeURL(url)), charset, priority); request.setInitiator(this); m_cachedSheet = document().cachedResourceLoader()->requestCSSStyleSheet(request); if (m_cachedSheet) m_cachedSheet->addClient(this); else { // The request may have been denied if (for example) the stylesheet is local and the document is remote. m_loading = false; removePendingSheet(); } } else if (m_sheet) { // we no longer contain a stylesheet, e.g. perhaps rel or type was changed clearSheet(); document().styleResolverChanged(DeferRecalcStyle); } }
String CSSMutableStyleDeclaration::get4Values(const int* properties) const { // Assume the properties are in the usual order top, right, bottom, left. RefPtr<CSSValue> topValue = getPropertyCSSValue(properties[0]); RefPtr<CSSValue> rightValue = getPropertyCSSValue(properties[1]); RefPtr<CSSValue> bottomValue = getPropertyCSSValue(properties[2]); RefPtr<CSSValue> leftValue = getPropertyCSSValue(properties[3]); // All 4 properties must be specified. if (!topValue || !rightValue || !bottomValue || !leftValue) return String(); bool showLeft = rightValue->cssText() != leftValue->cssText(); bool showBottom = (topValue->cssText() != bottomValue->cssText()) || showLeft; bool showRight = (topValue->cssText() != rightValue->cssText()) || showBottom; String res = topValue->cssText(); if (showRight) res += " " + rightValue->cssText(); if (showBottom) res += " " + bottomValue->cssText(); if (showLeft) res += " " + leftValue->cssText(); return res; }
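The show* flags above implement the standard CSS four-value collapsing rule: left is omitted when it equals right, bottom when it equals top (unless left is shown), and right when it equals top (unless bottom is shown). The same rule reduced to plain strings, as a hypothetical helper rather than WebCore API:

#include <string>

// Collapses "top right bottom left" values using the usual CSS shorthand rule.
static std::string collapse4Values(const std::string& top, const std::string& right,
                                   const std::string& bottom, const std::string& left)
{
    bool showLeft = right != left;
    bool showBottom = top != bottom || showLeft;
    bool showRight = top != right || showBottom;

    std::string result = top;
    if (showRight)
        result += " " + right;
    if (showBottom)
        result += " " + bottom;
    if (showLeft)
        result += " " + left;
    return result;
}

// collapse4Values("1px", "1px", "1px", "1px") -> "1px"
// collapse4Values("1px", "2px", "1px", "2px") -> "1px 2px"
// collapse4Values("1px", "2px", "3px", "2px") -> "1px 2px 3px"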
HRESULT D3D9DXVA2Manager::Init(nsACString& aFailureReason) { MOZ_ASSERT(NS_IsMainThread()); ScopedGfxFeatureReporter reporter("DXVA2D3D9"); gfx::D3D9VideoCrashGuard crashGuard; if (crashGuard.Crashed()) { NS_WARNING("DXVA2D3D9 crash detected"); aFailureReason.AssignLiteral("DXVA2D3D9 crashes detected in the past"); return E_FAIL; } // Create D3D9Ex. HMODULE d3d9lib = LoadLibraryW(L"d3d9.dll"); NS_ENSURE_TRUE(d3d9lib, E_FAIL); decltype(Direct3DCreate9Ex)* d3d9Create = (decltype(Direct3DCreate9Ex)*) GetProcAddress(d3d9lib, "Direct3DCreate9Ex"); if (!d3d9Create) { NS_WARNING("Couldn't find Direct3DCreate9Ex symbol in d3d9.dll"); aFailureReason.AssignLiteral("Couldn't find Direct3DCreate9Ex symbol in d3d9.dll"); return E_FAIL; } RefPtr<IDirect3D9Ex> d3d9Ex; HRESULT hr = d3d9Create(D3D_SDK_VERSION, getter_AddRefs(d3d9Ex)); if (!d3d9Ex) { NS_WARNING("Direct3DCreate9 failed"); aFailureReason.AssignLiteral("Direct3DCreate9 failed"); return E_FAIL; } // Ensure we can do the YCbCr->RGB conversion in StretchRect. // Fail if we can't. hr = d3d9Ex->CheckDeviceFormatConversion(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, (D3DFORMAT)MAKEFOURCC('N','V','1','2'), D3DFMT_X8R8G8B8); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("CheckDeviceFormatConversion failed with error %X", hr); return hr; } // Create D3D9DeviceEx. We pass null HWNDs here even though the documentation // suggests that one of them should not be. At this point in time Chromium // does the same thing for video acceleration. D3DPRESENT_PARAMETERS params = {0}; params.BackBufferWidth = 1; params.BackBufferHeight = 1; params.BackBufferFormat = D3DFMT_A8R8G8B8; params.BackBufferCount = 1; params.SwapEffect = D3DSWAPEFFECT_DISCARD; params.hDeviceWindow = nullptr; params.Windowed = TRUE; params.Flags = D3DPRESENTFLAG_VIDEO; RefPtr<IDirect3DDevice9Ex> device; hr = d3d9Ex->CreateDeviceEx(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, nullptr, D3DCREATE_FPU_PRESERVE | D3DCREATE_MULTITHREADED | D3DCREATE_MIXED_VERTEXPROCESSING, &params, nullptr, getter_AddRefs(device)); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("CreateDeviceEx failed with error %X", hr); return hr; } // Ensure we can create queries to synchronize operations between devices. // Without this, when we make a copy of the frame in order to share it with // another device, we can't be sure that the copy has finished before the // other device starts using it. RefPtr<IDirect3DQuery9> query; hr = device->CreateQuery(D3DQUERYTYPE_EVENT, getter_AddRefs(query)); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("CreateQuery failed with error %X", hr); return hr; } // Create and initialize IDirect3DDeviceManager9.

UINT resetToken = 0; RefPtr<IDirect3DDeviceManager9> deviceManager; hr = wmf::DXVA2CreateDirect3DDeviceManager9(&resetToken, getter_AddRefs(deviceManager)); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("DXVA2CreateDirect3DDeviceManager9 failed with error %X", hr); return hr; } hr = deviceManager->ResetDevice(device, resetToken); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::ResetDevice failed with error %X", hr); return hr; } HANDLE deviceHandle; RefPtr<IDirectXVideoDecoderService> decoderService; hr = deviceManager->OpenDeviceHandle(&deviceHandle); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDirect3DDeviceManager9::OpenDeviceHandle failed with error %X", hr); return hr; } hr = deviceManager->GetVideoService(deviceHandle, IID_PPV_ARGS(decoderService.StartAssignment())); deviceManager->CloseDeviceHandle(deviceHandle); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDirectXVideoDecoderServer::GetVideoService failed with error %X", hr); return hr; } UINT deviceCount; GUID* decoderDevices = nullptr; hr = decoderService->GetDecoderDeviceGuids(&deviceCount, &decoderDevices); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDirectXVideoDecoderServer::GetDecoderDeviceGuids failed with error %X", hr); return hr; } bool found = false; for (UINT i = 0; i < deviceCount; i++) { if (decoderDevices[i] == DXVA2_ModeH264_E || decoderDevices[i] == DXVA2_Intel_ModeH264_E) { mDecoderGUID = decoderDevices[i]; found = true; break; } } CoTaskMemFree(decoderDevices); if (!found) { aFailureReason.AssignLiteral("Failed to find an appropriate decoder GUID"); return E_FAIL; } D3DADAPTER_IDENTIFIER9 adapter; hr = d3d9Ex->GetAdapterIdentifier(D3DADAPTER_DEFAULT, 0, &adapter); if (!SUCCEEDED(hr)) { aFailureReason = nsPrintfCString("IDirect3D9Ex::GetAdapterIdentifier failed with error %X", hr); return hr; } if (adapter.VendorId == 0x1022 && !MediaPrefs::PDMWMFSkipBlacklist()) { for (size_t i = 0; i < MOZ_ARRAY_LENGTH(sAMDPreUVD4); i++) { if (adapter.DeviceId == sAMDPreUVD4[i]) { mIsAMDPreUVD4 = true; break; } } } RefPtr<IDirect3DSurface9> syncSurf; hr = device->CreateRenderTarget(kSyncSurfaceSize, kSyncSurfaceSize, D3DFMT_X8R8G8B8, D3DMULTISAMPLE_NONE, 0, TRUE, getter_AddRefs(syncSurf), NULL); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); mDecoderService = decoderService; mResetToken = resetToken; mD3D9 = d3d9Ex; mDevice = device; mDeviceManager = deviceManager; mSyncSurface = syncSurf; if (layers::ImageBridgeChild::GetSingleton()) { mTextureClientAllocator = new D3D9RecycleAllocator(layers::ImageBridgeChild::GetSingleton().get(), mDevice); } else { mTextureClientAllocator = new D3D9RecycleAllocator(layers::VideoBridgeChild::GetSingleton(), mDevice); } mTextureClientAllocator->SetMaxPoolSize(5); Telemetry::Accumulate(Telemetry::MEDIA_DECODER_BACKEND_USED, uint32_t(media::MediaDecoderBackend::WMFDXVA2D3D9)); reporter.SetSuccessful(); return S_OK; }
String CSSMutableStyleDeclaration::getLayeredShorthandValue(const int* properties, unsigned number) const { String res; // Begin by collecting the properties into an array. Vector< RefPtr<CSSValue> > values(number); size_t numLayers = 0; for (size_t i = 0; i < number; ++i) { values[i] = getPropertyCSSValue(properties[i]); if (values[i]) { if (values[i]->isValueList()) { CSSValueList* valueList = static_cast<CSSValueList*>(values[i].get()); numLayers = max(valueList->length(), numLayers); } else numLayers = max<size_t>(1U, numLayers); } } // Now stitch the properties together. Implicit initial values are flagged as such and // can safely be omitted. for (size_t i = 0; i < numLayers; i++) { String layerRes; bool useRepeatXShorthand = false; bool useRepeatYShorthand = false; bool useSingleWordShorthand = false; for (size_t j = 0; j < number; j++) { RefPtr<CSSValue> value; if (values[j]) { if (values[j]->isValueList()) value = static_cast<CSSValueList*>(values[j].get())->item(i); else { value = values[j]; // Color only belongs in the last layer. if (properties[j] == CSSPropertyBackgroundColor) { if (i != numLayers - 1) value = 0; } else if (i != 0) // Other singletons only belong in the first layer. value = 0; } } // We need to report background-repeat as it was written in the CSS. If the property is implicit, // then it was written with only one value. Here we figure out which value that was so we can // report back correctly. if (properties[j] == CSSPropertyBackgroundRepeatX && isPropertyImplicit(properties[j])) { // BUG 49055: make sure the value was not reset in the layer check just above. if (j < number - 1 && properties[j + 1] == CSSPropertyBackgroundRepeatY && value) { RefPtr<CSSValue> yValue; RefPtr<CSSValue> nextValue = values[j + 1]; if (nextValue->isValueList()) yValue = static_cast<CSSValueList*>(nextValue.get())->itemWithoutBoundsCheck(i); else yValue = nextValue; int xId = static_cast<CSSPrimitiveValue*>(value.get())->getIdent(); int yId = static_cast<CSSPrimitiveValue*>(yValue.get())->getIdent(); if (xId != yId) { if (xId == CSSValueRepeat && yId == CSSValueNoRepeat) { useRepeatXShorthand = true; ++j; } else if (xId == CSSValueNoRepeat && yId == CSSValueRepeat) { useRepeatYShorthand = true; continue; } } else { useSingleWordShorthand = true; ++j; } } } if (value && !value->isImplicitInitialValue()) { if (!layerRes.isNull()) layerRes += " "; if (useRepeatXShorthand) { useRepeatXShorthand = false; layerRes += getValueName(CSSValueRepeatX); } else if (useRepeatYShorthand) { useRepeatYShorthand = false; layerRes += getValueName(CSSValueRepeatY); } else if (useSingleWordShorthand) { useSingleWordShorthand = false; layerRes += value->cssText(); } else layerRes += value->cssText(); } } if (!layerRes.isNull()) { if (!res.isNull()) res += ", "; res += layerRes; } } return res; }
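The useRepeatX/useRepeatY/useSingleWordShorthand bookkeeping above reconstructs how background-repeat was originally written: a repeat/no-repeat pair collapses to the repeat-x keyword, no-repeat/repeat to repeat-y, and equal values are reported once. A compact sketch of that mapping with a hypothetical enum instead of the CSSValue machinery:

#include <string>

enum class Repeat { RepeatValue, NoRepeat, Round, Space };

static std::string repeatToString(Repeat r)
{
    switch (r) {
    case Repeat::RepeatValue: return "repeat";
    case Repeat::NoRepeat: return "no-repeat";
    case Repeat::Round: return "round";
    case Repeat::Space: return "space";
    }
    return "";
}

// Serializes a background-repeat-x / background-repeat-y pair the way the
// shorthand code above does when the longhands were implicitly expanded.
static std::string serializeBackgroundRepeat(Repeat x, Repeat y)
{
    if (x == Repeat::RepeatValue && y == Repeat::NoRepeat)
        return "repeat-x";
    if (x == Repeat::NoRepeat && y == Repeat::RepeatValue)
        return "repeat-y";
    if (x == y)
        return repeatToString(x); // A single keyword covers both axes.
    return repeatToString(x) + " " + repeatToString(y);
}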
HRESULT D3D11DXVA2Manager::CopyToImage(IMFSample* aVideoSample, const nsIntRect& aRegion, Image** aOutImage) { NS_ENSURE_TRUE(aVideoSample, E_POINTER); NS_ENSURE_TRUE(aOutImage, E_POINTER); // Our video frame is stored in a non-sharable ID3D11Texture2D. We need // to create a copy of that frame as a sharable resource, save its share // handle, and put that handle into the rendering pipeline. RefPtr<D3D11ShareHandleImage> image = new D3D11ShareHandleImage(gfx::IntSize(mWidth, mHeight), aRegion); bool ok = image->AllocateTexture(mTextureClientAllocator, mDevice); NS_ENSURE_TRUE(ok, E_FAIL); HRESULT hr = mTransform->Input(aVideoSample); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); RefPtr<IMFSample> sample; RefPtr<ID3D11Texture2D> texture = image->GetTexture(); hr = CreateOutputSample(sample, texture); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); hr = mTransform->Output(&sample); RefPtr<ID3D11DeviceContext> ctx; mDevice->GetImmediateContext(getter_AddRefs(ctx)); // Copy a small rect into our sync surface, and then map it // to block until decoding/color conversion completes. D3D11_BOX rect = { 0, 0, 0, kSyncSurfaceSize, kSyncSurfaceSize, 1 }; ctx->CopySubresourceRegion(mSyncSurface, 0, 0, 0, 0, texture, 0, &rect); D3D11_MAPPED_SUBRESOURCE mapped; hr = ctx->Map(mSyncSurface, 0, D3D11_MAP_READ, 0, &mapped); NS_ENSURE_TRUE(SUCCEEDED(hr), hr); ctx->Unmap(mSyncSurface, 0); image.forget(aOutImage); return S_OK; }
void SVGLengthList::calculateAnimatedValue(SVGAnimationElement* animationElement, float percentage, unsigned repeatCount, PassRefPtr<SVGPropertyBase> fromValue, PassRefPtr<SVGPropertyBase> toValue, PassRefPtr<SVGPropertyBase> toAtEndOfDurationValue, SVGElement* contextElement) { RefPtr<SVGLengthList> fromList = toSVGLengthList(fromValue); RefPtr<SVGLengthList> toList = toSVGLengthList(toValue); RefPtr<SVGLengthList> toAtEndOfDurationList = toSVGLengthList(toAtEndOfDurationValue); SVGLengthContext lengthContext(contextElement); ASSERT(m_mode == SVGLength::lengthModeForAnimatedLengthAttribute(animationElement->attributeName())); size_t fromLengthListSize = fromList->length(); size_t toLengthListSize = toList->length(); size_t toAtEndOfDurationListSize = toAtEndOfDurationList->length(); if (!adjustFromToListValues(fromList, toList, percentage, animationElement->animationMode())) return; for (size_t i = 0; i < toLengthListSize; ++i) { float animatedNumber = at(i)->value(lengthContext); SVGLengthType unitType = toList->at(i)->unitType(); float effectiveFrom = 0; if (fromLengthListSize) { if (percentage < 0.5) unitType = fromList->at(i)->unitType(); effectiveFrom = fromList->at(i)->value(lengthContext); } float effectiveTo = toList->at(i)->value(lengthContext); float effectiveToAtEnd = i < toAtEndOfDurationListSize ? toAtEndOfDurationList->at(i)->value(lengthContext) : 0; animationElement->animateAdditiveNumber(percentage, repeatCount, effectiveFrom, effectiveTo, effectiveToAtEnd, animatedNumber); at(i)->setUnitType(unitType); at(i)->setValue(animatedNumber, lengthContext, ASSERT_NO_EXCEPTION); } }
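The heavy lifting in the loop is animateAdditiveNumber(), which follows the SMIL interpolation rule: the animated value is from + (to − from) · percentage, with repeatCount · toAtEndOfDuration accumulated on top for cumulative animations. A heavily simplified, hedged sketch of that formula — the signature and the additive/accumulate flags here are assumptions, not the real SVGAnimationElement API:

// Simplified SMIL-style interpolation: linear blend between 'from' and 'to',
// plus accumulation of the end-of-duration value across completed repeats.
static float animateAdditiveNumber(float percentage, unsigned repeatCount,
                                   float from, float to, float toAtEndOfDuration,
                                   float baseValue, bool isAdditive, bool isAccumulating)
{
    float value = from + (to - from) * percentage;
    if (isAccumulating && repeatCount)
        value += toAtEndOfDuration * repeatCount;
    if (isAdditive)
        value += baseValue; // Add on top of the underlying (non-animated) value.
    return value;
}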
void BackendDispatcher::dispatch(const String& message) { Ref<BackendDispatcher> protect(*this); ASSERT(!m_protocolErrors.size()); long requestId = 0; RefPtr<JSON::Object> messageObject; { // In case this is a re-entrant call from a nested run loop, we don't want to lose // the outer request's id just because the inner request is bogus. SetForScope<std::optional<long>> scopedRequestId(m_currentRequestId, std::nullopt); RefPtr<JSON::Value> parsedMessage; if (!JSON::Value::parseJSON(message, parsedMessage)) { reportProtocolError(ParseError, ASCIILiteral("Message must be in JSON format")); sendPendingErrors(); return; } if (!parsedMessage->asObject(messageObject)) { reportProtocolError(InvalidRequest, ASCIILiteral("Message must be a JSONified object")); sendPendingErrors(); return; } RefPtr<JSON::Value> requestIdValue; if (!messageObject->getValue(ASCIILiteral("id"), requestIdValue)) { reportProtocolError(InvalidRequest, ASCIILiteral("'id' property was not found")); sendPendingErrors(); return; } if (!requestIdValue->asInteger(requestId)) { reportProtocolError(InvalidRequest, ASCIILiteral("The type of 'id' property must be integer")); sendPendingErrors(); return; } } { // We could be called re-entrantly from a nested run loop, so restore the previous id. SetForScope<std::optional<long>> scopedRequestId(m_currentRequestId, requestId); RefPtr<JSON::Value> methodValue; if (!messageObject->getValue(ASCIILiteral("method"), methodValue)) { reportProtocolError(InvalidRequest, ASCIILiteral("'method' property wasn't found")); sendPendingErrors(); return; } String methodString; if (!methodValue->asString(methodString)) { reportProtocolError(InvalidRequest, ASCIILiteral("The type of 'method' property must be string")); sendPendingErrors(); return; } Vector<String> domainAndMethod; methodString.split('.', true, domainAndMethod); if (domainAndMethod.size() != 2 || !domainAndMethod[0].length() || !domainAndMethod[1].length()) { reportProtocolError(InvalidRequest, ASCIILiteral("The 'method' property was formatted incorrectly. It should be 'Domain.method'")); sendPendingErrors(); return; } String domain = domainAndMethod[0]; SupplementalBackendDispatcher* domainDispatcher = m_dispatchers.get(domain); if (!domainDispatcher) { reportProtocolError(MethodNotFound, "'" + domain + "' domain was not found"); sendPendingErrors(); return; } String method = domainAndMethod[1]; domainDispatcher->dispatch(requestId, method, messageObject.releaseNonNull()); if (m_protocolErrors.size()) sendPendingErrors(); } }
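The method-routing part of dispatch() is just "Domain.method" string validation followed by a dispatcher lookup: exactly one dot, non-empty text on both sides, otherwise InvalidRequest. A self-contained illustration of that parsing contract, using hypothetical types rather than the Inspector protocol classes:

#include <optional>
#include <string>

struct ParsedMethod {
    std::string domain;
    std::string method;
};

// Splits "Domain.method" the way the dispatcher expects: exactly two non-empty
// components separated by a single dot. Anything else is rejected.
static std::optional<ParsedMethod> parseMethod(const std::string& methodString)
{
    size_t dot = methodString.find('.');
    if (dot == std::string::npos || dot == 0 || dot + 1 == methodString.size())
        return std::nullopt;
    if (methodString.find('.', dot + 1) != std::string::npos)
        return std::nullopt; // More than two components.
    return ParsedMethod{ methodString.substr(0, dot), methodString.substr(dot + 1) };
}

// parseMethod("Runtime.evaluate") -> {"Runtime", "evaluate"}
// parseMethod("Runtime.") / parseMethod(".evaluate") / parseMethod("A.b.c") -> nullopt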
inline PassRefPtr<SVGLengthList> toSVGLengthList(PassRefPtr<SVGPropertyBase> passBase) { RefPtr<SVGPropertyBase> base = passBase; ASSERT(base->type() == SVGLengthList::classType()); return static_pointer_cast<SVGLengthList>(base.release()); }
void RestyleTracker::DoProcessRestyles() { nsAutoCString docURL; if (profiler_is_active()) { nsIURI *uri = Document()->GetDocumentURI(); if (uri) { uri->GetSpec(docURL); } else { docURL = "N/A"; } } PROFILER_LABEL_PRINTF("RestyleTracker", "ProcessRestyles", js::ProfileEntry::Category::CSS, "(%s)", docURL.get()); bool isTimelineRecording = false; nsDocShell* docShell = static_cast<nsDocShell*>(mRestyleManager->PresContext()->GetDocShell()); if (docShell) { docShell->GetRecordProfileTimelineMarkers(&isTimelineRecording); } // Create a AnimationsWithDestroyedFrame during restyling process to // stop animations on elements that have no frame at the end of the // restyling process. RestyleManager::AnimationsWithDestroyedFrame animationsWithDestroyedFrame(mRestyleManager); // Create a ReframingStyleContexts struct on the stack and put it in our // mReframingStyleContexts for almost all of the remaining scope of // this function. // // It needs to be *in* scope during BeginProcessingRestyles, which // might (if mDoRebuildAllStyleData is true) do substantial amounts of // restyle processing. // // However, it needs to be *out* of scope during // EndProcessingRestyles, since we should release the style contexts // it holds prior to any EndReconstruct call that // EndProcessingRestyles makes. This is because in EndReconstruct we // try to destroy the old rule tree using the GC mechanism, which // means it only gets destroyed if it's unreferenced (and if it's // referenced, we assert). So we want the ReframingStyleContexts // (which holds old style contexts) to be destroyed before the // EndReconstruct so those style contexts go away before // EndReconstruct. { RestyleManager::ReframingStyleContexts reframingStyleContexts(mRestyleManager); mRestyleManager->BeginProcessingRestyles(*this); LOG_RESTYLE("Processing %d pending %srestyles with %d restyle roots for %s", mPendingRestyles.Count(), mRestyleManager->PresContext()->TransitionManager()-> InAnimationOnlyStyleUpdate() ? (const char*) "animation " : (const char*) "", static_cast<int>(mRestyleRoots.Length()), GetDocumentURI(Document()).get()); LOG_RESTYLE_INDENT(); // loop so that we process any restyle events generated by processing while (mPendingRestyles.Count()) { if (mHaveLaterSiblingRestyles) { // Convert them to individual restyles on all the later siblings nsAutoTArray<RefPtr<Element>, RESTYLE_ARRAY_STACKSIZE> laterSiblingArr; LaterSiblingCollector siblingCollector = { this, &laterSiblingArr }; mPendingRestyles.Enumerate(CollectLaterSiblings, &siblingCollector); for (uint32_t i = 0; i < laterSiblingArr.Length(); ++i) { Element* element = laterSiblingArr[i]; for (nsIContent* sibling = element->GetNextSibling(); sibling; sibling = sibling->GetNextSibling()) { if (sibling->IsElement()) { LOG_RESTYLE("adding pending restyle for %s due to " "eRestyle_LaterSiblings hint on %s", FrameTagToString(sibling->AsElement()).get(), FrameTagToString(element->AsElement()).get()); if (AddPendingRestyle(sibling->AsElement(), eRestyle_Subtree, NS_STYLE_HINT_NONE)) { // Nothing else to do here; we'll handle the following // siblings when we get to |sibling| in laterSiblingArr. 
break; } } } } // Now remove all those eRestyle_LaterSiblings bits for (uint32_t i = 0; i < laterSiblingArr.Length(); ++i) { Element* element = laterSiblingArr[i]; NS_ASSERTION(element->HasFlag(RestyleBit()), "How did that happen?"); RestyleData* data; #ifdef DEBUG bool found = #endif mPendingRestyles.Get(element, &data); NS_ASSERTION(found, "Where did our entry go?"); data->mRestyleHint = nsRestyleHint(data->mRestyleHint & ~eRestyle_LaterSiblings); } LOG_RESTYLE("%d pending restyles after expanding out " "eRestyle_LaterSiblings", mPendingRestyles.Count()); mHaveLaterSiblingRestyles = false; } uint32_t rootCount; while ((rootCount = mRestyleRoots.Length())) { // Make sure to pop the element off our restyle root array, so // that we can freely append to the array as we process this // element. RefPtr<Element> element; element.swap(mRestyleRoots[rootCount - 1]); mRestyleRoots.RemoveElementAt(rootCount - 1); LOG_RESTYLE("processing style root %s at index %d", FrameTagToString(element).get(), rootCount - 1); LOG_RESTYLE_INDENT(); // Do the document check before calling GetRestyleData, since we // don't want to do the sibling-processing GetRestyleData does if // the node is no longer relevant. if (element->GetCrossShadowCurrentDoc() != Document()) { // Content node has been removed from our document; nothing else // to do here LOG_RESTYLE("skipping, no longer in the document"); continue; } nsAutoPtr<RestyleData> data; if (!GetRestyleData(element, data)) { LOG_RESTYLE("skipping, already restyled"); continue; } if (isTimelineRecording) { UniquePtr<TimelineMarker> marker = MakeUnique<RestyleTimelineMarker>( data->mRestyleHint, MarkerTracingType::START); TimelineConsumers::AddMarkerForDocShell(docShell, Move(marker)); } #if defined(MOZ_ENABLE_PROFILER_SPS) && !defined(MOZILLA_XPCOMRT_API) Maybe<GeckoProfilerTracingRAII> profilerRAII; if (profiler_feature_active("restyle")) { profilerRAII.emplace("Paint", "Styles", Move(data->mBacktrace)); } #endif ProcessOneRestyle(element, data->mRestyleHint, data->mChangeHint, data->mRestyleHintData); AddRestyleRootsIfAwaitingRestyle(data->mDescendants); if (isTimelineRecording) { UniquePtr<TimelineMarker> marker = MakeUnique<RestyleTimelineMarker>( data->mRestyleHint, MarkerTracingType::END); TimelineConsumers::AddMarkerForDocShell(docShell, Move(marker)); } } if (mHaveLaterSiblingRestyles) { // Keep processing restyles for now continue; } // Now we only have entries with change hints left. To be safe in // case of reentry from the handing of the change hint, use a // scratch array instead of calling out to ProcessOneRestyle while // enumerating the hashtable. Use the stack if we can, otherwise // fall back on heap-allocation. 
nsAutoTArray<RestyleEnumerateData, RESTYLE_ARRAY_STACKSIZE> restyleArr; RestyleEnumerateData* restylesToProcess = restyleArr.AppendElements(mPendingRestyles.Count()); if (restylesToProcess) { RestyleEnumerateData* lastRestyle = restylesToProcess; RestyleCollector collector = { this, &lastRestyle }; mPendingRestyles.Enumerate(CollectRestyles, &collector); // Clear the hashtable now that we don't need it anymore mPendingRestyles.Clear(); #ifdef RESTYLE_LOGGING uint32_t index = 0; #endif for (RestyleEnumerateData* currentRestyle = restylesToProcess; currentRestyle != lastRestyle; ++currentRestyle) { LOG_RESTYLE("processing pending restyle %s at index %d/%d", FrameTagToString(currentRestyle->mElement).get(), index++, collector.count); LOG_RESTYLE_INDENT(); #if defined(MOZ_ENABLE_PROFILER_SPS) && !defined(MOZILLA_XPCOMRT_API) Maybe<GeckoProfilerTracingRAII> profilerRAII; if (profiler_feature_active("restyle")) { profilerRAII.emplace("Paint", "Styles", Move(currentRestyle->mBacktrace)); } #endif if (isTimelineRecording) { UniquePtr<TimelineMarker> marker = MakeUnique<RestyleTimelineMarker>( currentRestyle->mRestyleHint, MarkerTracingType::START); TimelineConsumers::AddMarkerForDocShell(docShell, Move(marker)); } ProcessOneRestyle(currentRestyle->mElement, currentRestyle->mRestyleHint, currentRestyle->mChangeHint, currentRestyle->mRestyleHintData); if (isTimelineRecording) { UniquePtr<TimelineMarker> marker = MakeUnique<RestyleTimelineMarker>( currentRestyle->mRestyleHint, MarkerTracingType::END); TimelineConsumers::AddMarkerForDocShell(docShell, Move(marker)); } } } } } // mPendingRestyles is now empty. mHaveSelectors = false; mRestyleManager->EndProcessingRestyles(); }
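The final loop above copies the pending-restyle hashtable into a scratch array before calling out, so reentrant additions made during ProcessOneRestyle cannot invalidate the enumeration. The same defensive pattern, reduced to standard containers with a hypothetical ProcessOne callback:

#include <functional>
#include <unordered_map>
#include <utility>
#include <vector>

struct Entry { int hint; };

// Drains 'pending' safely even if 'processOne' re-enters and adds new work:
// the table is snapshotted into a local vector and cleared before processing,
// so it is never mutated while being iterated. The outer loop then picks up
// any entries that were added reentrantly.
static void drainPending(std::unordered_map<int, Entry>& pending,
                         const std::function<void(int, const Entry&)>& processOne)
{
    while (!pending.empty()) {
        std::vector<std::pair<int, Entry>> snapshot(pending.begin(), pending.end());
        pending.clear();
        for (const auto& item : snapshot)
            processOne(item.first, item.second);
    }
}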
PassRefPtr<SVGLengthList> SVGLengthList::clone() { RefPtr<SVGLengthList> ret = SVGLengthList::create(m_mode); ret->deepCopy(this); return ret.release(); }
void DecodedStreamData::Forget() { mListener->Forget(); }
PassRefPtr<SVGPropertyBase> SVGLengthList::cloneForAnimation(const String& value) const { RefPtr<SVGLengthList> ret = SVGLengthList::create(m_mode); ret->setValueAsString(value, IGNORE_EXCEPTION); return ret.release(); }
void IndentOutdentCommand::outdentParagraph() { VisiblePosition visibleStartOfParagraph = startOfParagraph(endingSelection().visibleStart()); VisiblePosition visibleEndOfParagraph = endOfParagraph(visibleStartOfParagraph); Node* enclosingNode = enclosingNodeOfType(visibleStartOfParagraph.deepEquivalent(), &isListOrIndentBlockquote); if (!enclosingNode || !isContentEditable(enclosingNode->parentNode())) // We can't outdent if there is no place to go! return; // Use InsertListCommand to remove the selection from the list if (enclosingNode->hasTagName(olTag)) { applyCommandToComposite(InsertListCommand::create(document(), InsertListCommand::OrderedList)); return; } if (enclosingNode->hasTagName(ulTag)) { applyCommandToComposite(InsertListCommand::create(document(), InsertListCommand::UnorderedList)); return; } // The selection is inside a blockquote i.e. enclosingNode is a blockquote VisiblePosition positionInEnclosingBlock = VisiblePosition(Position(enclosingNode, 0)); VisiblePosition startOfEnclosingBlock = startOfBlock(positionInEnclosingBlock); VisiblePosition lastPositionInEnclosingBlock = VisiblePosition(Position(enclosingNode, enclosingNode->childNodeCount())); VisiblePosition endOfEnclosingBlock = endOfBlock(lastPositionInEnclosingBlock); if (visibleStartOfParagraph == startOfEnclosingBlock && visibleEndOfParagraph == endOfEnclosingBlock) { // The blockquote doesn't contain anything outside the paragraph, so it can be totally removed. Node* splitPoint = enclosingNode->nextSibling(); removeNodePreservingChildren(enclosingNode); // outdentRegion() assumes it is operating on the first paragraph of an enclosing blockquote, but if there are multiply nested blockquotes and we've // just removed one, then this assumption isn't true. By splitting the next containing blockquote after this node, we keep this assumption true if (splitPoint) { if (Node* splitPointParent = splitPoint->parentNode()) { if (splitPointParent->hasTagName(blockquoteTag) && !splitPoint->hasTagName(blockquoteTag) && isContentEditable(splitPointParent->parentNode())) // We can't outdent if there is no place to go! splitElement(static_cast<Element*>(splitPointParent), splitPoint); } } updateLayout(); visibleStartOfParagraph = VisiblePosition(visibleStartOfParagraph.deepEquivalent()); visibleEndOfParagraph = VisiblePosition(visibleEndOfParagraph.deepEquivalent()); if (visibleStartOfParagraph.isNotNull() && !isStartOfParagraph(visibleStartOfParagraph)) insertNodeAt(createBreakElement(document()), visibleStartOfParagraph.deepEquivalent()); if (visibleEndOfParagraph.isNotNull() && !isEndOfParagraph(visibleEndOfParagraph)) insertNodeAt(createBreakElement(document()), visibleEndOfParagraph.deepEquivalent()); return; } Node* enclosingBlockFlow = enclosingBlock(visibleStartOfParagraph.deepEquivalent().node()); RefPtr<Node> splitBlockquoteNode = enclosingNode; if (enclosingBlockFlow != enclosingNode) splitBlockquoteNode = splitTreeToNode(enclosingBlockFlow, enclosingNode, true); else { // We split the blockquote at where we start outdenting. splitElement(static_cast<Element*>(enclosingNode), visibleStartOfParagraph.deepEquivalent().node()); } RefPtr<Node> placeholder = createBreakElement(document()); insertNodeBefore(placeholder, splitBlockquoteNode); moveParagraph(startOfParagraph(visibleStartOfParagraph), endOfParagraph(visibleEndOfParagraph), VisiblePosition(Position(placeholder.get(), 0)), true); }
void BasicPaintedLayer::PaintThebes(gfxContext* aContext, Layer* aMaskLayer, LayerManager::DrawPaintedLayerCallback aCallback, void* aCallbackData) { PROFILER_LABEL("BasicPaintedLayer", "PaintThebes", js::ProfileEntry::Category::GRAPHICS); NS_ASSERTION(BasicManager()->InDrawing(), "Can only draw in drawing phase"); float opacity = GetEffectiveOpacity(); CompositionOp effectiveOperator = GetEffectiveOperator(this); if (!BasicManager()->IsRetained()) { mValidRegion.SetEmpty(); mContentClient->Clear(); nsIntRegion toDraw = IntersectWithClip(GetLocalVisibleRegion().ToUnknownRegion(), aContext); RenderTraceInvalidateStart(this, "FFFF00", toDraw.GetBounds()); if (!toDraw.IsEmpty() && !IsHidden()) { if (!aCallback) { BasicManager()->SetTransactionIncomplete(); return; } aContext->Save(); bool needsGroup = opacity != 1.0 || effectiveOperator != CompositionOp::OP_OVER || aMaskLayer; RefPtr<gfxContext> context = nullptr; BasicLayerManager::PushedGroup group; bool availableGroup = false; if (needsGroup) { availableGroup = BasicManager()->PushGroupForLayer(aContext, this, toDraw, group); if (availableGroup) { context = group.mGroupTarget; } } else { context = aContext; } if (context) { SetAntialiasingFlags(this, context->GetDrawTarget()); aCallback(this, context, toDraw, toDraw, DrawRegionClip::NONE, nsIntRegion(), aCallbackData); } if (needsGroup && availableGroup) { BasicManager()->PopGroupForLayer(group); } aContext->Restore(); } RenderTraceInvalidateEnd(this, "FFFF00"); return; } if (BasicManager()->IsTransactionIncomplete()) return; gfxRect clipExtents; clipExtents = aContext->GetClipExtents(); // Pull out the mask surface and transform here, because the mask // is internal to basic layers AutoMoz2DMaskData mask; SourceSurface* maskSurface = nullptr; Matrix maskTransform; if (GetMaskData(aMaskLayer, aContext->GetDeviceOffset(), &mask)) { maskSurface = mask.GetSurface(); maskTransform = mask.GetTransform(); } if (!IsHidden() && !clipExtents.IsEmpty()) { mContentClient->DrawTo(this, aContext->GetDrawTarget(), opacity, effectiveOperator, maskSurface, &maskTransform); } }