void UninitializedObjectChecker::checkEndFunction(
    const ReturnStmt *RS, CheckerContext &Context) const {

  const auto *CtorDecl = dyn_cast_or_null<CXXConstructorDecl>(
      Context.getLocationContext()->getDecl());
  if (!CtorDecl)
    return;

  if (!CtorDecl->isUserProvided())
    return;

  if (CtorDecl->getParent()->isUnion())
    return;

  // This avoids essentially the same error being reported multiple times.
  if (willObjectBeAnalyzedLater(CtorDecl, Context))
    return;

  Optional<nonloc::LazyCompoundVal> Object = getObjectVal(CtorDecl, Context);
  if (!Object)
    return;

  FindUninitializedFields F(Context.getState(), Object->getRegion(),
                            CheckPointeeInitialization);

  const UninitFieldMap &UninitFields = F.getUninitFields();

  if (UninitFields.empty())
    return;

  // In non-pedantic mode, if Object's region doesn't contain a single
  // initialized field, we'll assume that Object was intentionally left
  // uninitialized.
  if (!IsPedantic && !F.isAnyFieldInitialized())
    return;

  // There are uninitialized fields in the record.

  ExplodedNode *Node = Context.generateNonFatalErrorNode(Context.getState());
  if (!Node)
    return;

  PathDiagnosticLocation LocUsedForUniqueing;
  const Stmt *CallSite = Context.getStackFrame()->getCallSite();
  if (CallSite)
    LocUsedForUniqueing = PathDiagnosticLocation::createBegin(
        CallSite, Context.getSourceManager(), Node->getLocationContext());

  // For Plist consumers that don't support notes just yet, we'll convert notes
  // to warnings.
  if (ShouldConvertNotesToWarnings) {
    for (const auto &Pair : UninitFields) {

      auto Report = llvm::make_unique<BugReport>(
          *BT_uninitField, Pair.second, Node, LocUsedForUniqueing,
          Node->getLocationContext()->getDecl());
      Context.emitReport(std::move(Report));
    }
    return;
  }

  SmallString<100> WarningBuf;
  llvm::raw_svector_ostream WarningOS(WarningBuf);
  WarningOS << UninitFields.size() << " uninitialized field"
            << (UninitFields.size() == 1 ? "" : "s")
            << " at the end of the constructor call";

  auto Report = llvm::make_unique<BugReport>(
      *BT_uninitField, WarningOS.str(), Node, LocUsedForUniqueing,
      Node->getLocationContext()->getDecl());

  for (const auto &Pair : UninitFields) {
    Report->addNote(Pair.second,
                    PathDiagnosticLocation::create(Pair.first->getDecl(),
                                                   Context.getSourceManager()));
  }
  Context.emitReport(std::move(Report));
}
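
// For context, a minimal sketch of code the callback above would warn on.
// This is a hypothetical test case; the exact diagnostic text depends on
// checker options such as pedantic mode and note-to-warning conversion.
struct UninitDemo {
  int a;
  int b; // never written; the checker attaches a note to this field
  UninitDemo() : a(0) {} // warning: 1 uninitialized field at the end of
                         // the constructor call
};
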
PaintLayerPainter::PaintResult PaintLayerPainter::paintLayerContentsInternal(GraphicsContext* context, const PaintLayerPaintingInfo& paintingInfoArg, PaintLayerFlags paintFlags, FragmentPolicy fragmentPolicy)
{
    ASSERT(m_paintLayer.isSelfPaintingLayer() || m_paintLayer.hasSelfPaintingLayerDescendant());
    ASSERT(!(paintFlags & PaintLayerAppliedTransform));

    bool isSelfPaintingLayer = m_paintLayer.isSelfPaintingLayer();
    bool isPaintingOverlayScrollbars = paintFlags & PaintLayerPaintingOverlayScrollbars;
    bool isPaintingScrollingContent = paintFlags & PaintLayerPaintingCompositingScrollingPhase;
    bool isPaintingCompositedForeground = paintFlags & PaintLayerPaintingCompositingForegroundPhase;
    bool isPaintingCompositedBackground = paintFlags & PaintLayerPaintingCompositingBackgroundPhase;
    bool isPaintingOverflowContents = paintFlags & PaintLayerPaintingOverflowContents;
    // Outline always needs to be painted even if we have no visible content. Also,
    // the outline is painted in the background phase during composited scrolling.
    // If it were painted in the foreground phase, it would move with the scrolled
    // content. When not composited scrolling, the outline is painted in the
    // foreground phase. Since scrolled contents are moved by paint invalidation in this
    // case, the outline won't get 'dragged along'.
    bool shouldPaintOutline = isSelfPaintingLayer && !isPaintingOverlayScrollbars
        && ((isPaintingScrollingContent && isPaintingCompositedBackground)
        || (!isPaintingScrollingContent && isPaintingCompositedForeground));
    bool shouldPaintContent = m_paintLayer.hasVisibleContent() && isSelfPaintingLayer && !isPaintingOverlayScrollbars;

    PaintResult result = FullyPainted;

    if (paintFlags & PaintLayerPaintingRootBackgroundOnly && !m_paintLayer.layoutObject()->isLayoutView() && !m_paintLayer.layoutObject()->isDocumentElement())
        return result;

    PaintLayerPaintingInfo paintingInfo = paintingInfoArg;

    // Ensure our lists are up-to-date.
    m_paintLayer.stackingNode()->updateLayerListsIfNeeded();

    LayoutPoint offsetFromRoot;
    m_paintLayer.convertToLayerCoords(paintingInfo.rootLayer, offsetFromRoot);

    if (m_paintLayer.compositingState() == PaintsIntoOwnBacking)
        offsetFromRoot.move(m_paintLayer.subpixelAccumulation());
    else
        offsetFromRoot.move(paintingInfo.subPixelAccumulation);

    LayoutRect bounds = m_paintLayer.physicalBoundingBox(offsetFromRoot);
    if (!paintingInfo.paintDirtyRect.contains(bounds))
        result = MaybeNotFullyPainted;

    LayoutRect rootRelativeBounds;
    bool rootRelativeBoundsComputed = false;

    if (paintingInfo.ancestorHasClipPathClipping && m_paintLayer.layoutObject()->style()->position() != StaticPosition)
        UseCounter::count(m_paintLayer.layoutObject()->document(), UseCounter::ClipPathOfPositionedElement);

    // These helpers output clip and compositing operations using a RAII pattern. Stack-allocated variables are destructed in the reverse order of construction,
    // so they are nested properly.
    ClipPathHelper clipPathHelper(context, m_paintLayer, paintingInfo, rootRelativeBounds, rootRelativeBoundsComputed, offsetFromRoot, paintFlags);

    Optional<CompositingRecorder> compositingRecorder;
    // Blending operations must be performed only with the nearest ancestor stacking context.
    // Note that there is no need to composite if we're painting the root.
    // FIXME: this should be unified further into PaintLayer::paintsWithTransparency().
    bool shouldCompositeForBlendMode = (!m_paintLayer.layoutObject()->isDocumentElement() || m_paintLayer.layoutObject()->isSVGRoot()) && m_paintLayer.stackingNode()->isStackingContext() && m_paintLayer.hasNonIsolatedDescendantWithBlendMode();
    if (shouldCompositeForBlendMode || m_paintLayer.paintsWithTransparency(paintingInfo.globalPaintFlags())) {
        FloatRect compositingBounds = FloatRect(m_paintLayer.paintingExtent(paintingInfo.rootLayer, paintingInfo.subPixelAccumulation, paintingInfo.globalPaintFlags()));
        compositingRecorder.emplace(*context, *m_paintLayer.layoutObject(),
            WebCoreCompositeToSkiaComposite(CompositeSourceOver, m_paintLayer.layoutObject()->style()->blendMode()),
            m_paintLayer.layoutObject()->opacity(), &compositingBounds);
    }

    PaintLayerPaintingInfo localPaintingInfo(paintingInfo);
    if (m_paintLayer.compositingState() == PaintsIntoOwnBacking)
        localPaintingInfo.subPixelAccumulation = m_paintLayer.subpixelAccumulation();

    PaintLayerFragments layerFragments;
    if (shouldPaintContent || shouldPaintOutline || isPaintingOverlayScrollbars) {
        // Collect the fragments. This will compute the clip rectangles and paint offsets for each layer fragment.
        ClipRectsCacheSlot cacheSlot = (paintFlags & PaintLayerUncachedClipRects) ? UncachedClipRects : PaintingClipRects;
        ShouldRespectOverflowClip respectOverflowClip = shouldRespectOverflowClip(paintFlags, m_paintLayer.layoutObject());
        if (fragmentPolicy == ForceSingleFragment)
            m_paintLayer.appendSingleFragmentIgnoringPagination(layerFragments, localPaintingInfo.rootLayer, localPaintingInfo.paintDirtyRect, cacheSlot, IgnoreOverlayScrollbarSize, respectOverflowClip, &offsetFromRoot, localPaintingInfo.subPixelAccumulation);
        else
            m_paintLayer.collectFragments(layerFragments, localPaintingInfo.rootLayer, localPaintingInfo.paintDirtyRect, cacheSlot, IgnoreOverlayScrollbarSize, respectOverflowClip, &offsetFromRoot, localPaintingInfo.subPixelAccumulation);
        if (shouldPaintContent) {
            // TODO(wangxianzhu): This is for old slow scrolling. Implement similar optimization for slimming paint v2.
            shouldPaintContent = atLeastOneFragmentIntersectsDamageRect(layerFragments, localPaintingInfo, paintFlags, offsetFromRoot);
            if (!shouldPaintContent)
                result = MaybeNotFullyPainted;
        }
    }

    bool selectionOnly = localPaintingInfo.globalPaintFlags() & GlobalPaintSelectionOnly;
    // If this layer's layoutObject is a child of the paintingRoot, we paint unconditionally, which
    // is done by passing a nil paintingRoot down to our layoutObject (as if no paintingRoot was ever set).
    // Else, our layout tree may or may not contain the painting root, so we pass that root along
    // so it will be tested against as we descend through the layoutObjects.
    LayoutObject* paintingRootForLayoutObject = 0;
    if (localPaintingInfo.paintingRoot && !m_paintLayer.layoutObject()->isDescendantOf(localPaintingInfo.paintingRoot))
        paintingRootForLayoutObject = localPaintingInfo.paintingRoot;

    { // Begin block for the lifetime of any filter.
        FilterPainter filterPainter(m_paintLayer, context, offsetFromRoot, layerFragments.isEmpty() ? ClipRect() : layerFragments[0].backgroundRect, localPaintingInfo, paintFlags,
            rootRelativeBounds, rootRelativeBoundsComputed);

        bool shouldPaintBackground = isPaintingCompositedBackground && shouldPaintContent && !selectionOnly;
        bool shouldPaintNegZOrderList = (isPaintingScrollingContent && isPaintingOverflowContents) || (!isPaintingScrollingContent && isPaintingCompositedBackground);
        bool shouldPaintOwnContents = isPaintingCompositedForeground && shouldPaintContent;
        bool shouldPaintNormalFlowAndPosZOrderLists = isPaintingCompositedForeground;
        bool shouldPaintOverlayScrollbars = isPaintingOverlayScrollbars;

        if (shouldPaintBackground) {
            paintBackgroundForFragments(layerFragments, context, paintingInfo.paintDirtyRect,
                localPaintingInfo, paintingRootForLayoutObject, paintFlags);
        }

        if (shouldPaintNegZOrderList) {
            if (paintChildren(NegativeZOrderChildren, context, paintingInfo, paintFlags) == MaybeNotFullyPainted)
                result = MaybeNotFullyPainted;
        }

        if (shouldPaintOwnContents) {
            paintForegroundForFragments(layerFragments, context, paintingInfo.paintDirtyRect,
                localPaintingInfo, paintingRootForLayoutObject, selectionOnly, paintFlags);
        }

        if (shouldPaintOutline)
            paintOutlineForFragments(layerFragments, context, localPaintingInfo, paintingRootForLayoutObject, paintFlags);

        if (shouldPaintNormalFlowAndPosZOrderLists) {
            if (paintChildren(NormalFlowChildren | PositiveZOrderChildren, context, paintingInfo, paintFlags) == MaybeNotFullyPainted)
                result = MaybeNotFullyPainted;
        }

        if (shouldPaintOverlayScrollbars)
            paintOverflowControlsForFragments(layerFragments, context, localPaintingInfo, paintFlags);
    } // FilterPainter block

    bool shouldPaintMask = (paintFlags & PaintLayerPaintingCompositingMaskPhase) && shouldPaintContent && m_paintLayer.layoutObject()->hasMask() && !selectionOnly;
    bool shouldPaintClippingMask = (paintFlags & PaintLayerPaintingChildClippingMaskPhase) && shouldPaintContent && !selectionOnly;

    if (shouldPaintMask)
        paintMaskForFragments(layerFragments, context, localPaintingInfo, paintingRootForLayoutObject, paintFlags);
    if (shouldPaintClippingMask) {
        // Paint the border radius mask for the fragments.
        paintChildClippingMaskForFragments(layerFragments, context, localPaintingInfo, paintingRootForLayoutObject, paintFlags);
    }

    m_paintLayer.setPreviousScrollOffsetAccumulationForPainting(paintingInfoArg.scrollOffsetAccumulation);

    return result;
}
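
// The "RAII pattern" comment above relies on C++ destruction order. Below is
// a self-contained sketch: ScopedOp stands in for helpers such as
// ClipPathHelper, CompositingRecorder and FilterPainter, which emit a begin
// operation on construction and the matching end operation on destruction.
#include <cstdio>

struct ScopedOp {
    const char* name;
    explicit ScopedOp(const char* n) : name(n) { std::printf("begin %s\n", name); }
    ~ScopedOp() { std::printf("end %s\n", name); }
};

void raiiNestingDemo()
{
    ScopedOp clip("clip"); // constructed first, destroyed last
    ScopedOp compositing("compositing");
    // Prints: begin clip, begin compositing, end compositing, end clip.
    // Stack variables are destructed in reverse order of construction, so the
    // begin/end operations nest properly.
}
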
PaintLayerPainter::PaintResult PaintLayerPainter::paintChildren(unsigned childrenToVisit, GraphicsContext* context, const PaintLayerPaintingInfo& paintingInfo, PaintLayerFlags paintFlags)
{
    PaintResult result = FullyPainted;
    if (!m_paintLayer.hasSelfPaintingLayerDescendant())
        return result;

#if ENABLE(ASSERT)
    LayerListMutationDetector mutationChecker(m_paintLayer.stackingNode());
#endif

    PaintLayerStackingNodeIterator iterator(*m_paintLayer.stackingNode(), childrenToVisit);
    PaintLayerStackingNode* child = iterator.next();
    if (!child)
        return result;

    DisplayItem::Type subsequenceType;
    if (childrenToVisit == NegativeZOrderChildren) {
        subsequenceType = DisplayItem::SubsequenceNegativeZOrder;
    } else {
        ASSERT(childrenToVisit == (NormalFlowChildren | PositiveZOrderChildren));
        subsequenceType = DisplayItem::SubsequenceNormalFlowAndPositiveZOrder;
    }

    Optional<SubsequenceRecorder> subsequenceRecorder;
    if (!paintingInfo.disableSubsequenceCache
        && !(paintingInfo.globalPaintFlags() & GlobalPaintFlattenCompositingLayers)
        && !(paintFlags & PaintLayerPaintingReflection)
        && !(paintFlags & PaintLayerPaintingRootBackgroundOnly)) {
        if (!m_paintLayer.needsRepaint()
            && paintingInfo.scrollOffsetAccumulation == m_paintLayer.previousScrollOffsetAccumulationForPainting()
            && SubsequenceRecorder::useCachedSubsequenceIfPossible(*context, m_paintLayer, subsequenceType))
            return result;
        subsequenceRecorder.emplace(*context, m_paintLayer, subsequenceType);
    }

    IntSize scrollOffsetAccumulationForChildren = paintingInfo.scrollOffsetAccumulation;
    if (m_paintLayer.layoutObject()->hasOverflowClip())
        scrollOffsetAccumulationForChildren += m_paintLayer.layoutBox()->scrolledContentOffset();

    bool disableChildSubsequenceCache = !RuntimeEnabledFeatures::slimmingPaintV2Enabled()
        && (m_paintLayer.layoutObject()->hasOverflowClip() || m_paintLayer.layoutObject()->hasClip());

    for (; child; child = iterator.next()) {
        PaintLayerPainter childPainter(*child->layer());
        // If this Layer should paint into its own backing or a grouped backing, that will be done via CompositedLayerMapping::paintContents()
        // and CompositedLayerMapping::doPaintTask().
        if (!childPainter.shouldPaintLayerInSoftwareMode(paintingInfo.globalPaintFlags(), paintFlags))
            continue;

        PaintLayerPaintingInfo childPaintingInfo = paintingInfo;
        childPaintingInfo.disableSubsequenceCache = disableChildSubsequenceCache;
        childPaintingInfo.scrollOffsetAccumulation = scrollOffsetAccumulationForChildren;
        // Rare case: accumulate scroll offset of non-stacking-context ancestors up to m_paintLayer.
        for (PaintLayer* parentLayer = child->layer()->parent(); parentLayer != &m_paintLayer; parentLayer = parentLayer->parent()) {
            if (parentLayer->layoutObject()->hasOverflowClip())
                childPaintingInfo.scrollOffsetAccumulation += parentLayer->layoutBox()->scrolledContentOffset();
        }

        if (childPainter.paintLayer(context, childPaintingInfo, paintFlags) == MaybeNotFullyPainted)
            result = MaybeNotFullyPainted;
    }

    // Set subsequence not cacheable if the bounding box of this layer and descendants is not fully contained
    // by paintRect, because later paintRect changes may expose new contents which will need repainting.
    if (result == MaybeNotFullyPainted && subsequenceRecorder)
        subsequenceRecorder->setUncacheable();

    return result;
}
static ReductionKind
matchVectorSplittingReduction(const ExtractElementInst *ReduxRoot,
                              unsigned &Opcode, Type *&Ty) {
  if (!EnableReduxCost)
    return RK_None;

  // Need to extract the first element.
  ConstantInt *CI = dyn_cast<ConstantInt>(ReduxRoot->getOperand(1));
  unsigned Idx = ~0u;
  if (CI)
    Idx = CI->getZExtValue();
  if (Idx != 0)
    return RK_None;

  auto *RdxStart = dyn_cast<Instruction>(ReduxRoot->getOperand(0));
  if (!RdxStart)
    return RK_None;
  Optional<ReductionData> RD = getReductionData(RdxStart);
  if (!RD)
    return RK_None;

  Type *VecTy = ReduxRoot->getOperand(0)->getType();
  unsigned NumVecElems = VecTy->getVectorNumElements();
  if (!isPowerOf2_32(NumVecElems))
    return RK_None;

  // We look for a sequence of shuffles and adds like the following, matching
  // one (fadd, shufflevector) pair at a time.
  //
  // %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
  //                           <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  // %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
  // %rdx.shuf7 = shufflevector <4 x float> %bin.rdx, <4 x float> undef,
  //                          <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  // %bin.rdx8 = fadd <4 x float> %bin.rdx, %rdx.shuf7
  // %r = extractelement <4 x float> %bin.rdx8, i32 0

  unsigned MaskStart = 1;
  Instruction *RdxOp = RdxStart;
  SmallVector<int, 32> ShuffleMask(NumVecElems, 0); 
  unsigned NumVecElemsRemain = NumVecElems;
  while (NumVecElemsRemain - 1) {
    // Check for the right reduction operation.
    if (!RdxOp)
      return RK_None;
    Optional<ReductionData> RDLevel = getReductionData(RdxOp);
    if (!RDLevel || !RDLevel->hasSameData(*RD))
      return RK_None;

    Value *NextRdxOp;
    ShuffleVectorInst *Shuffle;
    std::tie(NextRdxOp, Shuffle) =
        getShuffleAndOtherOprd(RDLevel->LHS, RDLevel->RHS);

    // Check that the current reduction operation and the shuffle use the same value.
    if (Shuffle == nullptr)
      return RK_None;
    if (Shuffle->getOperand(0) != NextRdxOp)
      return RK_None;

    // Check that the shuffle mask matches.
    for (unsigned j = 0; j != MaskStart; ++j)
      ShuffleMask[j] = MaskStart + j;
    // Fill the rest of the mask with -1 for undef.
    std::fill(&ShuffleMask[MaskStart], ShuffleMask.end(), -1);

    SmallVector<int, 16> Mask = Shuffle->getShuffleMask();
    if (ShuffleMask != Mask)
      return RK_None;

    RdxOp = dyn_cast<Instruction>(NextRdxOp);
    NumVecElemsRemain /= 2;
    MaskStart *= 2;
  }

  Opcode = RD->Opcode;
  Ty = VecTy;
  return RD->Kind;
}
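
// To make the mask check above concrete, here is a standalone sketch that
// reproduces the expected shuffle masks for an 8-lane vector, from the root
// reduction level down, exactly as the loop computes them (MaskStart doubles
// 1 -> 2 -> 4):
#include <cstdio>
#include <vector>

void printSplittingReductionMasks() {
  unsigned NumVecElems = 8; // must be a power of two
  unsigned MaskStart = 1;
  unsigned Remain = NumVecElems;
  while (Remain - 1) {
    std::vector<int> Mask(NumVecElems, -1); // -1 encodes undef lanes
    for (unsigned j = 0; j != MaskStart; ++j)
      Mask[j] = MaskStart + j;
    for (int M : Mask)
      std::printf("%2d ", M);
    std::printf("\n");
    Remain /= 2;
    MaskStart *= 2;
  }
  // Prints:
  //  1 -1 -1 -1 -1 -1 -1 -1
  //  2  3 -1 -1 -1 -1 -1 -1
  //  4  5  6  7 -1 -1 -1 -1
}
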
Example #5
static void TestDict(DictPtr dict) {
  Optional<DictEntry> entry;
  entry = dict->MatchPrefix("BYVoid");
  AssertTrue(!entry.IsNull());
  AssertEquals("BYVoid", entry.Get().key);
  AssertEquals("byv", entry.Get().GetDefault());

  entry = dict->MatchPrefix("BYVoid123");
  AssertTrue(!entry.IsNull());
  AssertEquals("BYVoid", entry.Get().key);
  AssertEquals("byv", entry.Get().GetDefault());

  entry = dict->MatchPrefix(utf8("積羽沉舟"));
  AssertTrue(!entry.IsNull());
  AssertEquals(utf8("積羽沉舟"), entry.Get().key);
  AssertEquals(utf8("羣輕折軸"), entry.Get().GetDefault());

  entry = dict->MatchPrefix("Unknown");
  AssertTrue(entry.IsNull());

  const vector<DictEntry> matches = dict->MatchAllPrefixes(utf8("清華大學計算機系"));
  AssertEquals(3, matches.size());
  AssertEquals(utf8("清華大學"), matches.at(0).key);
  AssertEquals("TsinghuaUniversity", matches.at(0).GetDefault());
  AssertEquals(utf8("清華"), matches.at(1).key);
  AssertEquals("Tsinghua", matches.at(1).GetDefault());
  AssertEquals(utf8("清"), matches.at(2).key);
  AssertEquals("Tsing", matches.at(2).GetDefault());
}
Example #6
/// Try to classify the dynamic-cast relationship between two types.
DynamicCastFeasibility
swift::classifyDynamicCast(Module *M,
                           CanType source,
                           CanType target,
                           bool isSourceTypeExact,
                           bool isWholeModuleOpts) {
  if (source == target) return DynamicCastFeasibility::WillSucceed;

  auto sourceObject = source.getAnyOptionalObjectType();
  auto targetObject = target.getAnyOptionalObjectType();

  // A common level of optionality doesn't affect the feasibility.
  if (sourceObject && targetObject) {
    return classifyDynamicCast(M, sourceObject, targetObject);

  // Nor does casting to a more optional type.
  } else if (targetObject) {
    return classifyDynamicCast(M, source, targetObject,
                               /* isSourceTypeExact */ false,
                               isWholeModuleOpts);

  // Casting to a less-optional type can always fail.
  } else if (sourceObject) {
    return weakenSuccess(classifyDynamicCast(M, sourceObject, target,
                                             /* isSourceTypeExact */ false,
                                             isWholeModuleOpts));
  }
  assert(!sourceObject && !targetObject);

  // Assume that casts to or from existential types or involving
  // dependent types can always succeed.  This is over-conservative.
  if (source->hasArchetype() || source.isExistentialType() ||
      target->hasArchetype() || target.isExistentialType()) {

    auto *SourceNominalTy = source.getAnyNominal();

    // Check conversions from non-protocol types into protocol types.
    if (!source.isExistentialType() &&
        SourceNominalTy &&
        target.isExistentialType())
      return classifyDynamicCastToProtocol(source, target, isWholeModuleOpts);

    // Casts from class existential into a non-class can never succeed.
    if (source->isClassExistentialType() &&
        !target.isAnyExistentialType() &&
        !target.getClassOrBoundGenericClass() &&
        !isa<ArchetypeType>(target) &&
        !mayBridgeToObjectiveC(M, target)) {
      assert((target.getEnumOrBoundGenericEnum() ||
              target.getStructOrBoundGenericStruct() ||
              isa<TupleType>(target) ||
              isa<SILFunctionType>(target) ||
              isa<FunctionType>(target) ||
              isa<MetatypeType>(target)) &&
             "Target should be an enum, struct, tuple, metatype or function type");
      return DynamicCastFeasibility::WillFail;
    }

    return DynamicCastFeasibility::MaySucceed;
  }

  // Metatype casts.
  if (auto sourceMetatype = dyn_cast<AnyMetatypeType>(source)) {
    auto targetMetatype = dyn_cast<AnyMetatypeType>(target);
    if (!targetMetatype) return DynamicCastFeasibility::WillFail;

    source = sourceMetatype.getInstanceType();
    target = targetMetatype.getInstanceType();

    if (source == target &&
        targetMetatype.isAnyExistentialType() ==
            sourceMetatype.isAnyExistentialType())
      return DynamicCastFeasibility::WillSucceed;

    if (targetMetatype.isAnyExistentialType() &&
        (isa<ProtocolType>(target) || isa<ProtocolCompositionType>(target))) {
      auto Feasibility = classifyDynamicCastToProtocol(source,
                                                       target,
                                                       isWholeModuleOpts);
      // Cast from existential metatype to existential metatype may still
      // succeed, even if we cannot prove anything statically.
      if (Feasibility != DynamicCastFeasibility::WillFail ||
          !sourceMetatype.isAnyExistentialType())
        return Feasibility;
    }

    // If isSourceTypeExact is true, we know we are casting the result of a
    // MetatypeInst instruction.
    if (isSourceTypeExact) {
      // If source or target are existentials, then it can be cast
      // successfully only into itself.
      if ((target.isAnyExistentialType() || source.isAnyExistentialType()) &&
          target != source)
        return DynamicCastFeasibility::WillFail;
    }

    // Casts from class existential metatype into a concrete non-class metatype
    // can never succeed.
    if (source->isClassExistentialType() &&
        !target.isAnyExistentialType() &&
        !target.getClassOrBoundGenericClass())
      return DynamicCastFeasibility::WillFail;

    // TODO: prove that some conversions to existential metatype will
    // obviously succeed/fail.
    // TODO: prove that some conversions from class existential metatype
    // to a concrete non-class metatype will obviously fail.
    // TODO: class metatype to/from AnyObject
    // TODO: protocol concrete metatype to/from ObjCProtocol
    if (isa<ExistentialMetatypeType>(sourceMetatype) ||
        isa<ExistentialMetatypeType>(targetMetatype))
      return (getAnyMetatypeDepth(source) == getAnyMetatypeDepth(target)
              ? DynamicCastFeasibility::MaySucceed
              : DynamicCastFeasibility::WillFail);

    // If both metatypes are class metatypes, check if classes can be
    // cast.
    if (source.getClassOrBoundGenericClass() &&
        target.getClassOrBoundGenericClass())
      return classifyDynamicCast(M, source, target, false, isWholeModuleOpts);

    // Different structs cannot be cast to each other.
    if (source.getStructOrBoundGenericStruct() &&
        target.getStructOrBoundGenericStruct() &&
        source != target)
      return DynamicCastFeasibility::WillFail;

    // Different enums cannot be cast to each other.
    if (source.getEnumOrBoundGenericEnum() &&
        target.getEnumOrBoundGenericEnum() &&
        source != target)
      return DynamicCastFeasibility::WillFail;

    // If we don't know any better, assume that the cast may succeed.
    return DynamicCastFeasibility::MaySucceed;
  }
  
  // Function casts.
  if (auto sourceFunction = dyn_cast<FunctionType>(source)) {
    if (auto targetFunction = dyn_cast<FunctionType>(target)) {
      // A function cast can succeed if the function types can be identical,
      // or if the target type is throwier than the original.

      // A non-throwing source function can be cast to a throwing target type,
      // but not vice versa.
      if (sourceFunction->throws() && !targetFunction->throws())
        return DynamicCastFeasibility::WillFail;
      
      // A noreturn source function can be cast to a returning target type,
      // but not vice versa.
      // (noreturn isn't really reified at runtime though.)
      if (targetFunction->isNoReturn() && !sourceFunction->isNoReturn())
        return DynamicCastFeasibility::WillFail;
      
      // The cast can't change the representation at runtime.
      if (targetFunction->getRepresentation()
            != sourceFunction->getRepresentation())
        return DynamicCastFeasibility::WillFail;
      
      if (sourceFunction.getInput() == targetFunction.getInput()
          && sourceFunction.getResult() == targetFunction.getResult())
        return DynamicCastFeasibility::WillSucceed;

      auto isSubstitutable = [](CanType a, CanType b) -> bool {
        // FIXME: Unnecessarily conservative; should structurally check for
        // substitutability.
        return a == b || a->hasArchetype() || b->hasArchetype();
      };
    
      if (isSubstitutable(sourceFunction.getInput(), targetFunction.getInput())
          && isSubstitutable(sourceFunction.getResult(),
                             targetFunction.getResult()))
        return DynamicCastFeasibility::MaySucceed;
      
      return DynamicCastFeasibility::WillFail;
    }
  }

  // Class casts.
  auto sourceClass = source.getClassOrBoundGenericClass();
  auto targetClass = target.getClassOrBoundGenericClass();
  if (sourceClass) {
    if (targetClass) {
      if (target->isSuperclassOf(source, nullptr))
        return DynamicCastFeasibility::WillSucceed;
      if (source->isSuperclassOf(target, nullptr))
        return DynamicCastFeasibility::MaySucceed;

      // FIXME: bridged types, e.g. CF <-> NS (but not for metatypes).
      return DynamicCastFeasibility::WillFail;
    }

    // In the Objective-C runtime, class metatypes are also class instances.
    // The cast may succeed if the target type can be inhabited by a class
    // metatype.
    // TODO: Narrow this to the sourceClass being exactly NSObject.
    if (M->getASTContext().LangOpts.EnableObjCInterop) {
      if (auto targetMeta = dyn_cast<MetatypeType>(target)) {
        if (isa<ArchetypeType>(targetMeta.getInstanceType())
            || targetMeta.getInstanceType()->mayHaveSuperclass())
          return DynamicCastFeasibility::MaySucceed;
      } else if (isa<ExistentialMetatypeType>(target)) {
        return DynamicCastFeasibility::MaySucceed;
      }
    }
  }

  // If the source is not existential, an archetype, or (under the ObjC runtime)
  // a class, and the destination is a metatype, there is no way the cast can
  // succeed.
  if (target->is<AnyMetatypeType>()) return DynamicCastFeasibility::WillFail;

  // FIXME: tuple conversions?

  // FIXME: Be more careful with bridging conversions from
  // NSArray, NSDictionary and NSSet, as they may fail.

  // Check if there might be a bridging conversion.
  if (source->isBridgeableObjectType() && mayBridgeToObjectiveC(M, target)) {
    // Try to get the ObjC type which is bridged to target type.
    assert(!target.isAnyExistentialType());
    Optional<Type> ObjCTy = M->getASTContext().getBridgedToObjC(
        M, target, nullptr);
    if (ObjCTy && ObjCTy.getValue()) {
      // If the bridged ObjC type is known, check if
      // source type can be cast into it.
      return classifyDynamicCast(M, source,
          ObjCTy.getValue().getCanonicalTypeOrNull(),
          /* isSourceTypeExact */ false, isWholeModuleOpts);
    }
    return DynamicCastFeasibility::MaySucceed;
  }
  
  if (target->isBridgeableObjectType() && mayBridgeToObjectiveC(M, source)) {
    // Try to get the ObjC type which is bridged to source type.
    assert(!source.isAnyExistentialType());
    Optional<Type> ObjCTy = M->getASTContext().getBridgedToObjC(
        M, source, nullptr);
    if (ObjCTy && ObjCTy.getValue()) {
      // If the bridged ObjC type is known, check if
      // this type can be cast into target type.
      return classifyDynamicCast(M,
          ObjCTy.getValue().getCanonicalTypeOrNull(),
          target,
          /* isSourceTypeExact */ false, isWholeModuleOpts);
    }
    return DynamicCastFeasibility::MaySucceed;
  }

  // Check if it is a cast between bridged error types.
  if (isErrorType(M, source) && isErrorType(M, target)) {
    // TODO: Cast to NSError succeeds always.
    return DynamicCastFeasibility::MaySucceed;
  }

  return DynamicCastFeasibility::WillFail;
}
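
// As a rough illustration of how a caller might act on the three outcomes
// above; the enum and the lowering actions here are a sketch, not the actual
// SIL optimizer API:
enum class Feasibility { WillSucceed, MaySucceed, WillFail };

const char* lowerCheckedCast(Feasibility F) {
  switch (F) {
  case Feasibility::WillSucceed:
    return "replace the checked cast with an unconditional cast";
  case Feasibility::MaySucceed:
    return "keep the runtime check";
  case Feasibility::WillFail:
    return "fold the cast into a trap and take the failure branch";
  }
  return "";
}
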
Example #7
static Reloc::Model getEffectiveRelocModel(CodeModel::Model CM,
                                           Optional<Reloc::Model> RM) {
  if (!RM.hasValue() || CM == CodeModel::JITDefault)
    return Reloc::Static;
  return *RM;
}
static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
  if (!RM.hasValue())
    return Reloc::Static;
  return *RM;
}
void
HTMLCanvasElement::ToBlob(JSContext* aCx,
                          FileCallback& aCallback,
                          const nsAString& aType,
                          const Optional<JS::Handle<JS::Value> >& aParams,
                          ErrorResult& aRv)
{
  // do a trust check if this is a write-only canvas
  if (mWriteOnly && !nsContentUtils::IsCallerChrome()) {
    aRv.Throw(NS_ERROR_DOM_SECURITY_ERR);
    return;
  }

  nsAutoString type;
  aRv = nsContentUtils::ASCIIToLower(aType, type);
  if (aRv.Failed()) {
    return;
  }

  JS::Value encoderOptions = aParams.WasPassed()
                             ? aParams.Value()
                             : JS::UndefinedValue();

  nsAutoString params;
  bool usingCustomParseOptions;
  aRv = ParseParams(aCx, type, encoderOptions, params, &usingCustomParseOptions);
  if (aRv.Failed()) {
    return;
  }

#ifdef DEBUG
  if (mCurrentContext) {
    // We disallow canvases of width or height zero, and set them to 1, so
    // we will have a discrepancy with the sizes of the canvas and the context.
    // That discrepancy is OK, the rest are not.
    nsIntSize elementSize = GetWidthHeight();
    MOZ_ASSERT(elementSize.width == mCurrentContext->GetWidth() ||
               (elementSize.width == 0 && mCurrentContext->GetWidth() == 1));
    MOZ_ASSERT(elementSize.height == mCurrentContext->GetHeight() ||
               (elementSize.height == 0 && mCurrentContext->GetHeight() == 1));
  }
#endif

  nsCOMPtr<nsIScriptContext> scriptContext =
    GetScriptContextFromJSContext(nsContentUtils::GetCurrentJSContext());

  uint8_t* imageBuffer = nullptr;
  int32_t format = 0;
  if (mCurrentContext) {
    mCurrentContext->GetImageBuffer(&imageBuffer, &format);
  }

  aRv = ImageEncoder::ExtractDataAsync(type,
                                       params,
                                       usingCustomParseOptions,
                                       imageBuffer,
                                       format,
                                       GetSize(),
                                       mCurrentContext,
                                       scriptContext,
                                       aCallback);
}
void NameBinder::addImport(
    SmallVectorImpl<std::pair<ImportedModule, ImportOptions>> &imports,
    ImportDecl *ID) {
  if (ID->getModulePath().front().first == SF.getParentModule()->getName() &&
      ID->getModulePath().size() == 1 && !shouldImportSelfImportClang(ID, SF)) {
    // If the imported module name is the same as the current module,
    // produce a diagnostic.
    StringRef filename = llvm::sys::path::filename(SF.getFilename());
    if (filename.empty())
      Context.Diags.diagnose(ID, diag::sema_import_current_module,
                             ID->getModulePath().front().first);
    else
      Context.Diags.diagnose(ID, diag::sema_import_current_module_with_file,
                             filename, ID->getModulePath().front().first);
    ID->setModule(SF.getParentModule());
    return;
  }

  Module *M = getModule(ID->getModulePath());
  if (!M) {
    SmallString<64> modulePathStr;
    interleave(ID->getModulePath(),
               [&](ImportDecl::AccessPathElement elem) {
                 modulePathStr += elem.first.str();
               },
               [&] { modulePathStr += "."; });

    auto diagKind = diag::sema_no_import;
    if (SF.Kind == SourceFileKind::REPL || Context.LangOpts.DebuggerSupport)
      diagKind = diag::sema_no_import_repl;
    diagnose(ID->getLoc(), diagKind, modulePathStr);

    if (Context.SearchPathOpts.SDKPath.empty() &&
        llvm::Triple(llvm::sys::getProcessTriple()).isMacOSX()) {
      diagnose(SourceLoc(), diag::sema_no_import_no_sdk);
      diagnose(SourceLoc(), diag::sema_no_import_no_sdk_xcrun);
    }
    return;
  }

  ID->setModule(M);

  Module *topLevelModule;
  if (ID->getModulePath().size() == 1) {
    topLevelModule = M;
  } else {
    // If we imported a submodule, import the top-level module as well.
    Identifier topLevelName = ID->getModulePath().front().first;
    topLevelModule = Context.getLoadedModule(topLevelName);
    assert(topLevelModule && "top-level module missing");
  }

  auto *testableAttr = ID->getAttrs().getAttribute<TestableAttr>();
  if (testableAttr && !topLevelModule->isTestingEnabled() &&
      Context.LangOpts.EnableTestableAttrRequiresTestableModule) {
    diagnose(ID->getModulePath().front().second, diag::module_not_testable,
             topLevelModule->getName());
    testableAttr->setInvalid();
  }

  ImportOptions options;
  if (ID->isExported())
    options |= SourceFile::ImportFlags::Exported;
  if (testableAttr)
    options |= SourceFile::ImportFlags::Testable;
  imports.push_back({ { ID->getDeclPath(), M }, options });

  if (topLevelModule != M)
    imports.push_back({ { ID->getDeclPath(), topLevelModule }, options });

  if (ID->getImportKind() != ImportKind::Module) {
    // If we're importing a specific decl, validate the import kind.
    using namespace namelookup;
    auto declPath = ID->getDeclPath();

    // FIXME: Doesn't handle scoped testable imports correctly.
    assert(declPath.size() == 1 && "can't handle sub-decl imports");
    SmallVector<ValueDecl *, 8> decls;
    lookupInModule(topLevelModule, declPath, declPath.front().first, decls,
                   NLKind::QualifiedLookup, ResolutionKind::Overloadable,
                   /*resolver*/nullptr, &SF);

    if (decls.empty()) {
      diagnose(ID, diag::no_decl_in_module)
        .highlight(SourceRange(declPath.front().second,
                               declPath.back().second));
      return;
    }

    ID->setDecls(Context.AllocateCopy(decls));

    Optional<ImportKind> actualKind = ImportDecl::findBestImportKind(decls);
    if (!actualKind.hasValue()) {
      // FIXME: print entire module name?
      diagnose(ID, diag::ambiguous_decl_in_module,
               declPath.front().first, M->getName());
      for (auto next : decls)
        diagnose(next, diag::found_candidate);

    } else if (!isCompatibleImportKind(ID->getImportKind(), *actualKind)) {
      diagnose(ID, diag::imported_decl_is_wrong_kind,
               declPath.front().first,
               getImportKindString(ID->getImportKind()),
               static_cast<unsigned>(*actualKind))
        .fixItReplace(SourceRange(ID->getKindLoc()),
                      getImportKindString(*actualKind));

      if (decls.size() == 1)
        diagnose(decls.front(), diag::decl_declared_here,
                 decls.front()->getFullName());
    }
  }
}
Example #11
/// Utility function for reservePreviousStackSlotForValue. Tries to find the
/// stack slot index to which we have spilled the value for previous
/// statepoints. LookUpDepth specifies the maximum DFS depth this function is
/// allowed to look.
static Optional<int> findPreviousSpillSlot(const Value *Val,
                                           SelectionDAGBuilder &Builder,
                                           int LookUpDepth) {
  // Cannot look any further; give up now.
  if (LookUpDepth <= 0)
    return None;

  // Spill location is known for gc relocates
  if (const auto *Relocate = dyn_cast<GCRelocateInst>(Val)) {
    const auto &SpillMap =
        Builder.FuncInfo.StatepointSpillMaps[Relocate->getStatepoint()];

    auto It = SpillMap.find(Relocate->getDerivedPtr());
    if (It == SpillMap.end())
      return None;

    return It->second;
  }

  // Look through bitcast instructions.
  if (const BitCastInst *Cast = dyn_cast<BitCastInst>(Val))
    return findPreviousSpillSlot(Cast->getOperand(0), Builder, LookUpDepth - 1);

  // Look through phi nodes.
  // All incoming values should have the same known stack slot; otherwise the
  // result is unknown.
  if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
    Optional<int> MergedResult = None;

    for (auto &IncomingValue : Phi->incoming_values()) {
      Optional<int> SpillSlot =
          findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
      if (!SpillSlot.hasValue())
        return None;

      if (MergedResult.hasValue() && *MergedResult != *SpillSlot)
        return None;

      MergedResult = SpillSlot;
    }
    return MergedResult;
  }

  // TODO: We can do better for PHI nodes. In cases like this:
  //   ptr = phi(relocated_pointer, not_relocated_pointer)
  //   statepoint(ptr)
  // We will return that stack slot for ptr is unknown. And later we might
  // assign different stack slots for ptr and relocated_pointer. This limits
  // llvm's ability to remove redundant stores.
  // Unfortunately it's hard to accomplish in current infrastructure.
  // We use this function to eliminate spill store completely, while
  // in example we still need to emit store, but instead of any location
  // we need to use special "preferred" location.

  // TODO: handle simple updates.  If a value is modified and the original
  // value is no longer live, it would be nice to put the modified value in the
  // same slot.  This allows folding of the memory accesses for some
  // instructions types (like an increment).
  //   statepoint (i)
  //   i1 = i+1
  //   statepoint (i1)
  // However we need to be careful for cases like this:
  //   statepoint(i)
  //   i1 = i+1
  //   statepoint(i, i1)
  // Here we want to reserve spill slot for 'i', but not for 'i+1'. If we just
  // put handling of simple modifications in this function like it's done
  // for bitcasts we might end up reserving i's slot for 'i+1' because order in
  // which we visit values is unspecified.

  // Don't know any information about this instruction
  return None;
}
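
// The PHI case above merges per-incoming-value results: the slot is known
// only if every incoming value agrees on the same one. A self-contained
// sketch of that merge with std::optional (mergeSpillSlots is a hypothetical
// name, not an LLVM API):
#include <cassert>
#include <optional>
#include <vector>

std::optional<int>
mergeSpillSlots(const std::vector<std::optional<int>> &Incoming) {
  std::optional<int> Merged;
  for (const std::optional<int> &Slot : Incoming) {
    if (!Slot)
      return std::nullopt; // an unknown incoming value poisons the result
    if (Merged && *Merged != *Slot)
      return std::nullopt; // conflicting slots -> result unknown
    Merged = Slot;
  }
  return Merged;
}

void mergeSpillSlotsDemo() {
  using O = std::optional<int>;
  assert(mergeSpillSlots({O(3), O(3)}) == O(3));
  assert(!mergeSpillSlots({O(3), O(4)}));         // conflict
  assert(!mergeSpillSlots({O(3), std::nullopt})); // unknown input
}
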
void CFNumberChecker::checkPreStmt(const CallExpr *CE,
                                         CheckerContext &C) const {
  ProgramStateRef state = C.getState();
  const FunctionDecl *FD = C.getCalleeDecl(CE);
  if (!FD)
    return;

  ASTContext &Ctx = C.getASTContext();
  if (!ICreate) {
    ICreate = &Ctx.Idents.get("CFNumberCreate");
    IGetValue = &Ctx.Idents.get("CFNumberGetValue");
  }
  if (!(FD->getIdentifier() == ICreate || FD->getIdentifier() == IGetValue) ||
      CE->getNumArgs() != 3)
    return;

  // Get the value of the "theType" argument.
  const LocationContext *LCtx = C.getLocationContext();
  SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);

  // FIXME: We really should allow ranges of valid theType values, and
  //   bifurcate the state appropriately.
  Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
  if (!V)
    return;

  uint64_t NumberKind = V->getValue().getLimitedValue();
  Optional<uint64_t> OptCFNumberSize = GetCFNumberSize(Ctx, NumberKind);

  // FIXME: In some cases we can emit an error.
  if (!OptCFNumberSize)
    return;

  uint64_t CFNumberSize = *OptCFNumberSize;

  // Look at the value of the integer being passed by reference.  Essentially
  // we want to catch cases where the value passed in is not equal to the
  // size of the type being created.
  SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);

  // FIXME: Eventually we should handle arbitrary locations.  We can do this
  //  by having an enhanced memory model that does low-level typing.
  Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
  if (!LV)
    return;

  const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
  if (!R)
    return;

  QualType T = Ctx.getCanonicalType(R->getValueType());

  // FIXME: If the pointee isn't an integer type, should we flag a warning?
  //  People can do weird stuff with pointers.

  if (!T->isIntegralOrEnumerationType())
    return;

  uint64_t PrimitiveTypeSize = Ctx.getTypeSize(T);

  if (PrimitiveTypeSize == CFNumberSize)
    return;

  // FIXME: We can actually create an abstract "CFNumber" object that has
  //  the bits initialized to the provided values.
  ExplodedNode *N = C.generateNonFatalErrorNode();
  if (N) {
    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);
    bool isCreate = (FD->getIdentifier() == ICreate);

    if (isCreate) {
      os << (PrimitiveTypeSize == 8 ? "An " : "A ")
         << PrimitiveTypeSize << "-bit integer is used to initialize a "
         << "CFNumber object that represents "
         << (CFNumberSize == 8 ? "an " : "a ")
         << CFNumberSize << "-bit integer; ";
    } else {
      os << "A CFNumber object that represents "
         << (CFNumberSize == 8 ? "an " : "a ")
         << CFNumberSize << "-bit integer is used to initialize "
         << (PrimitiveTypeSize == 8 ? "an " : "a ")
         << PrimitiveTypeSize << "-bit integer; ";
    }

    if (PrimitiveTypeSize < CFNumberSize)
      os << (CFNumberSize - PrimitiveTypeSize)
         << " bits of the CFNumber value will "
         << (isCreate ? "be garbage." : "overwrite adjacent storage.");
    else
      os << (PrimitiveTypeSize - CFNumberSize)
         << " bits of the integer value will be "
         << (isCreate ? "lost." : "garbage.");

    if (!BT)
      BT.reset(new APIMisuse(this, "Bad use of CFNumber APIs"));

    auto report = llvm::make_unique<BugReport>(*BT, os.str(), N);
    report->addRange(CE->getArg(2)->getSourceRange());
    C.emitReport(std::move(report));
  }
}
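
// For reference, a minimal instance of the misuse this checker reports,
// using the CoreFoundation C API (compiles on Apple platforms):
#include <CoreFoundation/CoreFoundation.h>

void cfNumberMisuse(void) {
  int x = 42; // 32-bit integer
  // warning: A 32-bit integer is used to initialize a CFNumber object that
  // represents a 64-bit integer; 32 bits of the CFNumber value will be
  // garbage.
  CFNumberRef N = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, &x);

  int y = 0;
  // warning: A CFNumber object that represents a 64-bit integer is used to
  // initialize a 32-bit integer; 32 bits of the CFNumber value will
  // overwrite adjacent storage.
  CFNumberGetValue(N, kCFNumberSInt64Type, &y);
  CFRelease(N);
}
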
Example #13
void Messaging::onTelegram(const Optional<CoAP::Telegram>& telegram) {
  auto message = messageFromTelegram(telegram);
  if (message)
    onMessage(message.value(), telegram.value().getIP(),
              telegram.value().getPort());
}
SILAnalysis::InvalidationKind
processFunction(SILFunction &F, bool EnableDiagnostics,
                unsigned AssertConfiguration) {
  DEBUG(llvm::dbgs() << "*** ConstPropagation processing: " << F.getName()
        << "\n");

  // This is the list of traits that this transformation might preserve.
  bool InvalidateBranches = false;
  bool InvalidateCalls = false;
  bool InvalidateInstructions = false;

  // Should we replace calls to assert_configuration with the assert
  // configuration?
  bool InstantiateAssertConfiguration =
      (AssertConfiguration != SILOptions::DisableReplacement);

  // The list of instructions whose evaluation resulted in error or warning.
  // This is used to avoid duplicate error reporting in case we reach the same
  // instruction from different entry points in the WorkList.
  llvm::DenseSet<SILInstruction *> ErrorSet;

  // The worklist of the constants that could be folded into their users.
  llvm::SetVector<SILInstruction *> WorkList;
  initializeWorklist(F, InstantiateAssertConfiguration, WorkList);

  llvm::SetVector<SILInstruction *> FoldedUsers;
  CastOptimizer CastOpt(
      [&](SILInstruction *I, ValueBase *V) { /* ReplaceInstUsesAction */

        InvalidateInstructions = true;
        I->replaceAllUsesWith(V);
      },
      [&](SILInstruction *I) { /* EraseAction */
        auto *TI = dyn_cast<TermInst>(I);

        if (TI) {
          // Invalidate analysis information related to branches. Replacing
          // unconditional_check_branch type instructions by a trap will also
          // invalidate branches/the CFG.
          InvalidateBranches = true;
        }

        InvalidateInstructions = true;

        WorkList.remove(I);
        I->eraseFromParent();
      });

  while (!WorkList.empty()) {
    SILInstruction *I = WorkList.pop_back_val();
    assert(I->getParent() && "SILInstruction must have parent.");

    DEBUG(llvm::dbgs() << "Visiting: " << *I);

    // Replace assert_configuration instructions by their constant value. We
    // want them to be replaced even if we can't fully propagate the constant.
    if (InstantiateAssertConfiguration)
      if (auto *BI = dyn_cast<BuiltinInst>(I)) {
        if (isApplyOfBuiltin(*BI, BuiltinValueKind::AssertConf)) {
          // Instantiate the constant.
          SILBuilderWithScope B(BI);
          auto AssertConfInt = B.createIntegerLiteral(
            BI->getLoc(), BI->getType(), AssertConfiguration);
          BI->replaceAllUsesWith(AssertConfInt);
          // Schedule users for constant folding.
          WorkList.insert(AssertConfInt);
          // Delete the call.
          recursivelyDeleteTriviallyDeadInstructions(BI);

          InvalidateInstructions = true;
          continue;
        }

        // Kill calls to conditionallyUnreachable if we've folded assert
        // configuration calls.
        if (isApplyOfBuiltin(*BI, BuiltinValueKind::CondUnreachable)) {
          assert(BI->use_empty() && "use of conditionallyUnreachable?!");
          recursivelyDeleteTriviallyDeadInstructions(BI, /*force*/ true);
          InvalidateInstructions = true;
          continue;
        }
      }

    if (auto *AI = dyn_cast<ApplyInst>(I)) {
      // Apply may only come from a string.concat invocation.
      if (constantFoldStringConcatenation(AI, WorkList)) {
        // Invalidate all analysis that's related to the call graph.
        InvalidateInstructions = true;
      }

      continue;
    }

    if (isa<CheckedCastBranchInst>(I) || isa<CheckedCastAddrBranchInst>(I) ||
        isa<UnconditionalCheckedCastInst>(I) ||
        isa<UnconditionalCheckedCastAddrInst>(I)) {
      // Try to perform cast optimizations. Invalidation is handled by a
      // callback inside the cast optimizer.
      ValueBase *Result = nullptr;
      switch(I->getKind()) {
      default:
        llvm_unreachable("Unexpected instruction for cast optimizations");
      case ValueKind::CheckedCastBranchInst:
        Result = CastOpt.simplifyCheckedCastBranchInst(cast<CheckedCastBranchInst>(I));
        break;
      case ValueKind::CheckedCastAddrBranchInst:
        Result = CastOpt.simplifyCheckedCastAddrBranchInst(cast<CheckedCastAddrBranchInst>(I));
        break;
      case ValueKind::UnconditionalCheckedCastInst:
        Result = CastOpt.optimizeUnconditionalCheckedCastInst(cast<UnconditionalCheckedCastInst>(I));
        break;
      case ValueKind::UnconditionalCheckedCastAddrInst:
        Result = CastOpt.optimizeUnconditionalCheckedCastAddrInst(cast<UnconditionalCheckedCastAddrInst>(I));
        break;
      }

      if (Result) {
        if (isa<CheckedCastBranchInst>(Result) ||
            isa<CheckedCastAddrBranchInst>(Result) ||
            isa<UnconditionalCheckedCastInst>(Result) ||
            isa<UnconditionalCheckedCastAddrInst>(Result))
          WorkList.insert(cast<SILInstruction>(Result));
      }
      continue;
    }


    // Go through all users of the constant and try to fold them.
    FoldedUsers.clear();
    for (auto Use : I->getUses()) {
      SILInstruction *User = Use->getUser();
      DEBUG(llvm::dbgs() << "    User: " << *User);

      // It is possible that we had processed this user already. Do not try
      // to fold it again if we had previously produced an error while folding
      // it.  It is not always possible to fold an instruction in case of error.
      if (ErrorSet.count(User))
        continue;

      // Some constant users may indirectly cause folding of their users.
      if (isa<StructInst>(User) || isa<TupleInst>(User)) {
        WorkList.insert(User);
        continue;
      }

      // Always consider cond_fail instructions as potential for DCE.  If the
      // expression feeding them is false, they are dead.  We can't handle this
      // as part of the constant folding logic, because there is no value
      // they can produce (other than empty tuple, which is wasteful).
      if (isa<CondFailInst>(User))
        FoldedUsers.insert(User);

      // Initialize ResultsInError as a None optional.
      //
      // We are essentially using this optional to represent 3 states: true,
      // false, and n/a.
      Optional<bool> ResultsInError;

      // If we are asked to emit diagnostics, override ResultsInError with a
      // Some optional initialized to false.
      if (EnableDiagnostics)
        ResultsInError = false;

      // Try to fold the user. If ResultsInError is None, we do not emit any
      // diagnostics. If ResultsInError is some, we use it as our return value.
      SILValue C = constantFoldInstruction(*User, ResultsInError);

      // If we did not pass in a None and the optional is set to true, add the
      // user to our error set.
      if (ResultsInError.hasValue() && ResultsInError.getValue())
        ErrorSet.insert(User);

      // We failed to constant propagate... continue...
      if (!C)
        continue;

      // Ok, we have succeeded. Add user to the FoldedUsers list and perform the
      // necessary cleanups, RAUWs, etc.
      FoldedUsers.insert(User);
      ++NumInstFolded;

      InvalidateInstructions = true;

      // If the constant produced a tuple, be smarter than RAUW: explicitly nuke
      // any tuple_extract instructions using the apply.  This is a common case
      // for functions returning multiple values.
      if (auto *TI = dyn_cast<TupleInst>(C)) {
        for (auto UI = User->use_begin(), E = User->use_end(); UI != E;) {
          Operand *O = *UI++;

          // If the user is a tuple_extract, just substitute the right value in.
          if (auto *TEI = dyn_cast<TupleExtractInst>(O->getUser())) {
            SILValue NewVal = TI->getOperand(TEI->getFieldNo());
            TEI->replaceAllUsesWith(NewVal);
            TEI->dropAllReferences();
            FoldedUsers.insert(TEI);
            if (auto *Inst = dyn_cast<SILInstruction>(NewVal))
              WorkList.insert(Inst);
          }
        }

        if (User->use_empty())
          FoldedUsers.insert(TI);
      }


      // We were able to fold, so all users should use the new folded value.
      User->replaceAllUsesWith(C);

      // The new constant could be further folded now, add it to the worklist.
      if (auto *Inst = dyn_cast<SILInstruction>(C))
        WorkList.insert(Inst);
    }

    // Eagerly DCE. We do this after visiting all users to ensure we don't
    // invalidate the uses iterator.
    auto UserArray = ArrayRef<SILInstruction *>(&*FoldedUsers.begin(),
                                                FoldedUsers.size());
    if (!UserArray.empty()) {
      InvalidateInstructions = true;
    }

    recursivelyDeleteTriviallyDeadInstructions(UserArray, false,
                                               [&](SILInstruction *DeadI) {
                                                 WorkList.remove(DeadI);
                                               });
  }

  // TODO: refactor this code outside of the method. Passes should not merge
  // invalidation kinds themselves.
  using InvalidationKind = SILAnalysis::InvalidationKind;

  unsigned Inv = InvalidationKind::Nothing;
  if (InvalidateInstructions) Inv |= (unsigned) InvalidationKind::Instructions;
  if (InvalidateCalls)        Inv |= (unsigned) InvalidationKind::Calls;
  if (InvalidateBranches)     Inv |= (unsigned) InvalidationKind::Branches;
  return InvalidationKind(Inv);
}
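
// The ResultsInError parameter above is a tri-state Optional<bool>: None
// means diagnostics are disabled, false means enabled with no error yet, and
// true means an error was diagnosed. A self-contained sketch of the same
// convention with std::optional (foldDivide is a hypothetical folder):
#include <cassert>
#include <optional>

int foldDivide(int A, int B, std::optional<bool> &ResultsInError) {
  if (B == 0) {
    if (ResultsInError.has_value())
      *ResultsInError = true; // record the error only when asked to
    return 0;
  }
  return A / B;
}

void triStateDemo() {
  std::optional<bool> Diag; // diagnostics disabled
  foldDivide(1, 0, Diag);
  assert(!Diag.has_value()); // stays "not applicable"

  Diag = false; // diagnostics enabled
  foldDivide(1, 0, Diag);
  assert(Diag.has_value() && *Diag); // error recorded
}
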
Example #15
void
MessagePort::PostMessage(JSContext* aCx, JS::Handle<JS::Value> aMessage,
                         const Optional<Sequence<JS::Value>>& aTransferable,
                         ErrorResult& aRv)
{
  // We *must* clone the data here, or the JS::Value could be modified
  // by script

  JS::Rooted<JS::Value> transferable(aCx, JS::UndefinedValue());
  if (aTransferable.WasPassed()) {
    const Sequence<JS::Value>& realTransferable = aTransferable.Value();

    // Here we want to check if the transferable object list contains
    // this port. No other checks are done.
    for (const JS::Value& value : realTransferable) {
      if (!value.isObject()) {
        continue;
      }

      MessagePort* port = nullptr;
      nsresult rv = UNWRAP_OBJECT(MessagePort, &value.toObject(), port);
      if (NS_FAILED(rv)) {
        continue;
      }

      if (port == this) {
        aRv.Throw(NS_ERROR_DOM_DATA_CLONE_ERR);
        return;
      }
    }

    // The input sequence only comes from the generated bindings code, which
    // ensures it is rooted.
    JS::HandleValueArray elements =
      JS::HandleValueArray::fromMarkedLocation(realTransferable.Length(),
                                               realTransferable.Elements());

    JSObject* array =
      JS_NewArrayObject(aCx, elements);
    if (!array) {
      aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
      return;
    }

    transferable.setObject(*array);
  }

  RefPtr<SharedMessagePortMessage> data = new SharedMessagePortMessage();

  UniquePtr<AbstractTimelineMarker> start;
  UniquePtr<AbstractTimelineMarker> end;
  RefPtr<TimelineConsumers> timelines = TimelineConsumers::Get();
  bool isTimelineRecording = timelines && !timelines->IsEmpty();

  if (isTimelineRecording) {
    start = MakeUnique<MessagePortTimelineMarker>(
      ProfileTimelineMessagePortOperationType::SerializeData,
      MarkerTracingType::START);
  }

  data->Write(aCx, aMessage, transferable, aRv);

  if (isTimelineRecording) {
    end = MakeUnique<MessagePortTimelineMarker>(
      ProfileTimelineMessagePortOperationType::SerializeData,
      MarkerTracingType::END);
    timelines->AddMarkerForAllObservedDocShells(start);
    timelines->AddMarkerForAllObservedDocShells(end);
  }

  if (NS_WARN_IF(aRv.Failed())) {
    return;
  }

  // This message has to be ignored.
  if (mState > eStateEntangled) {
    return;
  }

  // If we are unshipped we are connected to the other port on the same thread.
  if (mState == eStateUnshippedEntangled) {
    MOZ_ASSERT(mUnshippedEntangledPort);
    mUnshippedEntangledPort->mMessages.AppendElement(data);
    mUnshippedEntangledPort->Dispatch();
    return;
  }

  // Not entangled yet, but already closed/disentangled.
  if (mState == eStateEntanglingForDisentangle ||
      mState == eStateEntanglingForClose) {
    return;
  }

  RemoveDocFromBFCache();

  // Not entangled yet.
  if (mState == eStateEntangling) {
    mMessagesForTheOtherPort.AppendElement(data);
    return;
  }

  MOZ_ASSERT(mActor);
  MOZ_ASSERT(mMessagesForTheOtherPort.IsEmpty());

  AutoTArray<RefPtr<SharedMessagePortMessage>, 1> array;
  array.AppendElement(data);

  AutoTArray<ClonedMessageData, 1> messages;
  // note: `messages` will borrow the underlying buffer, but this is okay
  // because reverse destruction order means `messages` will be destroyed prior
  // to `array`/`data`.
  SharedMessagePortMessage::FromSharedToMessagesChild(mActor, array, messages);
  mActor->SendPostMessages(messages);
}
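The pattern worth noting above is the Optional<Sequence<JS::Value>>& parameter: the transferable list is only inspected after WasPassed(), and only its Value() is walked. A minimal, self-contained sketch of the same guard using std::optional (all names here are illustrative, not the Gecko API):

#include <optional>
#include <stdexcept>
#include <vector>

struct Port;

// Reject a transfer list that contains the sending port itself,
// mirroring the WasPassed()/Value() guard in the example above.
void postMessage(const Port* self,
                 const std::optional<std::vector<Port*>>& transferable) {
  if (transferable.has_value()) {                 // WasPassed()
    for (const Port* p : transferable.value()) {  // Value()
      if (p == self)
        throw std::invalid_argument("cannot transfer the sending port");
    }
  }
  // ... serialize and dispatch the message ...
}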
Example #16
already_AddRefed<IDBOpenDBRequest>
IDBFactory::OpenInternal(nsIPrincipal* aPrincipal,
                         const nsAString& aName,
                         const Optional<uint64_t>& aVersion,
                         const Optional<StorageType>& aStorageType,
                         bool aDeleting,
                         ErrorResult& aRv)
{
  MOZ_ASSERT(mWindow || mOwningObject);
  MOZ_ASSERT_IF(!mWindow, !mPrivateBrowsingMode);

  CommonFactoryRequestParams commonParams;
  commonParams.privateBrowsingMode() = mPrivateBrowsingMode;

  PrincipalInfo& principalInfo = commonParams.principalInfo();

  if (aPrincipal) {
    if (!NS_IsMainThread()) {
      MOZ_CRASH("Figure out security checks for workers!");
    }
    MOZ_ASSERT(nsContentUtils::IsCallerChrome());

    if (NS_WARN_IF(NS_FAILED(PrincipalToPrincipalInfo(aPrincipal,
                                                      &principalInfo)))) {
      IDB_REPORT_INTERNAL_ERR();
      aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
      return nullptr;
    }

    if (principalInfo.type() != PrincipalInfo::TContentPrincipalInfo &&
        principalInfo.type() != PrincipalInfo::TSystemPrincipalInfo) {
      IDB_REPORT_INTERNAL_ERR();
      aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
      return nullptr;
    }
  } else {
    principalInfo = *mPrincipalInfo;
  }

  uint64_t version = 0;
  if (!aDeleting && aVersion.WasPassed()) {
    if (aVersion.Value() < 1) {
      aRv.ThrowTypeError(MSG_INVALID_VERSION);
      return nullptr;
    }
    version = aVersion.Value();
  }

  // Nothing can be done here if we have previously failed to create a
  // background actor.
  if (mBackgroundActorFailed) {
    IDB_REPORT_INTERNAL_ERR();
    aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
    return nullptr;
  }

  PersistenceType persistenceType;

  if (principalInfo.type() == PrincipalInfo::TSystemPrincipalInfo) {
    // Chrome privilege always gets persistent storage.
    persistenceType = PERSISTENCE_TYPE_PERSISTENT;
  } else {
    persistenceType = PersistenceTypeFromStorage(aStorageType);
  }

  DatabaseMetadata& metadata = commonParams.metadata();
  metadata.name() = aName;
  metadata.persistenceType() = persistenceType;

  FactoryRequestParams params;
  if (aDeleting) {
    metadata.version() = 0;
    params = DeleteDatabaseRequestParams(commonParams);
  } else {
    metadata.version() = version;
    params = OpenDatabaseRequestParams(commonParams);
  }

  if (!mBackgroundActor && mPendingRequests.IsEmpty()) {
    // We need to start the sequence to create a background actor for this
    // thread.
    BackgroundChildImpl::ThreadLocal* threadLocal =
      BackgroundChildImpl::GetThreadLocalForCurrentThread();

    nsAutoPtr<ThreadLocal> newIDBThreadLocal;
    ThreadLocal* idbThreadLocal;

    if (threadLocal && threadLocal->mIndexedDBThreadLocal) {
      idbThreadLocal = threadLocal->mIndexedDBThreadLocal;
    } else {
      nsCOMPtr<nsIUUIDGenerator> uuidGen =
        do_GetService("@mozilla.org/uuid-generator;1");
      MOZ_ASSERT(uuidGen);

      nsID id;
      MOZ_ALWAYS_TRUE(NS_SUCCEEDED(uuidGen->GenerateUUIDInPlace(&id)));

      newIDBThreadLocal = idbThreadLocal = new ThreadLocal(id);
    }

    nsRefPtr<BackgroundCreateCallback> cb =
      new BackgroundCreateCallback(this, idbThreadLocal->GetLoggingInfo());
    if (NS_WARN_IF(!BackgroundChild::GetOrCreateForCurrentThread(cb))) {
      IDB_REPORT_INTERNAL_ERR();
      aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
      return nullptr;
    }

    if (newIDBThreadLocal) {
      if (!threadLocal) {
        threadLocal = BackgroundChildImpl::GetThreadLocalForCurrentThread();
      }
      MOZ_ASSERT(threadLocal);
      MOZ_ASSERT(!threadLocal->mIndexedDBThreadLocal);

      threadLocal->mIndexedDBThreadLocal = newIDBThreadLocal.forget();
    }
  }

  AutoJSAPI autoJS;
  nsRefPtr<IDBOpenDBRequest> request;

  if (mWindow) {
    AutoJSContext cx;
    if (NS_WARN_IF(!autoJS.Init(mWindow, cx))) {
      IDB_REPORT_INTERNAL_ERR();
      aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
      return nullptr;
    }

    JS::Rooted<JSObject*> scriptOwner(cx,
      static_cast<nsGlobalWindow*>(mWindow.get())->FastGetGlobalJSObject());
    MOZ_ASSERT(scriptOwner);

    request = IDBOpenDBRequest::CreateForWindow(this, mWindow, scriptOwner);
  } else {
    autoJS.Init(mOwningObject.get());
    JS::Rooted<JSObject*> scriptOwner(autoJS.cx(), mOwningObject);

    request = IDBOpenDBRequest::CreateForJS(this, scriptOwner);
  }

  MOZ_ASSERT(request);

  if (aDeleting) {
    IDB_LOG_MARK("IndexedDB %s: Child  Request[%llu]: "
                   "indexedDB.deleteDatabase(\"%s\")",
                 "IndexedDB %s: C R[%llu]: IDBFactory.deleteDatabase()",
                 IDB_LOG_ID_STRING(),
                 request->LoggingSerialNumber(),
                 NS_ConvertUTF16toUTF8(aName).get());
  } else {
    IDB_LOG_MARK("IndexedDB %s: Child  Request[%llu]: "
                   "indexedDB.open(\"%s\", %s)",
                 "IndexedDB %s: C R[%llu]: IDBFactory.open()",
                 IDB_LOG_ID_STRING(),
                 request->LoggingSerialNumber(),
                 NS_ConvertUTF16toUTF8(aName).get(),
                 IDB_LOG_STRINGIFY(aVersion));
  }

  // If we already have a background actor then we can start this request now.
  if (mBackgroundActor) {
    nsresult rv = InitiateRequest(request, params);
    if (NS_WARN_IF(NS_FAILED(rv))) {
      IDB_REPORT_INTERNAL_ERR();
      aRv.Throw(NS_ERROR_DOM_INDEXEDDB_UNKNOWN_ERR);
      return nullptr;
    }
  } else {
    mPendingRequests.AppendElement(new PendingRequestInfo(request, params));
  }

  return request.forget();
}
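Here the Optional<uint64_t>& aVersion argument distinguishes "open at the current version" (not passed) from an explicit version, which must be at least 1. A compilable sketch of just that step, using std::optional in place of the binding-generated Optional (the helper name is hypothetical):

#include <cstdint>
#include <optional>
#include <stdexcept>

// Resolve the requested database version: absent means "current version",
// and 0 remains the sentinel carried in the request metadata.
uint64_t resolveVersion(bool deleting, std::optional<uint64_t> requested) {
  if (!deleting && requested.has_value()) {
    if (*requested < 1)
      throw std::invalid_argument("version must be >= 1");
    return *requested;
  }
  return 0;
}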
Example #17
void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
                                         CheckerContext &C) const {
  ProgramStateRef state = C.getState();
  const FunctionDecl *FD = C.getCalleeDecl(CE);
  if (!FD)
    return;

  ASTContext &Ctx = C.getASTContext();
  if (!II)
    II = &Ctx.Idents.get("CFNumberCreate");

  if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
    return;

  // Get the value of the "theType" argument.
  const LocationContext *LCtx = C.getLocationContext();
  SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);

  // FIXME: We really should allow ranges of valid theType values, and
  //   bifurcate the state appropriately.
  Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
  if (!V)
    return;

  uint64_t NumberKind = V->getValue().getLimitedValue();
  Optional<uint64_t> OptTargetSize = GetCFNumberSize(Ctx, NumberKind);

  // FIXME: In some cases we can emit an error.
  if (!OptTargetSize)
    return;

  uint64_t TargetSize = *OptTargetSize;

  // Look at the value of the integer being passed by reference.  Essentially
  // we want to catch cases where the value passed in is not equal to the
  // size of the type being created.
  SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);

  // FIXME: Eventually we should handle arbitrary locations.  We can do this
  //  by having an enhanced memory model that does low-level typing.
  Optional<loc::MemRegionVal> LV = TheValueExpr.getAs<loc::MemRegionVal>();
  if (!LV)
    return;

  const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
  if (!R)
    return;

  QualType T = Ctx.getCanonicalType(R->getValueType());

  // FIXME: If the pointee isn't an integer type, should we flag a warning?
  //  People can do weird stuff with pointers.

  if (!T->isIntegerType())
    return;

  uint64_t SourceSize = Ctx.getTypeSize(T);

  // CHECK: is SourceSize == TargetSize
  if (SourceSize == TargetSize)
    return;

  // Generate an error.  Only generate a sink if 'SourceSize < TargetSize';
  // otherwise generate a regular node.
  //
  // FIXME: We can actually create an abstract "CFNumber" object that has
  //  the bits initialized to the provided values.
  //
  if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
                                                : C.addTransition()) {
    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);

    os << (SourceSize == 8 ? "An " : "A ")
       << SourceSize << " bit integer is used to initialize a CFNumber "
                        "object that represents "
       << (TargetSize == 8 ? "an " : "a ")
       << TargetSize << " bit integer. ";

    if (SourceSize < TargetSize)
      os << (TargetSize - SourceSize)
         << " bits of the CFNumber value will be garbage.";
    else
      os << (SourceSize - TargetSize)
         << " bits of the input integer will be lost.";

    if (!BT)
      BT.reset(new APIMisuse("Bad use of CFNumberCreate"));

    BugReport *report = new BugReport(*BT, os.str(), N);
    report->addRange(CE->getArg(2)->getSourceRange());
    C.emitReport(report);
  }
}
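The checker's core move is unwrapping an Optional<uint64_t> (the target bit width) before comparing it against the source width. A standalone sketch of that shape; the type-code table is a small excerpt of the real CFNumber constants, and diagnose() is a hypothetical stand-in for the report text:

#include <cstdint>
#include <optional>
#include <string>

// Bit width for a CFNumber type code, or nullopt for codes this
// excerpt doesn't cover.
std::optional<uint64_t> cfNumberSize(uint64_t kind) {
  switch (kind) {
  case 1: return 8;   // kCFNumberSInt8Type
  case 2: return 16;  // kCFNumberSInt16Type
  case 3: return 32;  // kCFNumberSInt32Type
  case 4: return 64;  // kCFNumberSInt64Type
  default: return std::nullopt;
  }
}

std::string diagnose(uint64_t sourceSize, uint64_t kind) {
  std::optional<uint64_t> target = cfNumberSize(kind);
  if (!target || *target == sourceSize)
    return "";  // unknown kind, or widths agree: nothing to report
  if (sourceSize < *target)
    return std::to_string(*target - sourceSize) +
           " bits of the CFNumber value will be garbage";
  return std::to_string(sourceSize - *target) +
         " bits of the input integer will be lost";
}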
Example #18
/// Gather the various unrolling parameters based on the defaults, compiler
/// flags, TTI overrides, pragmas, and user specified parameters.
static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
    Loop *L, const TargetTransformInfo &TTI, Optional<unsigned> UserThreshold,
    Optional<unsigned> UserCount, Optional<bool> UserAllowPartial,
    Optional<bool> UserRuntime, unsigned PragmaCount, bool PragmaFullUnroll,
    bool PragmaEnableUnroll, unsigned TripCount) {
  TargetTransformInfo::UnrollingPreferences UP;

  // Set up the defaults
  UP.Threshold = 150;
  UP.PercentDynamicCostSavedThreshold = 20;
  UP.DynamicCostSavingsDiscount = 2000;
  UP.OptSizeThreshold = 0;
  UP.PartialThreshold = UP.Threshold;
  UP.PartialOptSizeThreshold = 0;
  UP.Count = 0;
  UP.MaxCount = UINT_MAX;
  UP.FullUnrollMaxCount = UINT_MAX;
  UP.Partial = false;
  UP.Runtime = false;
  UP.AllowExpensiveTripCount = false;

  // Override with any target specific settings
  TTI.getUnrollingPreferences(L, UP);

  // Apply size attributes
  if (L->getHeader()->getParent()->optForSize()) {
    UP.Threshold = UP.OptSizeThreshold;
    UP.PartialThreshold = UP.PartialOptSizeThreshold;
  }

  // Apply unroll count pragmas
  if (PragmaCount)
    UP.Count = PragmaCount;
  else if (PragmaFullUnroll)
    UP.Count = TripCount;

  // Apply any user values specified by cl::opt
  if (UnrollThreshold.getNumOccurrences() > 0) {
    UP.Threshold = UnrollThreshold;
    UP.PartialThreshold = UnrollThreshold;
  }
  if (UnrollPercentDynamicCostSavedThreshold.getNumOccurrences() > 0)
    UP.PercentDynamicCostSavedThreshold =
        UnrollPercentDynamicCostSavedThreshold;
  if (UnrollDynamicCostSavingsDiscount.getNumOccurrences() > 0)
    UP.DynamicCostSavingsDiscount = UnrollDynamicCostSavingsDiscount;
  if (UnrollCount.getNumOccurrences() > 0)
    UP.Count = UnrollCount;
  if (UnrollMaxCount.getNumOccurrences() > 0)
    UP.MaxCount = UnrollMaxCount;
  if (UnrollFullMaxCount.getNumOccurrences() > 0)
    UP.FullUnrollMaxCount = UnrollFullMaxCount;
  if (UnrollAllowPartial.getNumOccurrences() > 0)
    UP.Partial = UnrollAllowPartial;
  if (UnrollRuntime.getNumOccurrences() > 0)
    UP.Runtime = UnrollRuntime;

  // Apply user values provided by argument
  if (UserThreshold.hasValue()) {
    UP.Threshold = *UserThreshold;
    UP.PartialThreshold = *UserThreshold;
  }
  if (UserCount.hasValue())
    UP.Count = *UserCount;
  if (UserAllowPartial.hasValue())
    UP.Partial = *UserAllowPartial;
  if (UserRuntime.hasValue())
    UP.Runtime = *UserRuntime;

  if (PragmaCount > 0 ||
      ((PragmaFullUnroll || PragmaEnableUnroll) && TripCount != 0)) {
    // If the loop has an unrolling pragma, we want to be more aggressive with
    // unrolling limits. Set thresholds to at least the PragmaThreshold value
    // which is larger than the default limits.
    if (UP.Threshold != NoThreshold)
      UP.Threshold = std::max<unsigned>(UP.Threshold, PragmaUnrollThreshold);
    if (UP.PartialThreshold != NoThreshold)
      UP.PartialThreshold =
          std::max<unsigned>(UP.PartialThreshold, PragmaUnrollThreshold);
  }

  return UP;
}
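The structure here is a layered override chain: hard-coded defaults, then target hooks, then cl::opt flags, and finally the caller's Optional parameters, where an engaged optional always wins. A reduced sketch of that last layer (struct and field names invented for illustration):

#include <optional>

struct UnrollPrefs {
  unsigned Threshold = 150;
  unsigned Count = 0;
  bool Partial = false;
  bool Runtime = false;
};

// Last layer of the override chain: caller-supplied optionals replace
// whatever the earlier layers chose, but only when engaged.
UnrollPrefs applyUserOverrides(UnrollPrefs prefs,
                               std::optional<unsigned> userThreshold,
                               std::optional<unsigned> userCount,
                               std::optional<bool> userPartial,
                               std::optional<bool> userRuntime) {
  if (userThreshold) prefs.Threshold = *userThreshold;
  if (userCount)     prefs.Count = *userCount;
  if (userPartial)   prefs.Partial = *userPartial;
  if (userRuntime)   prefs.Runtime = *userRuntime;
  return prefs;
}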
Example #19
void DWARFDebugInfoEntryMinimal::dumpAttribute(raw_ostream &OS,
                                               DWARFUnit *u,
                                               uint32_t *offset_ptr,
                                               uint16_t attr, uint16_t form,
                                               unsigned indent) const {
  const char BaseIndent[] = "            ";
  OS << BaseIndent;
  OS.indent(indent+2);
  const char *attrString = AttributeString(attr);
  if (attrString)
    WithColor(OS, syntax::Attribute) << attrString;
  else
    WithColor(OS, syntax::Attribute).get() << format("DW_AT_Unknown_%x", attr);

  const char *formString = FormEncodingString(form);
  if (formString)
    OS << " [" << formString << ']';
  else
    OS << format(" [DW_FORM_Unknown_%x]", form);

  DWARFFormValue formValue(form);

  if (!formValue.extractValue(u->getDebugInfoExtractor(), offset_ptr, u))
    return;

  OS << "\t(";
  
  const char *Name = nullptr;
  std::string File;
  auto Color = syntax::Enumerator;
  if (attr == DW_AT_decl_file || attr == DW_AT_call_file) {
    Color = syntax::String;
    if (const auto *LT = u->getContext().getLineTableForUnit(u))
      if (LT->getFileNameByIndex(
             formValue.getAsUnsignedConstant().getValue(),
             u->getCompilationDir(),
             DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, File)) {
        File = '"' + File + '"';
        Name = File.c_str();
      }
  } else if (Optional<uint64_t> Val = formValue.getAsUnsignedConstant())
    Name = AttributeValueString(attr, *Val);

  if (Name)
    WithColor(OS, Color) << Name;
  else if (attr == DW_AT_decl_line || attr == DW_AT_call_line)
    OS << *formValue.getAsUnsignedConstant();
  else
    formValue.dump(OS, u);

  // We have dumped the attribute raw value. For some attributes
  // having both the raw value and the pretty-printed value is
  // interesting. These attributes are handled below.
  if (attr == DW_AT_specification || attr == DW_AT_abstract_origin) {
    Optional<uint64_t> Ref = formValue.getAsReference(u);
    if (Ref.hasValue()) {
      uint32_t RefOffset = Ref.getValue();
      DWARFDebugInfoEntryMinimal DIE;
      if (const DWARFUnit *RefU = findUnitAndExtractFast(DIE, u, &RefOffset))
        if (const char *Name = DIE.getName(RefU, DINameKind::LinkageName))
          OS << " \"" << Name << '\"';
    }
  } else if (attr == DW_AT_APPLE_property_attribute) {
    if (Optional<uint64_t> OptVal = formValue.getAsUnsignedConstant())
      dumpApplePropertyAttribute(OS, *OptVal);
  } else if (attr == DW_AT_ranges) {
    dumpRanges(OS, getAddressRanges(u), u->getAddressByteSize(),
               sizeof(BaseIndent)+indent+4);
  }

  OS << ")\n";
}
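Two Optional idioms appear above: declaring the optional directly in the if condition (Optional<uint64_t> Val = formValue.getAsUnsignedConstant()), and dereferencing it only once the surrounding check guarantees presence. A self-contained sketch of the first idiom with std::optional (this getAsUnsignedConstant is a hypothetical stand-in, not the DWARFFormValue API):

#include <cstdint>
#include <cstdio>
#include <optional>

// Stand-in for a form value that may or may not hold an unsigned constant.
std::optional<uint64_t> getAsUnsignedConstant(bool isConstant, uint64_t raw) {
  if (!isConstant)
    return std::nullopt;
  return raw;
}

void dumpValue(bool isConstant, uint64_t raw) {
  // Declare-and-test in the condition, as in the example above.
  if (std::optional<uint64_t> val = getAsUnsignedConstant(isConstant, raw))
    std::printf("constant %llu\n", static_cast<unsigned long long>(*val));
  else
    std::printf("<non-constant form>\n");
}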
Example #20
/// Generate code to emit a thunk with native conventions that calls a
/// function with foreign conventions.
void SILGenFunction::emitForeignToNativeThunk(SILDeclRef thunk) {
  assert(!thunk.isForeign && "foreign-to-native thunks only");

  // Wrap the function in its original form.

  auto fd = cast<AbstractFunctionDecl>(thunk.getDecl());
  auto nativeCI = getConstantInfo(thunk);
  auto nativeFormalResultTy = nativeCI.LoweredInterfaceType.getResult();
  auto nativeFnTy = F.getLoweredFunctionType();
  assert(nativeFnTy == nativeCI.SILFnType);

  // Find the foreign error convention.
  Optional<ForeignErrorConvention> foreignError;
  if (nativeFnTy->hasErrorResult()) {
    foreignError = fd->getForeignErrorConvention();
    assert(foreignError && "couldn't find foreign error convention!");
  }

  // Forward the arguments.
  auto forwardedParameters = fd->getParameterLists();

  // For allocating constructors, 'self' is a metatype, not the 'self' value
  // formally present in the constructor body.
  Type allocatorSelfType;
  if (thunk.kind == SILDeclRef::Kind::Allocator) {
    allocatorSelfType = forwardedParameters[0]->getType(getASTContext());
    forwardedParameters = forwardedParameters.slice(1);
  }

  SmallVector<SILValue, 8> params;
  for (auto *paramList : reversed(forwardedParameters))
    bindParametersForForwarding(paramList, params);

  if (allocatorSelfType) {
    auto selfMetatype = CanMetatypeType::get(allocatorSelfType->getCanonicalType(),
                                             MetatypeRepresentation::Thick);
    auto selfArg = new (F.getModule()) SILArgument(
                                 F.begin(),
                                 SILType::getPrimitiveObjectType(selfMetatype),
                                 fd->getImplicitSelfDecl());
    params.push_back(selfArg);
  }

  // Set up the throw destination if necessary.
  CleanupLocation cleanupLoc(fd);
  if (foreignError) {
    prepareRethrowEpilog(cleanupLoc);
  }

  SILValue result;
  {
    Scope scope(Cleanups, fd);

    SILDeclRef foreignDeclRef = thunk.asForeign(true);
    SILConstantInfo foreignCI = getConstantInfo(foreignDeclRef);
    auto foreignFnTy = foreignCI.SILFnType;

    // Bridge all the arguments.
    SmallVector<ManagedValue, 8> args;
    unsigned foreignArgIndex = 0;

    // A helper function to add a function error argument in the
    // appropriate position.
    auto maybeAddForeignErrorArg = [&] {
      if (foreignError &&
          foreignArgIndex == foreignError->getErrorParameterIndex()) {
        args.push_back(ManagedValue());
        foreignArgIndex++;
      }
    };

    for (unsigned nativeParamIndex : indices(params)) {
      // Bring the parameter to +1.
      auto paramValue = params[nativeParamIndex];
      auto thunkParam = nativeFnTy->getParameters()[nativeParamIndex];
      // TODO: Could avoid a retain if the bridged parameter is also +0 and
      // doesn't require a bridging conversion.
      ManagedValue param;
      switch (thunkParam.getConvention()) {
      case ParameterConvention::Direct_Owned:
        param = emitManagedRValueWithCleanup(paramValue);
        break;
      case ParameterConvention::Direct_Guaranteed:
      case ParameterConvention::Direct_Unowned:
        param = emitManagedRetain(fd, paramValue);
        break;
      case ParameterConvention::Direct_Deallocating:
        param = ManagedValue::forUnmanaged(paramValue);
        break;
      case ParameterConvention::Indirect_Inout:
      case ParameterConvention::Indirect_InoutAliasable:
        param = ManagedValue::forUnmanaged(paramValue);
        break;
      case ParameterConvention::Indirect_In:
      case ParameterConvention::Indirect_In_Guaranteed:
      case ParameterConvention::Indirect_Out:
        llvm_unreachable("indirect args in foreign thunked method not implemented");
      }

      maybeAddForeignErrorArg();

      SILType foreignArgTy =
        foreignFnTy->getParameters()[foreignArgIndex++].getSILType();
      args.push_back(emitNativeToBridgedValue(fd, param,
                                SILFunctionTypeRepresentation::CFunctionPointer,
                                AbstractionPattern(param.getSwiftType()),
                                param.getSwiftType(),
                                foreignArgTy.getSwiftRValueType()));
    }

    maybeAddForeignErrorArg();

    // Call the original.
    auto subs = getForwardingSubstitutions();
    auto fn = getThunkedForeignFunctionRef(*this, fd, foreignDeclRef, args, subs,
                                           foreignCI);

    auto fnType = fn.getType().castTo<SILFunctionType>();
    fnType = fnType->substGenericArgs(SGM.M, SGM.SwiftModule, subs);

    result = emitApply(fd, ManagedValue::forUnmanaged(fn),
                       subs, args, fnType,
                       AbstractionPattern(nativeFormalResultTy),
                       nativeFormalResultTy,
                       ApplyOptions::None, None, foreignError,
                       SGFContext())
      .forward(*this);
  }
  B.createReturn(ImplicitReturnLocation::getImplicitReturnLoc(fd), result);

  // Emit the throw destination.
  emitRethrowEpilog(fd);
}
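foreignError above is an Optional<ForeignErrorConvention> doing double duty: its engagement answers "does this function throw?", and its payload says where the error argument slots in. A trimmed sketch of that flag-plus-payload pattern (types and names invented):

#include <optional>

struct ErrorConvention {
  unsigned errorParameterIndex;  // where the out-error argument goes
};

// One optional carries both the "throws at all" decision and the
// placement of the error slot, as foreignError does above.
unsigned countForeignArgs(unsigned nativeArgs,
                          std::optional<ErrorConvention> foreignError) {
  unsigned total = nativeArgs;
  if (foreignError)
    ++total;  // an extra slot is inserted at errorParameterIndex
  return total;
}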
Example #21
static ReductionKind matchPairwiseReductionAtLevel(Instruction *I,
                                                   unsigned Level,
                                                   unsigned NumLevels) {
  // Match one level of pairwise operations.
  // %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
  // %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
  //       <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
  // %bin.rdx.0 = fadd <4 x float> %rdx.shuf.0.0, %rdx.shuf.0.1
  if (!I)
    return RK_None;

  assert(I->getType()->isVectorTy() && "Expecting a vector type");

  Optional<ReductionData> RD = getReductionData(I);
  if (!RD)
    return RK_None;

  ShuffleVectorInst *LS = dyn_cast<ShuffleVectorInst>(RD->LHS);
  if (!LS && Level)
    return RK_None;
  ShuffleVectorInst *RS = dyn_cast<ShuffleVectorInst>(RD->RHS);
  if (!RS && Level)
    return RK_None;

  // On level 0 we can omit one shufflevector instruction.
  if (!Level && !RS && !LS)
    return RK_None;

  // Shuffle inputs must match.
  Value *NextLevelOpL = LS ? LS->getOperand(0) : nullptr;
  Value *NextLevelOpR = RS ? RS->getOperand(0) : nullptr;
  Value *NextLevelOp = nullptr;
  if (NextLevelOpR && NextLevelOpL) {
    // If we have two shuffles their operands must match.
    if (NextLevelOpL != NextLevelOpR)
      return RK_None;

    NextLevelOp = NextLevelOpL;
  } else if (Level == 0 && (NextLevelOpR || NextLevelOpL)) {
    // On the first level we can omit the shufflevector <0, undef,...>. So the
    // input to the other shufflevector <1, undef> must match with one of the
    // inputs to the current binary operation.
    // Example:
    //  %NextLevelOpL = shufflevector %R, <1, undef ...>
    //  %BinOp        = fadd          %NextLevelOpL, %R
    if (NextLevelOpL && NextLevelOpL != RD->RHS)
      return RK_None;
    else if (NextLevelOpR && NextLevelOpR != RD->LHS)
      return RK_None;

    NextLevelOp = NextLevelOpL ? RD->RHS : RD->LHS;
  } else
    return RK_None;

  // Check that the next level's binary operation exists and matches the
  // current one.
  if (Level + 1 != NumLevels) {
    Optional<ReductionData> NextLevelRD =
        getReductionData(cast<Instruction>(NextLevelOp));
    if (!NextLevelRD || !RD->hasSameData(*NextLevelRD))
      return RK_None;
  }

  // Shuffle mask for pairwise operation must match.
  if (matchPairwiseShuffleMask(LS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(RS, /*IsLeft=*/false, Level))
      return RK_None;
  } else if (matchPairwiseShuffleMask(RS, /*IsLeft=*/true, Level)) {
    if (!matchPairwiseShuffleMask(LS, /*IsLeft=*/false, Level))
      return RK_None;
  } else {
    return RK_None;
  }

  if (++Level == NumLevels)
    return RD->Kind;

  // Match next level.
  return matchPairwiseReductionAtLevel(cast<Instruction>(NextLevelOp), Level,
                                       NumLevels);
}
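getReductionData() returning Optional<ReductionData> lets every level of the recursion bail out through one early RK_None return instead of threading a success flag. A compact sketch of an optional-returning matcher used the same way (all names hypothetical):

#include <optional>

enum class Kind { None, Add, Mul };

struct MatchData {
  Kind K;
};

// nullopt means "this node is not part of the pattern", so callers can
// early-return exactly as the RK_None exits do above.
std::optional<MatchData> match(int node) {
  if (node < 0)
    return std::nullopt;
  return MatchData{node % 2 == 0 ? Kind::Add : Kind::Mul};
}

Kind matchAtLevel(int node) {
  std::optional<MatchData> md = match(node);
  if (!md)
    return Kind::None;
  return md->K;
}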
Example #22
void SILGenFunction::emitNativeToForeignThunk(SILDeclRef thunk) {
  assert(thunk.isForeign);
  SILDeclRef native = thunk.asForeign(false);

  auto loc = thunk.getAsRegularLocation();
  loc.markAutoGenerated();
  Scope scope(Cleanups, CleanupLocation::get(loc));

  // Bridge the arguments.
  SmallVector<SILValue, 4> args;
  Optional<ForeignErrorConvention> foreignError;
  SILValue foreignErrorSlot;
  auto objcFnTy = emitObjCThunkArguments(*this, loc, thunk, args,
                                         foreignErrorSlot, foreignError);
  auto nativeInfo = getConstantInfo(native);
  auto swiftResultTy = nativeInfo.SILFnType->getResult()
    .map([&](CanType t) { return F.mapTypeIntoContext(t)->getCanonicalType(); });
  auto objcResultTy = objcFnTy->getResult()
    .map([&](CanType t) { return F.mapTypeIntoContext(t)->getCanonicalType(); });

  // Call the native entry point.
  SILValue nativeFn = emitGlobalFunctionRef(loc, native, nativeInfo);
  auto subs = F.getForwardingSubstitutions();
  auto substTy = nativeInfo.SILFnType->substGenericArgs(
    SGM.M, SGM.M.getSwiftModule(), subs);
  SILType substSILTy = SILType::getPrimitiveObjectType(substTy);

  CanType substNativeResultType = nativeInfo.LoweredType.getResult();
  AbstractionPattern origNativeResultType =
    AbstractionPattern(substNativeResultType);
  CanType bridgedResultType = objcResultTy.getType();

  SILValue result;
  assert(foreignError.hasValue() == substTy->hasErrorResult());
  if (!substTy->hasErrorResult()) {
    // Create the apply.
    result = B.createApply(loc, nativeFn, substSILTy,
                           swiftResultTy.getSILType(), subs, args);

    // Leave the scope immediately.  This isn't really necessary; it
    // just limits lifetimes a little bit more.
    scope.pop();

    // Now bridge the return value.
    result = emitBridgeReturnValue(*this, loc, result,
                                   objcFnTy->getRepresentation(),
                                   origNativeResultType,
                                   substNativeResultType,
                                   bridgedResultType);
  } else {
    SILBasicBlock *contBB = createBasicBlock();
    SILBasicBlock *errorBB = createBasicBlock();
    SILBasicBlock *normalBB = createBasicBlock();
    B.createTryApply(loc, nativeFn, substSILTy, subs, args,
                     normalBB, errorBB);

    // Emit the non-error destination.
    {
      B.emitBlock(normalBB);
      SILValue nativeResult =
        normalBB->createBBArg(swiftResultTy.getSILType());

      // In this branch, the eventual return value is mostly created
      // by bridging the native return value, but we may need to
      // adjust it slightly.
      SILValue bridgedResult =
        emitBridgeReturnValueForForeignError(loc, nativeResult, 
                                             objcFnTy->getRepresentation(),
                                             origNativeResultType,
                                             substNativeResultType,
                                             objcResultTy.getSILType(),
                                             foreignErrorSlot, *foreignError);
      B.createBranch(loc, contBB, bridgedResult);
    }

    // Emit the error destination.
    {
      B.emitBlock(errorBB);
      SILValue nativeError =
        errorBB->createBBArg(substTy->getErrorResult().getSILType());

      // In this branch, the eventual return value is mostly invented.
      // Store the native error in the appropriate location and return.
      SILValue bridgedResult =
        emitBridgeErrorForForeignError(loc, nativeError,
                                       objcResultTy.getSILType(),
                                       foreignErrorSlot, *foreignError);
      B.createBranch(loc, contBB, bridgedResult);
    }

    // Emit the join block.
    B.emitBlock(contBB);
    result = contBB->createBBArg(objcResultTy.getSILType());

    // Leave the scope now.
    scope.pop();
  }

  emitObjCReturnValue(*this, loc, result, objcResultTy);
}
Example #23
void ConfigFile::setAutomaticDeleteOldLogsAge(Optional<chrono::hours> expireTime)
{
    QSettings settings(configFile(), QSettings::IniFormat);
    if (!expireTime) {
        settings.setValue(QLatin1String(deleteOldLogsAfterHoursC), -1);
    } else {
        settings.setValue(QLatin1String(deleteOldLogsAfterHoursC), QVariant::fromValue(expireTime->count()));
    }
}
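This setter encodes a disengaged Optional as the sentinel -1 in the settings file. A round-trip sketch of that encoding with std::optional; decodeExpireTime is a hypothetical inverse, since the original only shows the write side:

#include <chrono>
#include <optional>

// Disengaged optional -> -1; engaged optional -> its hour count.
long long encodeExpireTime(std::optional<std::chrono::hours> expire) {
  return expire ? static_cast<long long>(expire->count()) : -1;
}

std::optional<std::chrono::hours> decodeExpireTime(long long stored) {
  if (stored < 0)
    return std::nullopt;
  return std::chrono::hours(stored);
}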
Example #24
/// Gather the various unrolling parameters based on the defaults, compiler
/// flags, TTI overrides and user specified parameters.
static TargetTransformInfo::UnrollingPreferences gatherUnrollingPreferences(
    Loop *L, const TargetTransformInfo &TTI, Optional<unsigned> UserThreshold,
    Optional<unsigned> UserCount, Optional<bool> UserAllowPartial,
    Optional<bool> UserRuntime, Optional<bool> UserUpperBound) {
  TargetTransformInfo::UnrollingPreferences UP;

  // Set up the defaults
  UP.Threshold = 150;
  UP.MaxPercentThresholdBoost = 400;
  UP.OptSizeThreshold = 0;
  UP.PartialThreshold = 150;
  UP.PartialOptSizeThreshold = 0;
  UP.Count = 0;
  UP.PeelCount = 0;
  UP.DefaultUnrollRuntimeCount = 8;
  UP.MaxCount = UINT_MAX;
  UP.FullUnrollMaxCount = UINT_MAX;
  UP.BEInsns = 2;
  UP.Partial = false;
  UP.Runtime = false;
  UP.AllowRemainder = true;
  UP.AllowExpensiveTripCount = false;
  UP.Force = false;
  UP.UpperBound = false;
  UP.AllowPeeling = false;

  // Override with any target specific settings
  TTI.getUnrollingPreferences(L, UP);

  // Apply size attributes
  if (L->getHeader()->getParent()->optForSize()) {
    UP.Threshold = UP.OptSizeThreshold;
    UP.PartialThreshold = UP.PartialOptSizeThreshold;
  }

  // Apply any user values specified by cl::opt
  if (UnrollThreshold.getNumOccurrences() > 0)
    UP.Threshold = UnrollThreshold;
  if (UnrollPartialThreshold.getNumOccurrences() > 0)
    UP.PartialThreshold = UnrollPartialThreshold;
  if (UnrollMaxPercentThresholdBoost.getNumOccurrences() > 0)
    UP.MaxPercentThresholdBoost = UnrollMaxPercentThresholdBoost;
  if (UnrollMaxCount.getNumOccurrences() > 0)
    UP.MaxCount = UnrollMaxCount;
  if (UnrollFullMaxCount.getNumOccurrences() > 0)
    UP.FullUnrollMaxCount = UnrollFullMaxCount;
  if (UnrollAllowPartial.getNumOccurrences() > 0)
    UP.Partial = UnrollAllowPartial;
  if (UnrollAllowRemainder.getNumOccurrences() > 0)
    UP.AllowRemainder = UnrollAllowRemainder;
  if (UnrollRuntime.getNumOccurrences() > 0)
    UP.Runtime = UnrollRuntime;
  if (UnrollMaxUpperBound == 0)
    UP.UpperBound = false;
  if (UnrollAllowPeeling.getNumOccurrences() > 0)
    UP.AllowPeeling = UnrollAllowPeeling;

  // Apply user values provided by argument
  if (UserThreshold.hasValue()) {
    UP.Threshold = *UserThreshold;
    UP.PartialThreshold = *UserThreshold;
  }
  if (UserCount.hasValue())
    UP.Count = *UserCount;
  if (UserAllowPartial.hasValue())
    UP.Partial = *UserAllowPartial;
  if (UserRuntime.hasValue())
    UP.Runtime = *UserRuntime;
  if (UserUpperBound.hasValue())
    UP.UpperBound = *UserUpperBound;

  return UP;
}
Example #25
void ObjectLoadListener::getDebugInfoForLocals(
    DWARFContextInMemory &DwarfContext, uint64_t Addr, uint64_t Size) {
  for (const auto &CU : DwarfContext.compile_units()) {
    const DWARFDebugInfoEntryMinimal *UnitDIE = CU->getUnitDIE(false);
    const DWARFDebugInfoEntryMinimal *SubprogramDIE = getSubprogramDIE(UnitDIE);

    ICorDebugInfo::RegNum FrameBaseRegister = ICorDebugInfo::REGNUM_COUNT;
    DWARFFormValue FormValue;

    // Find the frame_base register value
    if (SubprogramDIE->getAttributeValue(CU.get(), dwarf::DW_AT_frame_base,
                                         FormValue)) {
      Optional<ArrayRef<uint8_t>> FormValues = FormValue.getAsBlock();
      if (FormValues && !FormValues->empty())
        FrameBaseRegister = mapDwarfRegisterToRegNum(FormValues->back());
    }

    if (SubprogramDIE->getAttributeValue(CU.get(), dwarf::DW_AT_low_pc,
                                         FormValue)) {
      Optional<uint64_t> FormAddress = FormValue.getAsAddress(CU.get());

      // If the form's address doesn't match the address of the function being
      // processed, don't collect debug info for locals, since they don't
      // belong to the current function.
      if (FormAddress.getValue() != Addr) {
        return;
      }
    }

    std::vector<uint64_t> Offsets;
    extractLocalLocations(CU.get(), SubprogramDIE, Offsets);

    // Allocate the array of NativeVarInfo objects that will be sent to the EE
    ICorDebugInfo::NativeVarInfo *LocalVars;
    unsigned SizeOfArray =
        Offsets.size() * sizeof(ICorDebugInfo::NativeVarInfo);
    if (SizeOfArray > 0) {
      LocalVars =
          (ICorDebugInfo::NativeVarInfo *)Context->JitInfo->allocateArray(
              SizeOfArray);

      unsigned CurrentDebugEntry = 0;

      for (auto &Offset : Offsets) {
        LocalVars[CurrentDebugEntry].startOffset = Addr;
        LocalVars[CurrentDebugEntry].endOffset = Addr + Size;
        LocalVars[CurrentDebugEntry].varNumber = CurrentDebugEntry;

        // Assume all locals are on the stack
        ICorDebugInfo::VarLoc VarLoc;
        VarLoc.vlType = ICorDebugInfo::VLT_STK;
        VarLoc.vlStk.vlsBaseReg = FrameBaseRegister;
        VarLoc.vlStk.vlsOffset = Offset;

        LocalVars[CurrentDebugEntry].loc = VarLoc;

        CurrentDebugEntry++;
      }

      CORINFO_METHOD_INFO *MethodInfo = Context->MethodInfo;
      CORINFO_METHOD_HANDLE MethodHandle = MethodInfo->ftn;

      Context->JitInfo->setVars(MethodHandle, Offsets.size(), LocalVars);
    }
  }
}
Example #26
void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
                                                    CheckerContext &C) const {
  if (!BT) {
    BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
                           "Objective-C pointer types"));

    ASTContext &Ctx = C.getASTContext();
    arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
    dictionaryWithObjectsAndKeysS = 
      GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
    setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
    orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);

    initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
    initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
  }

  if (!isVariadicMessage(msg))
    return;

  // We are not interested in the selector arguments since they have
  // well-defined types, so the compiler will issue a warning for them.
  unsigned variadicArgsBegin = msg.getSelector().getNumArgs();

  // We're not interested in the last argument since it has to be nil or the
  // compiler would have issued a warning for it elsewhere.
  unsigned variadicArgsEnd = msg.getNumArgs() - 1;

  if (variadicArgsEnd <= variadicArgsBegin)
    return;

  // Verify that all arguments have Objective-C types.
  Optional<ExplodedNode*> errorNode;
  ProgramStateRef state = C.getState();
  
  for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
    QualType ArgTy = msg.getArgExpr(I)->getType();
    if (ArgTy->isObjCObjectPointerType())
      continue;

    // Block pointers are treated as Objective-C pointers.
    if (ArgTy->isBlockPointerType())
      continue;

    // Ignore pointer constants.
    if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
      continue;
    
    // Ignore pointer types annotated with 'NSObject' attribute.
    if (C.getASTContext().isObjCNSObjectType(ArgTy))
      continue;
    
    // Ignore CF references, which can be toll-free bridged.
    if (coreFoundation::isCFObjectRef(ArgTy))
      continue;

    // Generate only one error node to use for all bug reports.
    if (!errorNode.hasValue())
      errorNode = C.addTransition();

    if (!errorNode.getValue())
      continue;

    SmallString<128> sbuf;
    llvm::raw_svector_ostream os(sbuf);

    StringRef TypeName = GetReceiverInterfaceName(msg);
    if (!TypeName.empty())
      os << "Argument to '" << TypeName << "' method '";
    else
      os << "Argument to method '";

    os << msg.getSelector().getAsString() 
       << "' should be an Objective-C pointer type, not '";
    ArgTy.print(os, C.getLangOpts());
    os << "'";

    BugReport *R = new BugReport(*BT, os.str(), errorNode.getValue());
    R->addRange(msg.getArgSourceRange(I));
    C.emitReport(R);
  }
}
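Optional<ExplodedNode*> errorNode above is deliberately two-layered: the outer optional means "we haven't tried to create the node yet", while the inner pointer can still be null if creation fails, and the one node is shared by every report. A standalone sketch of that lazy, shared initialization (names invented):

#include <optional>
#include <string>
#include <vector>

struct Node {};

std::vector<std::string> reportBadArgs(const std::vector<bool>& argIsBad,
                                       Node* (*makeNode)()) {
  std::vector<std::string> reports;
  std::optional<Node*> errorNode;  // "not attempted" vs "attempted, maybe null"
  for (std::size_t i = 0; i < argIsBad.size(); ++i) {
    if (!argIsBad[i])
      continue;
    if (!errorNode.has_value())
      errorNode = makeNode();      // create once, reuse for all reports
    if (!errorNode.value())
      continue;                    // creation failed; skip reporting
    reports.push_back("bad argument #" + std::to_string(i));
  }
  return reports;
}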
Example #27
PaintLayerPainter::PaintResult PaintLayerPainter::paintLayerWithTransform(GraphicsContext* context, const PaintLayerPaintingInfo& paintingInfo, PaintLayerFlags paintFlags)
{
    TransformationMatrix layerTransform = m_paintLayer.renderableTransform(paintingInfo.globalPaintFlags());
    // If the transform can't be inverted, then don't paint anything.
    if (!layerTransform.isInvertible())
        return FullyPainted;

    // FIXME: We should make sure that we don't walk past paintingInfo.rootLayer here.
    // m_paintLayer may be the "root", and then we should avoid looking at its parent.
    PaintLayer* parentLayer = m_paintLayer.parent();

    ClipRect ancestorBackgroundClipRect;
    if (parentLayer) {
        // Calculate the clip rectangle that the ancestors establish.
        ClipRectsContext clipRectsContext(paintingInfo.rootLayer, (paintFlags & PaintLayerUncachedClipRects) ? UncachedClipRects : PaintingClipRects, IgnoreOverlayScrollbarSize);
        if (shouldRespectOverflowClip(paintFlags, m_paintLayer.layoutObject()) == IgnoreOverflowClip)
            clipRectsContext.setIgnoreOverflowClip();
        ancestorBackgroundClipRect = m_paintLayer.clipper().backgroundClipRect(clipRectsContext);
    }

    PaintLayer* paginationLayer = m_paintLayer.enclosingPaginationLayer();
    PaintLayerFragments fragments;
    if (paginationLayer) {
        // FIXME: This is a mess. Look closely at this code and the code in Layer and fix any
        // issues in it & refactor to make it obvious from code structure what it does and that it's
        // correct.
        ClipRectsCacheSlot cacheSlot = (paintFlags & PaintLayerUncachedClipRects) ? UncachedClipRects : PaintingClipRects;
        ShouldRespectOverflowClip respectOverflowClip = shouldRespectOverflowClip(paintFlags, m_paintLayer.layoutObject());
        // Calculate the transformed bounding box in the current coordinate space, to figure out
        // which fragmentainers (e.g. columns) we need to visit.
        LayoutRect transformedExtent = PaintLayer::transparencyClipBox(&m_paintLayer, paginationLayer, PaintLayer::PaintingTransparencyClipBox, PaintLayer::RootOfTransparencyClipBox, paintingInfo.subPixelAccumulation, paintingInfo.globalPaintFlags());
        // FIXME: we don't check if paginationLayer is within paintingInfo.rootLayer here.
        paginationLayer->collectFragments(fragments, paintingInfo.rootLayer, paintingInfo.paintDirtyRect, cacheSlot, IgnoreOverlayScrollbarSize, respectOverflowClip, 0, paintingInfo.subPixelAccumulation, &transformedExtent);
    } else {
        // We don't need to collect any fragments in the regular way here. We have already
        // calculated a clip rectangle for the ancestry if it was needed, and clipping this
        // layer is something that can be done further down the path, when the transform has
        // been applied.
        PaintLayerFragment fragment;
        fragment.backgroundRect = paintingInfo.paintDirtyRect;
        fragments.append(fragment);
    }

    bool needsScope = fragments.size() > 1;
    PaintResult result = FullyPainted;
    for (const auto& fragment : fragments) {
        Optional<ScopeRecorder> scopeRecorder;
        if (needsScope)
            scopeRecorder.emplace(*context);
        Optional<LayerClipRecorder> clipRecorder;
        if (parentLayer) {
            ClipRect clipRectForFragment(ancestorBackgroundClipRect);
            clipRectForFragment.moveBy(fragment.paginationOffset);
            clipRectForFragment.intersect(fragment.backgroundRect);
            if (clipRectForFragment.isEmpty())
                continue;
            if (needsToClip(paintingInfo, clipRectForFragment)) {
                if (m_paintLayer.layoutObject()->style()->position() != StaticPosition && clipRectForFragment.isClippedByClipCss())
                    UseCounter::count(m_paintLayer.layoutObject()->document(), UseCounter::ClipCssOfPositionedElement);
                clipRecorder.emplace(*context, *parentLayer->layoutObject(), DisplayItem::ClipLayerParent, clipRectForFragment, &paintingInfo, fragment.paginationOffset, paintFlags);
            }
        }
        if (paintFragmentByApplyingTransform(context, paintingInfo, paintFlags, fragment.paginationOffset) == MaybeNotFullyPainted)
            result = MaybeNotFullyPainted;
    }
    return result;
}
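The Optional<ScopeRecorder> and Optional<LayerClipRecorder> locals above are conditionally engaged RAII: the optional stays empty unless this fragment needs the scope, emplace() constructs the guard in place, and the destructor fires at the end of each loop body either way. A minimal sketch of the idiom (the recorder type is a stand-in):

#include <cstdio>
#include <optional>

// Stand-in RAII guard: begins on construction, ends on destruction.
struct ScopedRecorder {
  explicit ScopedRecorder(int fragment) { std::printf("begin %d\n", fragment); }
  ~ScopedRecorder() { std::printf("end\n"); }
};

void paintFragments(int fragmentCount) {
  bool needsScope = fragmentCount > 1;
  for (int i = 0; i < fragmentCount; ++i) {
    std::optional<ScopedRecorder> scope;  // disengaged by default
    if (needsScope)
      scope.emplace(i);                   // engage only when required
    // ... paint fragment i; the guard, if engaged, closes here ...
  }
}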
Example #28
already_AddRefed<HTMLOptionElement>
HTMLOptionElement::Option(const GlobalObject& aGlobal,
                          const Optional<nsAString>& aText,
                          const Optional<nsAString>& aValue,
                          const Optional<bool>& aDefaultSelected,
                          const Optional<bool>& aSelected, ErrorResult& aError)
{
  nsCOMPtr<nsPIDOMWindow> win = do_QueryInterface(aGlobal.GetAsSupports());
  nsIDocument* doc;
  if (!win || !(doc = win->GetExtantDoc())) {
    aError.Throw(NS_ERROR_FAILURE);
    return nullptr;
  }

  already_AddRefed<mozilla::dom::NodeInfo> nodeInfo =
    doc->NodeInfoManager()->GetNodeInfo(nsGkAtoms::option, nullptr,
                                        kNameSpaceID_XHTML,
                                        nsIDOMNode::ELEMENT_NODE);

  nsRefPtr<HTMLOptionElement> option = new HTMLOptionElement(nodeInfo);

  if (aText.WasPassed()) {
    // Create a new text node and append it to the option
    nsRefPtr<nsTextNode> textContent =
      new nsTextNode(option->NodeInfo()->NodeInfoManager());

    textContent->SetText(aText.Value(), false);

    aError = option->AppendChildTo(textContent, false);
    if (aError.Failed()) {
      return nullptr;
    }

    if (aValue.WasPassed()) {
      // Set the value attribute for this element. We're calling SetAttr
      // directly because we want to pass aNotify == false.
      aError = option->SetAttr(kNameSpaceID_None, nsGkAtoms::value,
                               aValue.Value(), false);
      if (aError.Failed()) {
        return nullptr;
      }

      if (aDefaultSelected.WasPassed()) {
        if (aDefaultSelected.Value()) {
          // We're calling SetAttr directly because we want to pass
          // aNotify == false.
          aError = option->SetAttr(kNameSpaceID_None, nsGkAtoms::selected,
                                   EmptyString(), false);
          if (aError.Failed()) {
            return nullptr;
          }
        }

        if (aSelected.WasPassed()) {
          option->SetSelected(aSelected.Value(), aError);
          if (aError.Failed()) {
            return nullptr;
          }
        }
      }
    }
  }

  return option.forget();
}
Example #29
void PartPainter::paint(const PaintInfo& paintInfo,
                        const LayoutPoint& paintOffset) {
  LayoutPoint adjustedPaintOffset = paintOffset + m_layoutPart.location();
  if (!ReplacedPainter(m_layoutPart)
           .shouldPaint(paintInfo, adjustedPaintOffset))
    return;

  LayoutRect borderRect(adjustedPaintOffset, m_layoutPart.size());

  if (m_layoutPart.hasBoxDecorationBackground() &&
      (paintInfo.phase == PaintPhaseForeground ||
       paintInfo.phase == PaintPhaseSelection))
    BoxPainter(m_layoutPart)
        .paintBoxDecorationBackground(paintInfo, adjustedPaintOffset);

  if (paintInfo.phase == PaintPhaseMask) {
    BoxPainter(m_layoutPart).paintMask(paintInfo, adjustedPaintOffset);
    return;
  }

  if (shouldPaintSelfOutline(paintInfo.phase))
    ObjectPainter(m_layoutPart).paintOutline(paintInfo, adjustedPaintOffset);

  if (paintInfo.phase != PaintPhaseForeground)
    return;

  if (m_layoutPart.widget()) {
    // TODO(schenney) crbug.com/93805 Speculative release assert to verify that
    // the crashes we see in widget painting are due to a destroyed LayoutPart
    // object.
    CHECK(m_layoutPart.node());
    Optional<RoundedInnerRectClipper> clipper;
    if (m_layoutPart.style()->hasBorderRadius()) {
      if (borderRect.isEmpty())
        return;

      FloatRoundedRect roundedInnerRect =
          m_layoutPart.style()->getRoundedInnerBorderFor(
              borderRect,
              LayoutRectOutsets(
                  -(m_layoutPart.paddingTop() + m_layoutPart.borderTop()),
                  -(m_layoutPart.paddingRight() + m_layoutPart.borderRight()),
                  -(m_layoutPart.paddingBottom() + m_layoutPart.borderBottom()),
                  -(m_layoutPart.paddingLeft() + m_layoutPart.borderLeft())),
              true, true);
      clipper.emplace(m_layoutPart, paintInfo, borderRect, roundedInnerRect,
                      ApplyToDisplayList);
    }

    m_layoutPart.paintContents(paintInfo, paintOffset);
  }

  // Paint a partially transparent wash over selected widgets.
  if (isSelected() && !paintInfo.isPrinting() &&
      !LayoutObjectDrawingRecorder::useCachedDrawingIfPossible(
          paintInfo.context, m_layoutPart, paintInfo.phase)) {
    LayoutRect rect = m_layoutPart.localSelectionRect();
    rect.moveBy(adjustedPaintOffset);
    IntRect selectionRect = pixelSnappedIntRect(rect);
    LayoutObjectDrawingRecorder drawingRecorder(paintInfo.context, m_layoutPart,
                                                paintInfo.phase, selectionRect);
    paintInfo.context.fillRect(selectionRect,
                               m_layoutPart.selectionBackgroundColor());
  }

  if (m_layoutPart.canResize())
    ScrollableAreaPainter(*m_layoutPart.layer()->getScrollableArea())
        .paintResizer(paintInfo.context, roundedIntPoint(adjustedPaintOffset),
                      paintInfo.cullRect());
}
Example #30
already_AddRefed<IDBRequest>
IDBIndex::GetAllInternal(bool aKeysOnly,
                         JSContext* aCx,
                         JS::Handle<JS::Value> aKey,
                         const Optional<uint32_t>& aLimit,
                         ErrorResult& aRv)
{
  AssertIsOnOwningThread();

  if (mDeletedMetadata) {
    aRv.Throw(NS_ERROR_DOM_INDEXEDDB_NOT_ALLOWED_ERR);
    return nullptr;
  }

  IDBTransaction* transaction = mObjectStore->Transaction();
  if (!transaction->IsOpen()) {
    aRv.Throw(NS_ERROR_DOM_INDEXEDDB_TRANSACTION_INACTIVE_ERR);
    return nullptr;
  }

  RefPtr<IDBKeyRange> keyRange;
  aRv = IDBKeyRange::FromJSVal(aCx, aKey, getter_AddRefs(keyRange));
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  const int64_t objectStoreId = mObjectStore->Id();
  const int64_t indexId = Id();

  OptionalKeyRange optionalKeyRange;
  if (keyRange) {
    SerializedKeyRange serializedKeyRange;
    keyRange->ToSerialized(serializedKeyRange);
    optionalKeyRange = serializedKeyRange;
  } else {
    optionalKeyRange = void_t();
  }

  const uint32_t limit = aLimit.WasPassed() ? aLimit.Value() : 0;

  RequestParams params;
  if (aKeysOnly) {
    params = IndexGetAllKeysParams(objectStoreId, indexId, optionalKeyRange,
                                   limit);
  } else {
    params = IndexGetAllParams(objectStoreId, indexId, optionalKeyRange, limit);
  }

  RefPtr<IDBRequest> request = GenerateRequest(aCx, this);
  MOZ_ASSERT(request);

  if (aKeysOnly) {
    IDB_LOG_MARK("IndexedDB %s: Child  Transaction[%lld] Request[%llu]: "
                   "database(%s).transaction(%s).objectStore(%s).index(%s)."
                   "getAllKeys(%s, %s)",
                 "IndexedDB %s: C T[%lld] R[%llu]: IDBIndex.getAllKeys()",
                 IDB_LOG_ID_STRING(),
                 transaction->LoggingSerialNumber(),
                 request->LoggingSerialNumber(),
                 IDB_LOG_STRINGIFY(transaction->Database()),
                 IDB_LOG_STRINGIFY(transaction),
                 IDB_LOG_STRINGIFY(mObjectStore),
                 IDB_LOG_STRINGIFY(this),
                 IDB_LOG_STRINGIFY(keyRange),
                 IDB_LOG_STRINGIFY(aLimit));
  } else {
    IDB_LOG_MARK("IndexedDB %s: Child  Transaction[%lld] Request[%llu]: "
                   "database(%s).transaction(%s).objectStore(%s).index(%s)."
                   "getAll(%s, %s)",
                 "IndexedDB %s: C T[%lld] R[%llu]: IDBIndex.getAll()",
                 IDB_LOG_ID_STRING(),
                 transaction->LoggingSerialNumber(),
                 request->LoggingSerialNumber(),
                 IDB_LOG_STRINGIFY(transaction->Database()),
                 IDB_LOG_STRINGIFY(transaction),
                 IDB_LOG_STRINGIFY(mObjectStore),
                 IDB_LOG_STRINGIFY(this),
                 IDB_LOG_STRINGIFY(keyRange),
                 IDB_LOG_STRINGIFY(aLimit));
  }

  transaction->StartRequest(request, params);

  return request.forget();
}
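The limit handling above, aLimit.WasPassed() ? aLimit.Value() : 0, is the default-on-absence step that std::optional spells value_or; 0 stays the "no limit" sentinel in the request parameters. A one-line sketch:

#include <cstdint>
#include <optional>

// Absent limit -> 0, the "no limit" sentinel used in the request params.
uint32_t resolveLimit(std::optional<uint32_t> limit) {
  return limit.value_or(0);
}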