bool
nsAttrAndChildArray::AddAttrSlot()
{
  uint32_t slotCount = AttrSlotCount();
  uint32_t childCount = ChildCount();

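  // Required buffer size in void* units: (slotCount + 1) attribute slots of
  // ATTRSIZE pointers each, followed by one pointer per child. CheckedUint32
  // rejects the request if this overflows.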
  CheckedUint32 size = slotCount;
  size += 1;
  size *= ATTRSIZE;
  size += childCount;
  if (!size.isValid()) {
    return false;
  }

  // Grow buffer if needed
  if (!(mImpl && mImpl->mBufferSize >= size.value()) &&
      !GrowBy(ATTRSIZE)) {
    return false;
  }

  void** offset = mImpl->mBuffer + slotCount * ATTRSIZE;

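  // Children are stored immediately after the attribute slots; shift them up
  // by one slot to make room for the new attribute.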
  if (childCount > 0) {
    memmove(&ATTRS(mImpl)[slotCount + 1], &ATTRS(mImpl)[slotCount],
            childCount * sizeof(nsIContent*));
  }

  SetAttrSlotCount(slotCount + 1);
  offset[0] = nullptr;
  offset[1] = nullptr;

  return true;
}
Example #2
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  mozilla::layers::TextureClient* aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                        aTime,
                                        aDuration,
                                        aKeyframe,
                                        aTimecode,
                                        aInfo.mDisplay,
                                        0));
    return v.forget();
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || !yLimit.isValid())
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));

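  // Wrap the supplied TextureClient in a GrallocImage sized to the visible
  // picture rect.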
  RefPtr<layers::GrallocImage> image = new layers::GrallocImage();
  image->AdoptData(aBuffer, aPicture.Size());
  v->mImage = image;

  return v.forget();
}
Example #3
nsresult nsRawReader::Seek(int64_t aTime, int64_t aStartTime, int64_t aEndTime, int64_t aCurrentTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(),
               "Should be on decode thread.");

  MediaResource *resource = mDecoder->GetResource();
  NS_ASSERTION(resource, "Decoder has no media resource");

  uint32_t frame = mCurrentFrame;
  if (aTime >= UINT_MAX)
    return NS_ERROR_FAILURE;
  mCurrentFrame = aTime * mFrameRate / USECS_PER_S;

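  // Frames are fixed-size and laid out sequentially after the file header, so
  // the target frame's byte offset is frame * mFrameSize + header size.
  // CheckedUint32 catches overflow of the multiplication.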
  CheckedUint32 offset = CheckedUint32(mCurrentFrame) * mFrameSize;
  offset += sizeof(nsRawVideoHeader);
  NS_ENSURE_TRUE(offset.isValid(), NS_ERROR_FAILURE);

  nsresult rv = resource->Seek(nsISeekableStream::NS_SEEK_SET, offset.value());
  NS_ENSURE_SUCCESS(rv, rv);

  mVideoQueue.Erase();

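  // Decode forward from the seek point, dropping frames that end before the
  // target time, until the queue holds a frame that ends at or after aTime.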
  while (mVideoQueue.GetSize() == 0) {
    bool keyframeSkip = false;
    if (!DecodeVideoFrame(keyframeSkip, 0)) {
      mCurrentFrame = frame;
      return NS_ERROR_FAILURE;
    }

    {
      mozilla::ReentrantMonitorAutoEnter autoMonitor(mDecoder->GetReentrantMonitor());
      if (mDecoder->GetDecodeState() ==
          nsBuiltinDecoderStateMachine::DECODER_STATE_SHUTDOWN) {
        mCurrentFrame = frame;
        return NS_ERROR_FAILURE;
      }
    }

    nsAutoPtr<VideoData> video(mVideoQueue.PeekFront());
    if (video && video->mEndTime < aTime) {
      mVideoQueue.PopFront();
      video = nullptr;
    } else {
      video.forget();
    }
  }

  return NS_OK;
}
Example #4
nsresult RawReader::SeekInternal(int64_t aTime)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(),
               "Should be on decode thread.");

  MediaResource *resource = mDecoder->GetResource();
  NS_ASSERTION(resource, "Decoder has no media resource");

  uint32_t frame = mCurrentFrame;
  if (aTime >= UINT_MAX)
    return NS_ERROR_FAILURE;
  mCurrentFrame = aTime * mFrameRate / USECS_PER_S;

  CheckedUint32 offset = CheckedUint32(mCurrentFrame) * mFrameSize;
  offset += sizeof(RawVideoHeader);
  NS_ENSURE_TRUE(offset.isValid(), NS_ERROR_FAILURE);

  nsresult rv = resource->Seek(nsISeekableStream::NS_SEEK_SET, offset.value());
  NS_ENSURE_SUCCESS(rv, rv);

  mVideoQueue.Reset();

  while (mVideoQueue.GetSize() == 0) {
    bool keyframeSkip = false;
    if (!DecodeVideoFrame(keyframeSkip, 0)) {
      mCurrentFrame = frame;
      return NS_ERROR_FAILURE;
    }

    {
      ReentrantMonitorAutoEnter autoMonitor(mDecoder->GetReentrantMonitor());
      if (mDecoder->IsShutdown()) {
        mCurrentFrame = frame;
        return NS_ERROR_FAILURE;
      }
    }

    if (mVideoQueue.PeekFront() && mVideoQueue.PeekFront()->GetEndTime() < aTime) {
      nsRefPtr<VideoData> releaseMe = mVideoQueue.PopFront();
    }
  }

  return NS_OK;
}
Example #5
bool
BlobSet::ExpandBufferSize(uint64_t aSize)
{
  if (mDataBufferLen >= mDataLen + aSize) {
    mDataLen += aSize;
    return true;
  }

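  // Grow geometrically; CheckedUint32 makes the doubling fail cleanly instead
  // of wrapping once the requested size no longer fits in 32 bits.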
  // Start at 1 or we'll loop forever.
  CheckedUint32 bufferLen =
    std::max<uint32_t>(static_cast<uint32_t>(mDataBufferLen), 1);
  while (bufferLen.isValid() && bufferLen.value() < mDataLen + aSize) {
    bufferLen *= 2;
  }

  if (!bufferLen.isValid()) {
    return false;
  }

  void* data = realloc(mData, bufferLen.value());
  if (!data) {
    return false;
  }

  mData = data;
  mDataBufferLen = bufferLen.value();
  mDataLen += aSize;
  return true;
}
    bool ExpandBufferSize(PRUint64 aSize)
    {
        if (mDataBufferLen >= mDataLen + aSize) {
            mDataLen += aSize;
            return true;
        }

        // Start at 1 or we'll loop forever.
        CheckedUint32 bufferLen = NS_MAX<PRUint32>(mDataBufferLen, 1);
        while (bufferLen.valid() && bufferLen.value() < mDataLen + aSize)
            bufferLen *= 2;

        if (!bufferLen.valid())
            return false;

        // PR_ memory functions are still fallible
        void* data = PR_Realloc(mData, bufferLen.value());
        if (!data)
            return false;

        mData = data;
        mDataBufferLen = bufferLen.value();
        mDataLen += aSize;
        return true;
    }
VideoData* VideoData::Create(VideoInfo& aInfo,
                             ImageContainer* aContainer,
                             int64_t aOffset,
                             int64_t aTime,
                             int64_t aEndTime,
                             const YCbCrBuffer& aBuffer,
                             bool aKeyframe,
                             int64_t aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                         aTime,
                                         aEndTime,
                                         aKeyframe,
                                         aTimecode,
                                         aInfo.mDisplay));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];

  // Currently our decoder only knows how to output to PLANAR_YCBCR
  // format.
  ImageFormat format[2] = {PLANAR_YCBCR, GRALLOC_PLANAR_YCBCR};
  if (IsYV12Format(Y, Cb, Cr)) {
    v->mImage = aContainer->CreateImage(format, 2);
  } else {
    v->mImage = aContainer->CreateImage(format, 1);
  }
  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == PLANAR_YCBCR ||
               v->mImage->GetFormat() == GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

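  // Describe the decoded planes for the compositor: per-plane data pointers
  // (offset past any leading padding), strides, and the visible picture
  // rectangle inside the coded frame.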
  PlanarYCbCrImage::Data data;
  data.mYChannel = Y.mData + Y.mOffset;
  data.mYSize = gfxIntSize(Y.mWidth, Y.mHeight);
  data.mYStride = Y.mStride;
  data.mYSkip = Y.mSkip;
  data.mCbChannel = Cb.mData + Cb.mOffset;
  data.mCrChannel = Cr.mData + Cr.mOffset;
  data.mCbCrSize = gfxIntSize(Cb.mWidth, Cb.mHeight);
  data.mCbCrStride = Cb.mStride;
  data.mCbSkip = Cb.mSkip;
  data.mCrSkip = Cr.mSkip;
  data.mPicX = aPicture.x;
  data.mPicY = aPicture.y;
  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mStereoMode = aInfo.mStereoMode;

  videoImage->SetDelayedConversion(true);
  videoImage->SetData(data);
  return v.forget();
}
VideoData* VideoData::Create(VideoInfo& aInfo,
                             ImageContainer* aContainer,
                             int64_t aOffset,
                             int64_t aTime,
                             int64_t aEndTime,
                             mozilla::layers::GraphicBufferLocked *aBuffer,
                             bool aKeyframe,
                             int64_t aTimecode,
                             nsIntRect aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                         aTime,
                                         aEndTime,
                                         aKeyframe,
                                         aTimecode,
                                         aInfo.mDisplay));
    return v.forget();
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || !yLimit.isValid())
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  nsAutoPtr<VideoData> v(new VideoData(aOffset,
                                       aTime,
                                       aEndTime,
                                       aKeyframe,
                                       aTimecode,
                                       aInfo.mDisplay));

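  // The frame already lives in a gralloc GraphicBuffer; wrap it in a
  // GonkIOSurfaceImage for compositing.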
  ImageFormat format = GONK_IO_SURFACE;
  v->mImage = aContainer->CreateImage(&format, 1);
  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == GONK_IO_SURFACE,
               "Wrong format?");
  typedef mozilla::layers::GonkIOSurfaceImage GonkIOSurfaceImage;
  GonkIOSurfaceImage* videoImage = static_cast<GonkIOSurfaceImage*>(v->mImage.get());
  GonkIOSurfaceImage::Data data;

  data.mPicSize = gfxIntSize(aPicture.width, aPicture.height);
  data.mGraphicBuffer = aBuffer;

  videoImage->SetData(data);

  return v.forget();
}
Example #9
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  Image* aImage,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  const YCbCrBuffer& aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aImage && !aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                        aTime,
                                        aDuration,
                                        aKeyframe,
                                        aTimecode,
                                        aInfo.mDisplay,
                                        0));
    return v.forget();
  }

  // The following situation should never happen unless there is a bug
  // in the decoder
  if (aBuffer.mPlanes[1].mWidth != aBuffer.mPlanes[2].mWidth ||
      aBuffer.mPlanes[1].mHeight != aBuffer.mPlanes[2].mHeight) {
    NS_ERROR("C planes with different sizes");
    return nullptr;
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    // In debug builds, make the error more noticeable.
    MOZ_ASSERT(false, "Empty picture rect");
    return nullptr;
  }
  if (!ValidatePlane(aBuffer.mPlanes[0]) || !ValidatePlane(aBuffer.mPlanes[1]) ||
      !ValidatePlane(aBuffer.mPlanes[2])) {
    NS_WARNING("Invalid plane size");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || xLimit.value() > aBuffer.mPlanes[0].mStride ||
      !yLimit.isValid() || yLimit.value() > aBuffer.mPlanes[0].mHeight)
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));
#ifdef MOZ_WIDGET_GONK
  const YCbCrBuffer::Plane &Y = aBuffer.mPlanes[0];
  const YCbCrBuffer::Plane &Cb = aBuffer.mPlanes[1];
  const YCbCrBuffer::Plane &Cr = aBuffer.mPlanes[2];
#endif

  if (!aImage) {
    // Currently our decoder only knows how to output to ImageFormat::PLANAR_YCBCR
    // format.
#ifdef MOZ_WIDGET_GONK
    if (IsYV12Format(Y, Cb, Cr) && !IsInEmulator()) {
      v->mImage = aContainer->CreateImage(ImageFormat::GRALLOC_PLANAR_YCBCR);
    }
#endif
    if (!v->mImage) {
      v->mImage = aContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
    }
  } else {
    v->mImage = aImage;
  }

  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::PLANAR_YCBCR ||
               v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  PlanarYCbCrImage* videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());

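  // If the caller supplied a ready-made Image, just attach the buffer
  // description to it; otherwise copy the pixel data into the image we
  // created above.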
  bool shouldCopyData = (aImage == nullptr);
  if (!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                      shouldCopyData)) {
    return nullptr;
  }

#ifdef MOZ_WIDGET_GONK
  if (!videoImage->IsValid() && !aImage && IsYV12Format(Y, Cb, Cr)) {
    // Failed to allocate gralloc. Try fallback.
    v->mImage = aContainer->CreateImage(ImageFormat::PLANAR_YCBCR);
    if (!v->mImage) {
      return nullptr;
    }
    videoImage = static_cast<PlanarYCbCrImage*>(v->mImage.get());
    if(!VideoData::SetVideoDataToImage(videoImage, aInfo, aBuffer, aPicture,
                                       true /* aCopyData */)) {
      return nullptr;
    }
  }
#endif
  return v.forget();
}
Example #10
/* static */
already_AddRefed<VideoData>
VideoData::Create(const VideoInfo& aInfo,
                  ImageContainer* aContainer,
                  int64_t aOffset,
                  int64_t aTime,
                  int64_t aDuration,
                  mozilla::layers::TextureClient* aBuffer,
                  bool aKeyframe,
                  int64_t aTimecode,
                  const IntRect& aPicture)
{
  if (!aContainer) {
    // Create a dummy VideoData with no image. This gives us something to
    // send to media streams if necessary.
    RefPtr<VideoData> v(new VideoData(aOffset,
                                        aTime,
                                        aDuration,
                                        aKeyframe,
                                        aTimecode,
                                        aInfo.mDisplay,
                                        0));
    return v.forget();
  }

  // The following situations could be triggered by invalid input
  if (aPicture.width <= 0 || aPicture.height <= 0) {
    NS_WARNING("Empty picture rect");
    return nullptr;
  }

  // Ensure the picture size specified in the headers can be extracted out of
  // the frame we've been supplied without indexing out of bounds.
  CheckedUint32 xLimit = aPicture.x + CheckedUint32(aPicture.width);
  CheckedUint32 yLimit = aPicture.y + CheckedUint32(aPicture.height);
  if (!xLimit.isValid() || !yLimit.isValid())
  {
    // The specified picture dimensions can't be contained inside the video
    // frame, we'll stomp memory if we try to copy it. Fail.
    NS_WARNING("Overflowing picture rect");
    return nullptr;
  }

  RefPtr<VideoData> v(new VideoData(aOffset,
                                      aTime,
                                      aDuration,
                                      aKeyframe,
                                      aTimecode,
                                      aInfo.mDisplay,
                                      0));

  v->mImage = aContainer->CreateImage(ImageFormat::GRALLOC_PLANAR_YCBCR);
  if (!v->mImage) {
    return nullptr;
  }
  NS_ASSERTION(v->mImage->GetFormat() == ImageFormat::GRALLOC_PLANAR_YCBCR,
               "Wrong format?");
  typedef mozilla::layers::GrallocImage GrallocImage;
  GrallocImage* videoImage = static_cast<GrallocImage*>(v->mImage.get());
  GrallocImage::GrallocData data;

  data.mPicSize = aPicture.Size();
  data.mGraphicBuffer = aBuffer;

  if (!videoImage->SetData(data)) {
    return nullptr;
  }

  return v.forget();
}
bool
nsAttrAndChildArray::GrowBy(uint32_t aGrowSize)
{
  CheckedUint32 size = 0;
  if (mImpl) {
    size += mImpl->mBufferSize;
    size += NS_IMPL_EXTRA_SIZE;
    if (!size.isValid()) {
      return false;
    }
  }

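  // minSize is the smallest allocation, in void* units, that satisfies the
  // request (the current allocation plus aGrowSize); keeping it in
  // CheckedUint32 makes an oversized request fail instead of wrapping.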
  CheckedUint32 minSize = size.value();
  minSize += aGrowSize;
  if (!minSize.isValid()) {
    return false;
  }

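  // Grow in fixed increments while the array is small; past the linear
  // threshold, round up to the next power of two instead.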
  if (minSize.value() <= ATTRCHILD_ARRAY_LINEAR_THRESHOLD) {
    do {
      size += ATTRCHILD_ARRAY_GROWSIZE;
      if (!size.isValid()) {
        return false;
      }
    } while (size.value() < minSize.value());
  }
  else {
    size = 1u << mozilla::CeilingLog2(minSize.value());
  }

  bool needToInitialize = !mImpl;
  CheckedUint32 neededSize = size;
  neededSize *= sizeof(void*);
  if (!neededSize.isValid()) {
    return false;
  }

  Impl* newImpl = static_cast<Impl*>(realloc(mImpl, neededSize.value()));
  NS_ENSURE_TRUE(newImpl, false);

  mImpl = newImpl;

  // Set initial counts if we didn't have a buffer before
  if (needToInitialize) {
    mImpl->mMappedAttrs = nullptr;
    SetAttrSlotAndChildCount(0, 0);
  }

  mImpl->mBufferSize = size.value() - NS_IMPL_EXTRA_SIZE;

  return true;
}
bool
WebGLContext::DrawElements_check(GLsizei count, GLenum type,
                                 WebGLintptr byteOffset, GLsizei primcount,
                                 const char* info, GLuint* out_upperBound)
{
    if (count < 0 || byteOffset < 0) {
        ErrorInvalidValue("%s: negative count or offset", info);
        return false;
    }

    if (primcount < 0) {
        ErrorInvalidValue("%s: negative primcount", info);
        return false;
    }

    if (!ValidateStencilParamsForDrawCall()) {
        return false;
    }

    // If count is 0, there's nothing to do.
    if (count == 0 || primcount == 0)
        return false;

    uint8_t bytesPerElem = 0;
    switch (type) {
    case LOCAL_GL_UNSIGNED_BYTE:
        bytesPerElem = 1;
        break;

    case LOCAL_GL_UNSIGNED_SHORT:
        bytesPerElem = 2;
        break;

    case LOCAL_GL_UNSIGNED_INT:
        if (IsWebGL2() || IsExtensionEnabled(WebGLExtensionID::OES_element_index_uint)) {
            bytesPerElem = 4;
        }
        break;
    }

    if (!bytesPerElem) {
        ErrorInvalidEnum("%s: Invalid `type`: 0x%04x", info, type);
        return false;
    }

    if (byteOffset % bytesPerElem != 0) {
        ErrorInvalidOperation("%s: `byteOffset` must be a multiple of the size of `type`",
                              info);
        return false;
    }

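    // Index of the first element and the total bytes the draw will read from
    // the element array buffer; CheckedUint32 catches count * bytesPerElem
    // overflowing 32 bits.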
    const GLsizei first = byteOffset / bytesPerElem;
    const CheckedUint32 checked_byteCount = bytesPerElem * CheckedUint32(count);

    if (!checked_byteCount.isValid()) {
        ErrorInvalidValue("%s: overflow in byteCount", info);
        return false;
    }

    // Any checks below this depend on a program being available.
    if (!mCurrentProgram) {
        ErrorInvalidOperation("%s: null CURRENT_PROGRAM", info);
        return false;
    }

    if (!mBoundVertexArray->mElementArrayBuffer) {
        ErrorInvalidOperation("%s: must have element array buffer binding", info);
        return false;
    }

    WebGLBuffer& elemArrayBuffer = *mBoundVertexArray->mElementArrayBuffer;

    if (!elemArrayBuffer.ByteLength()) {
        ErrorInvalidOperation("%s: bound element array buffer doesn't have any data", info);
        return false;
    }

    CheckedInt<GLsizei> checked_neededByteCount = checked_byteCount.toChecked<GLsizei>() + byteOffset;

    if (!checked_neededByteCount.isValid()) {
        ErrorInvalidOperation("%s: overflow in byteOffset+byteCount", info);
        return false;
    }

    if (uint32_t(checked_neededByteCount.value()) > elemArrayBuffer.ByteLength()) {
        ErrorInvalidOperation("%s: bound element array buffer is too small for given count and offset", info);
        return false;
    }

    if (!ValidateBufferFetching(info))
        return false;

    if (!mMaxFetchedVertices ||
        !elemArrayBuffer.Validate(type, mMaxFetchedVertices - 1, first, count, out_upperBound))
    {
        ErrorInvalidOperation(
                              "%s: bound vertex attribute buffers do not have sufficient "
                              "size for given indices from the bound element array", info);
        return false;
    }

    if (uint32_t(primcount) > mMaxFetchedInstances) {
        ErrorInvalidOperation("%s: bound instance attribute buffers do not have sufficient size for given primcount", info);
        return false;
    }

    // Bug 1008310 - Check if buffer has been used with a different previous type
    if (elemArrayBuffer.IsElementArrayUsedWithMultipleTypes()) {
        GenerateWarning("%s: bound element array buffer previously used with a type other than "
                        "%s, this will affect performance.",
                        info,
                        WebGLContext::EnumName(type));
    }

    MOZ_ASSERT(gl->IsCurrent());

    if (mBoundDrawFramebuffer) {
        if (!mBoundDrawFramebuffer->ValidateAndInitAttachments(info))
            return false;
    } else {
        ClearBackbufferIfNeeded();
    }

    if (!DoFakeVertexAttrib0(mMaxFetchedVertices)) {
        return false;
    }

    return true;
}
Example #13
nsresult RawReader::ReadMetadata(MediaInfo* aInfo,
                                 MetadataTags** aTags)
{
  NS_ASSERTION(mDecoder->OnDecodeThread(),
               "Should be on decode thread.");

  MediaResource* resource = mDecoder->GetResource();
  NS_ASSERTION(resource, "Decoder has no media resource");

  if (!ReadFromResource(resource, reinterpret_cast<uint8_t*>(&mMetadata),
                        sizeof(mMetadata)))
    return NS_ERROR_FAILURE;

  // Validate the header
  if (!(mMetadata.headerPacketID == 0 /* Packet ID of 0 for the header*/ &&
        mMetadata.codecID == RAW_ID /* "YUV" */ &&
        mMetadata.majorVersion == 0 &&
        mMetadata.minorVersion == 1))
    return NS_ERROR_FAILURE;

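  // Reject frame dimensions whose product overflows 32 bits before they are
  // used to compute picture and frame sizes below.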
  CheckedUint32 dummy = CheckedUint32(static_cast<uint32_t>(mMetadata.frameWidth)) *
                          static_cast<uint32_t>(mMetadata.frameHeight);
  NS_ENSURE_TRUE(dummy.isValid(), NS_ERROR_FAILURE);

  if (mMetadata.aspectDenominator == 0 ||
      mMetadata.framerateDenominator == 0)
    return NS_ERROR_FAILURE; // Invalid data

  // Determine and verify frame display size.
  float pixelAspectRatio = static_cast<float>(mMetadata.aspectNumerator) /
                            mMetadata.aspectDenominator;
  nsIntSize display(mMetadata.frameWidth, mMetadata.frameHeight);
  ScaleDisplayByAspectRatio(display, pixelAspectRatio);
  mPicture = nsIntRect(0, 0, mMetadata.frameWidth, mMetadata.frameHeight);
  nsIntSize frameSize(mMetadata.frameWidth, mMetadata.frameHeight);
  if (!IsValidVideoRegion(frameSize, mPicture, display)) {
    // Video track's frame sizes will overflow. Fail.
    return NS_ERROR_FAILURE;
  }

  mInfo.mVideo.mDisplay = display;

  mFrameRate = static_cast<float>(mMetadata.framerateNumerator) /
               mMetadata.framerateDenominator;

  // Make some sanity checks
  if (mFrameRate > 45 ||
      mFrameRate == 0 ||
      pixelAspectRatio == 0 ||
      mMetadata.frameWidth > 2000 ||
      mMetadata.frameHeight > 2000 ||
      mMetadata.chromaChannelBpp != 4 ||
      mMetadata.lumaChannelBpp != 8 ||
      mMetadata.colorspace != 1 /* 4:2:0 */)
    return NS_ERROR_FAILURE;

  mFrameSize = mMetadata.frameWidth * mMetadata.frameHeight *
    (mMetadata.lumaChannelBpp + mMetadata.chromaChannelBpp) / 8.0 +
    sizeof(RawPacketHeader);

  int64_t length = resource->GetLength();
  if (length != -1) {
    ReentrantMonitorAutoEnter autoMonitor(mDecoder->GetReentrantMonitor());
    mDecoder->SetMediaDuration(USECS_PER_S *
                                      (length - sizeof(RawVideoHeader)) /
                                      (mFrameSize * mFrameRate));
  }

  *aInfo = mInfo;

  *aTags = nullptr;

  return NS_OK;
}
Example #14
/*static*/ bool
TexUnpackSurface::ConvertSurface(WebGLContext* webgl, const webgl::DriverUnpackInfo* dui,
                                 gfx::DataSourceSurface* surf, bool isSurfAlphaPremult,
                                 UniqueBuffer* const out_convertedBuffer,
                                 uint8_t* const out_convertedAlignment,
                                 bool* const out_outOfMemory)
{
    *out_outOfMemory = false;

    const size_t width = surf->GetSize().width;
    const size_t height = surf->GetSize().height;

    // Source args:

    // After we call this, on OSX, our GLContext will no longer be current.
    gfx::DataSourceSurface::ScopedMap srcMap(surf, gfx::DataSourceSurface::MapType::READ);
    if (!srcMap.IsMapped())
        return false;

    const void* const srcBegin = srcMap.GetData();
    const size_t srcStride = srcMap.GetStride();

    WebGLTexelFormat srcFormat;
    if (!GetFormatForSurf(surf, &srcFormat))
        return false;

    const bool srcPremultiplied = isSurfAlphaPremult;

    // Dest args:

    WebGLTexelFormat dstFormat;
    if (!GetFormatForPackingTuple(dui->unpackFormat, dui->unpackType, &dstFormat))
        return false;

    const auto bytesPerPixel = webgl::BytesPerPixel({dui->unpackFormat, dui->unpackType});
    const size_t dstRowBytes = bytesPerPixel * width;

    const size_t dstAlignment = 8; // Just use the max!
    const size_t dstStride = RoundUpToMultipleOf(dstRowBytes, dstAlignment);

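    // Total size of the repacked destination buffer (stride * height); an
    // overflow here is reported to the caller as out-of-memory.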
    CheckedUint32 checkedDstSize = dstStride;
    checkedDstSize *= height;
    if (!checkedDstSize.isValid()) {
        *out_outOfMemory = true;
        return false;
    }

    const size_t dstSize = checkedDstSize.value();

    UniqueBuffer dstBuffer = malloc(dstSize);
    if (!dstBuffer) {
        *out_outOfMemory = true;
        return false;
    }
    void* const dstBegin = dstBuffer.get();

    gl::OriginPos srcOrigin, dstOrigin;
    OriginsForDOM(webgl, &srcOrigin, &dstOrigin);

    const bool dstPremultiplied = webgl->mPixelStore_PremultiplyAlpha;

    // And go!:
    if (!ConvertImage(width, height,
                      srcBegin, srcStride, srcOrigin, srcFormat, srcPremultiplied,
                      dstBegin, dstStride, dstOrigin, dstFormat, dstPremultiplied))
    {
        MOZ_ASSERT(false, "ConvertImage failed unexpectedly.");
        NS_ERROR("ConvertImage failed unexpectedly.");
        *out_outOfMemory = true;
        return false;
    }

    *out_convertedBuffer = Move(dstBuffer);
    *out_convertedAlignment = dstAlignment;
    return true;
}
static bool ZeroTextureData(const WebGLContext* webgl, GLuint tex,
                            TexImageTarget target, uint32_t level,
                            const webgl::FormatUsageInfo* usage, uint32_t width,
                            uint32_t height, uint32_t depth) {
  // This has two usecases:
  // 1. Lazy zeroing of uninitialized textures:
  //    a. Before draw.
  //    b. Before partial upload. (TexStorage + TexSubImage)
  // 2. Zero subrects from out-of-bounds blits. (CopyTex(Sub)Image)

  // We have no sympathy for any of these cases.

  // "Doctor, it hurts when I do this!" "Well don't do that!"
  const auto targetStr = EnumString(target.get());
  webgl->GeneratePerfWarning(
      "Tex image %s level %u is incurring lazy initialization.",
      targetStr.c_str(), level);

  gl::GLContext* gl = webgl->GL();

  GLenum scopeBindTarget;
  switch (target.get()) {
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
      scopeBindTarget = LOCAL_GL_TEXTURE_CUBE_MAP;
      break;
    default:
      scopeBindTarget = target.get();
      break;
  }
  const gl::ScopedBindTexture scopeBindTexture(gl, tex, scopeBindTarget);
  const auto& compression = usage->format->compression;
  if (compression) {
    auto sizedFormat = usage->format->sizedFormat;
    MOZ_RELEASE_ASSERT(sizedFormat, "GFX: texture sized format not set");

    const auto fnSizeInBlocks = [](CheckedUint32 pixels,
                                   uint8_t pixelsPerBlock) {
      return RoundUpToMultipleOf(pixels, pixelsPerBlock) / pixelsPerBlock;
    };

    const auto widthBlocks = fnSizeInBlocks(width, compression->blockWidth);
    const auto heightBlocks = fnSizeInBlocks(height, compression->blockHeight);

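    // Compressed formats are zeroed whole blocks at a time, so the upload size
    // is bytesPerBlock * widthBlocks * heightBlocks * depth, overflow-checked.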
    CheckedUint32 checkedByteCount = compression->bytesPerBlock;
    checkedByteCount *= widthBlocks;
    checkedByteCount *= heightBlocks;
    checkedByteCount *= depth;

    if (!checkedByteCount.isValid()) return false;

    const size_t byteCount = checkedByteCount.value();

    UniqueBuffer zeros = calloc(1, byteCount);
    if (!zeros) return false;

    ScopedUnpackReset scopedReset(webgl);
    gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 1);  // Don't bother with
                                                     // striding it well.

    const auto error =
        DoCompressedTexSubImage(gl, target.get(), level, 0, 0, 0, width, height,
                                depth, sizedFormat, byteCount, zeros.get());
    return !error;
  }

  const auto driverUnpackInfo = usage->idealUnpack;
  MOZ_RELEASE_ASSERT(driverUnpackInfo, "GFX: ideal unpack info not set.");

  if (usage->format->d) {
    // ANGLE_depth_texture does not allow uploads, so we have to clear.
    // (Restriction because of D3D9)
    // Also, depth resources are cleared to 1.0f and are always renderable, so
    // just use FB clears.
    return ClearDepthTexture(*webgl, tex, target, level, usage, depth);
  }

  const webgl::PackingInfo packing = driverUnpackInfo->ToPacking();

  const auto bytesPerPixel = webgl::BytesPerPixel(packing);

  CheckedUint32 checkedByteCount = bytesPerPixel;
  checkedByteCount *= width;
  checkedByteCount *= height;
  checkedByteCount *= depth;

  if (!checkedByteCount.isValid()) return false;

  const size_t byteCount = checkedByteCount.value();

  UniqueBuffer zeros = calloc(1, byteCount);
  if (!zeros) return false;

  ScopedUnpackReset scopedReset(webgl);
  gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT,
                   1);  // Don't bother with striding it well.
  const auto error = DoTexSubImage(gl, target, level, 0, 0, 0, width, height,
                                   depth, packing, zeros.get());
  return !error;
}
Example #16
bool
nsTextFragment::Append(const char16_t* aBuffer, uint32_t aLength,
                       bool aUpdateBidi, bool aForce2b)
{
  // This is a common case because some callsites create a textnode
  // with a value by creating the node and then calling AppendData.
  if (mState.mLength == 0) {
    return SetTo(aBuffer, aLength, aUpdateBidi, aForce2b);
  }

  // Should we optimize for aData.Length() == 0?

  CheckedUint32 length = mState.mLength;
  length += aLength;

  if (!length.isValid()) {
    return false;
  }

  if (mState.mIs2b) {
    length *= sizeof(char16_t);
    if (!length.isValid()) {
      return false;
    }

    // Already a 2-byte string so the result will be too
    char16_t* buff = static_cast<char16_t*>(realloc(m2b, length.value()));
    if (!buff) {
      return false;
    }

    memcpy(buff + mState.mLength, aBuffer, aLength * sizeof(char16_t));
    mState.mLength += aLength;
    m2b = buff;

    if (aUpdateBidi) {
      UpdateBidiFlag(aBuffer, aLength);
    }

    return true;
  }

  // Current string is a 1-byte string, check if the new data fits in one byte too.
  int32_t first16bit = aForce2b ? 0 : FirstNon8Bit(aBuffer, aBuffer + aLength);

  if (first16bit != -1) { // aBuffer contains a non-8bit character (or aForce2b is set)
    length *= sizeof(char16_t);
    if (!length.isValid()) {
      return false;
    }

    // The old data was 1-byte, but the new is not so we have to expand it
    // all to 2-byte
    char16_t* buff = static_cast<char16_t*>(malloc(length.value()));
    if (!buff) {
      return false;
    }

    // Copy data into buff
    LossyConvertEncoding8to16 converter(buff);
    copy_string(m1b, m1b+mState.mLength, converter);

    memcpy(buff + mState.mLength, aBuffer, aLength * sizeof(char16_t));
    mState.mLength += aLength;
    mState.mIs2b = true;

    if (mState.mInHeap) {
      free(m2b);
    }
    m2b = buff;

    mState.mInHeap = true;

    if (aUpdateBidi) {
      UpdateBidiFlag(aBuffer + first16bit, aLength - first16bit);
    }

    return true;
  }

  // The new and the old data is all 1-byte
  char* buff;
  if (mState.mInHeap) {
    buff = static_cast<char*>(realloc(const_cast<char*>(m1b), length.value()));
    if (!buff) {
      return false;
    }
  }
  else {
    buff = static_cast<char*>(malloc(length.value()));
    if (!buff) {
      return false;
    }

    memcpy(buff, m1b, mState.mLength);
    mState.mInHeap = true;
  }

  // Copy aBuffer into buff.
  LossyConvertEncoding16to8 converter(buff + mState.mLength);
  copy_string(aBuffer, aBuffer + aLength, converter);

  m1b = buff;
  mState.mLength += aLength;

  return true;
}
Example #17
static bool
ZeroTextureData(WebGLContext* webgl, const char* funcName, GLuint tex,
                TexImageTarget target, uint32_t level,
                const webgl::FormatUsageInfo* usage, uint32_t width, uint32_t height,
                uint32_t depth)
{
    // This has two usecases:
    // 1. Lazy zeroing of uninitialized textures:
    //    a. Before draw, when FakeBlack isn't viable. (TexStorage + Draw*)
    //    b. Before partial upload. (TexStorage + TexSubImage)
    // 2. Zero subrects from out-of-bounds blits. (CopyTex(Sub)Image)

    // We have no sympathy for any of these cases.

    // "Doctor, it hurts when I do this!" "Well don't do that!"
    webgl->GenerateWarning("%s: This operation requires zeroing texture data. This is"
                           " slow.",
                           funcName);

    gl::GLContext* gl = webgl->GL();
    gl->MakeCurrent();

    GLenum scopeBindTarget;
    switch (target.get()) {
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_X:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
    case LOCAL_GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
    case LOCAL_GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
        scopeBindTarget = LOCAL_GL_TEXTURE_CUBE_MAP;
        break;
    default:
        scopeBindTarget = target.get();
        break;
    }
    const gl::ScopedBindTexture scopeBindTexture(gl, tex, scopeBindTarget);
    auto compression = usage->format->compression;
    if (compression) {
        auto sizedFormat = usage->format->sizedFormat;
        MOZ_RELEASE_ASSERT(sizedFormat, "GFX: texture sized format not set");

        const auto fnSizeInBlocks = [](CheckedUint32 pixels, uint8_t pixelsPerBlock) {
            return RoundUpToMultipleOf(pixels, pixelsPerBlock) / pixelsPerBlock;
        };

        const auto widthBlocks = fnSizeInBlocks(width, compression->blockWidth);
        const auto heightBlocks = fnSizeInBlocks(height, compression->blockHeight);

        CheckedUint32 checkedByteCount = compression->bytesPerBlock;
        checkedByteCount *= widthBlocks;
        checkedByteCount *= heightBlocks;
        checkedByteCount *= depth;

        if (!checkedByteCount.isValid())
            return false;

        const size_t byteCount = checkedByteCount.value();

        UniqueBuffer zeros = calloc(1, byteCount);
        if (!zeros)
            return false;

        ScopedUnpackReset scopedReset(webgl);
        gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 1); // Don't bother with striding it
                                                        // well.

        const auto error = DoCompressedTexSubImage(gl, target.get(), level, 0, 0, 0,
                                                   width, height, depth, sizedFormat,
                                                   byteCount, zeros.get());
        return !error;
    }

    const auto driverUnpackInfo = usage->idealUnpack;
    MOZ_RELEASE_ASSERT(driverUnpackInfo, "GFX: ideal unpack info not set.");

    if (webgl->IsExtensionEnabled(WebGLExtensionID::WEBGL_depth_texture) &&
        gl->IsANGLE() &&
        usage->format->d)
    {
        // ANGLE_depth_texture does not allow uploads, so we have to clear.
        // (Restriction because of D3D9)
        MOZ_ASSERT(target == LOCAL_GL_TEXTURE_2D);
        MOZ_ASSERT(level == 0);
        ZeroANGLEDepthTexture(webgl, tex, usage, width, height);
        return true;
    }

    const webgl::PackingInfo packing = driverUnpackInfo->ToPacking();

    const auto bytesPerPixel = webgl::BytesPerPixel(packing);

    CheckedUint32 checkedByteCount = bytesPerPixel;
    checkedByteCount *= width;
    checkedByteCount *= height;
    checkedByteCount *= depth;

    if (!checkedByteCount.isValid())
        return false;

    const size_t byteCount = checkedByteCount.value();

    UniqueBuffer zeros = calloc(1, byteCount);
    if (!zeros)
        return false;

    ScopedUnpackReset scopedReset(webgl);
    gl->fPixelStorei(LOCAL_GL_UNPACK_ALIGNMENT, 1); // Don't bother with striding it well.
    const auto error = DoTexSubImage(gl, target, level, 0, 0, 0, width, height, depth,
                                     packing, zeros.get());
    return !error;
}