Example #1
// The text that should be displayed for this counter.
void
nsCounterUseNode::GetText(nsString& aResult)
{
  aResult.Truncate();

  AutoTArray<nsCounterNode*, 8> stack;
  stack.AppendElement(static_cast<nsCounterNode*>(this));

  if (mAllCounters && mScopeStart) {
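    // For counters(), collect one node from each enclosing counter scope so
    // the loop below can print every scope's value, outermost first, joined
    // by mSeparator.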
    for (nsCounterNode* n = mScopeStart; n->mScopePrev; n = n->mScopeStart) {
      stack.AppendElement(n->mScopePrev);
    }
  }

  WritingMode wm = mPseudoFrame ?
    mPseudoFrame->GetWritingMode() : WritingMode();
  for (uint32_t i = stack.Length() - 1;; --i) {
    nsCounterNode* n = stack[i];
    nsAutoString text;
    bool isTextRTL;
    mCounterStyle->GetCounterText(n->mValueAfter, wm, text, isTextRTL);
    aResult.Append(text);
    if (i == 0) {
      break;
    }
    aResult.Append(mSeparator);
  }
}
Example #2
void
HTMLOptionsCollection::GetSupportedNames(nsTArray<nsString>& aNames)
{
  AutoTArray<nsIAtom*, 8> atoms;
  for (uint32_t i = 0; i < mElements.Length(); ++i) {
    HTMLOptionElement* content = mElements.ElementAt(i);
    if (content) {
      // Note: HasName means the name is exposed on the document,
      // which is false for options, so we don't check it here.
      const nsAttrValue* val = content->GetParsedAttr(nsGkAtoms::name);
      if (val && val->Type() == nsAttrValue::eAtom) {
        nsIAtom* name = val->GetAtomValue();
        if (!atoms.Contains(name)) {
          atoms.AppendElement(name);
        }
      }
      if (content->HasID()) {
        nsIAtom* id = content->GetID();
        if (!atoms.Contains(id)) {
          atoms.AppendElement(id);
        }
      }
    }
  }

  uint32_t atomsLen = atoms.Length();
  nsString* names = aNames.AppendElements(atomsLen);
  for (uint32_t i = 0; i < atomsLen; ++i) {
    atoms[i]->ToString(names[i]);
  }
}
Example #3
/* static */ void
nsFontFaceUtils::MarkDirtyForFontChange(nsIFrame* aSubtreeRoot,
                                        const gfxUserFontEntry* aFont)
{
  AutoTArray<nsIFrame*, 4> subtrees;
  subtrees.AppendElement(aSubtreeRoot);

  nsIPresShell* ps = aSubtreeRoot->PresContext()->PresShell();

  // check descendants, iterating over subtrees that may include
  // additional subtrees associated with placeholders
  do {
    nsIFrame* subtreeRoot = subtrees.ElementAt(subtrees.Length() - 1);
    subtrees.RemoveElementAt(subtrees.Length() - 1);

    // Check all descendants to see if they use the font
    AutoTArray<nsIFrame*, 32> stack;
    stack.AppendElement(subtreeRoot);

    do {
      nsIFrame* f = stack.ElementAt(stack.Length() - 1);
      stack.RemoveElementAt(stack.Length() - 1);

      // if this frame uses the font, mark its descendants dirty
      // and skip checking its children
      if (FrameUsesFont(f, aFont)) {
        ScheduleReflow(ps, f);
      } else {
        if (f->GetType() == nsGkAtoms::placeholderFrame) {
          nsIFrame* oof = nsPlaceholderFrame::GetRealFrameForPlaceholder(f);
          if (!nsLayoutUtils::IsProperAncestorFrame(subtreeRoot, oof)) {
            // We have another distinct subtree we need to mark.
            subtrees.AppendElement(oof);
          }
        }

        nsIFrame::ChildListIterator lists(f);
        for (; !lists.IsDone(); lists.Next()) {
          nsFrameList::Enumerator childFrames(lists.CurrentList());
          for (; !childFrames.AtEnd(); childFrames.Next()) {
            nsIFrame* kid = childFrames.get();
            stack.AppendElement(kid);
          }
        }
      }
    } while (!stack.IsEmpty());
  } while (!subtrees.IsEmpty());
}
Example #4
void
nsIGlobalObject::ForEachEventTargetObject(const std::function<void(DOMEventTargetHelper*, bool* aDoneOut)>& aFunc) const
{
  // Protect against the function call triggering a mutation of the list
  // while we are iterating by copying the DETH references to a temporary
  // list.
  AutoTArray<RefPtr<DOMEventTargetHelper>, 64> targetList;
  for (const DOMEventTargetHelper* deth = mEventTargetObjects.getFirst();
       deth; deth = deth->getNext()) {
    targetList.AppendElement(const_cast<DOMEventTargetHelper*>(deth));
  }

  // Iterate the target list and call the function on each one.
  bool done = false;
  for (auto target : targetList) {
    // Check to see if a previous iteration's callback triggered the removal
    // of this target as a side-effect.  If it did, then just ignore it.
    if (target->GetOwnerGlobal() != this) {
      continue;
    }
    aFunc(target, &done);
    if (done) {
      break;
    }
  }
}
Example #5
bool
nsDOMTokenList::Toggle(const nsAString& aToken,
                       const Optional<bool>& aForce,
                       ErrorResult& aError)
{
  aError = CheckToken(aToken);
  if (aError.Failed()) {
    return false;
  }

  const nsAttrValue* attr = GetParsedAttr();
  const bool forceOn = aForce.WasPassed() && aForce.Value();
  const bool forceOff = aForce.WasPassed() && !aForce.Value();

  bool isPresent = attr && attr->Contains(aToken);
  AutoTArray<nsString, 1> tokens;
  (*tokens.AppendElement()).Rebind(aToken.Data(), aToken.Length());

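  // A passed force argument pins the direction: force=true can only add the
  // token and force=false can only remove it; without it this is a plain
  // toggle.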
  if (isPresent) {
    if (!forceOn) {
      RemoveInternal(attr, tokens);
      isPresent = false;
    }
  } else {
    if (!forceOff) {
      AddInternal(attr, tokens);
      isPresent = true;
    }
  }

  return isPresent;
}
Example #6
void
nsDOMTokenList::Remove(const nsAString& aToken, mozilla::ErrorResult& aError)
{
  AutoTArray<nsString, 1> tokens;
  tokens.AppendElement(aToken);
  Remove(tokens, aError);
}
Example #7
void
nsDOMTokenList::Add(const nsAString& aToken, ErrorResult& aError)
{
  AutoTArray<nsString, 1> tokens;
  tokens.AppendElement(aToken);
  Add(tokens, aError);
}
Example #8
already_AddRefed<Layer>
ImageBitmapRenderingContext::GetCanvasLayer(nsDisplayListBuilder* aBuilder,
                                            Layer* aOldLayer,
                                            LayerManager* aManager)
{
  if (!mImage) {
    // No DidTransactionCallback will be received, so mark the context clean
    // now so future invalidations will be dispatched.
    MarkContextClean();
    return nullptr;
  }

  RefPtr<ImageLayer> imageLayer;

  if (aOldLayer) {
    imageLayer = static_cast<ImageLayer*>(aOldLayer);
  } else {
    imageLayer = aManager->CreateImageLayer();
  }

  RefPtr<ImageContainer> imageContainer = imageLayer->GetContainer();
  if (!imageContainer) {
    imageContainer = aManager->CreateImageContainer();
    imageLayer->SetContainer(imageContainer);
  }

  AutoTArray<ImageContainer::NonOwningImage, 1> imageList;
  RefPtr<layers::Image> image = ClipToIntrinsicSize();
  imageList.AppendElement(ImageContainer::NonOwningImage(image));
  imageContainer->SetCurrentImages(imageList);

  return imageLayer.forget();
}
Example #9
void
ImageContainer::SetCurrentImageInTransaction(Image *aImage)
{
  AutoTArray<NonOwningImage,1> images;
  images.AppendElement(NonOwningImage(aImage));
  SetCurrentImagesInTransaction(images);
}
Example #10
void
ContentClientRemoteBuffer::Updated(const nsIntRegion& aRegionToDraw,
                                   const nsIntRegion& aVisibleRegion,
                                   bool aDidSelfCopy)
{
  nsIntRegion updatedRegion = GetUpdatedRegion(aRegionToDraw,
                                               aVisibleRegion,
                                               aDidSelfCopy);

  MOZ_ASSERT(mTextureClient);
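  // Component alpha needs the on-black and on-white textures forwarded
  // together; otherwise forward the single texture with its picture rect.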
  if (mTextureClientOnWhite) {
    mForwarder->UseComponentAlphaTextures(this, mTextureClient,
                                          mTextureClientOnWhite);
  } else {
    AutoTArray<CompositableForwarder::TimedTextureClient,1> textures;
    CompositableForwarder::TimedTextureClient* t = textures.AppendElement();
    t->mTextureClient = mTextureClient;
    IntSize size = mTextureClient->GetSize();
    t->mPictureRect = nsIntRect(0, 0, size.width, size.height);
    GetForwarder()->UseTextures(this, textures);
  }
  mForwarder->UpdateTextureRegion(this,
                                  ThebesBufferData(BufferRect(),
                                                   BufferRotation()),
                                  updatedRegion);
}
Example #11
Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
{
    float scale = 1;

    AutoTArray<const float*,4> irChannels;
    for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) {
        irChannels.AppendElement(impulseResponse->GetData(i));
    }
    AutoTArray<float,1024> tempBuf;

    if (normalize) {
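        // Compute the normalization factor and, if it is non-zero, scale a
        // temporary copy of each channel so the caller's impulse-response
        // buffers are left untouched.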
        scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate);

        if (scale) {
            tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
            for (uint32_t i = 0; i < irChannels.Length(); ++i) {
                float* buf = &tempBuf[i*impulseResponseBufferLength];
                AudioBufferCopyWithScale(irChannels[i], scale, buf,
                                         impulseResponseBufferLength);
                irChannels[i] = buf;
            }
        }
    }

    initialize(irChannels, impulseResponseBufferLength,
               maxFFTSize, useBackgroundThreads);
}
Example #12
bool 
MsgMapiListContext::DeleteMessage(nsMsgKey key)
{
  if (!m_db)
    return FALSE;
  
  if ( !IsIMAPHost() )
  {
    return NS_SUCCEEDED((m_db->DeleteMessages(1, &key, nullptr)));
  }
#if 0 
  else if ( m_folder->GetIMAPFolderInfoMail() )
  {
    AutoTArray<nsMsgKey, 1> messageKeys;
    messageKeys.AppendElement(key);

    (m_folder->GetIMAPFolderInfoMail())->DeleteSpecifiedMessages(pane, messageKeys, nsMsgKey_None);
    m_db->DeleteMessage(key, nullptr, FALSE);
    return TRUE;
  }
#endif
  else
  {
    return FALSE;
  }
}
Example #13
void
nsShmImage::Put(const mozilla::LayoutDeviceIntRegion& aRegion)
{
  AutoTArray<xcb_rectangle_t, 32> xrects;
  xrects.SetCapacity(aRegion.GetNumRects());

  for (auto iter = aRegion.RectIter(); !iter.Done(); iter.Next()) {
    const mozilla::LayoutDeviceIntRect &r = iter.Get();
    xcb_rectangle_t xrect = { (short)r.x, (short)r.y, (unsigned short)r.width, (unsigned short)r.height };
    xrects.AppendElement(xrect);
  }

  if (!mGC) {
    mGC = xcb_generate_id(mConnection);
    xcb_create_gc(mConnection, mGC, mWindow, 0, nullptr);
  }

  xcb_set_clip_rectangles(mConnection, XCB_CLIP_ORDERING_YX_BANDED, mGC, 0, 0,
                          xrects.Length(), xrects.Elements());

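  // If the shared memory is bound to a pixmap, blit from the pixmap;
  // otherwise push the image data to the window directly from the shm
  // segment.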
  if (mPixmap != XCB_NONE) {
    mLastRequest = xcb_copy_area_checked(mConnection, mPixmap, mWindow, mGC,
                                         0, 0, 0, 0, mSize.width, mSize.height);
  } else {
    mLastRequest = xcb_shm_put_image_checked(mConnection, mWindow, mGC,
                                             mSize.width, mSize.height,
                                             0, 0, mSize.width, mSize.height,
                                             0, 0, mDepth,
                                             XCB_IMAGE_FORMAT_Z_PIXMAP, 0,
                                             mShmSeg, 0);
  }

  xcb_flush(mConnection);
}
Example #14
void
VideoSink::RenderVideoFrames(int32_t aMaxFrames,
                             int64_t aClockTime,
                             const TimeStamp& aClockTimeStamp)
{
  AssertOwnerThread();

  AutoTArray<RefPtr<MediaData>,16> frames;
  VideoQueue().GetFirstElements(aMaxFrames, &frames);
  if (frames.IsEmpty() || !mContainer) {
    return;
  }

  AutoTArray<ImageContainer::NonOwningImage,16> images;
  TimeStamp lastFrameTime;
  MediaSink::PlaybackParams params = mAudioSink->GetPlaybackParams();
  for (uint32_t i = 0; i < frames.Length(); ++i) {
    VideoData* frame = frames[i]->As<VideoData>();

    frame->mSentToCompositor = true;

    if (!frame->mImage || !frame->mImage->IsValid() ||
        !frame->mImage->GetSize().width || !frame->mImage->GetSize().height) {
      continue;
    }

    int64_t frameTime = frame->mTime;
    if (frameTime < 0) {
      // Frame times before the start time are invalid; drop such frames
      continue;
    }

    TimeStamp t;
    if (aMaxFrames > 1) {
      MOZ_ASSERT(!aClockTimeStamp.IsNull());
      int64_t delta = frame->mTime - aClockTime;
      t = aClockTimeStamp +
          TimeDuration::FromMicroseconds(delta / params.mPlaybackRate);
      if (!lastFrameTime.IsNull() && t <= lastFrameTime) {
        // Timestamps out of order; drop the new frame. In theory we should
        // probably replace the previous frame with the new frame if the
        // timestamps are equal, but this is a corrupt video file already so
        // never mind.
        continue;
      }
      lastFrameTime = t;
    }

    ImageContainer::NonOwningImage* img = images.AppendElement();
    img->mTimeStamp = t;
    img->mImage = frame->mImage;
    img->mFrameID = frame->mFrameID;
    img->mProducerID = mProducerID;

    VSINK_LOG_V("playing video frame %lld (id=%x) (vq-queued=%i)",
                frame->mTime, frame->mFrameID, VideoQueue().GetSize());
  }
  mContainer->SetCurrentFrames(frames[0]->As<VideoData>()->mDisplay, images);
}
Example #15
void
AudioNodeStream::ObtainInputBlock(AudioBlock& aTmpChunk,
                                  uint32_t aPortIndex)
{
  uint32_t inputCount = mInputs.Length();
  uint32_t outputChannelCount = 1;
  AutoTArray<const AudioBlock*,250> inputChunks;
  for (uint32_t i = 0; i < inputCount; ++i) {
    if (aPortIndex != mInputs[i]->InputNumber()) {
      // This input is connected to a different port
      continue;
    }
    MediaStream* s = mInputs[i]->GetSource();
    AudioNodeStream* a = static_cast<AudioNodeStream*>(s);
    MOZ_ASSERT(a == s->AsAudioNodeStream());
    if (a->IsAudioParamStream()) {
      continue;
    }

    const AudioBlock* chunk = &a->mLastChunks[mInputs[i]->OutputNumber()];
    MOZ_ASSERT(chunk);
    if (chunk->IsNull() || chunk->mChannelData.IsEmpty()) {
      continue;
    }

    inputChunks.AppendElement(chunk);
    outputChannelCount =
      GetAudioChannelsSuperset(outputChannelCount, chunk->ChannelCount());
  }

  outputChannelCount = ComputedNumberOfChannels(outputChannelCount);

  uint32_t inputChunkCount = inputChunks.Length();
  if (inputChunkCount == 0 ||
      (inputChunkCount == 1 && inputChunks[0]->ChannelCount() == 0)) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  if (inputChunkCount == 1 &&
      inputChunks[0]->ChannelCount() == outputChannelCount) {
    aTmpChunk = *inputChunks[0];
    return;
  }

  if (outputChannelCount == 0) {
    aTmpChunk.SetNull(WEBAUDIO_BLOCK_SIZE);
    return;
  }

  aTmpChunk.AllocateChannels(outputChannelCount);
  DownmixBufferType downmixBuffer;
  ASSERT_ALIGNED16(downmixBuffer.Elements());

  for (uint32_t i = 0; i < inputChunkCount; ++i) {
    AccumulateInputChunk(i, *inputChunks[i], &aTmpChunk, &downmixBuffer);
  }
}
Example #16
void
SpeechRecognition::FeedAudioData(already_AddRefed<SharedBuffer> aSamples,
                                 uint32_t aDuration,
                                 MediaStreamListener* aProvider, TrackRate aTrackRate)
{
  NS_ASSERTION(!NS_IsMainThread(),
               "FeedAudioData should not be called in the main thread");

  // Endpointer expects to receive samples in chunks whose size is a
  // multiple of its frame size.
  // Since we can't assume we will receive the frames in appropriate-sized
  // chunks, we must buffer and split them in chunks of mAudioSamplesPerChunk
  // (a multiple of Endpointer's frame size) before feeding to Endpointer.

  // ensure aSamples is deleted
  RefPtr<SharedBuffer> refSamples = aSamples;

  uint32_t samplesIndex = 0;
  const int16_t* samples = static_cast<int16_t*>(refSamples->Data());
  AutoTArray<RefPtr<SharedBuffer>, 5> chunksToSend;

  // fill up our buffer and make a chunk out of it, if possible
  if (mBufferedSamples > 0) {
    samplesIndex += FillSamplesBuffer(samples, aDuration);

    if (mBufferedSamples == mAudioSamplesPerChunk) {
      chunksToSend.AppendElement(mAudioSamplesBuffer.forget());
      mBufferedSamples = 0;
    }
  }

  // create sample chunks of correct size
  if (samplesIndex < aDuration) {
    samplesIndex += SplitSamplesBuffer(samples + samplesIndex,
                                       aDuration - samplesIndex,
                                       chunksToSend);
  }

  // buffer remaining samples
  if (samplesIndex < aDuration) {
    mBufferedSamples = 0;
    mAudioSamplesBuffer =
      SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));

    FillSamplesBuffer(samples + samplesIndex, aDuration - samplesIndex);
  }

  AudioSegment* segment = CreateAudioSegment(chunksToSend);
  RefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_AUDIO_DATA);
  event->mAudioSegment = segment;
  event->mProvider = aProvider;
  event->mTrackRate = aTrackRate;
  NS_DispatchToMainThread(event);

  return;
}
Example #17
already_AddRefed<layers::ImageContainer>
RasterImage::GetImageContainer(LayerManager* aManager, uint32_t aFlags)
{
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aManager);
  MOZ_ASSERT((aFlags & ~(FLAG_SYNC_DECODE |
                         FLAG_SYNC_DECODE_IF_FAST |
                         FLAG_ASYNC_NOTIFY))
               == FLAG_NONE,
             "Unsupported flag passed to GetImageContainer");

  int32_t maxTextureSize = aManager->GetMaxTextureSize();
  if (!mHasSize ||
      mSize.width > maxTextureSize ||
      mSize.height > maxTextureSize) {
    return nullptr;
  }

  if (IsUnlocked() && mProgressTracker) {
    mProgressTracker->OnUnlockedDraw();
  }

  RefPtr<layers::ImageContainer> container = mImageContainer.get();

  bool mustRedecode =
    (aFlags & (FLAG_SYNC_DECODE | FLAG_SYNC_DECODE_IF_FAST)) &&
    mLastImageContainerDrawResult != DrawResult::SUCCESS &&
    mLastImageContainerDrawResult != DrawResult::BAD_IMAGE;

  if (container && !mustRedecode) {
    return container.forget();
  }

  // We need a new ImageContainer, so create one.
  container = LayerManager::CreateImageContainer();

  DrawResult drawResult;
  RefPtr<layers::Image> image;
  Tie(drawResult, image) = GetCurrentImage(container, aFlags);
  if (!image) {
    return nullptr;
  }

  // |image| holds a reference to a SourceSurface which in turn holds a lock on
  // the current frame's VolatileBuffer, ensuring that it doesn't get freed as
  // long as the layer system keeps this ImageContainer alive.
  AutoTArray<ImageContainer::NonOwningImage, 1> imageList;
  imageList.AppendElement(ImageContainer::NonOwningImage(image, TimeStamp(),
                                                         mLastFrameID++,
                                                         mImageProducerID));
  container->SetCurrentImagesInTransaction(imageList);

  mLastImageContainerDrawResult = drawResult;
  mImageContainer = container;

  return container.forget();
}
Example #18
void
CacheOpParent::Execute(cache::Manager* aManager)
{
  NS_ASSERT_OWNINGTHREAD(CacheOpParent);
  MOZ_ASSERT(!mManager);
  MOZ_ASSERT(!mVerifier);

  mManager = aManager;

  // Handle put op
  if (mOpArgs.type() == CacheOpArgs::TCachePutAllArgs) {
    MOZ_ASSERT(mCacheId != INVALID_CACHE_ID);

    const CachePutAllArgs& args = mOpArgs.get_CachePutAllArgs();
    const nsTArray<CacheRequestResponse>& list = args.requestResponseList();

    AutoTArray<nsCOMPtr<nsIInputStream>, 256> requestStreamList;
    AutoTArray<nsCOMPtr<nsIInputStream>, 256> responseStreamList;

    for (uint32_t i = 0; i < list.Length(); ++i) {
      requestStreamList.AppendElement(
        DeserializeCacheStream(list[i].request().body()));
      responseStreamList.AppendElement(
        DeserializeCacheStream(list[i].response().body()));
    }

    mManager->ExecutePutAll(this, mCacheId, args.requestResponseList(),
                            requestStreamList, responseStreamList);
    return;
  }

  // Handle all other cache ops
  if (mCacheId != INVALID_CACHE_ID) {
    MOZ_ASSERT(mNamespace == INVALID_NAMESPACE);
    mManager->ExecuteCacheOp(this, mCacheId, mOpArgs);
    return;
  }

  // Handle all storage ops
  MOZ_ASSERT(mNamespace != INVALID_NAMESPACE);
  mManager->ExecuteStorageOp(this, mNamespace, mOpArgs);
}
Example #19
NS_IMETHODIMP nsPrefBranch::GetChildList(const char *aStartingAt, uint32_t *aCount, char ***aChildArray)
{
  char            **outArray;
  int32_t         numPrefs;
  int32_t         dwIndex;
  AutoTArray<nsCString, 32> prefArray;

  NS_ENSURE_ARG(aStartingAt);
  NS_ENSURE_ARG_POINTER(aCount);
  NS_ENSURE_ARG_POINTER(aChildArray);

  *aChildArray = nullptr;
  *aCount = 0;

  // this will contain a list of all the pref name strings
  // allocate on the stack for speed

  const char* parent = getPrefName(aStartingAt);
  size_t parentLen = strlen(parent);
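  // Walk the pref hashtable and collect every pref whose name starts with
  // the requested prefix.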
  for (auto iter = gHashTable->Iter(); !iter.Done(); iter.Next()) {
    auto entry = static_cast<PrefHashEntry*>(iter.Get());
    if (strncmp(entry->key, parent, parentLen) == 0) {
      prefArray.AppendElement(entry->key);
    }
  }

  // now that we've built up the list, run the callback on
  // all the matching elements
  numPrefs = prefArray.Length();

  if (numPrefs) {
    outArray = (char **)moz_xmalloc(numPrefs * sizeof(char *));
    if (!outArray)
      return NS_ERROR_OUT_OF_MEMORY;

    for (dwIndex = 0; dwIndex < numPrefs; ++dwIndex) {
      // we need to lop off mPrefRoot in case the user is planning to pass this
      // back to us because if they do we are going to add mPrefRoot again.
      const nsCString& element = prefArray[dwIndex];
      outArray[dwIndex] = (char *)nsMemory::Clone(
        element.get() + mPrefRootLength, element.Length() - mPrefRootLength + 1);

      if (!outArray[dwIndex]) {
        // we ran out of memory... this is annoying
        NS_FREE_XPCOM_ALLOCATED_POINTER_ARRAY(dwIndex, outArray);
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }
    *aChildArray = outArray;
  }
  *aCount = numPrefs;

  return NS_OK;
}
Example #20
already_AddRefed<Promise>
Cache::AddAll(const GlobalObject& aGlobal,
              nsTArray<RefPtr<Request>>&& aRequestList,
              CallerType aCallerType, ErrorResult& aRv)
{
  MOZ_DIAGNOSTIC_ASSERT(mActor);

  // If there is no work to do, then resolve immediately
  if (aRequestList.IsEmpty()) {
    RefPtr<Promise> promise = Promise::Create(mGlobal, aRv);
    if (NS_WARN_IF(!promise)) {
      return nullptr;
    }

    promise->MaybeResolveWithUndefined();
    return promise.forget();
  }

  AutoTArray<RefPtr<Promise>, 256> fetchList;
  fetchList.SetCapacity(aRequestList.Length());

  // Begin fetching each request in parallel.  For now, if an error occurs just
  // abandon our previous fetch calls.  In theory we could cancel them in the
  // future once fetch supports it.

  for (uint32_t i = 0; i < aRequestList.Length(); ++i) {
    RequestOrUSVString requestOrString;
    requestOrString.SetAsRequest() = aRequestList[i];
    RefPtr<Promise> fetch = FetchRequest(mGlobal, requestOrString,
                                         RequestInit(), aCallerType, aRv);
    if (NS_WARN_IF(aRv.Failed())) {
      return nullptr;
    }

    fetchList.AppendElement(std::move(fetch));
  }

  RefPtr<Promise> promise = Promise::Create(mGlobal, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  RefPtr<FetchHandler> handler =
    new FetchHandler(mActor->GetWorkerHolder(), this,
                     std::move(aRequestList), promise);

  RefPtr<Promise> fetchPromise = Promise::All(aGlobal.Context(), fetchList, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }
  fetchPromise->AppendNativeHandler(handler);

  return promise.forget();
}
Example #21
 void Generate(AudioSegment& aSegment, const int32_t& aSamples)
 {
   RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
   int16_t* dest = static_cast<int16_t*>(buffer->Data());
   mGenerator.generate(dest, aSamples);
   AutoTArray<const int16_t*, 1> channels;
   for (int32_t i = 0; i < mChannels; i++) {
     channels.AppendElement(dest);
   }
   aSegment.AppendFrames(buffer.forget(), channels, aSamples, PRINCIPAL_HANDLE_NONE);
 }
Example #22
void
MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
                                               TrackTicks aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, aSamples);
  AutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
}
Example #23
void CacheOpChild::HandleRequestList(
    const nsTArray<CacheRequest>& aRequestList) {
  AutoTArray<RefPtr<Request>, 256> requests;
  requests.SetCapacity(aRequestList.Length());

  for (uint32_t i = 0; i < aRequestList.Length(); ++i) {
    AddWorkerHolderToStreamChild(aRequestList[i], GetWorkerHolder());
    requests.AppendElement(ToRequest(aRequestList[i]));
  }

  mPromise->MaybeResolve(requests);
}
Example #24
void
CacheOpChild::HandleResponseList(const nsTArray<CacheResponse>& aResponseList)
{
  AutoTArray<RefPtr<Response>, 256> responses;
  responses.SetCapacity(aResponseList.Length());

  for (uint32_t i = 0; i < aResponseList.Length(); ++i) {
    AddFeatureToStreamChild(aResponseList[i], GetFeature());
    responses.AppendElement(ToResponse(aResponseList[i]));
  }

  mPromise->MaybeResolve(responses);
}
Example #25
void VideoFrameContainer::SetCurrentFrame(const gfx::IntSize& aIntrinsicSize,
        Image* aImage,
        const TimeStamp& aTargetTime)
{
    if (aImage) {
        MutexAutoLock lock(mMutex);
        AutoTArray<ImageContainer::NonOwningImage,1> imageList;
        imageList.AppendElement(
            ImageContainer::NonOwningImage(aImage, aTargetTime, ++mFrameID));
        SetCurrentFramesLocked(aIntrinsicSize, imageList);
    } else {
        ClearCurrentFrame(aIntrinsicSize);
    }
}
Example #26
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %" PRIuSIZE " samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    PodCopy(static_cast<T*>(buffer->Data()),
            aBuffer, aFrames * aChannels);

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    AutoTArray<const T*, 1> channels;
    // XXX Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1,
        "GraphDriver only supports mono audio for now");
    channels.AppendElement(static_cast<T*>(buffer->Data()));
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}
Example #27
static nscoord
ComputeDescendantISize(const ReflowInput& aAncestorReflowInput,
                       nsIFrame *aDescendantFrame)
{
  nsIFrame *ancestorFrame = aAncestorReflowInput.mFrame->FirstInFlow();
  if (aDescendantFrame == ancestorFrame) {
    return aAncestorReflowInput.ComputedISize();
  }

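  // Collect the frames on the path from the descendant up to (but not
  // including) the ancestor, so reflow inputs can be constructed parent-first
  // below.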
  AutoTArray<nsIFrame*, 16> frames;
  for (nsIFrame *f = aDescendantFrame; f != ancestorFrame;
       f = f->GetParent()->FirstInFlow()) {
    frames.AppendElement(f);
  }

  // This ignores the inline-size contributions made by scrollbars, though in
  // reality we don't have any scrollbars on the sorts of devices on
  // which we use font inflation, so it's not a problem.  But it may
  // occasionally cause problems when writing tests on desktop.

  uint32_t len = frames.Length();
  ReflowInput *reflowInputs = static_cast<ReflowInput*>
                                (moz_xmalloc(sizeof(ReflowInput) * len));
  nsPresContext *presContext = aDescendantFrame->PresContext();
  for (uint32_t i = 0; i < len; ++i) {
    const ReflowInput &parentReflowInput =
      (i == 0) ? aAncestorReflowInput : reflowInputs[i - 1];
    nsIFrame *frame = frames[len - i - 1];
    WritingMode wm = frame->GetWritingMode();
    LogicalSize availSize = parentReflowInput.ComputedSize(wm);
    availSize.BSize(wm) = NS_UNCONSTRAINEDSIZE;
    MOZ_ASSERT(frame->GetParent()->FirstInFlow() ==
                 parentReflowInput.mFrame->FirstInFlow(),
               "bad logic in this function");
    new (reflowInputs + i) ReflowInput(presContext, parentReflowInput,
                                             frame, availSize);
  }

  MOZ_ASSERT(reflowInputs[len - 1].mFrame == aDescendantFrame,
             "bad logic in this function");
  nscoord result = reflowInputs[len - 1].ComputedISize();

  for (uint32_t i = len; i-- != 0; ) {
    reflowInputs[i].~ReflowInput();
  }
  free(reflowInputs);

  return result;
}
Example #28
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  AutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
Example #29
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<RefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    RefPtr<SharedBuffer> buffer = aChunks[i];
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());

    AutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk,
                          PRINCIPAL_HANDLE_NONE);
  }

  return segment;
}
Example #30
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The amount of audio frames that is used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packet that we've already processed
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue()
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}