Example #1
0
// This constructor is for dynamic atoms and HTML5 atoms.
nsAtom::nsAtom(AtomKind aKind, const nsAString& aString, uint32_t aHash)
  : mRefCnt(1)
  , mLength(aString.Length())
  , mKind(static_cast<uint32_t>(aKind))
  , mHash(aHash)
{
  MOZ_ASSERT(aKind == AtomKind::DynamicAtom || aKind == AtomKind::HTML5Atom);
  RefPtr<nsStringBuffer> buf = nsStringBuffer::FromString(aString);
  if (buf) {
    mString = static_cast<char16_t*>(buf->Data());
  } else {
    const size_t size = (mLength + 1) * sizeof(char16_t);
    buf = nsStringBuffer::Alloc(size);
    if (MOZ_UNLIKELY(!buf)) {
      // We OOM because atom allocations should be small and it's hard to
      // handle them more gracefully in a constructor.
      NS_ABORT_OOM(size);
    }
    mString = static_cast<char16_t*>(buf->Data());
    CopyUnicodeTo(aString, 0, mString, mLength);
    mString[mLength] = char16_t(0);
  }

  MOZ_ASSERT_IF(IsDynamicAtom(), mHash == HashString(mString, mLength));

  MOZ_ASSERT(mString[mLength] == char16_t(0), "null terminated");
  MOZ_ASSERT(buf && buf->StorageSize() >= (mLength + 1) * sizeof(char16_t),
             "enough storage");
  MOZ_ASSERT(Equals(aString), "correct data");

  // Take ownership of buffer
  mozilla::Unused << buf.forget();
}
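A note on the two branches above: nsStringBuffer::FromString() hands back the string's underlying refcounted buffer when it has one, and nullptr otherwise, which is what forces the Alloc-and-copy fallback. A minimal sketch of the distinction (illustrative only; whether a given nsAString is buffer-backed depends on its storage flags):

// Illustrative: a short nsAutoString keeps its characters in inline storage,
// so FromString() typically returns nullptr and the copy path runs instead.
nsAutoString str(u"example");
RefPtr<nsStringBuffer> shared = nsStringBuffer::FromString(str);
if (!shared) {
  // Same fallback as the constructor: room for the characters plus a null
  // terminator, then copy.
  shared = nsStringBuffer::Alloc((str.Length() + 1) * sizeof(char16_t));
}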
Example #2
0
TEST(Layers, TextureYCbCrSerialization) {
  RefPtr<gfxImageSurface> ySurface = new gfxImageSurface(gfxIntSize(400,300), gfxImageFormat::A8);
  RefPtr<gfxImageSurface> cbSurface = new gfxImageSurface(gfxIntSize(200,150), gfxImageFormat::A8);
  RefPtr<gfxImageSurface> crSurface = new gfxImageSurface(gfxIntSize(200,150), gfxImageFormat::A8);
  SetupSurface(ySurface.get());
  SetupSurface(cbSurface.get());
  SetupSurface(crSurface.get());

  PlanarYCbCrData clientData;
  clientData.mYChannel = ySurface->Data();
  clientData.mCbChannel = cbSurface->Data();
  clientData.mCrChannel = crSurface->Data();
  clientData.mYSize = ySurface->GetSize().ToIntSize();
  clientData.mPicSize = ySurface->GetSize().ToIntSize();
  clientData.mCbCrSize = cbSurface->GetSize().ToIntSize();
  clientData.mYStride = ySurface->Stride();
  clientData.mCbCrStride = cbSurface->Stride();
  clientData.mStereoMode = StereoMode::MONO;
  clientData.mYSkip = 0;
  clientData.mCbSkip = 0;
  clientData.mCrSkip = 0;
  clientData.mPicX = 0;
  clientData.mPicY = 0;

  RefPtr<TextureClient> client
    = new MemoryTextureClient(nullptr,
                              mozilla::gfx::SurfaceFormat::YUV,
                              gfx::BackendType::CAIRO,
                              TEXTURE_DEALLOCATE_CLIENT);

  TestTextureClientYCbCr(client, clientData);

  // XXX - Test more texture client types.
}
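A note on the surface sizes: the Cb/Cr planes are half the Y plane in each dimension because the test data is 4:2:0 subsampled. The arithmetic, with no Mozilla types assumed:

// 4:2:0 chroma sizing: each Cb/Cr sample covers a 2x2 block of luma pixels,
// so the 400x300 Y plane pairs with 200x150 Cb and Cr planes.
const int yWidth = 400, yHeight = 300;
const int cbCrWidth = yWidth / 2;   // 200
const int cbCrHeight = yHeight / 2; // 150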
Example #3
0
TEST(Layers, TextureYCbCrSerialization) {
  RefPtr<gfxImageSurface> ySurface = new gfxImageSurface(gfxIntSize(400,300), gfxImageFormatA8);
  RefPtr<gfxImageSurface> cbSurface = new gfxImageSurface(gfxIntSize(200,150), gfxImageFormatA8);
  RefPtr<gfxImageSurface> crSurface = new gfxImageSurface(gfxIntSize(200,150), gfxImageFormatA8);
  SetupSurface(ySurface.get());
  SetupSurface(cbSurface.get());
  SetupSurface(crSurface.get());

  PlanarYCbCrData clientData;
  clientData.mYChannel = ySurface->Data();
  clientData.mCbChannel = cbSurface->Data();
  clientData.mCrChannel = crSurface->Data();
  clientData.mYSize = ySurface->GetSize();
  clientData.mPicSize = ySurface->GetSize();
  clientData.mCbCrSize = cbSurface->GetSize();
  clientData.mYStride = ySurface->Stride();
  clientData.mCbCrStride = cbSurface->Stride();
  clientData.mStereoMode = STEREO_MODE_MONO;
  clientData.mYSkip = 0;
  clientData.mCbSkip = 0;
  clientData.mCrSkip = 0;
  clientData.mPicX = 0;
  clientData.mPicY = 0;

  RefPtr<TextureClient> client
    = new MemoryTextureClient(nullptr,
                              mozilla::gfx::FORMAT_YUV,
                              TEXTURE_FLAGS_DEFAULT);

  TestTextureClientYCbCr(client, clientData);

  // XXX - Test more texture client types.
}
Example #4
0
int32_t
GonkDecoderManager::ProcessQueuedSamples()
{
  MOZ_ASSERT(OnTaskLooper());

  MutexAutoLock lock(mMutex);
  status_t rv;
  while (mQueuedSamples.Length()) {
    RefPtr<MediaRawData> data = mQueuedSamples.ElementAt(0);
    rv = mDecoder->Input(reinterpret_cast<const uint8_t*>(data->Data()),
                         data->Size(),
                         data->mTime,
                         0,
                         INPUT_TIMEOUT_US);
    if (rv == OK) {
      mQueuedSamples.RemoveElementAt(0);
      mWaitOutput.AppendElement(WaitOutputInfo(data->mOffset, data->mTime,
                                               /* eos */ data->Data() == nullptr));
    } else if (rv == -EAGAIN || rv == -ETIMEDOUT) {
      // EAGAIN and ETIMEDOUT are usually benign here: OMX simply couldn't
      // free an input buffer in time, so stop for now and retry later.
      break;
    } else {
      return rv;
    }
  }
  return mQueuedSamples.Length();
}
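The return value above folds two things together: a negative value is the raw status_t from mDecoder->Input(), while a non-negative value is the number of samples still queued. A hypothetical caller sketch (the helper names are illustrative, not the real Gonk API):

// Hypothetical caller; NotifyError() and ScheduleAnotherPass() are
// placeholders for whatever the surrounding code actually does.
int32_t pending = mManager->ProcessQueuedSamples();
if (pending < 0) {
  NotifyError(pending);      // the decoder rejected the input outright
} else if (pending > 0) {
  ScheduleAnotherPass();     // OMX was busy (EAGAIN/ETIMEDOUT); retry later
}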
Example #5
0
TEST(Layers, TextureYCbCrSerialization) {
  RefPtr<gfxImageSurface> ySurface = new gfxImageSurface(IntSize(400,300), SurfaceFormat::A8);
  RefPtr<gfxImageSurface> cbSurface = new gfxImageSurface(IntSize(200,150), SurfaceFormat::A8);
  RefPtr<gfxImageSurface> crSurface = new gfxImageSurface(IntSize(200,150), SurfaceFormat::A8);
  SetupSurface(ySurface.get());
  SetupSurface(cbSurface.get());
  SetupSurface(crSurface.get());

  PlanarYCbCrData clientData;
  clientData.mYChannel = ySurface->Data();
  clientData.mCbChannel = cbSurface->Data();
  clientData.mCrChannel = crSurface->Data();
  clientData.mYSize = ySurface->GetSize();
  clientData.mPicSize = ySurface->GetSize();
  clientData.mCbCrSize = cbSurface->GetSize();
  clientData.mYStride = ySurface->Stride();
  clientData.mCbCrStride = cbSurface->Stride();
  clientData.mStereoMode = StereoMode::MONO;
  clientData.mYUVColorSpace = YUVColorSpace::BT601;
  clientData.mYSkip = 0;
  clientData.mCbSkip = 0;
  clientData.mCrSkip = 0;
  clientData.mPicX = 0;
  clientData.mPicY = 0;

  RefPtr<TextureClient> client = TextureClient::CreateForYCbCr(nullptr, clientData.mYSize, clientData.mCbCrSize,
                                                               StereoMode::MONO, YUVColorSpace::BT601,
                                                               TextureFlags::DEALLOCATE_CLIENT);

  TestTextureClientYCbCr(client, clientData);

  // XXX - Test more texture client types.
}
Example #6
0
TEST(Layers, TextureYCbCrSerialization) {
  RefPtr<gfxImageSurface> ySurface = new gfxImageSurface(IntSize(400,300), SurfaceFormat::A8);
  RefPtr<gfxImageSurface> cbSurface = new gfxImageSurface(IntSize(200,150), SurfaceFormat::A8);
  RefPtr<gfxImageSurface> crSurface = new gfxImageSurface(IntSize(200,150), SurfaceFormat::A8);
  SetupSurface(ySurface.get());
  SetupSurface(cbSurface.get());
  SetupSurface(crSurface.get());

  PlanarYCbCrData clientData;
  clientData.mYChannel = ySurface->Data();
  clientData.mCbChannel = cbSurface->Data();
  clientData.mCrChannel = crSurface->Data();
  clientData.mYSize = ySurface->GetSize();
  clientData.mPicSize = ySurface->GetSize();
  clientData.mCbCrSize = cbSurface->GetSize();
  clientData.mYStride = ySurface->Stride();
  clientData.mCbCrStride = cbSurface->Stride();
  clientData.mStereoMode = StereoMode::MONO;
  clientData.mYUVColorSpace = YUVColorSpace::BT601;
  clientData.mYSkip = 0;
  clientData.mCbSkip = 0;
  clientData.mCrSkip = 0;
  clientData.mPicX = 0;
  clientData.mPicY = 0;

  ImageBridgeChild::InitSameProcess();

  RefPtr<ImageBridgeChild> imageBridge = ImageBridgeChild::GetSingleton();
  static int retry = 5;
  while (!imageBridge->IPCOpen() && retry) {
    // The IPDL connection takes time to establish, especially in slow testing
    // environments such as VMs, so retry a few times while waiting for it.
#ifdef XP_WIN
    Sleep(1);
#else
    sleep(1);
#endif
    retry--;
  }

  // Skip this test if the IPDL connection is not ready.
  if (!retry && !imageBridge->IPCOpen()) {
    return;
  }

  RefPtr<TextureClient> client = TextureClient::CreateForYCbCr(imageBridge, clientData.mYSize, clientData.mCbCrSize,
                                                               StereoMode::MONO, YUVColorSpace::BT601,
                                                               TextureFlags::DEALLOCATE_CLIENT);

  TestTextureClientYCbCr(client, clientData);

  // XXX - Test more texture client types.
}
Example #7
0
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %" PRIuSIZE " samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    PodCopy(static_cast<T*>(buffer->Data()),
            aBuffer, aFrames * aChannels);

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    AutoTArray<const T*, 1> channels;
    // XXX Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1,
        "GraphDriver only supports mono audio for now");
    channels.AppendElement(static_cast<T*>(buffer->Data()));
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}
Example #8
0
void
DIBTextureHost::UpdatedInternal(const nsIntRegion* aRegion)
{
  if (!mProvider) {
    // This can happen if we send textures to a compositable that isn't yet
    // attached to a layer.
    return;
  }

  if (!mTextureSource) {
    mTextureSource = mProvider->CreateDataTextureSource(mFlags);
  }

  if (mSurface->CairoStatus()) {
    gfxWarning() << "Bad Cairo surface internal update " << mSurface->CairoStatus();
    mTextureSource = nullptr;
    return;
  }
  RefPtr<gfxImageSurface> imgSurf = mSurface->GetAsImageSurface();

  RefPtr<DataSourceSurface> surf = Factory::CreateWrappingDataSourceSurface(imgSurf->Data(), imgSurf->Stride(), mSize, mFormat);

  if (!surf || !mTextureSource->Update(surf, const_cast<nsIntRegion*>(aRegion))) {
    mTextureSource = nullptr;
  }

  ReadUnlock();
}
Example #9
0
bool
MemoryDIBTextureData::UpdateFromSurface(gfx::SourceSurface* aSurface)
{
  RefPtr<gfxImageSurface> imgSurf = mSurface->GetAsImageSurface();

  RefPtr<DataSourceSurface> srcSurf = aSurface->GetDataSurface();

  if (!srcSurf) {
    gfxCriticalError() << "Failed to GetDataSurface in UpdateFromSurface (DIB).";
    return false;
  }

  DataSourceSurface::MappedSurface sourceMap;
  if (!srcSurf->Map(gfx::DataSourceSurface::READ, &sourceMap)) {
    gfxCriticalError() << "Failed to map source surface for UpdateFromSurface.";
    return false;
  }

  for (int y = 0; y < srcSurf->GetSize().height; y++) {
    memcpy(imgSurf->Data() + imgSurf->Stride() * y,
           sourceMap.mData + sourceMap.mStride * y,
           srcSurf->GetSize().width * BytesPerPixel(srcSurf->GetFormat()));
  }

  srcSurf->Unmap();
  return true;
}
Example #10
0
mozilla::ipc::IPCResult RemoteDecoderParent::RecvInput(
    const MediaRawDataIPDL& aData) {
  MOZ_ASSERT(OnManagerThread());
  // XXX: This copies the data into a buffer owned by the MediaRawData. Ideally
  // we'd just take ownership of the shmem.
  RefPtr<MediaRawData> data = new MediaRawData(aData.buffer().get<uint8_t>(),
                                               aData.buffer().Size<uint8_t>());
  if (aData.buffer().Size<uint8_t>() && !data->Data()) {
    // OOM
    Error(NS_ERROR_OUT_OF_MEMORY);
    return IPC_OK();
  }
  data->mOffset = aData.base().offset();
  data->mTime = aData.base().time();
  data->mTimecode = aData.base().timecode();
  data->mDuration = aData.base().duration();
  data->mKeyframe = aData.base().keyframe();

  DeallocShmem(aData.buffer());

  RefPtr<RemoteDecoderParent> self = this;
  mDecoder->Decode(data)->Then(
      mManagerTaskQueue, __func__,
      [self, this](const MediaDataDecoder::DecodedData& aResults) {
        if (mDestroyed) {
          return;
        }
        ProcessDecodedData(aResults);
        Unused << SendInputExhausted();
      },
      [self](const MediaResult& aError) { self->Error(aError); });
  return IPC_OK();
}
Example #11
0
void
SpeechRecognition::FeedAudioData(already_AddRefed<SharedBuffer> aSamples,
                                 uint32_t aDuration,
                                 MediaStreamListener* aProvider, TrackRate aTrackRate)
{
  NS_ASSERTION(!NS_IsMainThread(),
               "FeedAudioData should not be called in the main thread");

  // Endpointer expects to receive samples in chunks whose size is a
  // multiple of its frame size.
  // Since we can't assume we will receive the frames in appropriate-sized
  // chunks, we must buffer and split them in chunks of mAudioSamplesPerChunk
  // (a multiple of Endpointer's frame size) before feeding to Endpointer.

  // Take ownership so the samples are released when this function returns.
  RefPtr<SharedBuffer> refSamples = aSamples;

  uint32_t samplesIndex = 0;
  const int16_t* samples = static_cast<int16_t*>(refSamples->Data());
  nsAutoTArray<RefPtr<SharedBuffer>, 5> chunksToSend;

  // fill up our buffer and make a chunk out of it, if possible
  if (mBufferedSamples > 0) {
    samplesIndex += FillSamplesBuffer(samples, aDuration);

    if (mBufferedSamples == mAudioSamplesPerChunk) {
      chunksToSend.AppendElement(mAudioSamplesBuffer.forget());
      mBufferedSamples = 0;
    }
  }

  // create sample chunks of correct size
  if (samplesIndex < aDuration) {
    samplesIndex += SplitSamplesBuffer(samples + samplesIndex,
                                       aDuration - samplesIndex,
                                       chunksToSend);
  }

  // buffer remaining samples
  if (samplesIndex < aDuration) {
    mBufferedSamples = 0;
    mAudioSamplesBuffer =
      SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));

    FillSamplesBuffer(samples + samplesIndex, aDuration - samplesIndex);
  }

  AudioSegment* segment = CreateAudioSegment(chunksToSend);
  RefPtr<SpeechEvent> event = new SpeechEvent(this, EVENT_AUDIO_DATA);
  event->mAudioSegment = segment;
  event->mProvider = aProvider;
  event->mTrackRate = aTrackRate;
  NS_DispatchToMainThread(event);

}
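The three phases above keep a simple invariant: every incoming sample either ends up in a full chunk in chunksToSend or is left in mAudioSamplesBuffer for the next call. A worked example with hypothetical numbers:

// Hypothetical: mAudioSamplesPerChunk == 160, mBufferedSamples == 100 from
// the previous call, aDuration == 500.
//   1. FillSamplesBuffer() takes 60 samples to complete the pending chunk.
//   2. SplitSamplesBuffer() turns the next 320 samples into two full chunks.
//   3. The remaining 120 samples are buffered for the next FeedAudioData call.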
Example #12
0
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    PodCopy(static_cast<T*>(buffer->Data()),
            aBuffer, aFrames * aChannels);

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    AutoTArray<const T*, 1> channels;
    // XXX Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1,
        "GraphDriver only supports mono audio for now");
    channels.AppendElement(static_cast<T*>(buffer->Data()));
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    RUN_ON_THREAD(mThread,
                  WrapRunnable(mSources[i], &SourceMediaStream::AppendToTrack,
                               mTrackID, segment,
                               static_cast<AudioSegment*>(nullptr)),
                  NS_DISPATCH_NORMAL);
  }
}
Example #13
0
bool
WAVTrackDemuxer::HeaderParserInit()
{
  RefPtr<MediaRawData> header = GetFileHeader(FindChunkHeader());
  if (!header) {
    return false;
  }
  ByteReader HeaderReader(header->Data(), 8);
  mHeaderParser.Parse(HeaderReader);
  return true;
}
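The 8-byte ByteReader window corresponds to the generic RIFF chunk header that mHeaderParser presumably consumes:

// Generic RIFF chunk header, 8 bytes:
//   bytes 0-3  chunk ID, e.g. 'f' 'm' 't' ' '
//   bytes 4-7  little-endian chunk size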
Example #14
0
bool
WAVTrackDemuxer::RIFFParserInit()
{
  RefPtr<MediaRawData> riffHeader = GetFileHeader(FindRIFFHeader());
  if (!riffHeader) {
    return false;
  }
  ByteReader RIFFReader(riffHeader->Data(), 12);
  mRIFFParser.Parse(RIFFReader);
  return mRIFFParser.RiffHeader().IsValid(11);
}
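Likewise, the 12-byte window here is the standard RIFF file header, and IsValid(11) presumably validates it through its last byte (index 11):

// RIFF file header for a WAV file, 12 bytes:
//   bytes 0-3   'R' 'I' 'F' 'F'
//   bytes 4-7   little-endian size of everything after this field
//   bytes 8-11  form type, 'W' 'A' 'V' 'E'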
Example #15
0
void Generate(AudioSegment& aSegment, const int32_t& aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());
  mGenerator.generate(dest, aSamples);
  AutoTArray<const int16_t*, 1> channels;
  for (int32_t i = 0; i < mChannels; i++) {
    channels.AppendElement(dest);
  }
  aSegment.AppendFrames(buffer.forget(), channels, aSamples, PRINCIPAL_HANDLE_NONE);
}
Example #16
0
bool
WAVTrackDemuxer::FmtChunkParserInit()
{
  RefPtr<MediaRawData> fmtChunk = GetFileHeader(FindFmtChunk());
  if (!fmtChunk) {
    return false;
  }
  ByteReader fmtReader(fmtChunk->Data(),
                       mHeaderParser.GiveHeader().ChunkSize());
  mFmtParser.Parse(fmtReader);
  return true;
}
Example #17
0
void
MediaEngineDefaultAudioSource::AppendToSegment(AudioSegment& aSegment,
                                               TrackTicks aSamples)
{
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(aSamples * sizeof(int16_t));
  int16_t* dest = static_cast<int16_t*>(buffer->Data());

  mSineGenerator->generate(dest, aSamples);
  AutoTArray<const int16_t*,1> channels;
  channels.AppendElement(dest);
  aSegment.AppendFrames(buffer.forget(), channels, aSamples);
}
Example #18
0
nsHtml5Atom::nsHtml5Atom(const nsAString& aString)
{
  mLength = aString.Length();
  RefPtr<nsStringBuffer> buf = nsStringBuffer::FromString(aString);
  if (buf) {
    mString = static_cast<char16_t*>(buf->Data());
  } else {
    buf = nsStringBuffer::Alloc((mLength + 1) * sizeof(char16_t));
    mString = static_cast<char16_t*>(buf->Data());
    CopyUnicodeTo(aString, 0, mString, mLength);
    mString[mLength] = char16_t(0);
  }

  NS_ASSERTION(mString[mLength] == char16_t(0), "null terminated");
  NS_ASSERTION(buf && buf->StorageSize() >= (mLength+1) * sizeof(char16_t),
               "enough storage");
  NS_ASSERTION(Equals(aString), "correct data");

  // Take ownership of buffer
  mozilla::Unused << buf.forget();
}
Example #19
0
static RefPtr<mozilla::SharedBuffer>
makeSamples(int16_t* aData, uint32_t aDataLen)
{
  RefPtr<mozilla::SharedBuffer> samples =
    SharedBuffer::Create(aDataLen * sizeof(int16_t));
  int16_t* frames = static_cast<int16_t*>(samples->Data());

  for (uint32_t i = 0; i < aDataLen; i++) {
    frames[i] = aData[i];
  }

  return samples;
}
Example #20
0
already_AddRefed<MediaRawData>
MP3TrackDemuxer::GetNextFrame(const MediaByteRange& aRange) {
  MP3LOG("GetNext() Begin({mStart=%" PRId64 " Length()=%" PRId64 "})",
         aRange.mStart, aRange.Length());
  if (!aRange.Length()) {
    return nullptr;
  }

  RefPtr<MediaRawData> frame = new MediaRawData();
  frame->mOffset = aRange.mStart;

  nsAutoPtr<MediaRawDataWriter> frameWriter(frame->CreateWriter());
  if (!frameWriter->SetSize(aRange.Length())) {
    MP3LOG("GetNext() Exit failed to allocated media buffer");
    return nullptr;
  }

  const uint32_t read = Read(frameWriter->Data(), frame->mOffset, frame->Size());

  if (read != aRange.Length()) {
    MP3LOG("GetNext() Exit read=%u frame->Size()=%u", read, frame->Size());
    return nullptr;
  }

  UpdateState(aRange);

  frame->mTime = Duration(mFrameIndex - 1).ToMicroseconds();
  frame->mDuration = Duration(1).ToMicroseconds();
  frame->mTimecode = frame->mTime;
  frame->mKeyframe = true;

  MOZ_ASSERT(frame->mTime >= 0);
  MOZ_ASSERT(frame->mDuration > 0);

  if (mNumParsedFrames == 1) {
    // First frame parsed, let's read VBR info if available.
    // TODO: read info that helps with seeking (bug 1163667).
    ByteReader reader(frame->Data(), frame->Size());
    mParser.ParseVBRHeader(&reader);
    reader.DiscardRemaining();
    mFirstFrameOffset = frame->mOffset;
  }

  MP3LOGV("GetNext() End mOffset=%" PRIu64 " mNumParsedFrames=%" PRIu64
          " mFrameIndex=%" PRId64 " mTotalFrameLen=%" PRIu64
          " mSamplesPerFrame=%d mSamplesPerSecond=%d mChannels=%d",
          mOffset, mNumParsedFrames, mFrameIndex, mTotalFrameLen,
          mSamplesPerFrame, mSamplesPerSecond, mChannels);

  return frame.forget();
}
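The timestamp math above is worth spelling out: UpdateState(aRange) has apparently already advanced mFrameIndex past the frame just read, so Duration(mFrameIndex - 1) is that frame's start time and Duration(1) its length. With typical MPEG Layer III numbers (illustrative only):

// Illustrative: 1152 samples per frame at 44100 Hz.
const int64_t samplesPerFrame = 1152;
const int64_t samplesPerSecond = 44100;
const int64_t frameUsecs =
    samplesPerFrame * 1000000 / samplesPerSecond;  // ~26122 us per frame
// The Nth frame (0-based) then starts at N * frameUsecs and lasts frameUsecs.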
Example #21
0
nsresult
DataTransfer::GetDataAtInternal(const nsAString& aFormat, uint32_t aIndex,
                                nsIPrincipal* aSubjectPrincipal,
                                nsIVariant** aData)
{
  *aData = nullptr;

  if (aFormat.IsEmpty()) {
    return NS_OK;
  }

  if (aIndex >= MozItemCount()) {
    return NS_ERROR_DOM_INDEX_SIZE_ERR;
  }

  // Only the first item is valid for clipboard events
  if (aIndex > 0 &&
      (mEventMessage == eCut || mEventMessage == eCopy ||
       mEventMessage == ePaste)) {
    return NS_ERROR_DOM_INDEX_SIZE_ERR;
  }

  nsAutoString format;
  GetRealFormat(aFormat, format);

  MOZ_ASSERT(aSubjectPrincipal);

  RefPtr<DataTransferItem> item = mItems->MozItemByTypeAt(format, aIndex);
  if (!item) {
    // The index exists but there's no data for the specified format, in this
    // case we just return undefined
    return NS_OK;
  }

  // If we have chrome only content, and we aren't chrome, don't allow access
  if (!nsContentUtils::IsSystemPrincipal(aSubjectPrincipal) && item->ChromeOnly()) {
    return NS_OK;
  }

  // DataTransferItem::Data() handles the principal checks
  ErrorResult result;
  nsCOMPtr<nsIVariant> data = item->Data(aSubjectPrincipal, result);
  if (NS_WARN_IF(!data || result.Failed())) {
    return result.StealNSResult();
  }

  data.forget(aData);
  return NS_OK;
}
Example #22
0
void
nsSpeechTask::SendAudioImpl(RefPtr<mozilla::SharedBuffer>& aSamples, uint32_t aDataLen)
{
  if (aDataLen == 0) {
    mStream->EndAllTrackAndFinish();
    return;
  }

  AudioSegment segment;
  nsAutoTArray<const int16_t*, 1> channelData;
  channelData.AppendElement(static_cast<int16_t*>(aSamples->Data()));
  segment.AppendFrames(aSamples.forget(), channelData, aDataLen);
  mStream->AppendToTrack(1, &segment);
  mStream->AdvanceKnownTracksTime(STREAM_TIME_MAX);
}
Example #23
0
AudioSegment*
SpeechRecognition::CreateAudioSegment(nsTArray<RefPtr<SharedBuffer>>& aChunks)
{
  AudioSegment* segment = new AudioSegment();
  for (uint32_t i = 0; i < aChunks.Length(); ++i) {
    RefPtr<SharedBuffer> buffer = aChunks[i];
    const int16_t* chunkData = static_cast<const int16_t*>(buffer->Data());

    nsAutoTArray<const int16_t*, 1> channels;
    channels.AppendElement(chunkData);
    segment->AppendFrames(buffer.forget(), channels, mAudioSamplesPerChunk);
  }

  return segment;
}
Example #24
0
static void
SendStreamAudio(DecodedStreamData* aStream, int64_t aStartTime,
                MediaData* aData, AudioSegment* aOutput, uint32_t aRate,
                const PrincipalHandle& aPrincipalHandle)
{
  // The amount of audio frames that is used to fuzz rounding errors.
  static const int64_t AUDIO_FUZZ_FRAMES = 1;

  MOZ_ASSERT(aData);
  AudioData* audio = aData->As<AudioData>();
  // This logic has to mimic AudioSink closely to make sure we write
  // the exact same silences
  CheckedInt64 audioWrittenOffset = aStream->mAudioFramesWritten +
                                    UsecsToFrames(aStartTime, aRate);
  CheckedInt64 frameOffset = UsecsToFrames(audio->mTime, aRate);

  if (!audioWrittenOffset.isValid() ||
      !frameOffset.isValid() ||
      // ignore packet that we've already processed
      audio->GetEndTime() <= aStream->mNextAudioTime) {
    return;
  }

  if (audioWrittenOffset.value() + AUDIO_FUZZ_FRAMES < frameOffset.value()) {
    int64_t silentFrames = frameOffset.value() - audioWrittenOffset.value();
    // Write silence to catch up
    AudioSegment silence;
    silence.InsertNullDataAtStart(silentFrames);
    aStream->mAudioFramesWritten += silentFrames;
    audioWrittenOffset += silentFrames;
    aOutput->AppendFrom(&silence);
  }

  // Always write the whole sample without truncation to be consistent with
  // DecodedAudioDataSink::PlayFromAudioQueue()
  audio->EnsureAudioBuffer();
  RefPtr<SharedBuffer> buffer = audio->mAudioBuffer;
  AudioDataValue* bufferData = static_cast<AudioDataValue*>(buffer->Data());
  AutoTArray<const AudioDataValue*, 2> channels;
  for (uint32_t i = 0; i < audio->mChannels; ++i) {
    channels.AppendElement(bufferData + i * audio->mFrames);
  }
  aOutput->AppendFrames(buffer.forget(), channels, audio->mFrames, aPrincipalHandle);
  aStream->mAudioFramesWritten += audio->mFrames;

  aStream->mNextAudioTime = audio->GetEndTime();
}
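A worked example of the catch-up logic, with illustrative numbers:

// Illustrative: at aRate == 48000, suppose audioWrittenOffset == 960 frames
// and the packet's frameOffset == 1440. The 480-frame gap (10 ms) exceeds
// AUDIO_FUZZ_FRAMES, so 480 frames of silence are appended before the data.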
Example #25
0
void
gfxWindowsNativeDrawing::PaintToContext()
{
    if (mRenderState == RENDER_STATE_NATIVE_DRAWING_DONE) {
        // nothing to do, it already went to the context
        mRenderState = RENDER_STATE_DONE;
    } else if (mRenderState == RENDER_STATE_ALPHA_RECOVERY_WHITE_DONE) {
        RefPtr<gfxImageSurface> black = mBlackSurface->GetAsImageSurface();
        RefPtr<gfxImageSurface> white = mWhiteSurface->GetAsImageSurface();
        if (!gfxAlphaRecovery::RecoverAlpha(black, white)) {
            NS_ERROR("Alpha recovery failure");
            return;
        }
        RefPtr<DataSourceSurface> source =
            Factory::CreateWrappingDataSourceSurface(black->Data(),
                                                     black->Stride(),
                                                     black->GetSize(),
                                                     SurfaceFormat::B8G8R8A8);

        mContext->Save();
        mContext->SetMatrix(
          mContext->CurrentMatrix().Translate(mNativeRect.TopLeft()));
        mContext->NewPath();
        mContext->Rectangle(gfxRect(gfxPoint(0.0, 0.0), mNativeRect.Size()));

        RefPtr<gfxPattern> pat = new gfxPattern(source, Matrix());

        gfxMatrix m;
        m.Scale(mScale.width, mScale.height);
        pat->SetMatrix(m);

        if (mNativeDrawFlags & DO_NEAREST_NEIGHBOR_FILTERING)
            pat->SetFilter(Filter::LINEAR);

        pat->SetExtend(ExtendMode::CLAMP);
        mContext->SetPattern(pat);
        mContext->Fill();
        mContext->Restore();

        mRenderState = RENDER_STATE_DONE;
    } else {
        NS_ERROR("Invalid RenderState in gfxWindowsNativeDrawing::PaintToContext");
    }
}
Example #26
0
/*
 * Split a samples buffer of a given size into chunks of equal size.
 * The chunks are stored in the array received as argument.
 * Returns the offset of the end of the last chunk that was created.
 */
uint32_t
SpeechRecognition::SplitSamplesBuffer(const int16_t* aSamplesBuffer,
                                      uint32_t aSampleCount,
                                      nsTArray<RefPtr<SharedBuffer>>& aResult)
{
  uint32_t chunkStart = 0;

  while (chunkStart + mAudioSamplesPerChunk <= aSampleCount) {
    RefPtr<SharedBuffer> chunk =
      SharedBuffer::Create(mAudioSamplesPerChunk * sizeof(int16_t));

    memcpy(chunk->Data(), aSamplesBuffer + chunkStart,
           mAudioSamplesPerChunk * sizeof(int16_t));

    aResult.AppendElement(chunk.forget());
    chunkStart += mAudioSamplesPerChunk;
  }

  return chunkStart;
}
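A usage sketch of the return contract, again with hypothetical numbers:

// Hypothetical: with mAudioSamplesPerChunk == 160 and aSampleCount == 500,
// the loop emits three 160-sample chunks and returns 480; the caller
// (FeedAudioData in Example #11) buffers the remaining 20 samples.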
Example #27
0
void
gfxWindowsNativeDrawing::PaintToContext()
{
    if (mRenderState == RENDER_STATE_NATIVE_DRAWING_DONE) {
        // nothing to do, it already went to the context
        mRenderState = RENDER_STATE_DONE;
    } else if (mRenderState == RENDER_STATE_ALPHA_RECOVERY_WHITE_DONE) {
        RefPtr<gfxImageSurface> black = mBlackSurface->GetAsImageSurface();
        RefPtr<gfxImageSurface> white = mWhiteSurface->GetAsImageSurface();
        if (!gfxAlphaRecovery::RecoverAlpha(black, white)) {
            NS_ERROR("Alpha recovery failure");
            return;
        }
        RefPtr<DataSourceSurface> source =
            Factory::CreateWrappingDataSourceSurface(black->Data(),
                                                     black->Stride(),
                                                     black->GetSize(),
                                                     SurfaceFormat::B8G8R8A8);
        {
            DrawTarget* dt = mContext->GetDrawTarget();
            AutoRestoreTransform autoRestoreTransform(dt);

            Matrix newTransform = dt->GetTransform();
            newTransform.PreTranslate(ToPoint(mNativeRect.TopLeft()));
            dt->SetTransform(newTransform);

            Rect rect(Point(0.0, 0.0), ToSize(mNativeRect.Size()));
            Matrix m = Matrix::Scaling(1.0 / mScale.width, 1.0 / mScale.height);
            Filter filter = (mNativeDrawFlags & DO_NEAREST_NEIGHBOR_FILTERING)
                          ? Filter::LINEAR
                          : Filter::GOOD;
            SurfacePattern pat(source, ExtendMode::CLAMP, m, filter);
            dt->FillRect(rect, pat);
        }

        mRenderState = RENDER_STATE_DONE;
    } else {
        NS_ERROR("Invalid RenderState in gfxWindowsNativeDrawing::PaintToContext");
    }
}
Example #28
0
already_AddRefed<nsStringBuffer>
nsAttrValue::GetStringBuffer(const nsAString& aValue) const
{
  uint32_t len = aValue.Length();
  if (!len) {
    return nullptr;
  }

  RefPtr<nsStringBuffer> buf = nsStringBuffer::FromString(aValue);
  if (buf && (buf->StorageSize()/sizeof(char16_t) - 1) == len) {
    return buf.forget();
  }

  buf = nsStringBuffer::Alloc((len + 1) * sizeof(char16_t));
  if (!buf) {
    return nullptr;
  }
  char16_t *data = static_cast<char16_t*>(buf->Data());
  CopyUnicodeTo(aValue, 0, data, len);
  data[len] = char16_t(0);
  return buf.forget();
}
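The StorageSize() comparison reuses the existing buffer only when it is exactly the size the string needs, presumably so the returned buffer never pins spare capacity. The arithmetic:

// Illustrative: a 5-character value needs (5 + 1) * sizeof(char16_t) == 12
// bytes (characters plus the null terminator); a shared buffer of any other
// storage size fails the check and takes the Alloc-and-copy path instead.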
Example #29
0
NS_IMETHODIMP
PicoCallbackRunnable::Run()
{
  MOZ_ASSERT(!NS_IsMainThread());
  PicoApi::pico_Status status = 0;

  if (mService->CurrentVoice() != mVoice) {
    mService->LoadEngine(mVoice);
  } else {
    status = sPicoApi.pico_resetEngine(mService->mPicoEngine, PICO_RESET_SOFT);
    PICO_ENSURE_SUCCESS("pico_resetEngine", status, NS_ERROR_FAILURE);
  }

  // Add SSML markup for pitch and rate. Pico uses a minimal parser,
  // so no namespace is needed.
  nsPrintfCString markedUpText(
    "<pitch level=\"%0.0f\"><speed level=\"%0.0f\">%s</speed></pitch>",
    std::min(std::max(50.0f, mPitch * 100), 200.0f),
    std::min(std::max(20.0f, mRate * 100), 500.0f),
    mText.get());

  const char* text = markedUpText.get();
  size_t buffer_size = 512, buffer_offset = 0;
  RefPtr<SharedBuffer> buffer = SharedBuffer::Create(buffer_size);
  int16_t text_offset = 0, bytes_recv = 0, bytes_sent = 0, out_data_type = 0;
  int16_t text_remaining = markedUpText.Length() + 1;

  // Run this loop while this is the current task
  while (IsCurrentTask()) {
    if (text_remaining) {
      status = sPicoApi.pico_putTextUtf8(mService->mPicoEngine,
                                         text + text_offset, text_remaining,
                                         &bytes_sent);
      PICO_ENSURE_SUCCESS("pico_putTextUtf8", status, NS_ERROR_FAILURE);
      // XXX: End speech task on error
      text_remaining -= bytes_sent;
      text_offset += bytes_sent;
    } else {
      // If we already fed all the text to the engine, send a zero length buffer
      // and quit.
      DispatchSynthDataRunnable(already_AddRefed<SharedBuffer>(), 0);
      break;
    }

    do {
      // Run this loop while the result of getData is STEP_BUSY. Once the
      // engine finishes synthesizing audio for the given text it returns
      // STEP_IDLE, and we break to the outer loop to feed more text, if any
      // is left.
      if (!IsCurrentTask()) {
        // If the task has changed, quit.
        break;
      }

      if (buffer_size - buffer_offset < PICO_MAX_CHUNK_SIZE) {
        // The next audio chunk retrieved may be bigger than our buffer,
        // so send the data and flush the buffer.
        DispatchSynthDataRunnable(buffer.forget(), buffer_offset);
        buffer_offset = 0;
        buffer = SharedBuffer::Create(buffer_size);
      }

      status = sPicoApi.pico_getData(mService->mPicoEngine,
                                     (uint8_t*)buffer->Data() + buffer_offset,
                                     PICO_MAX_CHUNK_SIZE,
                                     &bytes_recv, &out_data_type);
      PICO_ENSURE_SUCCESS("pico_getData", status, NS_ERROR_FAILURE);
      buffer_offset += bytes_recv;
    } while (status == PICO_STEP_BUSY);
  }

  return NS_OK;
}
Example #30
0
already_AddRefed<nsITransferable>
DataTransfer::GetTransferable(uint32_t aIndex, nsILoadContext* aLoadContext)
{
  if (aIndex >= mItems.Length()) {
    return nullptr;
  }

  nsTArray<TransferItem>& item = mItems[aIndex];
  uint32_t count = item.Length();
  if (!count) {
    return nullptr;
  }

  nsCOMPtr<nsITransferable> transferable =
    do_CreateInstance("@mozilla.org/widget/transferable;1");
  if (!transferable) {
    return nullptr;
  }
  transferable->Init(aLoadContext);

  nsCOMPtr<nsIStorageStream> storageStream;
  nsCOMPtr<nsIBinaryOutputStream> stream;

  bool added = false;
  bool handlingCustomFormats = true;
  uint32_t totalCustomLength = 0;

  const char* knownFormats[] = { kTextMime, kHTMLMime, kNativeHTMLMime, kRTFMime,
                                 kURLMime, kURLDataMime, kURLDescriptionMime, kURLPrivateMime,
                                 kPNGImageMime, kJPEGImageMime, kGIFImageMime, kNativeImageMime,
                                 kFileMime, kFilePromiseMime, kFilePromiseDirectoryMime,
                                 kMozTextInternal, kHTMLContext, kHTMLInfo };

  /*
   * Two passes are made here to iterate over all of the types. First, look for
   * any types that are not in the list of known types. For this pass, handlingCustomFormats
   * will be true. Data that corresponds to unknown types will be pulled out and
   * inserted into a single type (kCustomTypesMime) by writing the data into a stream.
   *
   * The second pass iterates over the formats looking for known types, which are
   * added as-is. The unknown types are then all inserted as a single type
   * (kCustomTypesMime) at the position of the first custom type. This preserves
   * the format order as closely as possible; a decoding sketch for this layout
   * follows the function.
   *
   * The format of the kCustomTypesMime type is one or more of the following stored sequentially:
   *   <32-bit> type (only none or string is supported)
   *   <32-bit> length of format
   *   <wide string> format
   *   <32-bit> length of data
   *   <wide string> data
   * A type of eCustomClipboardTypeId_None ends the list, without any following data.
   */
  do {
    for (uint32_t f = 0; f < count; f++) {
      const TransferItem& formatitem = item[f];
      if (!formatitem.mData) { // skip empty items
        continue;
      }

      // If the data is of one of the well-known formats, use it directly.
      bool isCustomFormat = true;
      for (uint32_t k = 0; k < ArrayLength(knownFormats); k++) {
        if (formatitem.mFormat.EqualsASCII(knownFormats[k])) {
          isCustomFormat = false;
          break;
        }
      }

      uint32_t lengthInBytes;
      nsCOMPtr<nsISupports> convertedData;

      if (handlingCustomFormats) {
        if (!ConvertFromVariant(formatitem.mData, getter_AddRefs(convertedData), &lengthInBytes)) {
          continue;
        }

        // When handling custom types, add the data to the stream if this is a
        // custom type.
        if (isCustomFormat) {
          // If it isn't a string, just ignore it. The dataTransfer is cached in the
          // drag session during drag-and-drop, so non-strings will be available when
          // dragging locally.
          nsCOMPtr<nsISupportsString> str(do_QueryInterface(convertedData));
          if (str) {
            nsAutoString data;
            str->GetData(data);

            if (!stream) {
              // Create a storage stream to write to.
              NS_NewStorageStream(1024, UINT32_MAX, getter_AddRefs(storageStream));

              nsCOMPtr<nsIOutputStream> outputStream;
              storageStream->GetOutputStream(0, getter_AddRefs(outputStream));

              stream = do_CreateInstance("@mozilla.org/binaryoutputstream;1");
              stream->SetOutputStream(outputStream);
            }

            int32_t formatLength = formatitem.mFormat.Length() * sizeof(nsString::char_type);

            stream->Write32(eCustomClipboardTypeId_String);
            stream->Write32(formatLength);
            stream->WriteBytes((const char *)formatitem.mFormat.get(), formatLength);
            stream->Write32(lengthInBytes);
            stream->WriteBytes((const char *)data.get(), lengthInBytes);

            // The total size of the stream is the format length, the data length,
            // two integers to hold the lengths and one integer for the string flag.
            totalCustomLength += formatLength + lengthInBytes + (sizeof(uint32_t) * 3);
          }
        }
      } else if (isCustomFormat && stream) {
        // This is the second pass of the loop (handlingCustomFormats is false).
        // When encountering the first custom format, append all of the stream
        // at this position.

        // Write out a terminator.
        totalCustomLength += sizeof(uint32_t);
        stream->Write32(eCustomClipboardTypeId_None);

        nsCOMPtr<nsIInputStream> inputStream;
        storageStream->NewInputStream(0, getter_AddRefs(inputStream));

        RefPtr<nsStringBuffer> stringBuffer = nsStringBuffer::Alloc(totalCustomLength + 1);
        if (!stringBuffer) {
          return nullptr;
        }

        // Read the data from the stream and add a null-terminator as ToString needs it.
        uint32_t amountRead;
        inputStream->Read(static_cast<char*>(stringBuffer->Data()), totalCustomLength, &amountRead);
        static_cast<char*>(stringBuffer->Data())[amountRead] = 0;

        nsCString str;
        stringBuffer->ToString(totalCustomLength, str);
        nsCOMPtr<nsISupportsCString> strSupports(do_CreateInstance(NS_SUPPORTS_CSTRING_CONTRACTID));
        strSupports->SetData(str);

        nsresult rv = transferable->SetTransferData(kCustomTypesMime, strSupports, totalCustomLength);
        if (NS_FAILED(rv)) {
          return nullptr;
        }

        added = true;

        // Clear the stream so it doesn't get used again.
        stream = nullptr;
      } else {
        // This is the second pass of the loop and a known type is encountered.
        // Add it as is.
        if (!ConvertFromVariant(formatitem.mData, getter_AddRefs(convertedData), &lengthInBytes)) {
          continue;
        }

        // The underlying drag code uses text/unicode, so use that instead of text/plain
        const char* format;
        NS_ConvertUTF16toUTF8 utf8format(formatitem.mFormat);
        if (utf8format.EqualsLiteral(kTextMime)) {
          format = kUnicodeMime;
        } else {
          format = utf8format.get();
        }

        // If a converter is set for a format, set the converter for the
        // transferable and don't add the item
        nsCOMPtr<nsIFormatConverter> converter = do_QueryInterface(convertedData);
        if (converter) {
          transferable->AddDataFlavor(format);
          transferable->SetConverter(converter);
          continue;
        }

        nsresult rv = transferable->SetTransferData(format, convertedData, lengthInBytes);
        if (NS_FAILED(rv)) {
          return nullptr;
        }

        added = true;
      }
    }

    handlingCustomFormats = !handlingCustomFormats;
  } while (!handlingCustomFormats);

  // only return the transferable if data was successfully added to it
  if (added) {
    return transferable.forget();
  }

  return nullptr;
}
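As referenced in the comment above, here is a standalone decoding sketch for the kCustomTypesMime layout. It is plain C++ for self-containment; the struct, helper names, and the assumed enum values (0 for eCustomClipboardTypeId_None, 1 for eCustomClipboardTypeId_String) are illustrative, not the real clipboard code:

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

struct CustomEntry {
  std::u16string format;
  std::u16string data;
};

// Parses the serialized kCustomTypesMime bytes described above. Returns false
// on a truncated or malformed stream; on success, `out` holds one entry per
// custom format and the terminator has been consumed.
static bool ReadCustomTypes(const uint8_t* p, size_t len,
                            std::vector<CustomEntry>& out)
{
  const uint8_t* end = p + len;
  auto read32 = [&p, end](uint32_t& v) {
    if (static_cast<size_t>(end - p) < sizeof(v)) return false;
    std::memcpy(&v, p, sizeof(v));
    p += sizeof(v);
    return true;
  };
  auto readString = [&p, end, &read32](std::u16string& s) {
    uint32_t bytes;  // lengths in the stream are byte counts, not char counts
    if (!read32(bytes) || bytes % sizeof(char16_t) != 0 ||
        static_cast<size_t>(end - p) < bytes) {
      return false;
    }
    s.resize(bytes / sizeof(char16_t));
    std::memcpy(&s[0], p, bytes);
    p += bytes;
    return true;
  };
  for (;;) {
    uint32_t type;
    if (!read32(type)) return false;  // ran out of bytes before the terminator
    if (type == 0) return true;       // assumed eCustomClipboardTypeId_None
    if (type != 1) return false;      // assumed eCustomClipboardTypeId_String
    CustomEntry e;
    if (!readString(e.format) || !readString(e.data)) return false;
    out.push_back(std::move(e));
  }
}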