void
nsCSSExpandedDataBlock::Compress(nsCSSCompressedDataBlock **aNormalBlock,
                                 nsCSSCompressedDataBlock **aImportantBlock)
{
    nsAutoPtr<nsCSSCompressedDataBlock> result_normal, result_important;
    char *cursor_normal, *cursor_important;

    ComputeSizeResult size = ComputeSize();

    result_normal = new(size.normal) nsCSSCompressedDataBlock();
    cursor_normal = result_normal->Block();

    if (size.important != 0) {
        result_important = new(size.important) nsCSSCompressedDataBlock();
        cursor_important = result_important->Block();
    } else {
        result_important = nsnull;
        cursor_important = nsnull;
    }

    /*
     * Avoid needless copying and allocation by copying the memory
     * corresponding to the stored data in the expanded block, and then
     * clearing the data in the expanded block.
     */
    for (size_t iHigh = 0; iHigh < nsCSSPropertySet::kChunkCount; ++iHigh) {
        if (!mPropertiesSet.HasPropertyInChunk(iHigh))
            continue;
        for (size_t iLow = 0; iLow < nsCSSPropertySet::kBitsInChunk; ++iLow) {
            if (!mPropertiesSet.HasPropertyAt(iHigh, iLow))
                continue;
            nsCSSProperty iProp = nsCSSPropertySet::CSSPropertyAt(iHigh, iLow);
            NS_ABORT_IF_FALSE(!nsCSSProps::IsShorthand(iProp), "out of range");
            PRBool important =
                mPropertiesImportant.HasPropertyAt(iHigh, iLow);
            char *&cursor = important ? cursor_important : cursor_normal;
            nsCSSCompressedDataBlock *result =
                important ? result_important : result_normal;
            nsCSSValue* val = PropertyAt(iProp);
            NS_ABORT_IF_FALSE(val->GetUnit() != eCSSUnit_Null,
                              "Null value while compressing");
            CDBValueStorage *storage =
                reinterpret_cast<CDBValueStorage*>(cursor);
            storage->property = iProp;
            memcpy(&storage->value, val, sizeof(nsCSSValue));
            new (val) nsCSSValue();
            cursor += CDBValueStorage_advance;
            result->mStyleBits |=
                nsCachedStyleData::GetBitForSID(nsCSSProps::kSIDTable[iProp]);
        }
    }

    result_normal->mBlockEnd = cursor_normal;
    NS_ABORT_IF_FALSE(result_normal->DataSize() == ptrdiff_t(size.normal),
                      "size miscalculation");

    if (result_important) {
        result_important->mBlockEnd = cursor_important;
        NS_ABORT_IF_FALSE(result_important->DataSize() ==
                          ptrdiff_t(size.important),
                          "size miscalculation");
    }

    ClearSets();
    AssertInitialState();
    *aNormalBlock = result_normal.forget();
    *aImportantBlock = result_important.forget();
}
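
// Illustrative sketch (added; not part of the Gecko source): the loop above writes
// one fixed-size CDBValueStorage record per property into a flat buffer and then
// placement-news the expanded slot back to its initial state. The simplified types
// below are stand-ins for nsCSSValue/CDBValueStorage, just to show the layout and
// cursor-advance pattern.
#include <cstddef>
#include <cstring>
#include <new>
#include <vector>

struct SketchValue { int unit = 0; double number = 0.0; };   // stand-in for nsCSSValue
struct SketchRecord { int property; SketchValue value; };    // stand-in for CDBValueStorage
const size_t kSketchStride = sizeof(SketchRecord);           // stand-in for CDBValueStorage_advance

std::vector<char> SketchCompress(std::vector<SketchValue>& aValues)
{
  std::vector<char> block(aValues.size() * kSketchStride);
  char* cursor = block.data();
  for (size_t i = 0; i < aValues.size(); ++i) {
    SketchRecord* storage = new (cursor) SketchRecord();
    storage->property = int(i);
    std::memcpy(&storage->value, &aValues[i], sizeof(SketchValue));
    new (&aValues[i]) SketchValue();   // reset the "expanded" slot, as above
    cursor += kSketchStride;
  }
  return block;
}
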
Example #2
int64_t
WaveReader::TimeToBytes(double aTime) const
{
  NS_ABORT_IF_FALSE(aTime >= 0.0f, "Must be >= 0");
  return RoundDownToFrame(int64_t(aTime * mSampleRate * mFrameSize));
}
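
// Illustrative sketch (added): the same conversion TimeToBytes performs above, with
// RoundDownToFrame written out explicitly. The sample rate and frame size are free
// parameters here rather than WaveReader members.
#include <cassert>
#include <cstdint>

int64_t SketchRoundDownToFrame(int64_t aBytes, int64_t aFrameSize)
{
  return (aBytes / aFrameSize) * aFrameSize;   // drop any trailing partial frame
}

int64_t SketchTimeToBytes(double aTime, int64_t aSampleRate, int64_t aFrameSize)
{
  assert(aTime >= 0.0 && aFrameSize > 0);
  // seconds * frames-per-second * bytes-per-frame, truncated to a frame boundary
  return SketchRoundDownToFrame(int64_t(aTime * aSampleRate * aFrameSize), aFrameSize);
}

// e.g. 0.5 s of 44100 Hz 16-bit stereo (4-byte frames) -> 88200 bytes.
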
Example #3
bool
WaveReader::LoadListChunk(uint32_t aChunkSize,
    nsAutoPtr<nsHTMLMediaElement::MetadataTags> &aTags)
{
  // List chunks are always word (two byte) aligned.
  NS_ABORT_IF_FALSE(mDecoder->GetResource()->Tell() % 2 == 0,
                    "LoadListChunk called with unaligned resource");

  static const unsigned int MAX_CHUNK_SIZE = 1 << 16;
  PR_STATIC_ASSERT(MAX_CHUNK_SIZE < UINT_MAX / sizeof(char));

  if (aChunkSize > MAX_CHUNK_SIZE) {
    return false;
  }

  nsAutoArrayPtr<char> chunk(new char[aChunkSize]);
  if (!ReadAll(chunk.get(), aChunkSize)) {
    return false;
  }

  static const uint32_t INFO_LIST_MAGIC = 0x494e464f;
  const char *p = chunk.get();
  if (ReadUint32BE(&p) != INFO_LIST_MAGIC) {
    return false;
  }

  const waveIdToName ID_TO_NAME[] = {
    { 0x49415254, NS_LITERAL_CSTRING("artist") },   // IART
    { 0x49434d54, NS_LITERAL_CSTRING("comments") }, // ICMT
    { 0x49474e52, NS_LITERAL_CSTRING("genre") },    // IGNR
    { 0x494e414d, NS_LITERAL_CSTRING("name") },     // INAM
  };

  const char* const end = chunk.get() + aChunkSize;

  aTags = new nsHTMLMediaElement::MetadataTags;
  aTags->Init();

  while (p + 8 < end) {
    uint32_t id = ReadUint32BE(&p);
    // Uppercase tag id, inspired by GStreamer's Wave parser.
    id &= 0xDFDFDFDF;

    uint32_t length = ReadUint32LE(&p);

    // Subchunk shall not exceed parent chunk.
    if (p + length > end) {
      break;
    }

    nsCString val(p, length);
    if (length > 0 && val[length - 1] == '\0') {
      val.SetLength(length - 1);
    }

    // Chunks in List::INFO are always word (two byte) aligned. So round up if
    // necessary.
    length += length % 2;
    p += length;

    if (!IsUTF8(val)) {
      continue;
    }

    for (size_t i = 0; i < mozilla::ArrayLength(ID_TO_NAME); ++i) {
      if (id == ID_TO_NAME[i].id) {
        aTags->Put(ID_TO_NAME[i].name, val);
        break;
      }
    }
  }

  return true;
}
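
// Illustrative sketch (added): minimal pointer-advancing byte readers of the kind
// LoadListChunk relies on. The real ReadUint32BE/ReadUint32LE helpers in WaveReader
// may differ in detail; these just show the byte-order handling and cursor advance.
#include <cstdint>

static uint32_t SketchReadUint32BE(const char** aBuffer)
{
  const unsigned char* p = reinterpret_cast<const unsigned char*>(*aBuffer);
  uint32_t v = (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
               (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
  *aBuffer += 4;                      // advance the caller's cursor past the field
  return v;
}

static uint32_t SketchReadUint32LE(const char** aBuffer)
{
  const unsigned char* p = reinterpret_cast<const unsigned char*>(*aBuffer);
  uint32_t v = (uint32_t(p[3]) << 24) | (uint32_t(p[2]) << 16) |
               (uint32_t(p[1]) << 8)  |  uint32_t(p[0]);
  *aBuffer += 4;
  return v;
}
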
Example #4
double
TimeDuration::ToSeconds() const
{
  NS_ABORT_IF_FALSE(gInitialized, "calling TimeDuration too early");
  return (mValue * sNsPerTick) / kNsPerSecd;
}
Example #5
bool
WaveReader::LoadFormatChunk(uint32_t aChunkSize)
{
  uint32_t rate, channels, frameSize, sampleFormat;
  char waveFormat[WAVE_FORMAT_CHUNK_SIZE];
  const char* p = waveFormat;

  // RIFF chunks are always word (two byte) aligned.
  NS_ABORT_IF_FALSE(mDecoder->GetResource()->Tell() % 2 == 0,
                    "LoadFormatChunk called with unaligned resource");

  if (!ReadAll(waveFormat, sizeof(waveFormat))) {
    return false;
  }

  PR_STATIC_ASSERT(sizeof(uint16_t) +
                   sizeof(uint16_t) +
                   sizeof(uint32_t) +
                   4 +
                   sizeof(uint16_t) +
                   sizeof(uint16_t) <= sizeof(waveFormat));
  if (ReadUint16LE(&p) != WAVE_FORMAT_ENCODING_PCM) {
    NS_WARNING("WAVE is not uncompressed PCM, compressed encodings are not supported");
    return false;
  }

  channels = ReadUint16LE(&p);
  rate = ReadUint32LE(&p);

  // Skip over average bytes per second field.
  p += 4;

  frameSize = ReadUint16LE(&p);

  sampleFormat = ReadUint16LE(&p);

  // PCM encoded WAVEs are not expected to have an extended "format" chunk,
  // but I have found WAVEs that have an extended "format" chunk with an
  // extension size of 0 bytes.  Be polite and handle this rather than
  // considering the file invalid.  This code skips any extension of the
  // "format" chunk.
  if (aChunkSize > WAVE_FORMAT_CHUNK_SIZE) {
    char extLength[2];
    const char* p = extLength;

    if (!ReadAll(extLength, sizeof(extLength))) {
      return false;
    }

    PR_STATIC_ASSERT(sizeof(uint16_t) <= sizeof(extLength));
    uint16_t extra = ReadUint16LE(&p);
    if (aChunkSize - (WAVE_FORMAT_CHUNK_SIZE + 2) != extra) {
      NS_WARNING("Invalid extended format chunk size");
      return false;
    }
    extra += extra % 2;

    if (extra > 0) {
      PR_STATIC_ASSERT(UINT16_MAX + (UINT16_MAX % 2) < UINT_MAX / sizeof(char));
      nsAutoArrayPtr<char> chunkExtension(new char[extra]);
      if (!ReadAll(chunkExtension.get(), extra)) {
        return false;
      }
    }
  }

  // RIFF chunks are always word (two byte) aligned.
  NS_ABORT_IF_FALSE(mDecoder->GetResource()->Tell() % 2 == 0,
                    "LoadFormatChunk left resource unaligned");

  // Make sure metadata is fairly sane.  The rate check is fairly arbitrary,
  // but the channels check is intentionally limited to mono or stereo
  // because that's what the audio backend currently supports.
  unsigned int actualFrameSize = (sampleFormat == 8 ? 1 : 2) * channels;
  if (rate < 100 || rate > 96000 ||
      channels < 1 || channels > MAX_CHANNELS ||
      (frameSize != 1 && frameSize != 2 && frameSize != 4) ||
      (sampleFormat != 8 && sampleFormat != 16) ||
      frameSize != actualFrameSize) {
    NS_WARNING("Invalid WAVE metadata");
    return false;
  }

  ReentrantMonitorAutoEnter monitor(mDecoder->GetReentrantMonitor());
  mSampleRate = rate;
  mChannels = channels;
  mFrameSize = frameSize;
  if (sampleFormat == 8) {
    mSampleFormat = FORMAT_U8;
  } else {
    mSampleFormat = FORMAT_S16;
  }
  return true;
}
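
// For reference (added, illustrative only): the PCM "fmt " chunk fields the parser
// above reads in order, all little-endian. The 16-byte total matches the sum in the
// PR_STATIC_ASSERT above, which is assumed here to equal WAVE_FORMAT_CHUNK_SIZE.
#include <cstdint>

struct SketchWaveFormatChunkPCM {
  uint16_t encoding;        // 1 == WAVE_FORMAT_ENCODING_PCM
  uint16_t channels;        // mono = 1, stereo = 2
  uint32_t sampleRate;      // e.g. 44100
  uint32_t avgBytesPerSec;  // skipped by the parser above ("p += 4")
  uint16_t frameSize;       // block align: channels * bytes per sample
  uint16_t sampleFormat;    // bits per sample: 8 or 16
};
static_assert(sizeof(SketchWaveFormatChunkPCM) == 16,
              "fmt chunk body is 16 bytes for plain PCM");
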
Example #6
// aTime is the time in ms the samples were inserted into MediaStreamGraph
nsresult
AudioStream::Write(const AudioDataValue* aBuf, uint32_t aFrames, TimeStamp *aTime)
{
  MonitorAutoLock mon(mMonitor);
  if (!mCubebStream || mState == ERRORED) {
    return NS_ERROR_FAILURE;
  }
  NS_ASSERTION(mState == INITIALIZED || mState == STARTED,
    "Stream write in unexpected state.");

  // Downmix to Stereo.
  if (mChannels > 2 && mChannels <= 8) {
    DownmixAudioToStereo(const_cast<AudioDataValue*> (aBuf), mChannels, aFrames);
  }
  else if (mChannels > 8) {
    return NS_ERROR_FAILURE;
  }

  const uint8_t* src = reinterpret_cast<const uint8_t*>(aBuf);
  uint32_t bytesToCopy = FramesToBytes(aFrames);

  // XXX this will need to change if we want to enable this on-the-fly!
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG)) {
    // Record the position and time this data was inserted
    int64_t timeMs;
    if (aTime && !aTime->IsNull()) {
      if (mStartTime.IsNull()) {
        AsyncLatencyLogger::Get(true)->GetStartTime(mStartTime);
      }
      timeMs = (*aTime - mStartTime).ToMilliseconds();
    } else {
      timeMs = 0;
    }
    struct Inserts insert = { timeMs, aFrames};
    mInserts.AppendElement(insert);
  }

  while (bytesToCopy > 0) {
    uint32_t available = std::min(bytesToCopy, mBuffer.Available());
    NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0,
        "Must copy complete frames.");

    mBuffer.AppendElements(src, available);
    src += available;
    bytesToCopy -= available;

    if (bytesToCopy > 0) {
      // If we are not playing, but our buffer is full, start playing to make
      // room for soon-to-be-decoded data.
      if (mState != STARTED) {
        StartUnlocked();
        if (mState != STARTED) {
          return NS_ERROR_FAILURE;
        }
      }
      mon.Wait();
    }
  }

  mWritten += aFrames;
  return NS_OK;
}
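
// Illustrative sketch (added): the "append what fits, then block until the consumer
// makes room" loop above, reduced to standard C++ with a condition_variable standing
// in for Gecko's Monitor. This is not the AudioStream implementation, just the shape
// of the producer side.
#include <algorithm>
#include <condition_variable>
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

class SketchBoundedBuffer {
public:
  explicit SketchBoundedBuffer(size_t aCapacity) : mCapacity(aCapacity) {}

  // Blocks until every byte has been queued.
  void Write(const uint8_t* aSrc, size_t aBytes)
  {
    std::unique_lock<std::mutex> lock(mMutex);
    while (aBytes > 0) {
      size_t available = std::min(aBytes, mCapacity - mData.size());
      mData.insert(mData.end(), aSrc, aSrc + available);
      aSrc += available;
      aBytes -= available;
      if (aBytes > 0) {
        // Buffer full: wait for the consumer (the audio callback) to drain it.
        mNotFull.wait(lock, [this] { return mData.size() < mCapacity; });
      }
    }
  }

  // The consumer side would pop from mData and then call mNotFull.notify_one().

private:
  const size_t mCapacity;
  std::vector<uint8_t> mData;
  std::mutex mMutex;
  std::condition_variable mNotFull;
};
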
Example #7
nsresult imgRequest::RemoveProxy(imgRequestProxy *proxy, nsresult aStatus, PRBool aNotify)
{
  LOG_SCOPE_WITH_PARAM(gImgLog, "imgRequest::RemoveProxy", "proxy", proxy);

  mObservers.RemoveElement(proxy);

  /* Check mState below before we potentially call Cancel(), since Cancel()
     may result in OnStopRequest being called back before Cancel() returns,
     leaving mState in a different state than the one it was in at this
     point.
   */

  if (aNotify) {
    // make sure that observer gets an OnStopDecode message sent to it
    if (!(mState & onStopDecode)) {
      proxy->OnStopDecode(aStatus, nsnull);
    }

  }

  // make sure that observer gets an OnStopRequest message sent to it
  if (!(mState & onStopRequest)) {
    proxy->OnStopRequest(nsnull, nsnull, NS_BINDING_ABORTED, PR_TRUE);
  }

  if (mImage && !HaveProxyWithObserver(nsnull)) {
    LOG_MSG(gImgLog, "imgRequest::RemoveProxy", "stopping animation");

    mImage->StopAnimation();
  }

  if (mObservers.IsEmpty()) {
    // If we have no observers, there's nothing holding us alive. If we haven't
    // been cancelled and thus removed from the cache, tell the image loader so
    // we can be evicted from the cache.
    if (mCacheEntry) {
      NS_ABORT_IF_FALSE(mKeyURI, "Removing last observer without key uri.");

      imgLoader::SetHasNoProxies(mKeyURI, mCacheEntry);
    } 
#if defined(PR_LOGGING)
    else {
      nsCAutoString spec;
      mKeyURI->GetSpec(spec);
      LOG_MSG_WITH_PARAM(gImgLog, "imgRequest::RemoveProxy no cache entry", "uri", spec.get());
    }
#endif

    /* If |aStatus| is a failure code, then cancel the load if it is still in progress.
       Otherwise, let the load continue, keeping 'this' in the cache with no observers.
       This way, if a proxy is destroyed without calling cancel on it, it won't leak
       and won't leave a bad pointer in mObservers.
     */
    if (mRequest && mLoading && NS_FAILED(aStatus)) {
      LOG_MSG(gImgLog, "imgRequest::RemoveProxy", "load in progress.  canceling");

      mImageStatus |= imgIRequest::STATUS_LOAD_PARTIAL;

      this->Cancel(NS_BINDING_ABORTED);
    }

    /* break the cycle from the cache entry. */
    mCacheEntry = nsnull;
  }

  // If a proxy is removed for a reason other than its owner being
  // changed, remove the proxy from the loadgroup.
  if (aStatus != NS_IMAGELIB_CHANGING_OWNER)
    proxy->RemoveFromLoadGroup(PR_TRUE);

  return NS_OK;
}
Example #8
nsresult
SpdyStream2::ReadSegments(nsAHttpSegmentReader *reader,
                         uint32_t count,
                         uint32_t *countRead)
{
  LOG3(("SpdyStream2 %p ReadSegments reader=%p count=%d state=%x",
        this, reader, count, mUpstreamState));

  NS_ABORT_IF_FALSE(PR_GetCurrentThread() == gSocketThread, "wrong thread");
  
  nsresult rv = NS_ERROR_UNEXPECTED;
  mRequestBlockedOnRead = 0;

  switch (mUpstreamState) {
  case GENERATING_SYN_STREAM:
  case GENERATING_REQUEST_BODY:
  case SENDING_REQUEST_BODY:
    // Call into the HTTP Transaction to generate the HTTP request
    // stream. That stream will show up in OnReadSegment().
    mSegmentReader = reader;
    rv = mTransaction->ReadSegments(this, count, countRead);
    mSegmentReader = nullptr;

    // Check to see if the transaction's request could be written out now.
    // If not, mark the stream for callback when writing can proceed.
    if (NS_SUCCEEDED(rv) &&
        mUpstreamState == GENERATING_SYN_STREAM &&
        !mSynFrameComplete)
      mSession->TransactionHasDataToWrite(this);
    
    // mTxinlineFrameUsed represents any queued un-sent frame. It might
    // be 0 if there is no such frame, which is not a guarantee that we
    // don't have more request body to send - just that any data that was
    // sent comprised a complete SPDY frame. Likewise, a non 0 value is
    // a queued, but complete, spdy frame length.

    // Mark that we are blocked on read if the http transaction needs to
    // provide more of the request message body and there is nothing queued
    // for writing
    if (rv == NS_BASE_STREAM_WOULD_BLOCK && !mTxInlineFrameUsed)
      mRequestBlockedOnRead = 1;

    if (!mTxInlineFrameUsed && NS_SUCCEEDED(rv) && (!*countRead)) {
      LOG3(("ReadSegments %p: Sending request data complete, mUpstreamState=%x",
            this, mUpstreamState));
      if (mSentFinOnData) {
        ChangeState(UPSTREAM_COMPLETE);
      }
      else {
        GenerateDataFrameHeader(0, true);
        ChangeState(SENDING_FIN_STREAM);
        mSession->TransactionHasDataToWrite(this);
        rv = NS_BASE_STREAM_WOULD_BLOCK;
      }
    }

    break;

  case SENDING_FIN_STREAM:
    // We were trying to send the FIN-STREAM but were blocked from
    // sending it out - try again.
    if (!mSentFinOnData) {
      mSegmentReader = reader;
      rv = TransmitFrame(nullptr, nullptr, false);
      mSegmentReader = nullptr;
      NS_ABORT_IF_FALSE(NS_FAILED(rv) || !mTxInlineFrameUsed,
                        "Transmit Frame should be all or nothing");
      if (NS_SUCCEEDED(rv))
        ChangeState(UPSTREAM_COMPLETE);
    }
    else {
      rv = NS_OK;
      mTxInlineFrameUsed = 0;         // cancel fin data packet
      ChangeState(UPSTREAM_COMPLETE);
    }
    
    *countRead = 0;

    // don't change OK to WOULD BLOCK. we are really done sending if OK
    break;

  case UPSTREAM_COMPLETE:
    *countRead = 0;
    rv = NS_OK;
    break;

  default:
    NS_ABORT_IF_FALSE(false, "SpdyStream2::ReadSegments unknown state");
    break;
  }

  return rv;
}
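
// Summary (added, inferred from the switch above and from OnReadSegment below; not
// the real declaration): the upstream states this stream moves through while sending.
enum SketchSpdyUpstreamState {
  SKETCH_GENERATING_SYN_STREAM,    // request headers being turned into a SYN_STREAM
  SKETCH_GENERATING_REQUEST_BODY,  // next DATA frame header being built
  SKETCH_SENDING_REQUEST_BODY,     // DATA frame payload being transmitted
  SKETCH_SENDING_FIN_STREAM,       // zero-length FIN DATA frame still to be sent
  SKETCH_UPSTREAM_COMPLETE         // nothing further to send for this stream
};
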
Example #9
nsresult
SpdyStream2::OnReadSegment(const char *buf,
                          uint32_t count,
                          uint32_t *countRead)
{
  LOG3(("SpdyStream2::OnReadSegment %p count=%d state=%x",
        this, count, mUpstreamState));

  NS_ABORT_IF_FALSE(PR_GetCurrentThread() == gSocketThread, "wrong thread");
  NS_ABORT_IF_FALSE(mSegmentReader, "OnReadSegment with null mSegmentReader");
  
  nsresult rv = NS_ERROR_UNEXPECTED;
  uint32_t dataLength;

  switch (mUpstreamState) {
  case GENERATING_SYN_STREAM:
    // The buffer is the HTTP request stream, including at least part of the
    // HTTP request header. This state's job is to build a SYN_STREAM frame
    // from the header information. count is the number of http bytes available
    // (which may include more than the header), and in countRead we return
    // the number of those bytes that we consume (i.e. the portion that are
    // header bytes)

    rv = ParseHttpRequestHeaders(buf, count, countRead);
    if (NS_FAILED(rv))
      return rv;
    LOG3(("ParseHttpRequestHeaders %p used %d of %d. complete = %d",
          this, *countRead, count, mSynFrameComplete));
    if (mSynFrameComplete) {
      NS_ABORT_IF_FALSE(mTxInlineFrameUsed,
                        "OnReadSegment SynFrameComplete 0b");
      rv = TransmitFrame(nullptr, nullptr, true);
      if (rv == NS_BASE_STREAM_WOULD_BLOCK) {
        // this can't happen
        NS_ABORT_IF_FALSE(false,
                          "Transmit Frame SYN_FRAME must at least buffer data");
        rv = NS_ERROR_UNEXPECTED;
      }

      ChangeState(GENERATING_REQUEST_BODY);
      break;
    }
    NS_ABORT_IF_FALSE(*countRead == count,
                      "Header parsing not complete but unused data");
    break;

  case GENERATING_REQUEST_BODY:
    dataLength = std::min(count, mChunkSize);
    LOG3(("SpdyStream2 %p id %x request len remaining %d, "
          "count avail %d, chunk used %d",
          this, mStreamID, mRequestBodyLenRemaining, count, dataLength));
    if (dataLength > mRequestBodyLenRemaining)
      return NS_ERROR_UNEXPECTED;
    mRequestBodyLenRemaining -= dataLength;
    GenerateDataFrameHeader(dataLength, !mRequestBodyLenRemaining);
    ChangeState(SENDING_REQUEST_BODY);
    // NO BREAK

  case SENDING_REQUEST_BODY:
    NS_ABORT_IF_FALSE(mTxInlineFrameUsed, "OnReadSegment Send Data Header 0b");
    rv = TransmitFrame(buf, countRead, false);
    NS_ABORT_IF_FALSE(NS_FAILED(rv) || !mTxInlineFrameUsed,
                      "Transmit Frame should be all or nothing");

    LOG3(("TransmitFrame() rv=%x returning %d data bytes. "
          "Header is %d Body is %d.",
          rv, *countRead, mTxInlineFrameUsed, mTxStreamFrameSize));

    // normalize a partial write with a WOULD_BLOCK into just a partial write
    // as some code will take WOULD_BLOCK to mean an error with nothing
    // written (e.g. nsHttpTransaction::ReadRequestSegment()).
    if (rv == NS_BASE_STREAM_WOULD_BLOCK && *countRead)
      rv = NS_OK;

    // If that frame was all sent, look for another one
    if (!mTxInlineFrameUsed)
        ChangeState(GENERATING_REQUEST_BODY);
    break;

  case SENDING_FIN_STREAM:
    NS_ABORT_IF_FALSE(false,
                      "resuming partial fin stream out of OnReadSegment");
    break;
    
  default:
    NS_ABORT_IF_FALSE(false, "SpdyStream2::OnReadSegment non-write state");
    break;
  }
  
  return rv;
}
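
// Illustrative sketch (added): the 8-byte SPDY data frame header that
// GenerateDataFrameHeader(dataLength, lastFrame) presumably produces, following the
// SPDY/2 framing rules: a 31-bit stream ID with the control bit clear, a flags byte
// where FLAG_FIN marks the final frame, and a 24-bit big-endian payload length.
#include <cstdint>

void SketchDataFrameHeader(uint8_t aHeader[8], uint32_t aStreamID,
                           uint32_t aDataLength, bool aLastFrame)
{
  aHeader[0] = (aStreamID >> 24) & 0x7F;   // top bit 0 marks a data (not control) frame
  aHeader[1] = (aStreamID >> 16) & 0xFF;
  aHeader[2] = (aStreamID >> 8) & 0xFF;
  aHeader[3] = aStreamID & 0xFF;
  aHeader[4] = aLastFrame ? 0x01 : 0x00;   // FLAG_FIN on the final frame
  aHeader[5] = (aDataLength >> 16) & 0xFF; // 24-bit length
  aHeader[6] = (aDataLength >> 8) & 0xFF;
  aHeader[7] = aDataLength & 0xFF;
}
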
Example #10
nsresult
SpdyStream2::ParseHttpRequestHeaders(const char *buf,
                                    uint32_t avail,
                                    uint32_t *countUsed)
{
  // Returns NS_OK even if the headers are incomplete;
  // sets the mSynFrameComplete flag if they are complete.

  NS_ABORT_IF_FALSE(PR_GetCurrentThread() == gSocketThread, "wrong thread");
  NS_ABORT_IF_FALSE(mUpstreamState == GENERATING_SYN_STREAM, "wrong state");

  LOG3(("SpdyStream2::ParseHttpRequestHeaders %p avail=%d state=%x",
        this, avail, mUpstreamState));

  mFlatHttpRequestHeaders.Append(buf, avail);

  // We can use the simple double CRLF because Firefox is the
  // only client whose headers we are parsing
  int32_t endHeader = mFlatHttpRequestHeaders.Find("\r\n\r\n");
  
  if (endHeader == kNotFound) {
    // We don't have all the headers yet
    LOG3(("SpdyStream2::ParseHttpRequestHeaders %p "
          "Need more header bytes. Len = %d",
          this, mFlatHttpRequestHeaders.Length()));
    *countUsed = avail;
    return NS_OK;
  }
           
  // We have received all the headers; trim the final empty line from the
  // local buffer, and set countUsed to reflect that the whole header block
  // has been consumed.
  uint32_t oldLen = mFlatHttpRequestHeaders.Length();
  mFlatHttpRequestHeaders.SetLength(endHeader + 2);
  *countUsed = avail - (oldLen - endHeader) + 4;
  mSynFrameComplete = 1;

  // It is now OK to assign a streamID that we are assured will
  // be monotonically increasing amongst syn-streams on this
  // session
  mStreamID = mSession->RegisterStreamID(this);
  NS_ABORT_IF_FALSE(mStreamID & 1,
                    "Spdy Stream Channel ID must be odd");

  if (mStreamID >= 0x80000000) {
    // streamID must fit in 31 bits. Exceeding that is theoretically possible
    // because stream ID assignment is asynchronous to stream creation, due to
    // the protocol requirement that the ID in syn-stream be monotonically
    // increasing. In reality it cannot happen, because new streams stop being
    // added to a session with 0x10000000 / 2 IDs still available and no race
    // condition is going to bridge that gap, so we can be comfortable just
    // erroring out for correctness in that case.
    LOG3(("Stream assigned out of range ID: 0x%X", mStreamID));
    return NS_ERROR_UNEXPECTED;
  }

  // Now we need to convert the flat http headers into a set
  // of SPDY headers, writing to mTxInlineFrame{sz}

  mTxInlineFrame[0] = SpdySession2::kFlag_Control;
  mTxInlineFrame[1] = 2;                          /* version */
  mTxInlineFrame[2] = 0;
  mTxInlineFrame[3] = SpdySession2::CONTROL_TYPE_SYN_STREAM;
  // 4 to 7 are length and flags, we'll fill that in later
  
  uint32_t networkOrderID = PR_htonl(mStreamID);
  memcpy(mTxInlineFrame + 8, &networkOrderID, 4);
  
  // this is the associated-to field, which is not used when sending
  // from the client in the http binding
  memset (mTxInlineFrame + 12, 0, 4);

  // Priority flags are the C0 mask of byte 16.
  //
  // The other 6 bits of 16 are unused. Spdy/3 will expand
  // priority to 3 bits.
  //
  // When Spdy/3 implements WINDOW_UPDATE the lowest priority
  // streams over a threshold (32?) should be given tiny
  // receive windows, separate from their spdy priority
  //
  if (mPriority >= nsISupportsPriority::PRIORITY_LOW)
    mTxInlineFrame[16] = SpdySession2::kPri03;
  else if (mPriority >= nsISupportsPriority::PRIORITY_NORMAL)
    mTxInlineFrame[16] = SpdySession2::kPri02;
  else if (mPriority >= nsISupportsPriority::PRIORITY_HIGH)
    mTxInlineFrame[16] = SpdySession2::kPri01;
  else
    mTxInlineFrame[16] = SpdySession2::kPri00;

  mTxInlineFrame[17] = 0;                         /* unused */
  
  const char *methodHeader = mTransaction->RequestHead()->Method().get();

  nsCString hostHeader;
  mTransaction->RequestHead()->GetHeader(nsHttp::Host, hostHeader);

  nsCString versionHeader;
  if (mTransaction->RequestHead()->Version() == NS_HTTP_VERSION_1_1)
    versionHeader = NS_LITERAL_CSTRING("HTTP/1.1");
  else
    versionHeader = NS_LITERAL_CSTRING("HTTP/1.0");

  nsClassHashtable<nsCStringHashKey, nsCString> hdrHash;
  
  // use RequestHead() to get a sense of how big to make the hash,
  // even though we are parsing the actual text stream, because
  // it is legitimate to append headers.
  hdrHash.Init(1 + (mTransaction->RequestHead()->Headers().Count() * 2));
  
  const char *beginBuffer = mFlatHttpRequestHeaders.BeginReading();

  // need to hash all the headers together to remove duplicates, special
  // headers, etc.

  int32_t crlfIndex = mFlatHttpRequestHeaders.Find("\r\n");
  while (true) {
    int32_t startIndex = crlfIndex + 2;

    crlfIndex = mFlatHttpRequestHeaders.Find("\r\n", false, startIndex);
    if (crlfIndex == -1)
      break;
    
    int32_t colonIndex = mFlatHttpRequestHeaders.Find(":", false, startIndex,
                                                      crlfIndex - startIndex);
    if (colonIndex == -1)
      break;
    
    nsDependentCSubstring name = Substring(beginBuffer + startIndex,
                                           beginBuffer + colonIndex);
    // all header names are lower case in spdy
    ToLowerCase(name);

    if (name.Equals("method") ||
        name.Equals("version") ||
        name.Equals("scheme") ||
        name.Equals("keep-alive") ||
        name.Equals("accept-encoding") ||
        name.Equals("te") ||
        name.Equals("connection") ||
        name.Equals("url"))
      continue;
    
    nsCString *val = hdrHash.Get(name);
    if (!val) {
      val = new nsCString();
      hdrHash.Put(name, val);
    }

    int32_t valueIndex = colonIndex + 1;
    while (valueIndex < crlfIndex && beginBuffer[valueIndex] == ' ')
      ++valueIndex;
    
    nsDependentCSubstring v = Substring(beginBuffer + valueIndex,
                                        beginBuffer + crlfIndex);
    if (!val->IsEmpty())
      val->Append(static_cast<char>(0));
    val->Append(v);

    if (name.Equals("content-length")) {
      int64_t len;
      if (nsHttp::ParseInt64(val->get(), nullptr, &len))
        mRequestBodyLenRemaining = len;
    }
  }
  
  mTxInlineFrameUsed = 18;

  // Do not naively log the request headers here because they might
  // contain auth. The http transaction already logs the sanitized request
  // headers at this same level so it is not necessary to do so here.

  // The header block length
  uint16_t count = hdrHash.Count() + 4; /* method, scheme, url, version */
  CompressToFrame(count);

  // method, scheme, url, and version headers for request line

  CompressToFrame(NS_LITERAL_CSTRING("method"));
  CompressToFrame(methodHeader, strlen(methodHeader));
  CompressToFrame(NS_LITERAL_CSTRING("scheme"));
  CompressToFrame(NS_LITERAL_CSTRING("https"));
  CompressToFrame(NS_LITERAL_CSTRING("url"));
  CompressToFrame(mTransaction->RequestHead()->RequestURI());
  CompressToFrame(NS_LITERAL_CSTRING("version"));
  CompressToFrame(versionHeader);
  
  hdrHash.Enumerate(hdrHashEnumerate, this);
  CompressFlushFrame();
  
  // 4 to 7 are length and flags, which we can now fill in
  (reinterpret_cast<uint32_t *>(mTxInlineFrame.get()))[1] =
    PR_htonl(mTxInlineFrameUsed - 8);

  NS_ABORT_IF_FALSE(!mTxInlineFrame[4],
                    "Size greater than 24 bits");
  
  // Determine whether to put the fin bit on the syn stream frame or whether
  // to wait for a data packet to put it on.

  if (mTransaction->RequestHead()->Method() == nsHttp::Get ||
      mTransaction->RequestHead()->Method() == nsHttp::Connect ||
      mTransaction->RequestHead()->Method() == nsHttp::Head) {
    // for GET, CONNECT, and HEAD place the fin bit right on the
    // syn stream packet

    mSentFinOnData = 1;
    mTxInlineFrame[4] = SpdySession2::kFlag_Data_FIN;
  }
  else if (mTransaction->RequestHead()->Method() == nsHttp::Post ||
           mTransaction->RequestHead()->Method() == nsHttp::Put ||
           mTransaction->RequestHead()->Method() == nsHttp::Options) {
    // place fin in a data frame even for 0 length messages; I've seen
    // the google gateway be unhappy with fin-on-syn for 0 length POST
  }
  else if (!mRequestBodyLenRemaining) {
    // for other HTTP extension methods, rely on the content-length
    // to determine whether or not to put fin on syn
    mSentFinOnData = 1;
    mTxInlineFrame[4] = SpdySession2::kFlag_Data_FIN;
  }
  
  Telemetry::Accumulate(Telemetry::SPDY_SYN_SIZE, mTxInlineFrameUsed - 18);

  // The size of the input headers is approximate
  uint32_t ratio =
    (mTxInlineFrameUsed - 18) * 100 /
    (11 + mTransaction->RequestHead()->RequestURI().Length() +
     mFlatHttpRequestHeaders.Length());
  
  Telemetry::Accumulate(Telemetry::SPDY_SYN_RATIO, ratio);
  return NS_OK;
}
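
// Illustrative sketch (added): the fixed 18-byte SPDY/2 SYN_STREAM prefix that the
// code above assembles by hand in mTxInlineFrame. The numeric values for
// kFlag_Control (0x80) and CONTROL_TYPE_SYN_STREAM (1) are taken from the SPDY/2
// spec, not from the Gecko headers; the offsets mirror the assignments above.
#include <cstdint>

void SketchSynStreamPrefix(uint8_t aFrame[18], uint32_t aStreamID, uint8_t aPriorityBits)
{
  aFrame[0] = 0x80;                        // control bit set (kFlag_Control)
  aFrame[1] = 2;                           // SPDY version 2
  aFrame[2] = 0;
  aFrame[3] = 1;                           // CONTROL_TYPE_SYN_STREAM
  aFrame[4] = aFrame[5] = aFrame[6] = aFrame[7] = 0;      // flags + 24-bit length, set later
  aFrame[8]  = (aStreamID >> 24) & 0x7F;   // 31-bit stream ID, network byte order
  aFrame[9]  = (aStreamID >> 16) & 0xFF;
  aFrame[10] = (aStreamID >> 8) & 0xFF;
  aFrame[11] = aStreamID & 0xFF;
  aFrame[12] = aFrame[13] = aFrame[14] = aFrame[15] = 0;  // associated-to: unused by the client
  aFrame[16] = aPriorityBits;              // priority in the top two bits (kPri00..kPri03)
  aFrame[17] = 0;                          // unused
}
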
Example #11
nsresult
SpdyStream2::TransmitFrame(const char *buf,
                           uint32_t *countUsed,
                           bool forceCommitment)
{
  // If TransmitFrame returns SUCCESS then all the data is sent (or at least
  // buffered at the session level); if it returns WOULD_BLOCK then none of
  // the data is sent.

  // You can call this function with no data and no out parameter in order to
  // flush internal buffers that were previously blocked on writing. You can
  // of course feed new data to it as well.

  NS_ABORT_IF_FALSE(mTxInlineFrameUsed, "empty stream frame in transmit");
  NS_ABORT_IF_FALSE(mSegmentReader, "TransmitFrame with null mSegmentReader");
  NS_ABORT_IF_FALSE((buf && countUsed) || (!buf && !countUsed),
                    "TransmitFrame arguments inconsistent");

  uint32_t transmittedCount;
  nsresult rv;
  
  LOG3(("SpdyStream2::TransmitFrame %p inline=%d stream=%d",
        this, mTxInlineFrameUsed, mTxStreamFrameSize));
  if (countUsed)
    *countUsed = 0;

  // In the (relatively common) event that we have a small amount of data
  // split between the inlineframe and the streamframe, move the stream
  // data into the inlineframe via a copy in order to coalesce into one write.
  // Given the interaction with ssl this is worth the small copy cost.
  if (mTxStreamFrameSize && mTxInlineFrameUsed &&
      mTxStreamFrameSize < SpdySession2::kDefaultBufferSize &&
      mTxInlineFrameUsed + mTxStreamFrameSize < mTxInlineFrameSize) {
    LOG3(("Coalesce Transmit"));
    memcpy (mTxInlineFrame + mTxInlineFrameUsed,
            buf, mTxStreamFrameSize);
    if (countUsed)
      *countUsed += mTxStreamFrameSize;
    mTxInlineFrameUsed += mTxStreamFrameSize;
    mTxStreamFrameSize = 0;
  }

  rv =
    mSegmentReader->CommitToSegmentSize(mTxStreamFrameSize + mTxInlineFrameUsed,
                                        forceCommitment);

  if (rv == NS_BASE_STREAM_WOULD_BLOCK) {
    NS_ABORT_IF_FALSE(!forceCommitment, "forceCommitment with WOULD_BLOCK");
    mSession->TransactionHasDataToWrite(this);
  }
  if (NS_FAILED(rv))     // this will include WOULD_BLOCK
    return rv;

  // This function calls mSegmentReader->OnReadSegment to report the actual SPDY
  // bytes through to the SpdySession2 and then the HttpConnection which calls
  // the socket write function. It will accept all of the inline and stream
  // data because of the above 'commitment' even if it has to buffer
  
  rv = mSegmentReader->OnReadSegment(mTxInlineFrame, mTxInlineFrameUsed,
                                     &transmittedCount);
  LOG3(("SpdyStream2::TransmitFrame for inline session=%p "
        "stream=%p result %x len=%d",
        mSession, this, rv, transmittedCount));

  NS_ABORT_IF_FALSE(rv != NS_BASE_STREAM_WOULD_BLOCK,
                    "inconsistent inline commitment result");

  if (NS_FAILED(rv))
    return rv;

  NS_ABORT_IF_FALSE(transmittedCount == mTxInlineFrameUsed,
                    "inconsistent inline commitment count");
    
  SpdySession2::LogIO(mSession, this, "Writing from Inline Buffer",
                     mTxInlineFrame, transmittedCount);

  if (mTxStreamFrameSize) {
    if (!buf) {
      // this cannot happen
      NS_ABORT_IF_FALSE(false, "Stream transmit with null buf argument to "
                        "TransmitFrame()");
      LOG(("Stream transmit with null buf argument to TransmitFrame()\n"));
      return NS_ERROR_UNEXPECTED;
    }

    rv = mSegmentReader->OnReadSegment(buf, mTxStreamFrameSize,
                                       &transmittedCount);

    LOG3(("SpdyStream2::TransmitFrame for regular session=%p "
          "stream=%p result %x len=%d",
          mSession, this, rv, transmittedCount));
  
    NS_ABORT_IF_FALSE(rv != NS_BASE_STREAM_WOULD_BLOCK,
                      "inconsistent stream commitment result");

    if (NS_FAILED(rv))
      return rv;

    NS_ABORT_IF_FALSE(transmittedCount == mTxStreamFrameSize,
                      "inconsistent stream commitment count");
    
    SpdySession2::LogIO(mSession, this, "Writing from Transaction Buffer",
                       buf, transmittedCount);

    *countUsed += mTxStreamFrameSize;
  }
  
  // calling this will trigger waiting_for if mRequestBodyLenRemaining is 0
  UpdateTransportSendEvents(mTxInlineFrameUsed + mTxStreamFrameSize);

  mTxInlineFrameUsed = 0;
  mTxStreamFrameSize = 0;

  return NS_OK;
}
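
// Illustrative sketch (added): the coalescing test at the top of TransmitFrame,
// reduced to plain arguments. kSketchBufferSize stands in for
// SpdySession2::kDefaultBufferSize; the point is that a small payload is copied into
// the spare space after the already-buffered frame header so both go out in a single
// write.
#include <cstdint>
#include <cstring>

static const uint32_t kSketchBufferSize = 4096;   // assumed threshold, not the real constant

bool SketchMaybeCoalesce(uint8_t* aInline, uint32_t aInlineCapacity, uint32_t& aInlineUsed,
                         const uint8_t* aPayload, uint32_t& aPayloadSize)
{
  if (aPayloadSize && aInlineUsed &&
      aPayloadSize < kSketchBufferSize &&
      aInlineUsed + aPayloadSize < aInlineCapacity) {
    std::memcpy(aInline + aInlineUsed, aPayload, aPayloadSize);
    aInlineUsed += aPayloadSize;
    aPayloadSize = 0;                // everything now lives in the inline buffer
    return true;                     // caller counts the payload bytes as used
  }
  return false;
}
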
Example #12
void
nsGIFDecoder2::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // These variables changed names, and renaming would make a much bigger patch :(
  const uint8_t *buf = (const uint8_t *)aBuffer;
  uint32_t len = aCount;

  const uint8_t *q = buf;

  // Add what we have so far to the block.
  // If a previous call left something in the hold buffer, first complete the
  // current block; or, if we are filling a colormap, first complete the colormap.
  uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
               (mGIFStruct.state == gif_image_colormap) ? (uint8_t*)mColormap :
               (mGIFStruct.bytes_in_hold) ? mGIFStruct.hold : nullptr;
  if (p) {
    // Add what we have so far to the block
    uint32_t l = NS_MIN(len, mGIFStruct.bytes_to_consume);
    memcpy(p+mGIFStruct.bytes_in_hold, buf, l);

    if (l < mGIFStruct.bytes_to_consume) {
      // Not enough in 'buf' to complete current block, get more
      mGIFStruct.bytes_in_hold += l;
      mGIFStruct.bytes_to_consume -= l;
      return;
    }
    // Reset hold buffer count
    mGIFStruct.bytes_in_hold = 0;
    // Point 'q' to complete block in hold (or in colormap)
    q = p;
  }

  // Invariant:
  //    'q' is the start of the current block to be processed (hold, colormap or buf)
  //    'bytes_to_consume' is the number of bytes to consume from 'buf'
  //    'buf' points to the bytes to be consumed from the input buffer
  //    'len' is the number of bytes left in the input buffer from position 'buf'.
  //    At the top of the for loop, 'buf' is advanced by 'bytes_to_consume' to
  //    point to the next block and 'len' is adjusted accordingly, so that on
  //    the next iteration 'q' points to the next block.

  for (;len >= mGIFStruct.bytes_to_consume; q=buf) {
    // Eat the current block from the buffer, q keeps pointed at current block
    buf += mGIFStruct.bytes_to_consume;
    len -= mGIFStruct.bytes_to_consume;

    switch (mGIFStruct.state)
    {
    case gif_lzw:
      if (!DoLzw(q)) {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(1, gif_sub_block);
      break;

    case gif_lzw_start:
    {
      // Make sure the transparent pixel is transparent in the colormap
      if (mGIFStruct.is_transparent) {
        // Save old value so we can restore it later
        if (mColormap == mGIFStruct.global_colormap)
            mOldColor = mColormap[mGIFStruct.tpixel];
        mColormap[mGIFStruct.tpixel] = 0;
      }

      /* Initialize LZW parser/decoder */
      mGIFStruct.datasize = *q;
      const int clear_code = ClearCode();
      if (mGIFStruct.datasize > MAX_LZW_BITS ||
          clear_code >= MAX_BITS) {
        mGIFStruct.state = gif_error;
        break;
      }

      mGIFStruct.avail = clear_code + 2;
      mGIFStruct.oldcode = -1;
      mGIFStruct.codesize = mGIFStruct.datasize + 1;
      mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
      mGIFStruct.datum = mGIFStruct.bits = 0;

      /* init the tables */
      for (int i = 0; i < clear_code; i++)
        mGIFStruct.suffix[i] = i;

      mGIFStruct.stackp = mGIFStruct.stack;

      GETN(1, gif_sub_block);
    }
    break;

    /* All GIF files begin with "GIF87a" or "GIF89a" */
    case gif_type:
      if (!strncmp((char*)q, "GIF89a", 6)) {
        mGIFStruct.version = 89;
      } else if (!strncmp((char*)q, "GIF87a", 6)) {
        mGIFStruct.version = 87;
      } else {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(7, gif_global_header);
      break;

    case gif_global_header:
      /* This is the height and width of the "screen" or
       * frame into which images are rendered.  The
       * individual images can be smaller than the
       * screen size and located with an origin anywhere
       * within the screen.
       */

      mGIFStruct.screen_width = GETINT16(q);
      mGIFStruct.screen_height = GETINT16(q + 2);
      mGIFStruct.global_colormap_depth = (q[4]&0x07) + 1;

      // screen_bgcolor is not used
      //mGIFStruct.screen_bgcolor = q[5];
      // q[6] = Pixel Aspect Ratio
      //   Not used
      //   float aspect = (float)((q[6] + 15) / 64.0);

      if (q[4] & 0x80) { /* global map */
        // Get the global colormap
        const uint32_t size = (3 << mGIFStruct.global_colormap_depth);
        if (len < size) {
          // Use 'hold' pattern to get the global colormap
          GETN(size, gif_global_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mGIFStruct.global_colormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_global_colormap);
        break;
      }

      GETN(1, gif_image_start);
      break;

    case gif_global_colormap:
      // Everything is already copied into global_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mGIFStruct.global_colormap, 1<<mGIFStruct.global_colormap_depth);
      GETN(1, gif_image_start);
      break;

    case gif_image_start:
      switch (*q) {
        case GIF_TRAILER:
          mGIFStruct.state = gif_done;
          break;

        case GIF_EXTENSION_INTRODUCER:
          GETN(2, gif_extension);
          break;

        case GIF_IMAGE_SEPARATOR:
          GETN(9, gif_image_header);
          break;

        default:
          /* If we get anything other than GIF_IMAGE_SEPARATOR, 
           * GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
           * between blocks. The GIF87a spec tells us to keep reading
           * until we find an image separator, but GIF89a says such
           * a file is corrupt. We follow GIF89a and bail out. */
          if (mGIFStruct.images_decoded > 0) {
            /* The file is corrupt, but one or more images have
             * been decoded correctly. In this case, we proceed
             * as if the file were correctly terminated and set
             * the state to gif_done, so the GIF will display.
             */
            mGIFStruct.state = gif_done;
          } else {
            /* No images decoded, there is nothing to display. */
            mGIFStruct.state = gif_error;
          }
      }
      break;

    case gif_extension:
      mGIFStruct.bytes_to_consume = q[1];
      if (mGIFStruct.bytes_to_consume) {
        switch (*q) {
        case GIF_GRAPHIC_CONTROL_LABEL:
          mGIFStruct.state = gif_control_extension;
          break;
  
        case GIF_APPLICATION_EXTENSION_LABEL:
          mGIFStruct.state = gif_application_extension;
          break;
  
        case GIF_COMMENT_LABEL:
          mGIFStruct.state = gif_consume_comment;
          break;
  
        default:
          mGIFStruct.state = gif_skip_block;
        }
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_block:
      if (!*q)
        GETN(1, gif_image_start);
      else
        GETN(*q, gif_skip_block);
      break;

    case gif_skip_block:
      GETN(1, gif_consume_block);
      break;

    case gif_control_extension:
      mGIFStruct.is_transparent = *q & 0x1;
      mGIFStruct.tpixel = q[3];
      mGIFStruct.disposal_method = ((*q) >> 2) & 0x7;
      // Some specs say 3rd bit (value 4), other specs say value 3
      // Let's choose 3 (the more popular)
      if (mGIFStruct.disposal_method == 4)
        mGIFStruct.disposal_method = 3;
      mGIFStruct.delay_time = GETINT16(q + 1) * 10;
      GETN(1, gif_consume_block);
      break;

    case gif_comment_extension:
      if (*q)
        GETN(*q, gif_consume_comment);
      else
        GETN(1, gif_image_start);
      break;

    case gif_consume_comment:
      GETN(1, gif_comment_extension);
      break;

    case gif_application_extension:
      /* Check for netscape application extension */
      if (!strncmp((char*)q, "NETSCAPE2.0", 11) ||
        !strncmp((char*)q, "ANIMEXTS1.0", 11))
        GETN(1, gif_netscape_extension_block);
      else
        GETN(1, gif_consume_block);
      break;

    /* Netscape-specific GIF extension: animation looping */
    case gif_netscape_extension_block:
      if (*q)
        GETN(*q, gif_consume_netscape_extension);
      else
        GETN(1, gif_image_start);
      break;

    /* Parse netscape-specific application extensions */
    case gif_consume_netscape_extension:
      switch (q[0] & 7) {
        case 1:
          /* Loop entire animation specified # of times.  Only read the
             loop count during the first iteration. */
          mGIFStruct.loop_count = GETINT16(q + 1);
          GETN(1, gif_netscape_extension_block);
          break;
        
        case 2:
          /* Wait for specified # of bytes to enter buffer */
          // Don't do this, this extension doesn't exist (isn't used at all) 
          // and doesn't do anything, as our streaming/buffering takes care of it all...
          // See: http://semmix.pl/color/exgraf/eeg24.htm
          GETN(1, gif_netscape_extension_block);
          break;
  
        default:
          // 0,3-7 are yet to be defined netscape extension codes
          mGIFStruct.state = gif_error;
      }
      break;

    case gif_image_header:
    {
      /* Get image offsets, with respect to the screen origin */
      mGIFStruct.x_offset = GETINT16(q);
      mGIFStruct.y_offset = GETINT16(q + 2);

      /* Get image width and height. */
      mGIFStruct.width  = GETINT16(q + 4);
      mGIFStruct.height = GETINT16(q + 6);

      if (!mGIFStruct.images_decoded) {
        /* Work around broken GIF files where the logical screen
         * size has weird width or height.  We assume that GIF87a
         * files don't contain animations.
         */
        if ((mGIFStruct.screen_height < mGIFStruct.height) ||
            (mGIFStruct.screen_width < mGIFStruct.width) ||
            (mGIFStruct.version == 87)) {
          mGIFStruct.screen_height = mGIFStruct.height;
          mGIFStruct.screen_width = mGIFStruct.width;
          mGIFStruct.x_offset = 0;
          mGIFStruct.y_offset = 0;
        }    
        // Create the image container with the right size.
        BeginGIF();
        if (HasError()) {
          // Setting the size led to an error.
          mGIFStruct.state = gif_error;
          return;
        }

        // If we were doing a size decode, we're done
        if (IsSizeDecode())
          return;
      }

      /* Work around more broken GIF files that have zero image
         width or height */
      if (!mGIFStruct.height || !mGIFStruct.width) {
        mGIFStruct.height = mGIFStruct.screen_height;
        mGIFStruct.width = mGIFStruct.screen_width;
        if (!mGIFStruct.height || !mGIFStruct.width) {
          mGIFStruct.state = gif_error;
          break;
        }
      }

      /* Depth of colors is determined by colormap */
      /* (q[8] & 0x80) indicates local colormap */
      /* bits per pixel is (q[8]&0x07 + 1) when local colormap is set */
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80)
        depth = (q[8]&0x07) + 1;
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      } 
      // Mask to limit the color values within the colormap
      mColorMask = 0xFF >> (8 - realDepth);
      nsresult rv = BeginImageFrame(realDepth);
      if (NS_FAILED(rv) || !mImageData) {
        mGIFStruct.state = gif_error;
        break;
      }

      if (q[8] & 0x40) {
        mGIFStruct.interlaced = true;
        mGIFStruct.ipass = 1;
      } else {
        mGIFStruct.interlaced = false;
        mGIFStruct.ipass = 0;
      }

      /* Only apply the Haeberli display hack on the first frame */
      mGIFStruct.progressive_display = (mGIFStruct.images_decoded == 0);

      /* Clear state from last image */
      mGIFStruct.irow = 0;
      mGIFStruct.rows_remaining = mGIFStruct.height;
      mGIFStruct.rowp = mImageData;

      /* bits per pixel is q[8]&0x07 */

      if (q[8] & 0x80) /* has a local colormap? */
      {
        mGIFStruct.local_colormap_size = 1 << depth;
        if (!mGIFStruct.images_decoded) {
          // First frame has local colormap, allocate space for it
          // as the image frame doesn't have its own palette
          mColormapSize = sizeof(uint32_t) << realDepth;
          if (!mGIFStruct.local_colormap) {
            mGIFStruct.local_colormap = (uint32_t*)moz_xmalloc(mColormapSize);
          }
          mColormap = mGIFStruct.local_colormap;
        }
        const uint32_t size = 3 << depth;
        if (mColormapSize > size) {
          // Clear the unfilled part of the colormap
          memset(((uint8_t*)mColormap) + size, 0, mColormapSize - size);
        }
        if (len < size) {
          // Use 'hold' pattern to get the image colormap
          GETN(size, gif_image_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mColormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_image_colormap);
        break;
      } else {
        /* Switch back to the global palette */
        if (mGIFStruct.images_decoded) {
          // Copy global colormap into the palette of current frame
          memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
        } else {
          mColormap = mGIFStruct.global_colormap;
        }
      }
      GETN(1, gif_lzw_start);
    }
    break;

    case gif_image_colormap:
      // Everything is already copied into local_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
      GETN(1, gif_lzw_start);
      break;

    case gif_sub_block:
      mGIFStruct.count = *q;
      if (mGIFStruct.count) {
        /* Still working on the same image: Process next LZW data block */
        /* Make sure there are still rows left. If the GIF data */
        /* is corrupt, we may not get an explicit terminator.   */
        if (!mGIFStruct.rows_remaining) {
#ifdef DONT_TOLERATE_BROKEN_GIFS
          mGIFStruct.state = gif_error;
          break;
#else
          /* This is an illegal GIF, but we remain tolerant. */
          GETN(1, gif_sub_block);
#endif
          if (mGIFStruct.count == GIF_TRAILER) {
            /* Found a terminator anyway, so consider the image done */
            GETN(1, gif_done);
            break;
          }
        }
        GETN(mGIFStruct.count, gif_lzw);
      } else {
        /* See if there are any more images in this sequence. */
        EndImageFrame();
        GETN(1, gif_image_start);
      }
      break;

    case gif_done:
      PostDecodeDone();
      mGIFOpen = false;
      goto done;

    case gif_error:
      PostDataError();
      return;

    // We shouldn't ever get here.
    default:
      break;
    }
  }

  // if an error state is set but no data remains, code flow reaches here
  if (mGIFStruct.state == gif_error) {
      PostDataError();
      return;
  }
  
  // Copy the leftover into mGIFStruct.hold
  mGIFStruct.bytes_in_hold = len;
  if (len) {
    // Add what we have so far to the block
    uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
                 (mGIFStruct.state == gif_image_colormap) ? (uint8_t*)mColormap :
                 mGIFStruct.hold;
    memcpy(p, buf, len);
    mGIFStruct.bytes_to_consume -= len;
  }

// We want to flush before returning if we're on the first frame
done:
  if (!mGIFStruct.images_decoded) {
    FlushImageData();
    mLastFlushedRow = mCurrentRow;
    mLastFlushedPass = mCurrentPass;
  }

  return;
}
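
// Illustrative sketch (added): the "hold" pattern WriteInternal uses when a block
// straddles two calls. Partial bytes are stashed; the next call completes the block
// before the state handler runs. This is a simplified stand-alone version, not the
// gif_struct fields themselves.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct SketchHold {
  std::vector<uint8_t> hold;   // partial block carried over from the previous call
  size_t needed = 0;           // bytes_to_consume for the current block

  // Returns a pointer to a complete block of `needed` bytes, or nullptr if more input
  // is required. Consumes input by advancing *aBuf / shrinking *aLen. After using a
  // returned block the caller clears `hold` and sets the next `needed` (the GETN step).
  const uint8_t* NextBlock(const uint8_t** aBuf, size_t* aLen)
  {
    if (!hold.empty() || *aLen < needed) {
      size_t take = std::min(*aLen, needed - hold.size());
      hold.insert(hold.end(), *aBuf, *aBuf + take);
      *aBuf += take;
      *aLen -= take;
      return hold.size() < needed ? nullptr : hold.data();
    }
    const uint8_t* block = *aBuf;    // fast path: the block is already contiguous
    *aBuf += needed;
    *aLen -= needed;
    return block;
  }
};
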
Example #13
NS_IMETHODIMP
nsSVGForeignObjectFrame::PaintSVG(nsSVGRenderState *aContext,
                                  const nsIntRect *aDirtyRect)
{
  NS_ABORT_IF_FALSE(aDirtyRect, "We expect aDirtyRect to be non-null");

  if (IsDisabled())
    return NS_OK;

  nsIFrame* kid = GetFirstPrincipalChild();
  if (!kid)
    return NS_OK;

  gfxMatrix matrixForChildren = GetCanvasTMForChildren();
  gfxMatrix matrix = GetCanvasTM();

  nsRenderingContext *ctx = aContext->GetRenderingContext(this);

  if (!ctx || matrixForChildren.IsSingular()) {
    NS_WARNING("Can't render foreignObject element!");
    return NS_ERROR_FAILURE;
  }

  /* Check if we need to draw anything. */
  PRInt32 appUnitsPerDevPx = PresContext()->AppUnitsPerDevPixel();
  if (!mRect.ToOutsidePixels(appUnitsPerDevPx).Intersects(*aDirtyRect))
    return NS_OK;

  gfxContext *gfx = aContext->GetGfxContext();

  gfx->Save();

  if (GetStyleDisplay()->IsScrollableOverflow()) {
    float x, y, width, height;
    static_cast<nsSVGElement*>(mContent)->
      GetAnimatedLengthValues(&x, &y, &width, &height, nsnull);

    gfxRect clipRect =
      nsSVGUtils::GetClipRectForFrame(this, 0.0f, 0.0f, width, height);
    nsSVGUtils::SetClipRect(gfx, matrix, clipRect);
  }

  gfx->Multiply(matrixForChildren);

  // Transform the dirty rect into the rectangle containing the
  // transformed dirty rect.
  gfxMatrix invmatrix = matrix.Invert();
  NS_ASSERTION(!invmatrix.IsSingular(),
               "inverse of non-singular matrix should be non-singular");

  gfxRect transDirtyRect = gfxRect(aDirtyRect->x, aDirtyRect->y,
                                   aDirtyRect->width, aDirtyRect->height);
  transDirtyRect = invmatrix.TransformBounds(transDirtyRect);

  transDirtyRect.Scale(nsPresContext::AppUnitsPerCSSPixel());
  nsPoint tl(NSToCoordFloor(transDirtyRect.X()),
             NSToCoordFloor(transDirtyRect.Y()));
  nsPoint br(NSToCoordCeil(transDirtyRect.XMost()),
             NSToCoordCeil(transDirtyRect.YMost()));
  nsRect kidDirtyRect(tl.x, tl.y, br.x - tl.x, br.y - tl.y);

  kidDirtyRect.IntersectRect(kidDirtyRect, kid->GetRect());

  PRUint32 flags = nsLayoutUtils::PAINT_IN_TRANSFORM;
  if (aContext->IsPaintingToWindow()) {
    flags |= nsLayoutUtils::PAINT_TO_WINDOW;
  }
  nsresult rv = nsLayoutUtils::PaintFrame(ctx, kid, nsRegion(kidDirtyRect),
                                          NS_RGBA(0,0,0,0), flags);

  gfx->Restore();

  return rv;
}
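
// Illustrative sketch (added): the "round outward" step above in isolation. After the
// dirty rect is mapped through the inverse transform and scaled to app units, the
// top-left is floored and the bottom-right is ceiled so the integer rect fully covers
// the transformed area. Plain structs stand in for the Gecko gfx/ns rect types.
#include <cmath>

struct SketchFloatRect { double x, y, width, height; };
struct SketchIntRect   { int x, y, width, height; };

SketchIntRect SketchRoundOut(const SketchFloatRect& aRect)
{
  int left   = int(std::floor(aRect.x));
  int top    = int(std::floor(aRect.y));
  int right  = int(std::ceil(aRect.x + aRect.width));
  int bottom = int(std::ceil(aRect.y + aRect.height));
  return SketchIntRect{ left, top, right - left, bottom - top };
}
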
Example #14
void
nsICODecoder::WriteInternal(const char* aBuffer, PRUint32 aCount)
{
    NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

    if (!aCount) // aCount=0 means EOF
        return;

    while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons.
        if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor
            if ((*aBuffer != 1) && (*aBuffer != 2)) {
                PostDataError();
                return;
            }
            mIsCursor = (*aBuffer == 2);
        }
        mPos++;
        aBuffer++;
        aCount--;
    }

    if (mPos == ICONCOUNTOFFSET && aCount >= 2) {
        mNumIcons = LITTLE_TO_NATIVE16(((PRUint16*)aBuffer)[0]);
        aBuffer += 2;
        mPos += 2;
        aCount -= 2;
    }

    if (mNumIcons == 0)
        return; // Nothing to do.

    PRUint16 colorDepth = 0;
    // Loop through each entry's dir entry
    while (mCurrIcon < mNumIcons) {
        if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) &&
                mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) {
            PRUint32 toCopy = sizeof(mDirEntryArray) -
                              (mPos - DIRENTRYOFFSET - mCurrIcon * sizeof(mDirEntryArray));
            if (toCopy > aCount) {
                toCopy = aCount;
            }
            memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy);
            mPos += toCopy;
            aCount -= toCopy;
            aBuffer += toCopy;
        }
        if (aCount == 0)
            return; // Need more data

        IconDirEntry e;
        if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) +
                (mCurrIcon * sizeof(mDirEntryArray))) {
            mCurrIcon++;
            ProcessDirEntry(e);
            // We can't use GetRealWidth and GetRealHeight here because those operate
            // on mDirEntry; here we are going through each item in the directory
            if (((e.mWidth == 0 ? 256 : e.mWidth) == PREFICONSIZE &&
                    (e.mHeight == 0 ? 256 : e.mHeight) == PREFICONSIZE &&
                    (e.mBitCount >= colorDepth)) ||
                    (mCurrIcon == mNumIcons && mImageOffset == 0)) {
                mImageOffset = e.mImageOffset;

                // ensure mImageOffset is >= size of the direntry headers (bug #245631)
                PRUint32 minImageOffset = DIRENTRYOFFSET +
                                          mNumIcons * sizeof(mDirEntryArray);
                if (mImageOffset < minImageOffset) {
                    PostDataError();
                    return;
                }

                colorDepth = e.mBitCount;
                memcpy(&mDirEntry, &e, sizeof(IconDirEntry));
            }
        }
    }

    if (mPos < mImageOffset) {
        // Skip to (or at least towards) the desired image offset
        PRUint32 toSkip = mImageOffset - mPos;
        if (toSkip > aCount)
            toSkip = aCount;

        mPos    += toSkip;
        aBuffer += toSkip;
        aCount  -= toSkip;
    }

    // If we are within the first PNGSIGNATURESIZE bytes of the image data,
    // then we have either a BMP or a PNG.  We use the first PNGSIGNATURESIZE
    // bytes to determine which one we have.
    if (mCurrIcon == mNumIcons && mPos >= mImageOffset &&
            mPos < mImageOffset + PNGSIGNATURESIZE)
    {
        PRUint32 toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset);
        if (toCopy > aCount) {
            toCopy = aCount;
        }

        memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy);
        mPos += toCopy;
        aCount -= toCopy;
        aBuffer += toCopy;

        mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes,
                         PNGSIGNATURESIZE);
        if (mIsPNG) {
            mContainedDecoder = new nsPNGDecoder(mImage, mObserver);
            mContainedDecoder->InitSharedDecoder();
            mContainedDecoder->Write(mSignature, PNGSIGNATURESIZE);
            mDataError = mContainedDecoder->HasDataError();
            if (mContainedDecoder->HasDataError()) {
                return;
            }
        }
    }

    // If we have a PNG, let the PNG decoder do all of the rest of the work
    if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) {
        mContainedDecoder->Write(aBuffer, aCount);
        mDataError = mContainedDecoder->HasDataError();
        if (mContainedDecoder->HasDataError()) {
            return;
        }
        mPos += aCount;
        aBuffer += aCount;
        aCount = 0;

        // Raymond Chen says that only 32bpp PNGs are valid inside ICOs
        // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx
        if (static_cast<nsPNGDecoder*>(mContainedDecoder.get())->HasValidInfo() &&
                static_cast<nsPNGDecoder*>(mContainedDecoder.get())->GetPixelDepth() != 32) {
            PostDataError();
        }
        return;
    }

    // We've processed all of the icon dir entries and are within the
    // bitmap info size
    if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset &&
            mPos >= mImageOffset + PNGSIGNATURESIZE &&
            mPos < mImageOffset + BITMAPINFOSIZE) {

        // As we were decoding, we did not know if we had a PNG signature or the
        // start of a bitmap information header.  At this point we know we had
        // a bitmap information header and not a PNG signature, so fill the bitmap
        // information header with the data it should already have.
        memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE);

        // We've found the icon.
        PRUint32 toCopy = sizeof(mBIHraw) - (mPos - mImageOffset);
        if (toCopy > aCount)
            toCopy = aCount;

        memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy);
        mPos += toCopy;
        aCount -= toCopy;
        aBuffer += toCopy;
    }

    // If we have a BMP inside the ICO and we have read the BIH header
    if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) {

        // Make sure we have a sane value for the bitmap information header
        PRInt32 bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<PRInt8*>(mBIHraw));
        if (bihSize != BITMAPINFOSIZE) {
            PostDataError();
            return;
        }
        // We are extracting the BPP from the BIH header as it should be trusted
        // over the one we have from the icon header
        mBPP = ExtractBPPFromBitmap(reinterpret_cast<PRInt8*>(mBIHraw));

        // Init the bitmap decoder which will do most of the work for us
        // It will do everything except the AND mask, which isn't present in bitmaps.
        // bmpDecoder is kept as a local for convenience; it will be freed by mContainedDecoder
        nsBMPDecoder *bmpDecoder = new nsBMPDecoder(mImage, mObserver);
        mContainedDecoder = bmpDecoder;
        bmpDecoder->SetUseAlphaData(true);
        mContainedDecoder->SetSizeDecode(IsSizeDecode());
        mContainedDecoder->InitSharedDecoder();

        // The ICO format when containing a BMP does not include the 14 byte
        // bitmap file header. To use the code of the BMP decoder we need to
        // generate this header ourselves and feed it to the BMP decoder.
        PRInt8 bfhBuffer[BMPFILEHEADERSIZE];
        if (!FillBitmapFileHeaderBuffer(bfhBuffer)) {
            PostDataError();
            return;
        }
        mContainedDecoder->Write((const char*)bfhBuffer, sizeof(bfhBuffer));
        mDataError = mContainedDecoder->HasDataError();
        if (mContainedDecoder->HasDataError()) {
            return;
        }

        // Setup the cursor hot spot if one is present
        SetHotSpotIfCursor();

        // Fix the height on the BMP resource
        FillBitmapInformationBufferHeight((PRInt8*)mBIHraw);

        // Write out the BMP's bitmap info header
        mContainedDecoder->Write(mBIHraw, sizeof(mBIHraw));
        mDataError = mContainedDecoder->HasDataError();
        if (mContainedDecoder->HasDataError()) {
            return;
        }

        // We have the size. If we're doing a size decode, we got what
        // we came for.
        if (IsSizeDecode())
            return;

        // Sometimes the ICO BPP header field is not filled out
        // so we should trust the contained resource over our own
        // information.
        mBPP = bmpDecoder->GetBitsPerPixel();

        // Check to make sure we have valid color settings
        PRUint16 numColors = GetNumColors();
        if (numColors == (PRUint16)-1) {
            PostDataError();
            return;
        }
    }

    // If we have a BMP
    if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) {
        PRUint16 numColors = GetNumColors();
        if (numColors == (PRUint16)-1) {
            PostDataError();
            return;
        }
        // Feed the actual image data (not including headers) into the BMP decoder
        PRInt32 bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE;
        PRInt32 bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE +
                             static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetCompressedImageSize() +
                             4 * numColors;

        // If we are feeding in the core image data, but we have not yet
        // reached the ICO's 'AND buffer mask'
        if (mPos >= bmpDataOffset && mPos < bmpDataEnd) {

            // Figure out how much data the BMP decoder wants
            PRUint32 toFeed = bmpDataEnd - mPos;
            if (toFeed > aCount) {
                toFeed = aCount;
            }

            mContainedDecoder->Write(aBuffer, toFeed);
            mDataError = mContainedDecoder->HasDataError();
            if (mContainedDecoder->HasDataError()) {
                return;
            }

            mPos += toFeed;
            aCount -= toFeed;
            aBuffer += toFeed;
        }

        // If the bitmap is fully processed, treat any left over data as the ICO's
        // 'AND buffer mask' which appears after the bitmap resource.
        if (!mIsPNG && mPos >= bmpDataEnd) {
            // There may be an optional AND bit mask after the data.  This is
            // only used if the alpha data is not already set. The alpha data
            // is used for 32bpp bitmaps as per the comment in ICODecoder.h
            // The alpha mask should be checked in all other cases.
            if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetBitsPerPixel() != 32 ||
                    !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->HasAlphaData()) {
                PRUint32 rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up
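                // (The AND mask is 1 bit per pixel and each row is padded to a
                // 32-bit boundary; e.g. a 33-pixel-wide icon needs
                // ((33 + 31) / 32) * 4 = 8 bytes per mask row.)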
                if (mPos == bmpDataEnd) {
                    mPos++;
                    mRowBytes = 0;
                    mCurLine = GetRealHeight();
                    mRow = (PRUint8*)moz_realloc(mRow, rowSize);
                    if (!mRow) {
                        PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
                        return;
                    }
                }

                // Ensure memory has been allocated before decoding.
                NS_ABORT_IF_FALSE(mRow, "mRow is null");
                if (!mRow) {
                    PostDataError();
                    return;
                }

                while (mCurLine > 0 && aCount > 0) {
                    PRUint32 toCopy = NS_MIN(rowSize - mRowBytes, aCount);
                    if (toCopy) {
                        memcpy(mRow + mRowBytes, aBuffer, toCopy);
                        aCount -= toCopy;
                        aBuffer += toCopy;
                        mRowBytes += toCopy;
                    }
                    if (rowSize == mRowBytes) {
                        mCurLine--;
                        mRowBytes = 0;

                        PRUint32* imageData =
                            static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetImageData();
                        if (!imageData) {
                            PostDataError();
                            return;
                        }
                        PRUint32* decoded = imageData + mCurLine * GetRealWidth();
                        PRUint32* decoded_end = decoded + GetRealWidth();
                        PRUint8* p = mRow, *p_end = mRow + rowSize;
                        while (p < p_end) {
                            PRUint8 idx = *p++;
                            for (PRUint8 bit = 0x80; bit && decoded<decoded_end; bit >>= 1) {
                                // Clear pixel completely for transparency.
                                if (idx & bit) {
                                    *decoded = 0;
                                }
                                decoded++;
                            }
                        }
                    }
                }
            }
void
SVGMotionSMILAnimationFunction::
  RebuildPathAndVerticesFromBasicAttrs(const nsIContent* aContextElem)
{
  NS_ABORT_IF_FALSE(!HasAttr(nsGkAtoms::path),
                    "Should be using |path| attr if we have it");
  NS_ABORT_IF_FALSE(!mPath, "regenerating when we already have path");
  NS_ABORT_IF_FALSE(mPathVertices.IsEmpty(),
                    "regenerating when we already have vertices");

  if (!aContextElem->IsSVG()) {
    NS_ERROR("Uh oh, SVG animateMotion element targeting a non-SVG node");
    return;
  }

  SVGMotionSMILPathUtils::PathGenerator
    pathGenerator(static_cast<const nsSVGElement*>(aContextElem));

  bool success = false;
  if (HasAttr(nsGkAtoms::values)) {
    // Generate path based on our values array
    mPathSourceType = ePathSourceType_ValuesAttr;
    const nsAString& valuesStr = GetAttr(nsGkAtoms::values)->GetStringValue();
    SVGMotionSMILPathUtils::MotionValueParser parser(&pathGenerator,
                                                     &mPathVertices);
    success =
      NS_SUCCEEDED(nsSMILParserUtils::ParseValuesGeneric(valuesStr, parser));
  } else if (HasAttr(nsGkAtoms::to) || HasAttr(nsGkAtoms::by)) {
    // Apply 'from' value (or a dummy 0,0 'from' value)
    if (HasAttr(nsGkAtoms::from)) {
      const nsAString& fromStr = GetAttr(nsGkAtoms::from)->GetStringValue();
      success = pathGenerator.MoveToAbsolute(fromStr);
      mPathVertices.AppendElement(0.0);
    } else {
      // Create dummy 'from' value at 0,0, if we're doing by-animation.
      // (NOTE: We don't add the dummy 0-point to our list for *to-animation*,
      // because the nsSMILAnimationFunction logic for to-animation doesn't
      // expect a dummy value. It only expects one value: the final 'to' value.)
      pathGenerator.MoveToOrigin();
      if (!HasAttr(nsGkAtoms::to)) {
        mPathVertices.AppendElement(0.0);
      }
      success = true;
    }

    // Apply 'to' or 'by' value
    if (success) {
      double dist;
      if (HasAttr(nsGkAtoms::to)) {
        mPathSourceType = ePathSourceType_ToAttr;
        const nsAString& toStr = GetAttr(nsGkAtoms::to)->GetStringValue();
        success = pathGenerator.LineToAbsolute(toStr, dist);
      } else { // HasAttr(nsGkAtoms::by)
        mPathSourceType = ePathSourceType_ByAttr;
        const nsAString& byStr = GetAttr(nsGkAtoms::by)->GetStringValue();
        success = pathGenerator.LineToRelative(byStr, dist);
      }
      if (success) {
        mPathVertices.AppendElement(dist);
      }
    }
  }
  if (success) {
    mPath = pathGenerator.GetResultingPath();
  } else {
    // Parse failure. Leave path as null, and clear path-related member data.
    mPathVertices.Clear();
  }
}
NS_IMETHODIMP nsDeviceContextSpecQt::GetSurfaceForPrinter(
        gfxASurface** aSurface)
{
    NS_ENSURE_ARG_POINTER(aSurface);
    *aSurface = nullptr;

    double width, height;
    mPrintSettings->GetEffectivePageSize(&width, &height);

    // If we're in landscape mode, we'll be rotating the output --
    // need to swap width & height.
    int32_t orientation;
    mPrintSettings->GetOrientation(&orientation);
    if (nsIPrintSettings::kLandscapeOrientation == orientation) {
        double tmp = width;
        width = height;
        height = tmp;
    }

    // convert twips to points
    width  /= TWIPS_PER_POINT_FLOAT;
    height /= TWIPS_PER_POINT_FLOAT;

    DO_PR_DEBUG_LOG(("\"%s\", %f, %f\n", mPath, width, height));

    QTemporaryFile file;
    if (!file.open()) {
        return NS_ERROR_GFX_PRINTER_COULD_NOT_OPEN_FILE;
    }
    file.setAutoRemove(false);

    nsresult rv = NS_NewNativeLocalFile(
            nsDependentCString(file.fileName().toUtf8().constData()),
            false,
            getter_AddRefs(mSpoolFile));
    if (NS_FAILED(rv)) {
        file.remove();
        return NS_ERROR_GFX_PRINTER_COULD_NOT_OPEN_FILE;
    }

    mSpoolName = file.fileName().toUtf8().constData();

    mSpoolFile->SetPermissions(0600);

    nsCOMPtr<nsIFileOutputStream> stream =
        do_CreateInstance("@mozilla.org/network/file-output-stream;1");

    rv = stream->Init(mSpoolFile, -1, -1, 0);
    if (NS_FAILED(rv))
        return rv;

    int16_t format;
    mPrintSettings->GetOutputFormat(&format);

    nsRefPtr<gfxASurface> surface;
    gfxSize surfaceSize(width, height);

    if (format == nsIPrintSettings::kOutputFormatNative) {
        if (mIsPPreview) {
            // There is nothing to detect on Print Preview, use PS.
            // TODO: implement for Qt?
            //format = nsIPrintSettings::kOutputFormatPS;
            return NS_ERROR_NOT_IMPLEMENTED;
        }
        format = nsIPrintSettings::kOutputFormatPDF;
    }
    if (format == nsIPrintSettings::kOutputFormatPDF) {
        surface = new gfxPDFSurface(stream, surfaceSize);
    } else {
        return NS_ERROR_NOT_IMPLEMENTED;
    }

    NS_ABORT_IF_FALSE(surface, "valid address expected");

    surface.swap(*aSurface);
    return NS_OK;
}
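
The twips-to-points division above relies on a point being 20 twips (1 inch = 1440 twips = 72 points), and the landscape case simply swaps the two dimensions first. A minimal standalone sketch of the same conversion, using hypothetical names rather than the Gecko constants:

#include <utility>

// Convert a page size from twips to points (1 point == 20 twips), swapping
// the dimensions first for landscape output, as GetSurfaceForPrinter() does.
struct PageSizePts { double width; double height; };

static PageSizePts TwipsToPoints(double widthTwips, double heightTwips,
                                 bool landscape)
{
    const double kTwipsPerPoint = 20.0;
    if (landscape) {
        std::swap(widthTwips, heightTwips);
    }
    return { widthTwips / kTwipsPerPoint, heightTwips / kTwipsPerPoint };
}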
Example #17
nsresult
AudioStream::Init(int32_t aNumChannels, int32_t aRate,
                  const dom::AudioChannelType aAudioChannelType,
                  LatencyRequest aLatencyRequest)
{
  cubeb* cubebContext = GetCubebContext();

  if (!cubebContext || aNumChannels < 0 || aRate < 0) {
    return NS_ERROR_FAILURE;
  }

  PR_LOG(gAudioStreamLog, PR_LOG_DEBUG,
    ("%s  channels: %d, rate: %d", __FUNCTION__, aNumChannels, aRate));
  mInRate = mOutRate = aRate;
  mChannels = aNumChannels;
  mOutChannels = (aNumChannels > 2) ? 2 : aNumChannels;
  mLatencyRequest = aLatencyRequest;

  mDumpFile = OpenDumpFile(this);

  cubeb_stream_params params;
  params.rate = aRate;
  params.channels = mOutChannels;
#if defined(__ANDROID__)
#if defined(MOZ_B2G)
  params.stream_type = ConvertChannelToCubebType(aAudioChannelType);
#else
  params.stream_type = CUBEB_STREAM_TYPE_MUSIC;
#endif

  if (params.stream_type == CUBEB_STREAM_TYPE_MAX) {
    return NS_ERROR_INVALID_ARG;
  }
#endif
  if (AUDIO_OUTPUT_FORMAT == AUDIO_FORMAT_S16) {
    params.format = CUBEB_SAMPLE_S16NE;
  } else {
    params.format = CUBEB_SAMPLE_FLOAT32NE;
  }
  mBytesPerFrame = sizeof(AudioDataValue) * mOutChannels;

  mAudioClock.Init();

  // If the latency pref is set, use it. Otherwise, if this stream is intended
  // for low latency playback, try to get the lowest latency possible.
  // Otherwise, for normal streams, use 100ms.
  uint32_t latency;
  if (aLatencyRequest == LowLatency && !CubebLatencyPrefSet()) {
    if (cubeb_get_min_latency(cubebContext, params, &latency) != CUBEB_OK) {
      latency = GetCubebLatency();
    }
  } else {
    latency = GetCubebLatency();
  }

  {
    cubeb_stream* stream;
    if (cubeb_stream_init(cubebContext, &stream, "AudioStream", params,
                          latency, DataCallback_S, StateCallback_S, this) == CUBEB_OK) {
      mCubebStream.own(stream);
    }
  }

  if (!mCubebStream) {
    return NS_ERROR_FAILURE;
  }

  // Size mBuffer for one second of audio.  This value is arbitrary, and was
  // selected based on the observed behaviour of the existing AudioStream
  // implementations.
  uint32_t bufferLimit = FramesToBytes(aRate);
  NS_ABORT_IF_FALSE(bufferLimit % mBytesPerFrame == 0, "Must buffer complete frames");
  mBuffer.SetCapacity(bufferLimit);

  // Start the stream right away when low latency has been requested. This means
  // that the DataCallback will feed silence to cubeb until the first frames
  // are written to this AudioStream.
  if (mLatencyRequest == LowLatency) {
    Start();
  }

  return NS_OK;
}
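
The sizing above works in frames: one frame is one sample per output channel, so a frame is sizeof(AudioDataValue) * mOutChannels bytes and a one-second buffer is aRate frames. A small sketch of that arithmetic with generic, hypothetical names:

#include <cstdint>

// One audio frame holds one sample for each output channel.
static uint32_t BytesPerFrame(uint32_t bytesPerSample, uint32_t channels)
{
    return bytesPerSample * channels;
}

// Bytes needed to buffer one second of audio, mirroring the
// FramesToBytes(aRate) call used for bufferLimit above.
static uint32_t OneSecondBufferBytes(uint32_t rate, uint32_t bytesPerSample,
                                     uint32_t channels)
{
    return rate * BytesPerFrame(bytesPerSample, channels);
}

// e.g. 44100 Hz, 16-bit stereo: 44100 * 2 * 2 = 176400 bytes.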
Example #18
void
nsSVGForeignObjectFrame::NotifySVGChanged(PRUint32 aFlags)
{
  NS_ABORT_IF_FALSE(!(aFlags & DO_NOT_NOTIFY_RENDERING_OBSERVERS) ||
                    (GetStateBits() & NS_STATE_SVG_NONDISPLAY_CHILD),
                    "Must be NS_STATE_SVG_NONDISPLAY_CHILD!");

  NS_ABORT_IF_FALSE(aFlags & (TRANSFORM_CHANGED | COORD_CONTEXT_CHANGED),
                    "Invalidation logic may need adjusting");

  bool needNewBounds = false; // i.e. mRect or visual overflow rect
  bool needReflow = false;
  bool needNewCanvasTM = false;

  if (aFlags & COORD_CONTEXT_CHANGED) {
    nsSVGForeignObjectElement *fO =
      static_cast<nsSVGForeignObjectElement*>(mContent);
    // Coordinate context changes affect mCanvasTM if we have a
    // percentage 'x' or 'y'
    if (fO->mLengthAttributes[nsSVGForeignObjectElement::X].IsPercentage() ||
        fO->mLengthAttributes[nsSVGForeignObjectElement::Y].IsPercentage()) {
      needNewBounds = true;
      needNewCanvasTM = true;
    }
    // Our coordinate context's width/height has changed. If we have a
    // percentage width/height our dimensions will change so we must reflow.
    if (fO->mLengthAttributes[nsSVGForeignObjectElement::WIDTH].IsPercentage() ||
        fO->mLengthAttributes[nsSVGForeignObjectElement::HEIGHT].IsPercentage()) {
      needNewBounds = true;
      needReflow = true;
    }
  }

  if (aFlags & TRANSFORM_CHANGED) {
    needNewBounds = true; // needed if it was _our_ transform that changed
    needNewCanvasTM = true;
    // In an ideal world we would reflow when our CTM changes. This is because
    // glyph metrics do not necessarily scale uniformly with change in scale
    // and, as a result, CTM changes may require text to break at different
    // points. The problem would be how to keep performance acceptable when
    // e.g. the transform of an ancestor is animated.
    // We also seem to get some sort of infinite loop post bug 421584 if we
    // reflow.
  }

  if (needNewBounds &&
      !(aFlags & DO_NOT_NOTIFY_RENDERING_OBSERVERS)) {
    nsSVGUtils::InvalidateAndScheduleBoundsUpdate(this);
  }

  // If we're called while the PresShell is handling reflow events then we
  // must have been called as a result of the NotifyViewportChange() call in
  // our nsSVGOuterSVGFrame's Reflow() method. We must not call RequestReflow
  // at this point (i.e. during reflow) because it could confuse the
  // PresShell and prevent it from reflowing us properly in future. Besides
  // that, nsSVGOuterSVGFrame::DidReflow will take care of reflowing us
  // synchronously, so there's no need.
  if (needReflow && !PresContext()->PresShell()->IsReflowLocked()) {
    RequestReflow(nsIPresShell::eResize);
  }

  if (needNewCanvasTM) {
    // Do this after calling InvalidateAndScheduleBoundsUpdate in case we
    // change the code and it needs to use it.
    mCanvasTM = nsnull;
  }
}
Example #19
long
AudioStream::DataCallback(void* aBuffer, long aFrames)
{
  MonitorAutoLock mon(mMonitor);
  uint32_t available = std::min(static_cast<uint32_t>(FramesToBytes(aFrames)), mBuffer.Length());
  NS_ABORT_IF_FALSE(available % mBytesPerFrame == 0, "Must copy complete frames");
  AudioDataValue* output = reinterpret_cast<AudioDataValue*>(aBuffer);
  uint32_t underrunFrames = 0;
  uint32_t servicedFrames = 0;
  int64_t insertTime;

  if (available) {
    // When we are playing a low latency stream, and it is the first time we are
    // getting data from the buffer, we prefer to add the silence for an
    // underrun at the beginning of the buffer, so the first buffer is not cut
    // in half by the silence inserted to compensate for the underrun.
    if (mInRate == mOutRate) {
      if (mLatencyRequest == LowLatency && !mWritten) {
        servicedFrames = GetUnprocessedWithSilencePadding(output, aFrames, insertTime);
      } else {
        servicedFrames = GetUnprocessed(output, aFrames, insertTime);
      }
    } else {
      servicedFrames = GetTimeStretched(output, aFrames, insertTime);
    }
    float scaled_volume = float(GetVolumeScale() * mVolume);

    ScaleAudioSamples(output, aFrames * mOutChannels, scaled_volume);

    NS_ABORT_IF_FALSE(mBuffer.Length() % mBytesPerFrame == 0, "Must copy complete frames");

    // Notify any blocked Write() call that more space is available in mBuffer.
    mon.NotifyAll();
  } else {
    GetBufferInsertTime(insertTime);
  }

  underrunFrames = aFrames - servicedFrames;

  if (mState != DRAINING) {
    uint8_t* rpos = static_cast<uint8_t*>(aBuffer) + FramesToBytes(aFrames - underrunFrames);
    memset(rpos, 0, FramesToBytes(underrunFrames));
    if (underrunFrames) {
      PR_LOG(gAudioStreamLog, PR_LOG_WARNING,
             ("AudioStream %p lost %d frames", this, underrunFrames));
    }
    mLostFrames += underrunFrames;
    servicedFrames += underrunFrames;
  }

  WriteDumpFile(mDumpFile, this, aFrames, aBuffer);
  // Don't log if we're not interested or if the stream is inactive
  if (PR_LOG_TEST(GetLatencyLog(), PR_LOG_DEBUG) &&
      insertTime != INT64_MAX && servicedFrames > underrunFrames) {
    uint32_t latency = UINT32_MAX;
    if (cubeb_stream_get_latency(mCubebStream, &latency)) {
      NS_WARNING("Could not get latency from cubeb.");
    }
    TimeStamp now = TimeStamp::Now();

    mLatencyLog->Log(AsyncLatencyLogger::AudioStream, reinterpret_cast<uint64_t>(this),
                     insertTime, now);
    mLatencyLog->Log(AsyncLatencyLogger::Cubeb, reinterpret_cast<uint64_t>(mCubebStream.get()),
                     (latency * 1000) / mOutRate, now);
  }

  mAudioClock.UpdateWritePosition(servicedFrames);
  return servicedFrames;
}
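
When the buffer cannot supply all aFrames requested, the callback above zero-fills the unserviced tail and counts those frames as serviced; all-zero bytes are silence for both the S16 and float32 sample formats used here. A condensed sketch of just that padding step, with hypothetical names:

#include <cstdint>
#include <cstring>

// Zero-fill the tail of a callback buffer that the ring buffer could not
// service. frameBytes is sizeof(sample) * channels.
static void PadUnderrunWithSilence(uint8_t* buffer, long totalFrames,
                                   long servicedFrames, size_t frameBytes)
{
    long underrunFrames = totalFrames - servicedFrames;
    if (underrunFrames <= 0) {
        return;
    }
    std::memset(buffer + servicedFrames * frameBytes, 0,
                underrunFrames * frameBytes);
}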
Example #20
nsCSSValue::nsCSSValue(const nsCSSValue& aCopy)
  : mUnit(aCopy.mUnit)
{
  if (mUnit <= eCSSUnit_DummyInherit) {
    // nothing to do, but put this important case first
  }
  else if (eCSSUnit_Percent <= mUnit) {
    mValue.mFloat = aCopy.mValue.mFloat;
  }
  else if (UnitHasStringValue()) {
    mValue.mString = aCopy.mValue.mString;
    mValue.mString->AddRef();
  }
  else if (eCSSUnit_Integer <= mUnit && mUnit <= eCSSUnit_EnumColor) {
    mValue.mInt = aCopy.mValue.mInt;
  }
  else if (eCSSUnit_Color == mUnit) {
    mValue.mColor = aCopy.mValue.mColor;
  }
  else if (UnitHasArrayValue()) {
    mValue.mArray = aCopy.mValue.mArray;
    mValue.mArray->AddRef();
  }
  else if (eCSSUnit_URL == mUnit) {
    mValue.mURL = aCopy.mValue.mURL;
    mValue.mURL->AddRef();
  }
  else if (eCSSUnit_Image == mUnit) {
    mValue.mImage = aCopy.mValue.mImage;
    mValue.mImage->AddRef();
  }
  else if (eCSSUnit_Gradient == mUnit) {
    mValue.mGradient = aCopy.mValue.mGradient;
    mValue.mGradient->AddRef();
  }
  else if (eCSSUnit_Pair == mUnit) {
    mValue.mPair = aCopy.mValue.mPair;
    mValue.mPair->AddRef();
  }
  else if (eCSSUnit_Triplet == mUnit) {
    mValue.mTriplet = aCopy.mValue.mTriplet;
    mValue.mTriplet->AddRef();
  }
  else if (eCSSUnit_Rect == mUnit) {
    mValue.mRect = aCopy.mValue.mRect;
    mValue.mRect->AddRef();
  }
  else if (eCSSUnit_List == mUnit) {
    mValue.mList = aCopy.mValue.mList;
    mValue.mList->AddRef();
  }
  else if (eCSSUnit_ListDep == mUnit) {
    mValue.mListDependent = aCopy.mValue.mListDependent;
  }
  else if (eCSSUnit_PairList == mUnit) {
    mValue.mPairList = aCopy.mValue.mPairList;
    mValue.mPairList->AddRef();
  }
  else if (eCSSUnit_PairListDep == mUnit) {
    mValue.mPairListDependent = aCopy.mValue.mPairListDependent;
  }
  else {
    NS_ABORT_IF_FALSE(false, "unknown unit");
  }
}
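
The copy constructor above is a hand-rolled tagged-union copy: mUnit is the tag, scalar members are copied by value, and heap-backed members are shared with an extra AddRef. A condensed sketch of the same pattern using hypothetical types:

#include <cstdint>

// Minimal tagged-union copy in the same style: the tag says which member is
// live, and the heap-backed member is reference counted rather than cloned.
struct RefCountedStr { int refCnt = 1; void AddRef() { ++refCnt; } };

struct TaggedValue {
    enum Tag { eInt, eFloat, eString } mTag;
    union {
        int32_t mInt;
        float mFloat;
        RefCountedStr* mString;
    };

    TaggedValue(const TaggedValue& aCopy) : mTag(aCopy.mTag) {
        switch (mTag) {
            case eInt:    mInt = aCopy.mInt;     break;
            case eFloat:  mFloat = aCopy.mFloat; break;
            case eString: mString = aCopy.mString; // share the buffer...
                          mString->AddRef();       // ...and bump its refcount
                          break;
        }
    }
};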
TimeDuration
TimeDuration::Resolution()
{
  NS_ABORT_IF_FALSE(gInitialized, "calling TimeDuration too early");
  return TimeDuration::FromTicks(PRInt64(sResolution));
}
Example #22
imgIRequest* nsCSSValue::GetImageValue() const
{
  NS_ABORT_IF_FALSE(mUnit == eCSSUnit_Image, "not an Image value");
  return mValue.mImage->mRequest;
}
TimeDuration
TimeDuration::FromMilliseconds(double aMilliseconds)
{
  NS_ABORT_IF_FALSE(gInitialized, "calling TimeDuration too early");
  return TimeDuration::FromTicks(PRInt64((aMilliseconds * kNsPerMsd) / sNsPerTick));
}
Example #24
void
nsCSSValue::AppendToString(nsCSSProperty aProperty, nsAString& aResult) const
{
  // eCSSProperty_UNKNOWN gets used for some recursive calls below.
  NS_ABORT_IF_FALSE((0 <= aProperty &&
                     aProperty <= eCSSProperty_COUNT_no_shorthands) ||
                    aProperty == eCSSProperty_UNKNOWN,
                    "property ID out of range");

  nsCSSUnit unit = GetUnit();
  if (unit == eCSSUnit_Null) {
    return;
  }

  if (eCSSUnit_String <= unit && unit <= eCSSUnit_Attr) {
    if (unit == eCSSUnit_Attr) {
      aResult.AppendLiteral("attr(");
    }
    nsAutoString  buffer;
    GetStringValue(buffer);
    if (unit == eCSSUnit_String) {
      nsStyleUtil::AppendEscapedCSSString(buffer, aResult);
    } else if (unit == eCSSUnit_Families) {
      // XXX We really need to do *some* escaping.
      aResult.Append(buffer);
    } else {
      nsStyleUtil::AppendEscapedCSSIdent(buffer, aResult);
    }
  }
  else if (eCSSUnit_Array <= unit && unit <= eCSSUnit_Steps) {
    switch (unit) {
      case eCSSUnit_Counter:  aResult.AppendLiteral("counter(");  break;
      case eCSSUnit_Counters: aResult.AppendLiteral("counters("); break;
      case eCSSUnit_Cubic_Bezier: aResult.AppendLiteral("cubic-bezier("); break;
      case eCSSUnit_Steps: aResult.AppendLiteral("steps("); break;
      default: break;
    }

    nsCSSValue::Array *array = GetArrayValue();
    PRBool mark = PR_FALSE;
    for (size_t i = 0, i_end = array->Count(); i < i_end; ++i) {
      if (aProperty == eCSSProperty_border_image && i >= 5) {
        if (array->Item(i).GetUnit() == eCSSUnit_Null) {
          continue;
        }
        if (i == 5) {
          aResult.AppendLiteral(" /");
        }
      }
      if (mark && array->Item(i).GetUnit() != eCSSUnit_Null) {
        if (unit == eCSSUnit_Array &&
            eCSSProperty_transition_timing_function != aProperty)
          aResult.AppendLiteral(" ");
        else
          aResult.AppendLiteral(", ");
      }
      if (unit == eCSSUnit_Steps && i == 1) {
        NS_ABORT_IF_FALSE(array->Item(i).GetUnit() == eCSSUnit_Enumerated &&
                          (array->Item(i).GetIntValue() ==
                            NS_STYLE_TRANSITION_TIMING_FUNCTION_STEP_START ||
                           array->Item(i).GetIntValue() ==
                            NS_STYLE_TRANSITION_TIMING_FUNCTION_STEP_END),
                          "unexpected value");
        if (array->Item(i).GetIntValue() ==
              NS_STYLE_TRANSITION_TIMING_FUNCTION_STEP_START) {
          aResult.AppendLiteral("start");
        } else {
          aResult.AppendLiteral("end");
        }
        continue;
      }
      nsCSSProperty prop =
        ((eCSSUnit_Counter <= unit && unit <= eCSSUnit_Counters) &&
         i == array->Count() - 1)
        ? eCSSProperty_list_style_type : aProperty;
      if (array->Item(i).GetUnit() != eCSSUnit_Null) {
        array->Item(i).AppendToString(prop, aResult);
        mark = PR_TRUE;
      }
    }
    if (eCSSUnit_Array == unit &&
        aProperty == eCSSProperty_transition_timing_function) {
      aResult.AppendLiteral(")");
    }
  }
  /* Although Function is backed by an Array, we'll handle it separately
   * because it's a bit quirky.
   */
  else if (eCSSUnit_Function == unit) {
    const nsCSSValue::Array* array = GetArrayValue();
    NS_ABORT_IF_FALSE(array->Count() >= 1,
                      "Functions must have at least one element for the name.");

    /* Append the function name. */
    const nsCSSValue& functionName = array->Item(0);
    if (functionName.GetUnit() == eCSSUnit_Enumerated) {
      // We assume that the first argument is always of nsCSSKeyword type.
      const nsCSSKeyword functionId =
        static_cast<nsCSSKeyword>(functionName.GetIntValue());
      nsStyleUtil::AppendEscapedCSSIdent(
        NS_ConvertASCIItoUTF16(nsCSSKeywords::GetStringValue(functionId)),
        aResult);
    } else {
      functionName.AppendToString(aProperty, aResult);
    }
    aResult.AppendLiteral("(");

    /* Now, step through the function contents, writing each of them as we go. */
    for (size_t index = 1; index < array->Count(); ++index) {
      array->Item(index).AppendToString(aProperty, aResult);

      /* If we're not at the final element, append a comma. */
      if (index + 1 != array->Count())
        aResult.AppendLiteral(", ");
    }

    /* Finally, append the closing parenthesis. */
    aResult.AppendLiteral(")");
  }
  else if (IsCalcUnit()) {
    NS_ABORT_IF_FALSE(GetUnit() == eCSSUnit_Calc, "unexpected unit");
    CSSValueSerializeCalcOps ops(aProperty, aResult);
    css::SerializeCalc(*this, ops);
  }
  else if (eCSSUnit_Integer == unit) {
    aResult.AppendInt(GetIntValue(), 10);
  }
  else if (eCSSUnit_Enumerated == unit) {
    if (eCSSProperty_text_decoration_line == aProperty) {
      PRInt32 intValue = GetIntValue();
      if (NS_STYLE_TEXT_DECORATION_LINE_NONE == intValue) {
        AppendASCIItoUTF16(nsCSSProps::LookupPropertyValue(aProperty, intValue),
                           aResult);
      } else {
        // Ignore the "override all" internal value.
        // (It doesn't have a string representation.)
        intValue &= ~NS_STYLE_TEXT_DECORATION_LINE_OVERRIDE_ALL;
        nsStyleUtil::AppendBitmaskCSSValue(
          aProperty, intValue,
          NS_STYLE_TEXT_DECORATION_LINE_UNDERLINE,
          NS_STYLE_TEXT_DECORATION_LINE_PREF_ANCHORS,
          aResult);
      }
    }
    else if (eCSSProperty_marks == aProperty) {
      PRInt32 intValue = GetIntValue();
      if (intValue == NS_STYLE_PAGE_MARKS_NONE) {
        AppendASCIItoUTF16(nsCSSProps::LookupPropertyValue(aProperty, intValue),
                           aResult);
      } else {
        nsStyleUtil::AppendBitmaskCSSValue(aProperty, intValue,
                                           NS_STYLE_PAGE_MARKS_CROP,
                                           NS_STYLE_PAGE_MARKS_REGISTER,
                                           aResult);
      }
    }
    else {
      const nsAFlatCString& name = nsCSSProps::LookupPropertyValue(aProperty, GetIntValue());
      AppendASCIItoUTF16(name, aResult);
    }
  }
  else if (eCSSUnit_EnumColor == unit) {
    // we can lookup the property in the ColorTable and then
    // get a string mapping the name
    nsCAutoString str;
    if (nsCSSProps::GetColorName(GetIntValue(), str)){
      AppendASCIItoUTF16(str, aResult);
    } else {
      NS_ABORT_IF_FALSE(false, "bad color value");
    }
  }
  else if (eCSSUnit_Color == unit) {
    nscolor color = GetColorValue();
    if (color == NS_RGBA(0, 0, 0, 0)) {
      // Use the strictest match for 'transparent' so we do correct
      // round-tripping of all other rgba() values.
      aResult.AppendLiteral("transparent");
    } else {
      PRUint8 a = NS_GET_A(color);
      if (a < 255) {
        aResult.AppendLiteral("rgba(");
      } else {
        aResult.AppendLiteral("rgb(");
      }

      NS_NAMED_LITERAL_STRING(comma, ", ");

      aResult.AppendInt(NS_GET_R(color), 10);
      aResult.Append(comma);
      aResult.AppendInt(NS_GET_G(color), 10);
      aResult.Append(comma);
      aResult.AppendInt(NS_GET_B(color), 10);
      if (a < 255) {
        aResult.Append(comma);
        aResult.AppendFloat(nsStyleUtil::ColorComponentToFloat(a));
      }
      aResult.Append(PRUnichar(')'));
    }
  }
  else if (eCSSUnit_URL == unit || eCSSUnit_Image == unit) {
    aResult.Append(NS_LITERAL_STRING("url("));
    nsStyleUtil::AppendEscapedCSSString(
      nsDependentString(GetOriginalURLValue()), aResult);
    aResult.Append(NS_LITERAL_STRING(")"));
  }
  else if (eCSSUnit_Element == unit) {
    aResult.Append(NS_LITERAL_STRING("-moz-element(#"));
    nsAutoString tmpStr;
    GetStringValue(tmpStr);
    nsStyleUtil::AppendEscapedCSSIdent(
      nsDependentString(tmpStr), aResult);
    aResult.Append(NS_LITERAL_STRING(")"));
  }
  else if (eCSSUnit_Percent == unit) {
    aResult.AppendFloat(GetPercentValue() * 100.0f);
  }
  else if (eCSSUnit_Percent < unit) {  // length unit
    aResult.AppendFloat(GetFloatValue());
  }
  else if (eCSSUnit_Gradient == unit) {
    nsCSSValueGradient* gradient = GetGradientValue();

    if (gradient->mIsRepeating) {
      if (gradient->mIsRadial)
        aResult.AppendLiteral("-moz-repeating-radial-gradient(");
      else
        aResult.AppendLiteral("-moz-repeating-linear-gradient(");
    } else {
      if (gradient->mIsRadial)
        aResult.AppendLiteral("-moz-radial-gradient(");
      else
        aResult.AppendLiteral("-moz-linear-gradient(");
    }

    if (gradient->mBgPos.mXValue.GetUnit() != eCSSUnit_None ||
        gradient->mBgPos.mYValue.GetUnit() != eCSSUnit_None ||
        gradient->mAngle.GetUnit() != eCSSUnit_None) {
      if (gradient->mBgPos.mXValue.GetUnit() != eCSSUnit_None) {
        gradient->mBgPos.mXValue.AppendToString(eCSSProperty_background_position,
                                                aResult);
        aResult.AppendLiteral(" ");
      }
      if (gradient->mBgPos.mYValue.GetUnit() != eCSSUnit_None) {
        gradient->mBgPos.mYValue.AppendToString(eCSSProperty_background_position,
                                                aResult);
        aResult.AppendLiteral(" ");
      }
      if (gradient->mAngle.GetUnit() != eCSSUnit_None) {
        gradient->mAngle.AppendToString(aProperty, aResult);
      }
      aResult.AppendLiteral(", ");
    }

    if (gradient->mIsRadial &&
        (gradient->mRadialShape.GetUnit() != eCSSUnit_None ||
         gradient->mRadialSize.GetUnit() != eCSSUnit_None)) {
      if (gradient->mRadialShape.GetUnit() != eCSSUnit_None) {
        NS_ABORT_IF_FALSE(gradient->mRadialShape.GetUnit() ==
                          eCSSUnit_Enumerated,
                          "bad unit for radial gradient shape");
        PRInt32 intValue = gradient->mRadialShape.GetIntValue();
        NS_ABORT_IF_FALSE(intValue != NS_STYLE_GRADIENT_SHAPE_LINEAR,
                          "radial gradient with linear shape?!");
        AppendASCIItoUTF16(nsCSSProps::ValueToKeyword(intValue,
                               nsCSSProps::kRadialGradientShapeKTable),
                           aResult);
        aResult.AppendLiteral(" ");
      }

      if (gradient->mRadialSize.GetUnit() != eCSSUnit_None) {
        NS_ABORT_IF_FALSE(gradient->mRadialSize.GetUnit() ==
                          eCSSUnit_Enumerated,
                          "bad unit for radial gradient size");
        PRInt32 intValue = gradient->mRadialSize.GetIntValue();
        AppendASCIItoUTF16(nsCSSProps::ValueToKeyword(intValue,
                               nsCSSProps::kRadialGradientSizeKTable),
                           aResult);
      }
      aResult.AppendLiteral(", ");
    }

    for (PRUint32 i = 0 ;;) {
      gradient->mStops[i].mColor.AppendToString(aProperty, aResult);
      if (gradient->mStops[i].mLocation.GetUnit() != eCSSUnit_None) {
        aResult.AppendLiteral(" ");
        gradient->mStops[i].mLocation.AppendToString(aProperty, aResult);
      }
      if (++i == gradient->mStops.Length()) {
        break;
      }
      aResult.AppendLiteral(", ");
    }

    aResult.AppendLiteral(")");
  } else if (eCSSUnit_Pair == unit) {
    GetPairValue().AppendToString(aProperty, aResult);
  } else if (eCSSUnit_Triplet == unit) {
    GetTripletValue().AppendToString(aProperty, aResult);
  } else if (eCSSUnit_Rect == unit) {
    GetRectValue().AppendToString(aProperty, aResult);
  } else if (eCSSUnit_List == unit || eCSSUnit_ListDep == unit) {
    GetListValue()->AppendToString(aProperty, aResult);
  } else if (eCSSUnit_PairList == unit || eCSSUnit_PairListDep == unit) {
    GetPairListValue()->AppendToString(aProperty, aResult);
  }

  switch (unit) {
    case eCSSUnit_Null:         break;
    case eCSSUnit_Auto:         aResult.AppendLiteral("auto");     break;
    case eCSSUnit_Inherit:      aResult.AppendLiteral("inherit");  break;
    case eCSSUnit_Initial:      aResult.AppendLiteral("-moz-initial"); break;
    case eCSSUnit_None:         aResult.AppendLiteral("none");     break;
    case eCSSUnit_Normal:       aResult.AppendLiteral("normal");   break;
    case eCSSUnit_System_Font:  aResult.AppendLiteral("-moz-use-system-font"); break;
    case eCSSUnit_All:          aResult.AppendLiteral("all"); break;
    case eCSSUnit_Dummy:
    case eCSSUnit_DummyInherit:
      NS_ABORT_IF_FALSE(false, "should never serialize");
      break;

    case eCSSUnit_String:       break;
    case eCSSUnit_Ident:        break;
    case eCSSUnit_Families:     break;
    case eCSSUnit_URL:          break;
    case eCSSUnit_Image:        break;
    case eCSSUnit_Element:      break;
    case eCSSUnit_Array:        break;
    case eCSSUnit_Attr:
    case eCSSUnit_Cubic_Bezier:
    case eCSSUnit_Steps:
    case eCSSUnit_Counter:
    case eCSSUnit_Counters:     aResult.Append(PRUnichar(')'));    break;
    case eCSSUnit_Local_Font:   break;
    case eCSSUnit_Font_Format:  break;
    case eCSSUnit_Function:     break;
    case eCSSUnit_Calc:         break;
    case eCSSUnit_Calc_Plus:    break;
    case eCSSUnit_Calc_Minus:   break;
    case eCSSUnit_Calc_Times_L: break;
    case eCSSUnit_Calc_Times_R: break;
    case eCSSUnit_Calc_Divided: break;
    case eCSSUnit_Integer:      break;
    case eCSSUnit_Enumerated:   break;
    case eCSSUnit_EnumColor:    break;
    case eCSSUnit_Color:        break;
    case eCSSUnit_Percent:      aResult.Append(PRUnichar('%'));    break;
    case eCSSUnit_Number:       break;
    case eCSSUnit_Gradient:     break;
    case eCSSUnit_Pair:         break;
    case eCSSUnit_Triplet:      break;
    case eCSSUnit_Rect:         break;
    case eCSSUnit_List:         break;
    case eCSSUnit_ListDep:      break;
    case eCSSUnit_PairList:     break;
    case eCSSUnit_PairListDep:  break;

    case eCSSUnit_Inch:         aResult.AppendLiteral("in");   break;
    case eCSSUnit_Millimeter:   aResult.AppendLiteral("mm");   break;
    case eCSSUnit_PhysicalMillimeter: aResult.AppendLiteral("mozmm");   break;
    case eCSSUnit_Centimeter:   aResult.AppendLiteral("cm");   break;
    case eCSSUnit_Point:        aResult.AppendLiteral("pt");   break;
    case eCSSUnit_Pica:         aResult.AppendLiteral("pc");   break;

    case eCSSUnit_EM:           aResult.AppendLiteral("em");   break;
    case eCSSUnit_XHeight:      aResult.AppendLiteral("ex");   break;
    case eCSSUnit_Char:         aResult.AppendLiteral("ch");   break;
    case eCSSUnit_RootEM:       aResult.AppendLiteral("rem");  break;

    case eCSSUnit_Pixel:        aResult.AppendLiteral("px");   break;

    case eCSSUnit_Degree:       aResult.AppendLiteral("deg");  break;
    case eCSSUnit_Grad:         aResult.AppendLiteral("grad"); break;
    case eCSSUnit_Radian:       aResult.AppendLiteral("rad");  break;

    case eCSSUnit_Hertz:        aResult.AppendLiteral("Hz");   break;
    case eCSSUnit_Kilohertz:    aResult.AppendLiteral("kHz");  break;

    case eCSSUnit_Seconds:      aResult.Append(PRUnichar('s'));    break;
    case eCSSUnit_Milliseconds: aResult.AppendLiteral("ms");   break;
  }
}
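
In the eCSSUnit_Color branch above, fully transparent rgba(0,0,0,0) round-trips as the keyword "transparent"; anything else is emitted as rgb() or, when alpha is below 255, rgba() with the alpha converted to a 0-1 float. A rough standalone sketch of that serialization, using std::string and a hypothetical helper name (the exact float rounding of nsStyleUtil::ColorComponentToFloat may differ):

#include <cstdint>
#include <cstdio>
#include <string>

// Serialize a color the way the eCSSUnit_Color branch above does.
static std::string SerializeColor(uint8_t r, uint8_t g, uint8_t b, uint8_t a)
{
    if (r == 0 && g == 0 && b == 0 && a == 0) {
        return "transparent";
    }
    char buf[64];
    if (a < 255) {
        std::snprintf(buf, sizeof(buf), "rgba(%d, %d, %d, %g)",
                      r, g, b, a / 255.0);
    } else {
        std::snprintf(buf, sizeof(buf), "rgb(%d, %d, %d)", r, g, b);
    }
    return buf;
}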
Example #25
double
WaveReader::BytesToTime(int64_t aBytes) const
{
  NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
  return float(aBytes) / mSampleRate / mFrameSize;
}
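
BytesToTime above is the inverse of WaveReader::TimeToBytes: seconds = bytes / (sampleRate * frameSize), with the byte count in the other direction truncated to a whole frame. A tiny sketch of both directions with standalone names:

#include <cstdint>

// seconds = bytes / (sampleRate * frameSize); mirrors WaveReader::BytesToTime.
static double BytesToSeconds(int64_t bytes, uint32_t sampleRate, uint32_t frameSize)
{
    return double(bytes) / sampleRate / frameSize;
}

// bytes = seconds * sampleRate * frameSize, truncated to a whole frame,
// mirroring WaveReader::TimeToBytes + RoundDownToFrame.
static int64_t SecondsToBytes(double seconds, uint32_t sampleRate, uint32_t frameSize)
{
    int64_t bytes = int64_t(seconds * sampleRate * frameSize);
    return bytes - (bytes % frameSize);
}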
Example #26
PRStatus
nsSOCKSSocketInfo::ReadV5ConnectResponseTop()
{
    PRUint8 res;
    PRUint32 len;

    NS_ABORT_IF_FALSE(mState == SOCKS5_READ_CONNECT_RESPONSE_TOP,
                      "Invalid state!");
    NS_ABORT_IF_FALSE(mDataLength == 5,
                      "SOCKS 5 connection reply must be exactly 5 bytes!");

    LOGDEBUG(("socks5: checking connection reply"));

    // Check version number
    if (ReadUint8() != 0x05) {
        LOGERROR(("socks5: unexpected version in the reply"));
        HandshakeFinished(PR_CONNECT_REFUSED_ERROR);
        return PR_FAILURE;
    }

    // Check response
    res = ReadUint8();
    if (res != 0x00) {
        PRErrorCode c = PR_CONNECT_REFUSED_ERROR;

        switch (res) {
            case 0x01:
                LOGERROR(("socks5: connect failed: "
                          "01, General SOCKS server failure."));
                break;
            case 0x02:
                LOGERROR(("socks5: connect failed: "
                          "02, Connection not allowed by ruleset."));
                break;
            case 0x03:
                LOGERROR(("socks5: connect failed: 03, Network unreachable."));
                c = PR_NETWORK_UNREACHABLE_ERROR;
                break;
            case 0x04:
                LOGERROR(("socks5: connect failed: 04, Host unreachable."));
                break;
            case 0x05:
                LOGERROR(("socks5: connect failed: 05, Connection refused."));
                break;
            case 0x06:  
                LOGERROR(("socks5: connect failed: 06, TTL expired."));
                c = PR_CONNECT_TIMEOUT_ERROR;
                break;
            case 0x07:
                LOGERROR(("socks5: connect failed: "
                          "07, Command not supported."));
                break;
            case 0x08:
                LOGERROR(("socks5: connect failed: "
                          "08, Address type not supported."));
                c = PR_BAD_ADDRESS_ERROR;
                break;
            default:
                LOGERROR(("socks5: connect failed."));
                break;
        }

        HandshakeFinished(c);
        return PR_FAILURE;
    }

    if (ReadV5AddrTypeAndLength(&res, &len) != PR_SUCCESS) {
        HandshakeFinished(PR_BAD_ADDRESS_ERROR);
        return PR_FAILURE;
    }

    mState = SOCKS5_READ_CONNECT_RESPONSE_BOTTOM;
    WantRead(len + 2);

    return PR_SUCCESS;
}
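
After the fixed 5-byte reply prefix, the code above asks ReadV5AddrTypeAndLength for the bound-address type and length and then waits for len + 2 more bytes, the extra two being the port. Per RFC 1928 the address field size depends on the ATYP byte; a minimal sketch of that mapping with a hypothetical helper (Gecko's own accounting subtracts the bytes it has already consumed):

#include <cstdint>

// RFC 1928 bound-address field length for a given ATYP byte.
// domainLengthOctet is the length prefix that precedes a domain name (ATYP 0x03).
// Returns -1 for an unknown address type.
static int32_t Socks5AddrFieldLength(uint8_t atyp, uint8_t domainLengthOctet)
{
    switch (atyp) {
        case 0x01: return 4;                      // IPv4 address
        case 0x03: return 1 + domainLengthOctet;  // length octet + domain name
        case 0x04: return 16;                     // IPv6 address
        default:   return -1;
    }
}
// The reply is complete once the address field plus 2 port bytes have arrived.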
Example #27
int64_t
WaveReader::RoundDownToFrame(int64_t aBytes) const
{
  NS_ABORT_IF_FALSE(aBytes >= 0, "Must be >= 0");
  return aBytes - (aBytes % mFrameSize);
}
already_AddRefed<Layer>
RenderFrameParent::BuildLayer(nsDisplayListBuilder* aBuilder,
                              nsIFrame* aFrame,
                              LayerManager* aManager,
                              const nsIntRect& aVisibleRect)
{
  NS_ABORT_IF_FALSE(aFrame,
                    "makes no sense to have a shadow tree without a frame");
  NS_ABORT_IF_FALSE(!mContainer ||
                    IsTempLayerManager(aManager) ||
                    mContainer->Manager() == aManager,
                    "retaining manager changed out from under us ... HELP!");

  if (mContainer && mContainer->Manager() != aManager) {
    // This can happen if aManager is a "temporary" manager, or if the
    // widget's layer manager changed out from under us.  We need to
    // FIXME handle the former case somehow, probably with an API to
    // draw a manager's subtree.  The latter is bad bad bad, but the
    // the NS_ABORT_IF_FALSE() above will flag it.  Returning NULL
    // here will just cause the shadow subtree not to be rendered.
    return nsnull;
  }

  if (mContainer) {
    ClearContainer(mContainer);
  }

  ContainerLayer* shadowRoot = GetRootLayer();
  if (!shadowRoot) {
    mContainer = nsnull;
    return nsnull;
  }

  NS_ABORT_IF_FALSE(!shadowRoot || shadowRoot->Manager() == aManager,
                    "retaining manager changed out from under us ... HELP!");

  // Wrap the shadow layer tree in mContainer.
  if (!mContainer) {
    mContainer = aManager->CreateContainerLayer();
  }
  NS_ABORT_IF_FALSE(!mContainer->GetFirstChild(),
                    "container of shadow tree shouldn't have a 'root' here");

  mContainer->InsertAfter(shadowRoot, nsnull);

  AssertInTopLevelChromeDoc(mContainer, aFrame);
  ViewTransform transform;
  TransformShadowTree(aBuilder, mFrameLoader, aFrame, shadowRoot, transform);
  mContainer->SetClipRect(nsnull);

  if (mFrameLoader->AsyncScrollEnabled()) {
    const nsContentView* view = GetContentView(FrameMetrics::ROOT_SCROLL_ID);
    BuildBackgroundPatternFor(mContainer,
                              shadowRoot,
                              view->GetViewConfig(),
                              mBackgroundColor,
                              aManager, aFrame);
  }
  mContainer->SetVisibleRegion(aVisibleRect);

  return nsRefPtr<Layer>(mContainer).forget();
}
Example #29
bool
WaveReader::LoadAllChunks(nsAutoPtr<nsHTMLMediaElement::MetadataTags> &aTags)
{
  // Chunks are always word (two byte) aligned.
  NS_ABORT_IF_FALSE(mDecoder->GetResource()->Tell() % 2 == 0,
                    "LoadAllChunks called with unaligned resource");

  bool loadFormatChunk = false;
  bool findDataOffset = false;

  for (;;) {
    static const unsigned int CHUNK_HEADER_SIZE = 8;
    char chunkHeader[CHUNK_HEADER_SIZE];
    const char* p = chunkHeader;

    if (!ReadAll(chunkHeader, sizeof(chunkHeader))) {
      return false;
    }

    PR_STATIC_ASSERT(sizeof(uint32_t) * 2 <= CHUNK_HEADER_SIZE);

    uint32_t magic = ReadUint32BE(&p);
    uint32_t chunkSize = ReadUint32LE(&p);
    int64_t chunkStart = GetPosition();

    switch (magic) {
      case FRMT_CHUNK_MAGIC:
        loadFormatChunk = LoadFormatChunk(chunkSize);
        if (!loadFormatChunk) {
          return false;
        }
        break;

      case LIST_CHUNK_MAGIC:
        if (!aTags) {
          LoadListChunk(chunkSize, aTags);
        }
        break;

      case DATA_CHUNK_MAGIC:
        findDataOffset = FindDataOffset(chunkSize);
        return loadFormatChunk && findDataOffset;

      default:
        break;
    }

    // RIFF chunks are two-byte aligned, so round up if necessary.
    chunkSize += chunkSize % 2;

    // Move forward to next chunk
    CheckedInt64 forward = CheckedInt64(chunkStart) + chunkSize - GetPosition();

    if (!forward.isValid() || forward.value() < 0) {
      return false;
    }

    static const int64_t MAX_CHUNK_SIZE = 1 << 16;
    PR_STATIC_ASSERT(uint64_t(MAX_CHUNK_SIZE) < UINT_MAX / sizeof(char));
    nsAutoArrayPtr<char> chunk(new char[MAX_CHUNK_SIZE]);
    while (forward.value() > 0) {
      int64_t size = NS_MIN(forward.value(), MAX_CHUNK_SIZE);
      if (!ReadAll(chunk.get(), size)) {
        return false;
      }
      forward -= size;
    }
  }

  return false;
}
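
RIFF chunk payloads are padded to even (word) boundaries, which is what the chunkSize += chunkSize % 2 adjustment above does: odd sizes gain one pad byte, even sizes are unchanged. The same adjustment as a standalone helper:

#include <cstdint>

// Round a RIFF chunk payload size up to the next 2-byte boundary,
// matching the chunkSize += chunkSize % 2 adjustment above.
static uint32_t WordAlignChunkSize(uint32_t chunkSize)
{
    return chunkSize + (chunkSize % 2);
}
// e.g. WordAlignChunkSize(13) == 14, WordAlignChunkSize(14) == 14.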
Example #30
void
DOMSVGPathSegList::InternalListWillChangeTo(const SVGPathData& aNewValue)
{
  // When the number of items in our internal counterpart changes, we MUST stay
  // in sync. Everything in the scary comment in
  // DOMSVGLengthList::InternalBaseValListWillChangeTo applies here just as
  // much, but we have the additional issue that failing to stay in sync would
  // mean that - assuming we aren't reading bad memory - we would likely end up
  // decoding command types from argument floats when looking in our
  // SVGPathData's data array! Either way, we'll likely then go down
  // NS_NOTREACHED code paths, or end up reading/setting more bad memory!!

  // The only time that our other DOM list type implementations remove items is
  // if those items become surplus items due to an attribute change or SMIL
  // animation sample shortening the list. In general though, they try to keep
  // their existing DOM items, even when things change. To be consistent, we'd
  // really like to do the same thing. However, because different types of path
  // segment correspond to different DOMSVGPathSeg subclasses, the type of
  // items in our list are generally not the same, which makes this harder for
  // us. We have to remove DOM segments if their type is not the same as the
  // type of the new internal segment at their index.
  //
  // We also need to sync up mInternalDataIndex, but since we need to loop over
  // all the items in the new list checking types anyway, that's almost
  // insignificant in terms of overhead.
  //
  // Note that this method is called on every single SMIL animation resample
  // and we have no way to short circuit the overhead since we don't have a
  // way to tell if the call is due to a new animation, or a resample of an
  // existing animation (when the number and type of items would be the same).
  // (Note that a new animation could start overriding an existing animation at
  // any time, so checking IsAnimating() wouldn't work.) Because we get called
  // on every sample, it would not be acceptable alternative to throw away all
  // our items and let them be recreated lazily, since that would break what
  // script sees!

  PRUint32 length = mItems.Length();
  PRUint32 index = 0;

  PRUint32 dataLength = aNewValue.mData.Length();
  PRUint32 dataIndex = 0; // index into aNewValue's raw data array

  PRUint32 newSegType;

  while (index < length && dataIndex < dataLength) {
    newSegType = SVGPathSegUtils::DecodeType(aNewValue.mData[dataIndex]);
    if (ItemAt(index) && ItemAt(index)->Type() != newSegType) {
      ItemAt(index)->RemovingFromList();
      ItemAt(index) = nsnull;
    }
    // Only after the RemovingFromList() can we touch mInternalDataIndex!
    mItems[index].mInternalDataIndex = dataIndex;
    ++index;
    dataIndex += 1 + SVGPathSegUtils::ArgCountForType(newSegType);
  }

  NS_ABORT_IF_FALSE((index == length && dataIndex <= dataLength) ||
                    (index <= length && dataIndex == dataLength),
                    "very bad - list corruption?");

  if (index < length) {
    // aNewValue has fewer items than our previous internal counterpart

    PRUint32 newLength = index;

    // Remove excess items from the list:
    for (; index < length; ++index) {
      if (ItemAt(index)) {
        ItemAt(index)->RemovingFromList();
        ItemAt(index) = nsnull;
      }
    }

    // Only now may we truncate mItems
    mItems.SetLength(newLength);

  } else if (dataIndex < dataLength) {
    // aNewValue has more items than our previous internal counterpart

    // Sync mItems:
    while (dataIndex < dataLength) {
      if (mItems.Length() &&
          mItems.Length() - 1 > DOMSVGPathSeg::MaxListIndex()) {
        // It's safe to get out of sync with our internal list as long as we
        // have FEWER items than it does.
        return;
      }
      if (!mItems.AppendElement(ItemProxy(nsnull, dataIndex))) {
        // OOM
        Clear();
        return;
      }
      dataIndex += 1 + SVGPathSegUtils::ArgCountForType(SVGPathSegUtils::DecodeType(aNewValue.mData[dataIndex]));
    }
  }

  NS_ABORT_IF_FALSE(dataIndex == dataLength, "Serious processing error");
  NS_ABORT_IF_FALSE(index == length, "Serious counting error");
}
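
The sync loop above walks SVGPathData's flat float array, where each segment is stored as one type-encoding float followed by its arguments, so the cursor always advances by 1 + ArgCountForType(type). A skeletal sketch of that traversal, with decode/arg-count callbacks standing in for SVGPathSegUtils:

#include <cstdint>
#include <vector>

// Walk flat path data laid out as [encodedType, arg0, ..., argN-1]*, calling
// visit(type, args, argCount) for each segment. decodeType and argCountForType
// are placeholders for SVGPathSegUtils::DecodeType/ArgCountForType.
template <typename DecodeFn, typename ArgCountFn, typename VisitFn>
void ForEachSegment(const std::vector<float>& data,
                    DecodeFn decodeType, ArgCountFn argCountForType,
                    VisitFn visit)
{
    size_t i = 0;
    while (i < data.size()) {
        uint32_t type = decodeType(data[i]);
        uint32_t argCount = argCountForType(type);
        visit(type, data.data() + i + 1, argCount);
        i += 1 + argCount; // the same stride the loop above uses
    }
}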