void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  AutoTArray<nsTArray<AudioDataValue>, MONO> output;
  AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);

  uint32_t written = 0;
  // We need to copy here, because the mixer will reuse the storage; we must
  // not hold onto it. Buffers are in planar format.
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }

  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data, simply append it to our track.
  EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
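// For reference: every snippet in this collection leans on the same idiom.
// AutoTArray<T, N> is an nsTArray whose first N elements live in inline
// (stack or member) storage, so SetLength()/AppendElements() only touch the
// heap once the length exceeds N. A minimal, hedged sketch of the pattern
// (Gecko-only code; the helper name and the inline capacity of 256 are
// illustrative, not taken from the function above):
#include "mozilla/PodOperations.h"
#include "nsTArray.h"

static void
SketchAutoTArrayIdiom(const float* aSamples, uint32_t aFrames)
{
  // 256 elements of inline storage: no allocation for short runs,
  // transparent heap fallback for longer ones.
  AutoTArray<float, 256> buf;
  buf.SetLength(aFrames);                              // may allocate if aFrames > 256
  mozilla::PodCopy(buf.Elements(), aSamples, aFrames); // storage is contiguous either way
}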
/*static*/
void
AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
                                       int32_t aDuration,
                                       uint32_t aOutputChannels,
                                       AudioDataValue* aOutput)
{
  switch (aChunk.mBufferFormat) {
    case AUDIO_FORMAT_S16: {
      AutoTArray<const int16_t*, 2> array;
      array.SetLength(aOutputChannels);
      for (uint32_t i = 0; i < array.Length(); i++) {
        array[i] = static_cast<const int16_t*>(aChunk.mChannelData[i]);
      }
      InterleaveTrackData(array, aDuration, aOutputChannels, aOutput,
                          aChunk.mVolume);
      break;
    }
    case AUDIO_FORMAT_FLOAT32: {
      AutoTArray<const float*, 2> array;
      array.SetLength(aOutputChannels);
      for (uint32_t i = 0; i < array.Length(); i++) {
        array[i] = static_cast<const float*>(aChunk.mChannelData[i]);
      }
      InterleaveTrackData(array, aDuration, aOutputChannels, aOutput,
                          aChunk.mVolume);
      break;
    }
    case AUDIO_FORMAT_SILENCE: {
      MOZ_ASSERT(false, "To implement.");
      break;
    }
  }
}
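// What the templated overload called above is expected to do: walk the planar
// channel pointers and emit frame-interleaved samples, applying the chunk
// volume. This is a hedged sketch under an assumed name (InterleavePlanar);
// it is not Gecko's actual implementation, which also handles sample-format
// conversion and channel down-mixing.
template <typename T>
static void
InterleavePlanar(const nsTArray<const T*>& aInput, int32_t aDuration,
                 uint32_t aChannels, AudioDataValue* aOutput, float aVolume)
{
  for (int32_t frame = 0; frame < aDuration; ++frame) {
    for (uint32_t c = 0; c < aChannels; ++c) {
      // Planar input: aInput[c][frame]; interleaved output: frame-major.
      *aOutput++ = static_cast<AudioDataValue>(aInput[c][frame] * aVolume);
    }
  }
}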
template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock* aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*, 2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData,
                                   aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse,
               size_t impulseResponseBufferLength, size_t maxFFTSize,
               bool useBackgroundThreads, bool normalize, float sampleRate)
{
  float scale = 1;

  AutoTArray<const float*, 4> irChannels;
  for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) {
    irChannels.AppendElement(impulseResponse->GetData(i));
  }
  AutoTArray<float, 1024> tempBuf;

  if (normalize) {
    scale = calculateNormalizationScale(impulseResponse,
                                        impulseResponseBufferLength,
                                        sampleRate);
    if (scale) {
      tempBuf.SetLength(irChannels.Length() * impulseResponseBufferLength);
      for (uint32_t i = 0; i < irChannels.Length(); ++i) {
        float* buf = &tempBuf[i * impulseResponseBufferLength];
        AudioBufferCopyWithScale(irChannels[i], scale, buf,
                                 impulseResponseBufferLength);
        irChannels[i] = buf;
      }
    }
  }

  initialize(irChannels, impulseResponseBufferLength, maxFFTSize,
             useBackgroundThreads);
}
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
  mMonitor.AssertCurrentThreadOwns();

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return;
  }

  uint32_t toPopFrames =
    ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());

  while (mTimeStretcher->numSamples() < aWriter.Available()) {
    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
    if (c->Frames() == 0) {
      break;
    }
    MOZ_ASSERT(c->Frames() <= toPopFrames);
    if (IsValidAudioFormat(c.get())) {
      mTimeStretcher->putSamples(c->Data(), c->Frames());
    } else {
      // Write silence if invalid format.
      AutoTArray<AudioDataValue, 1000> buf;
      buf.SetLength(mOutChannels * c->Frames());
      memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
    }
  }

  auto timeStretcher = mTimeStretcher;
  aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
    return timeStretcher->receiveSamples(aPtr, aFrames);
  }, aWriter.Available());
}
// Read audio data from aChunk, resample it if needed, and then send the
// result to the OMX input buffer (or buffers, if one buffer is not enough).
// aSamplesRead will be the number of samples that have been read from aChunk.
BufferState ReadChunk(AudioChunk& aChunk, size_t* aSamplesRead)
{
  size_t chunkSamples = aChunk.GetDuration();
  size_t bytesToCopy = chunkSamples * mOMXAEncoder.mResamplingRatio *
                       mOMXAEncoder.mChannels * sizeof(AudioDataValue);
  size_t bytesCopied = 0;
  if (bytesToCopy <= AvailableSize()) {
    if (aChunk.IsNull()) {
      bytesCopied = SendSilenceToBuffer(chunkSamples);
    } else {
      bytesCopied = SendChunkToBuffer(aChunk, chunkSamples);
    }
    UpdateAfterSendChunk(chunkSamples, bytesCopied, aSamplesRead);
  } else {
    // Interleave data to a temporary buffer.
    AutoTArray<AudioDataValue, 9600> pcm;
    pcm.SetLength(bytesToCopy);
    AudioDataValue* interleavedSource = pcm.Elements();
    AudioTrackEncoder::InterleaveTrackData(aChunk, chunkSamples,
                                           mOMXAEncoder.mChannels,
                                           interleavedSource);

    // When the data size of the chunk is larger than the buffer capacity,
    // we split it into sub-chunks to fill up buffers.
    size_t subChunkSamples = 0;
    while (GetNextSubChunk(bytesToCopy, subChunkSamples)) {
      // To avoid enqueueing an empty buffer, we follow this order: clear the
      // current buffer first, then create a new one, and send data to it last.
      if (!IsEmpty()) {
        // Submit the filled-up buffer and request a new buffer.
        status_t result = Enqueue(mOMXAEncoder.mTimestamp,
                                  mInputFlags & ~OMXCodecWrapper::BUFFER_EOS);
        if (result != OK) {
          return BUFFER_FAIL;
        }

        result = Dequeue();
        if (result == -EAGAIN) {
          return WAIT_FOR_NEW_BUFFER;
        }
        if (result != OK) {
          return BUFFER_FAIL;
        }
      }
      if (aChunk.IsNull()) {
        bytesCopied = SendSilenceToBuffer(subChunkSamples);
      } else {
        bytesCopied = SendInterleavedSubChunkToBuffer(interleavedSource,
                                                      subChunkSamples);
      }
      UpdateAfterSendChunk(subChunkSamples, bytesCopied, aSamplesRead);
      // Advance to the samples that have not yet been sent to the buffer.
      interleavedSource += subChunkSamples * mOMXAEncoder.mChannels;
    }
  }
  return BUFFER_OK;
}
template <typename T>
typename EnableIf<IsSame<T, float>::value, void>::Type
WriteDumpFileHelper(T* aInput, size_t aSamples, FILE* aFile)
{
  AutoTArray<uint8_t, 1024 * 2> buf;
  buf.SetLength(aSamples * 2);
  uint8_t* output = buf.Elements();
  for (uint32_t i = 0; i < aSamples; ++i) {
    SetUint16LE(output + i * 2, int16_t(aInput[i] * 32767.0f));
  }
  fwrite(output, 2, aSamples, aFile);
  fflush(aFile);
}
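// A hedged sketch of the matching int16_t overload (assumed for illustration,
// not copied from the Gecko source): the samples are already 16-bit, so they
// only need to be serialized little-endian the same way the float path above
// does.
template <typename T>
typename EnableIf<IsSame<T, int16_t>::value, void>::Type
WriteDumpFileHelper(T* aInput, size_t aSamples, FILE* aFile)
{
  AutoTArray<uint8_t, 1024 * 2> buf;
  buf.SetLength(aSamples * 2);
  uint8_t* output = buf.Elements();
  for (uint32_t i = 0; i < aSamples; ++i) {
    SetUint16LE(output + i * 2, aInput[i]);
  }
  fwrite(output, 2, aSamples, aFile);
  fflush(aFile);
}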
void
DOMMatrixReadOnly::ToFloat64Array(JSContext* aCx,
                                  JS::MutableHandle<JSObject*> aResult,
                                  ErrorResult& aRv) const
{
  AutoTArray<double, 16> arr;
  arr.SetLength(16);
  GetDataFromMatrix(this, arr.Elements());
  JS::Rooted<JS::Value> value(aCx);
  if (!ToJSValue(aCx, TypedArrayCreator<Float64Array>(arr), &value)) {
    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }
  aResult.set(&value.toObject());
}
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
  mMonitor.AssertCurrentThreadOwns();

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return;
  }

  uint32_t toPopFrames =
    ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());

  while (mTimeStretcher->numSamples() < aWriter.Available()) {
    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
    if (c->Frames() == 0) {
      break;
    }
    MOZ_ASSERT(c->Frames() <= toPopFrames);
    if (IsValidAudioFormat(c.get())) {
      mTimeStretcher->putSamples(c->Data(), c->Frames());
    } else {
      // Write silence if invalid format.
      AutoTArray<AudioDataValue, 1000> buf;
      auto size = CheckedUint32(mOutChannels) * c->Frames();
      if (!size.isValid()) {
        // The overflow should not happen in the normal case.
        LOGW("Invalid member data: %d channels, %d frames",
             mOutChannels, c->Frames());
        return;
      }
      buf.SetLength(size.value());
      size = size * sizeof(AudioDataValue);
      if (!size.isValid()) {
        LOGW("The required memory size is too large.");
        return;
      }
      memset(buf.Elements(), 0, size.value());
      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
    }
  }

  auto timeStretcher = mTimeStretcher;
  aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
    return timeStretcher->receiveSamples(aPtr, aFrames);
  }, aWriter.Available());
}
void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   DownmixBufferType& aDownmixBuffer)
{
  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
    aOutputChannels.AppendElement(
      static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels with zeros.
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(nullptr);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AutoTArray<float*, GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the channels beyond aOutputChannelCount.
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
                                       aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
// Interleave chunk data and send it to the buffer, returning the number of
// bytes of audio data copied.
size_t SendChunkToBuffer(AudioChunk& aSource, size_t aSamplesNum)
{
  AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(GetPointer());
  size_t bytesToCopy = aSamplesNum * mOMXAEncoder.mResamplingRatio *
                       mOMXAEncoder.mChannels * sizeof(AudioDataValue);
  uint32_t dstSamplesCopied = aSamplesNum;
  if (mOMXAEncoder.mResampler) {
    AutoTArray<AudioDataValue, 9600> pcm;
    pcm.SetLength(bytesToCopy);
    AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
                                           mOMXAEncoder.mChannels,
                                           pcm.Elements());
    int16_t* tempSource = reinterpret_cast<int16_t*>(pcm.Elements());
    speex_resampler_process_interleaved_int(mOMXAEncoder.mResampler,
                                            tempSource, &aSamplesNum,
                                            dst, &dstSamplesCopied);
  } else {
    AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
                                           mOMXAEncoder.mChannels, dst);
  }
  return dstSamplesCopied * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
}
nsresult
VorbisTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  if (mEosSetInEncoder) {
    return NS_OK;
  }

  PROFILER_LABEL("VorbisTrackEncoder", "GetEncodedTrack",
                 js::ProfileEntry::Category::OTHER);

  nsAutoPtr<AudioSegment> sourceSegment;
  sourceSegment = new AudioSegment();
  {
    // Move all the samples from mRawSegment to sourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);
    // Wait if mEncoder is not initialized, or when there is not enough raw
    // data, unless we have reached the end of the stream or been canceled.
    while (!mCanceled && mRawSegment.GetDuration() < GetPacketDuration() &&
           !mEndOfStream) {
      mon.Wait();
    }
    VORBISLOG("GetEncodedTrack passes wait, duration is %lld\n",
              mRawSegment.GetDuration());
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }
    sourceSegment->AppendFrom(&mRawSegment);
  }

  if (mEndOfStream && (sourceSegment->GetDuration() == 0) &&
      !mEosSetInEncoder) {
    mEncodingComplete = true;
    mEosSetInEncoder = true;
    VORBISLOG("[Vorbis] Done encoding.");
    vorbis_analysis_wrote(&mVorbisDsp, 0);
    GetEncodedFrames(aData);
    return NS_OK;
  }

  // Start encoding data.
  AudioSegment::ChunkIterator iter(*sourceSegment);

  AudioDataValue** vorbisBuffer =
    vorbis_analysis_buffer(&mVorbisDsp, (int)sourceSegment->GetDuration());

  int framesCopied = 0;
  AutoTArray<AudioDataValue, 9600> interleavedPcm;
  AutoTArray<AudioDataValue, 9600> nonInterleavedPcm;
  interleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  nonInterleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  while (!iter.IsEnded()) {
    AudioChunk chunk = *iter;
    int frameToCopy = chunk.GetDuration();
    if (!chunk.IsNull()) {
      InterleaveTrackData(chunk, frameToCopy, mChannels,
                          interleavedPcm.Elements() + framesCopied * mChannels);
    } else {
      // empty data
      memset(interleavedPcm.Elements() + framesCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }
    framesCopied += frameToCopy;
    iter.Next();
  }

  // De-interleave the interleavedPcm.
  DeInterleaveTrackData(interleavedPcm.Elements(), framesCopied, mChannels,
                        nonInterleavedPcm.Elements());

  // Copy the nonInterleavedPcm to the vorbis buffer.
  for (uint8_t i = 0; i < mChannels; ++i) {
    memcpy(vorbisBuffer[i], nonInterleavedPcm.Elements() + framesCopied * i,
           framesCopied * sizeof(AudioDataValue));
  }

  // Now vorbisBuffer contains all the data in non-interleaved form.
  // Tell the library how much we actually submitted.
  vorbis_analysis_wrote(&mVorbisDsp, framesCopied);
  VORBISLOG("vorbis_analysis_wrote framesCopied %d\n", framesCopied);

  GetEncodedFrames(aData);

  return NS_OK;
}
template <typename T>
void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %zu samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given
    // input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    // Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1 || aChannels == 2,
               "GraphDriver only supports mono and stereo audio for now");

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    AutoTArray<const T*, 8> channels;
    if (aChannels == 1) {
      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
      channels.AppendElement(static_cast<T*>(buffer->Data()));
    } else {
      channels.SetLength(aChannels);
      AutoTArray<T*, 8> write_channels;
      write_channels.SetLength(aChannels);
      T* samples = static_cast<T*>(buffer->Data());

      // Point each channel at its planar slice of the shared buffer.
      size_t offset = 0;
      for (uint32_t c = 0; c < aChannels; ++c) {
        channels[c] = write_channels[c] = samples + offset;
        offset += aFrames;
      }

      DeinterleaveAndConvertBuffer(aBuffer, aFrames, aChannels,
                                   write_channels.Elements());
    }

    MOZ_ASSERT(aChannels == channels.Length());
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                          mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}
// Performs a locale-sensitive date formatting operation on the struct tm parameter.
nsresult nsDateTimeFormatMac::FormatTMTime(nsILocale* locale,
                                           const nsDateFormatSelector dateFormatSelector,
                                           const nsTimeFormatSelector timeFormatSelector,
                                           const struct tm* tmTime,
                                           nsAString& stringOut)
{
  nsresult res = NS_OK;

  // set up locale data
  (void) Initialize(locale);

  // return, nothing to format
  if (dateFormatSelector == kDateFormatNone && timeFormatSelector == kTimeFormatNone) {
    stringOut.Truncate();
    return NS_OK;
  }

  NS_ASSERTION(tmTime->tm_mon >= 0, "tm is not set correctly");
  NS_ASSERTION(tmTime->tm_mday >= 1, "tm is not set correctly");
  NS_ASSERTION(tmTime->tm_hour >= 0, "tm is not set correctly");
  NS_ASSERTION(tmTime->tm_min >= 0, "tm is not set correctly");
  NS_ASSERTION(tmTime->tm_sec >= 0, "tm is not set correctly");
  NS_ASSERTION(tmTime->tm_wday >= 0, "tm is not set correctly");

  // Get the locale for the formatter:
  CFLocaleRef formatterLocale;
  if (!locale) {
    formatterLocale = CFLocaleCopyCurrent();
  } else {
    CFStringRef localeStr =
      CFStringCreateWithCharacters(nullptr,
                                   reinterpret_cast<const UniChar*>(mLocale.get()),
                                   mLocale.Length());
    formatterLocale = CFLocaleCreate(nullptr, localeStr);
    CFRelease(localeStr);
  }

  // Get the date style for the formatter:
  CFDateFormatterStyle dateStyle;
  switch (dateFormatSelector) {
    case kDateFormatLong:
      dateStyle = kCFDateFormatterLongStyle;
      break;
    case kDateFormatShort:
      dateStyle = kCFDateFormatterShortStyle;
      break;
    case kDateFormatYearMonth:
    case kDateFormatWeekday:
      dateStyle = kCFDateFormatterNoStyle; // formats handled below
      break;
    case kDateFormatNone:
      dateStyle = kCFDateFormatterNoStyle;
      break;
    default:
      NS_ERROR("Unknown nsDateFormatSelector");
      res = NS_ERROR_FAILURE;
      dateStyle = kCFDateFormatterNoStyle;
  }

  // Get the time style for the formatter:
  CFDateFormatterStyle timeStyle;
  switch (timeFormatSelector) {
    case kTimeFormatSeconds:
    case kTimeFormatSecondsForce24Hour: // 24 hour part fixed below
      timeStyle = kCFDateFormatterMediumStyle;
      break;
    case kTimeFormatNoSeconds:
    case kTimeFormatNoSecondsForce24Hour: // 24 hour part fixed below
      timeStyle = kCFDateFormatterShortStyle;
      break;
    case kTimeFormatNone:
      timeStyle = kCFDateFormatterNoStyle;
      break;
    default:
      NS_ERROR("Unknown nsTimeFormatSelector");
      res = NS_ERROR_FAILURE;
      timeStyle = kCFDateFormatterNoStyle;
  }

  // Create the formatter and fix up its formatting as necessary:
  CFDateFormatterRef formatter =
    CFDateFormatterCreate(nullptr, formatterLocale, dateStyle, timeStyle);

  CFRelease(formatterLocale);

  if (dateFormatSelector == kDateFormatYearMonth ||
      dateFormatSelector == kDateFormatWeekday) {
    CFStringRef dateFormat = dateFormatSelector == kDateFormatYearMonth ?
      CFSTR("yyyy/MM ") : CFSTR("EEE ");

    CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
    CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
    CFStringInsert(newFormat, 0, dateFormat);
    CFDateFormatterSetFormat(formatter, newFormat);
    CFRelease(newFormat); // note we don't own oldFormat
  }

  if (timeFormatSelector == kTimeFormatSecondsForce24Hour ||
      timeFormatSelector == kTimeFormatNoSecondsForce24Hour) {
    // Replace "h" with "H", and remove "a":
    CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
    CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
    CFIndex replaceCount =
      CFStringFindAndReplace(newFormat, CFSTR("h"), CFSTR("H"),
                             CFRangeMake(0, CFStringGetLength(newFormat)), 0);
    NS_ASSERTION(replaceCount <= 2, "Unexpected number of \"h\" occurrences");
    replaceCount =
      CFStringFindAndReplace(newFormat, CFSTR("a"), CFSTR(""),
                             CFRangeMake(0, CFStringGetLength(newFormat)), 0);
    NS_ASSERTION(replaceCount <= 1, "Unexpected number of \"a\" occurrences");
    CFDateFormatterSetFormat(formatter, newFormat);
    CFRelease(newFormat); // note we don't own oldFormat
  }

  // Now get the formatted date:
  CFGregorianDate date;
  date.second = tmTime->tm_sec;
  date.minute = tmTime->tm_min;
  date.hour = tmTime->tm_hour;
  date.day = tmTime->tm_mday;      // Mac is 1-based, tm is 1-based
  date.month = tmTime->tm_mon + 1; // Mac is 1-based, tm is 0-based
  date.year = tmTime->tm_year + 1900;

  CFTimeZoneRef timeZone = CFTimeZoneCopySystem(); // tmTime is in local time
  CFAbsoluteTime absTime = CFGregorianDateGetAbsoluteTime(date, timeZone);
  CFRelease(timeZone);

  CFStringRef formattedDate =
    CFDateFormatterCreateStringWithAbsoluteTime(nullptr, formatter, absTime);

  CFIndex stringLen = CFStringGetLength(formattedDate);

  AutoTArray<UniChar, 256> stringBuffer;
  stringBuffer.SetLength(stringLen + 1);
  CFStringGetCharacters(formattedDate, CFRangeMake(0, stringLen),
                        stringBuffer.Elements());
  stringOut.Assign(reinterpret_cast<char16_t*>(stringBuffer.Elements()), stringLen);

  CFRelease(formattedDate);
  CFRelease(formatter);

  return res;
}
nsresult
gfxGraphiteShaper::SetGlyphsFromSegment(DrawTarget *aDrawTarget,
                                        gfxShapedText *aShapedText,
                                        uint32_t aOffset,
                                        uint32_t aLength,
                                        const char16_t *aText,
                                        gr_segment *aSegment)
{
  int32_t dev2appUnits = aShapedText->GetAppUnitsPerDevUnit();
  bool rtl = aShapedText->IsRightToLeft();

  uint32_t glyphCount = gr_seg_n_slots(aSegment);

  // identify clusters; graphite may have reordered/expanded/ligated glyphs.
  AutoTArray<Cluster, SMALL_GLYPH_RUN> clusters;
  AutoTArray<uint16_t, SMALL_GLYPH_RUN> gids;
  AutoTArray<float, SMALL_GLYPH_RUN> xLocs;
  AutoTArray<float, SMALL_GLYPH_RUN> yLocs;

  if (!clusters.SetLength(aLength, fallible) ||
      !gids.SetLength(glyphCount, fallible) ||
      !xLocs.SetLength(glyphCount, fallible) ||
      !yLocs.SetLength(glyphCount, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  // walk through the glyph slots and check which original character
  // each is associated with
  uint32_t gIndex = 0; // glyph slot index
  uint32_t cIndex = 0; // current cluster index
  for (const gr_slot *slot = gr_seg_first_slot(aSegment);
       slot != nullptr;
       slot = gr_slot_next_in_segment(slot), gIndex++) {
    uint32_t before = gr_cinfo_base(gr_seg_cinfo(aSegment, gr_slot_before(slot)));
    uint32_t after = gr_cinfo_base(gr_seg_cinfo(aSegment, gr_slot_after(slot)));
    gids[gIndex] = gr_slot_gid(slot);
    xLocs[gIndex] = gr_slot_origin_X(slot);
    yLocs[gIndex] = gr_slot_origin_Y(slot);

    // if this glyph has a "before" character index that precedes the
    // current cluster's char index, we need to merge preceding
    // clusters until it gets included
    while (before < clusters[cIndex].baseChar && cIndex > 0) {
      clusters[cIndex-1].nChars += clusters[cIndex].nChars;
      clusters[cIndex-1].nGlyphs += clusters[cIndex].nGlyphs;
      --cIndex;
    }

    // if there's a gap between the current cluster's base character and
    // this glyph's, extend the cluster to include the intervening chars
    if (gr_slot_can_insert_before(slot) && clusters[cIndex].nChars &&
        before >= clusters[cIndex].baseChar + clusters[cIndex].nChars) {
      NS_ASSERTION(cIndex < aLength - 1, "cIndex at end of word");
      Cluster& c = clusters[cIndex + 1];
      c.baseChar = clusters[cIndex].baseChar + clusters[cIndex].nChars;
      c.nChars = before - c.baseChar;
      c.baseGlyph = gIndex;
      c.nGlyphs = 0;
      ++cIndex;
    }

    // increment cluster's glyph count to include current slot
    NS_ASSERTION(cIndex < aLength, "cIndex beyond word length");
    ++clusters[cIndex].nGlyphs;

    // bump |after| index if it falls in the middle of a surrogate pair
    if (NS_IS_HIGH_SURROGATE(aText[after]) && after < aLength - 1 &&
        NS_IS_LOW_SURROGATE(aText[after + 1])) {
      after++;
    }
    // extend cluster if necessary to reach the glyph's "after" index
    if (clusters[cIndex].baseChar + clusters[cIndex].nChars < after + 1) {
      clusters[cIndex].nChars = after + 1 - clusters[cIndex].baseChar;
    }
  }

  bool roundX, roundY;
  GetRoundOffsetsToPixels(aDrawTarget, &roundX, &roundY);

  gfxShapedText::CompressedGlyph *charGlyphs =
    aShapedText->GetCharacterGlyphs() + aOffset;

  // now put glyphs into the textrun, one cluster at a time
  for (uint32_t i = 0; i <= cIndex; ++i) {
    const Cluster& c = clusters[i];

    float adv; // total advance of the cluster
    if (rtl) {
      if (i == 0) {
        adv = gr_seg_advance_X(aSegment) - xLocs[c.baseGlyph];
      } else {
        adv = xLocs[clusters[i-1].baseGlyph] - xLocs[c.baseGlyph];
      }
    } else {
      if (i == cIndex) {
        adv = gr_seg_advance_X(aSegment) - xLocs[c.baseGlyph];
      } else {
        adv = xLocs[clusters[i+1].baseGlyph] - xLocs[c.baseGlyph];
      }
    }

    // Check for default-ignorable char that didn't get filtered, combined,
    // etc by the shaping process, and skip it.
    uint32_t offs = c.baseChar;
    NS_ASSERTION(offs < aLength, "unexpected offset");
    if (c.nGlyphs == 1 && c.nChars == 1 &&
        aShapedText->FilterIfIgnorable(aOffset + offs, aText[offs])) {
      continue;
    }

    uint32_t appAdvance = roundX ? NSToIntRound(adv) * dev2appUnits
                                 : NSToIntRound(adv * dev2appUnits);
    if (c.nGlyphs == 1 &&
        gfxShapedText::CompressedGlyph::IsSimpleGlyphID(gids[c.baseGlyph]) &&
        gfxShapedText::CompressedGlyph::IsSimpleAdvance(appAdvance) &&
        charGlyphs[offs].IsClusterStart() &&
        yLocs[c.baseGlyph] == 0) {
      charGlyphs[offs].SetSimpleGlyph(appAdvance, gids[c.baseGlyph]);
    } else {
      // not a one-to-one mapping with simple metrics: use DetailedGlyph
      AutoTArray<gfxShapedText::DetailedGlyph, 8> details;
      float clusterLoc;
      for (uint32_t j = c.baseGlyph; j < c.baseGlyph + c.nGlyphs; ++j) {
        gfxShapedText::DetailedGlyph* d = details.AppendElement();
        d->mGlyphID = gids[j];
        d->mYOffset = roundY ? NSToIntRound(-yLocs[j]) * dev2appUnits
                             : -yLocs[j] * dev2appUnits;
        if (j == c.baseGlyph) {
          d->mXOffset = 0;
          d->mAdvance = appAdvance;
          clusterLoc = xLocs[j];
        } else {
          float dx = rtl ? (xLocs[j] - clusterLoc)
                         : (xLocs[j] - clusterLoc - adv);
          d->mXOffset = roundX ? NSToIntRound(dx) * dev2appUnits
                               : dx * dev2appUnits;
          d->mAdvance = 0;
        }
      }
      gfxShapedText::CompressedGlyph g;
      g.SetComplex(charGlyphs[offs].IsClusterStart(), true, details.Length());
      aShapedText->SetGlyphs(aOffset + offs, g, details.Elements());
    }

    for (uint32_t j = c.baseChar + 1; j < c.baseChar + c.nChars; ++j) {
      NS_ASSERTION(j < aLength, "unexpected offset");
      gfxShapedText::CompressedGlyph &g = charGlyphs[j];
      NS_ASSERTION(!g.IsSimpleGlyph(), "overwriting a simple glyph");
      g.SetComplex(g.IsClusterStart(), false, 0);
    }
  }

  return NS_OK;
}
int CALLBACK
GDIFontInfo::EnumerateFontsForFamily(const ENUMLOGFONTEXW *lpelfe,
                                     const NEWTEXTMETRICEXW *nmetrics,
                                     DWORD fontType, LPARAM data)
{
  EnumerateFontsForFamilyData *famData =
    reinterpret_cast<EnumerateFontsForFamilyData*>(data);
  HDC hdc = famData->mFontInfo.mHdc;
  LOGFONTW logFont = lpelfe->elfLogFont;
  const NEWTEXTMETRICW& metrics = nmetrics->ntmTm;

  AutoSelectFont font(hdc, &logFont);
  if (!font.IsValid()) {
    return 1;
  }

  FontFaceData fontData;
  nsDependentString fontName(lpelfe->elfFullName);

  // the callback is invoked once per style-charset, so return if this style
  // has already been seen
  if (fontName.Equals(famData->mPreviousFontName)) {
    return 1;
  }
  famData->mPreviousFontName = fontName;
  famData->mFontInfo.mLoadStats.fonts++;

  // read name table info
  bool nameDataLoaded = false;
  if (famData->mFontInfo.mLoadFaceNames || famData->mFontInfo.mLoadOtherNames) {
    uint32_t kNAME =
      NativeEndian::swapToBigEndian(TRUETYPE_TAG('n','a','m','e'));
    uint32_t nameSize;
    AutoTArray<uint8_t, 1024> nameData;

    nameSize = ::GetFontData(hdc, kNAME, 0, nullptr, 0);
    if (nameSize != GDI_ERROR &&
        nameSize > 0 &&
        nameData.SetLength(nameSize, fallible)) {
      ::GetFontData(hdc, kNAME, 0, nameData.Elements(), nameSize);

      // face names
      if (famData->mFontInfo.mLoadFaceNames) {
        gfxFontUtils::ReadCanonicalName((const char*)(nameData.Elements()),
                                        nameSize,
                                        gfxFontUtils::NAME_ID_FULL,
                                        fontData.mFullName);
        gfxFontUtils::ReadCanonicalName((const char*)(nameData.Elements()),
                                        nameSize,
                                        gfxFontUtils::NAME_ID_POSTSCRIPT,
                                        fontData.mPostscriptName);
        nameDataLoaded = true;
        famData->mFontInfo.mLoadStats.facenames++;
      }

      // other family names
      if (famData->mFontInfo.mLoadOtherNames) {
        gfxFontFamily::ReadOtherFamilyNamesForFace(famData->mFamilyName,
                                                   (const char*)(nameData.Elements()),
                                                   nameSize,
                                                   famData->mOtherFamilyNames,
                                                   false);
      }
    }
  }

  // read cmap
  bool cmapLoaded = false;
  gfxWindowsFontType feType =
    GDIFontEntry::DetermineFontType(metrics, fontType);
  if (famData->mFontInfo.mLoadCmaps &&
      (feType == GFX_FONT_TYPE_PS_OPENTYPE ||
       feType == GFX_FONT_TYPE_TT_OPENTYPE ||
       feType == GFX_FONT_TYPE_TRUETYPE)) {
    uint32_t kCMAP =
      NativeEndian::swapToBigEndian(TRUETYPE_TAG('c','m','a','p'));
    uint32_t cmapSize;
    AutoTArray<uint8_t, 1024> cmapData;

    cmapSize = ::GetFontData(hdc, kCMAP, 0, nullptr, 0);
    if (cmapSize != GDI_ERROR &&
        cmapSize > 0 &&
        cmapData.SetLength(cmapSize, fallible)) {
      ::GetFontData(hdc, kCMAP, 0, cmapData.Elements(), cmapSize);

      // Note: do not redeclare cmapLoaded here; the shadowed local in the
      // original would prevent the outer flag from ever being set.
      bool unicodeFont = false, symbolFont = false;
      RefPtr<gfxCharacterMap> charmap = new gfxCharacterMap();
      uint32_t offset;

      if (NS_SUCCEEDED(gfxFontUtils::ReadCMAP(cmapData.Elements(), cmapSize,
                                              *charmap, offset,
                                              unicodeFont, symbolFont))) {
        fontData.mCharacterMap = charmap;
        fontData.mUVSOffset = offset;
        fontData.mSymbolFont = symbolFont;
        cmapLoaded = true;
        famData->mFontInfo.mLoadStats.cmaps++;
      }
    }
  }

  if (cmapLoaded || nameDataLoaded) {
    famData->mFontInfo.mFontFaceData.Put(fontName, fontData);
  }

  return famData->mFontInfo.mCanceled ? 0 : 1;
}
nsresult
gfxCoreTextShaper::SetGlyphsFromRun(gfxShapedText *aShapedText,
                                    uint32_t aOffset,
                                    uint32_t aLength,
                                    CTRunRef aCTRun,
                                    int32_t aStringOffset)
{
  // The word has been bidi-wrapped; aStringOffset is the number
  // of chars at the beginning of the CTLine that we should skip.
  // aCTRun is a glyph run from the CoreText layout process.

  int32_t direction = aShapedText->IsRightToLeft() ? -1 : 1;

  int32_t numGlyphs = ::CTRunGetGlyphCount(aCTRun);
  if (numGlyphs == 0) {
    return NS_OK;
  }

  int32_t wordLength = aLength;

  // character offsets get really confusing here, as we have to keep track of
  // (a) the text in the actual textRun we're constructing
  // (c) the string that was handed to CoreText, which contains the text of
  //     the font run plus directional-override padding
  // (d) the CTRun currently being processed, which may be a sub-run of the
  //     CoreText line (but may extend beyond the actual font run into the
  //     bidi wrapping text).
  // aStringOffset tells us how many initial characters of the line to ignore.

  // get the source string range within the CTLine's text
  CFRange stringRange = ::CTRunGetStringRange(aCTRun);
  // skip the run if it is entirely outside the actual range of the font run
  if (stringRange.location - aStringOffset + stringRange.length <= 0 ||
      stringRange.location - aStringOffset >= wordLength) {
    return NS_OK;
  }

  // retrieve the laid-out glyph data from the CTRun
  UniquePtr<CGGlyph[]> glyphsArray;
  UniquePtr<CGPoint[]> positionsArray;
  UniquePtr<CFIndex[]> glyphToCharArray;
  const CGGlyph* glyphs = nullptr;
  const CGPoint* positions = nullptr;
  const CFIndex* glyphToChar = nullptr;

  // Testing indicates that CTRunGetGlyphsPtr (almost?) always succeeds,
  // and so allocating a new array and copying data with CTRunGetGlyphs
  // will be extremely rare.
  // If this were not the case, we could use an AutoTArray<> to
  // try and avoid the heap allocation for small runs.
  // It's possible that some future change to CoreText will mean that
  // CTRunGetGlyphsPtr fails more often; if this happens, AutoTArray<>
  // may become an attractive option.
  glyphs = ::CTRunGetGlyphsPtr(aCTRun);
  if (!glyphs) {
    glyphsArray = MakeUniqueFallible<CGGlyph[]>(numGlyphs);
    if (!glyphsArray) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ::CTRunGetGlyphs(aCTRun, ::CFRangeMake(0, 0), glyphsArray.get());
    glyphs = glyphsArray.get();
  }

  positions = ::CTRunGetPositionsPtr(aCTRun);
  if (!positions) {
    positionsArray = MakeUniqueFallible<CGPoint[]>(numGlyphs);
    if (!positionsArray) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ::CTRunGetPositions(aCTRun, ::CFRangeMake(0, 0), positionsArray.get());
    positions = positionsArray.get();
  }

  // Remember that the glyphToChar indices relate to the CoreText line,
  // not to the beginning of the textRun, the font run,
  // or the stringRange of the glyph run.
  glyphToChar = ::CTRunGetStringIndicesPtr(aCTRun);
  if (!glyphToChar) {
    glyphToCharArray = MakeUniqueFallible<CFIndex[]>(numGlyphs);
    if (!glyphToCharArray) {
      return NS_ERROR_OUT_OF_MEMORY;
    }
    ::CTRunGetStringIndices(aCTRun, ::CFRangeMake(0, 0), glyphToCharArray.get());
    glyphToChar = glyphToCharArray.get();
  }

  double runWidth = ::CTRunGetTypographicBounds(aCTRun, ::CFRangeMake(0, 0),
                                                nullptr, nullptr, nullptr);

  AutoTArray<gfxShapedText::DetailedGlyph, 1> detailedGlyphs;
  gfxShapedText::CompressedGlyph *charGlyphs =
    aShapedText->GetCharacterGlyphs() + aOffset;

  // CoreText gives us the glyphindex-to-charindex mapping, which relates each
  // glyph to a source text character; we also need the charindex-to-glyphindex
  // mapping to find the glyph for a given char. Note that some chars may not
  // map to any glyph (ligature continuations), and some may map to several
  // glyphs (eg Indic split vowels). We set the glyph index to NO_GLYPH for
  // chars that have no associated glyph, and we record the last glyph index
  // for cases where the char maps to several glyphs, so that our clumping
  // will include all the glyph fragments for the character.

  // The charToGlyph array is indexed by char position within the stringRange
  // of the glyph run.
  static const int32_t NO_GLYPH = -1;
  AutoTArray<int32_t, SMALL_GLYPH_RUN> charToGlyphArray;
  if (!charToGlyphArray.SetLength(stringRange.length, fallible)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }
  int32_t *charToGlyph = charToGlyphArray.Elements();
  for (int32_t offset = 0; offset < stringRange.length; ++offset) {
    charToGlyph[offset] = NO_GLYPH;
  }
  for (int32_t i = 0; i < numGlyphs; ++i) {
    int32_t loc = glyphToChar[i] - stringRange.location;
    if (loc >= 0 && loc < stringRange.length) {
      charToGlyph[loc] = i;
    }
  }

  // Find character and glyph clumps that correspond, allowing for ligatures,
  // indic reordering, split glyphs, etc.
  //
  // The idea is that we'll find a character sequence starting at the first
  // char of stringRange, and extend it until it includes the character
  // associated with the first glyph; we also extend it as long as there are
  // "holes" in the range of glyphs. So we will eventually have a contiguous
  // sequence of characters, starting at the beginning of the range, that map
  // to a contiguous sequence of glyphs, starting at the beginning of the
  // glyph array. That's a clump; then we update the starting positions and
  // repeat.
  //
  // NB: In the case of RTL layouts, we iterate over the stringRange in
  // reverse.
  //
  // This may find characters that fall outside the range 0:wordLength,
  // so we won't necessarily use everything we find here.

  bool isRightToLeft = aShapedText->IsRightToLeft();
  int32_t glyphStart = 0; // looking for a clump that starts at this glyph index
  int32_t charStart = isRightToLeft ? stringRange.length - 1 : 0;
                          // and this char index (in the stringRange of the
                          // glyph run)

  while (glyphStart < numGlyphs) {
    // keep finding groups until all glyphs are accounted for
    bool inOrder = true;
    int32_t charEnd = glyphToChar[glyphStart] - stringRange.location;
    NS_WARN_IF_FALSE(charEnd >= 0 && charEnd < stringRange.length,
                     "glyph-to-char mapping points outside string range");
    // clamp charEnd to the valid range of the string
    charEnd = std::max(charEnd, 0);
    charEnd = std::min(charEnd, int32_t(stringRange.length));

    int32_t glyphEnd = glyphStart;
    int32_t charLimit = isRightToLeft ? -1 : stringRange.length;
    do {
      // This is normally executed once for each iteration of the outer loop,
      // but in unusual cases where the character/glyph association is complex,
      // the initial character range might correspond to a non-contiguous
      // glyph range with "holes" in it. If so, we will repeat this loop to
      // extend the character range until we have a contiguous glyph sequence.
      NS_ASSERTION((direction > 0 && charEnd < charLimit) ||
                   (direction < 0 && charEnd > charLimit),
                   "no characters left in range?");
      charEnd += direction;
      while (charEnd != charLimit && charToGlyph[charEnd] == NO_GLYPH) {
        charEnd += direction;
      }

      // find the maximum glyph index covered by the clump so far
      if (isRightToLeft) {
        for (int32_t i = charStart; i > charEnd; --i) {
          if (charToGlyph[i] != NO_GLYPH) {
            // update extent of glyph range
            glyphEnd = std::max(glyphEnd, charToGlyph[i] + 1);
          }
        }
      } else {
        for (int32_t i = charStart; i < charEnd; ++i) {
          if (charToGlyph[i] != NO_GLYPH) {
            // update extent of glyph range
            glyphEnd = std::max(glyphEnd, charToGlyph[i] + 1);
          }
        }
      }

      if (glyphEnd == glyphStart + 1) {
        // for the common case of a single-glyph clump, we can skip the
        // following checks
        break;
      }

      if (glyphEnd == glyphStart) {
        // no glyphs, try to extend the clump
        continue;
      }

      // check whether all glyphs in the range are associated with the
      // characters in our clump; if not, we have a discontinuous range, and
      // should extend it unless we've reached the end of the text
      bool allGlyphsAreWithinCluster = true;
      int32_t prevGlyphCharIndex = charStart;
      for (int32_t i = glyphStart; i < glyphEnd; ++i) {
        int32_t glyphCharIndex = glyphToChar[i] - stringRange.location;
        if (isRightToLeft) {
          if (glyphCharIndex > charStart || glyphCharIndex <= charEnd) {
            allGlyphsAreWithinCluster = false;
            break;
          }
          if (glyphCharIndex > prevGlyphCharIndex) {
            inOrder = false;
          }
          prevGlyphCharIndex = glyphCharIndex;
        } else {
          if (glyphCharIndex < charStart || glyphCharIndex >= charEnd) {
            allGlyphsAreWithinCluster = false;
            break;
          }
          if (glyphCharIndex < prevGlyphCharIndex) {
            inOrder = false;
          }
          prevGlyphCharIndex = glyphCharIndex;
        }
      }
      if (allGlyphsAreWithinCluster) {
        break;
      }
    } while (charEnd != charLimit);

    NS_WARN_IF_FALSE(glyphStart < glyphEnd,
                     "character/glyph clump contains no glyphs!");
    if (glyphStart == glyphEnd) {
      ++glyphStart; // make progress - avoid potential infinite loop
      charStart = charEnd;
      continue;
    }

    NS_WARN_IF_FALSE(charStart != charEnd,
                     "character/glyph clump contains no characters!");
    if (charStart == charEnd) {
      glyphStart = glyphEnd; // this is bad - we'll discard the glyph(s),
                             // as there's nowhere to attach them
      continue;
    }

    // Now charStart..charEnd is a ligature clump, corresponding to
    // glyphStart..glyphEnd; Set baseCharIndex to the char we'll actually
    // attach the glyphs to (1st of ligature), and endCharIndex to the limit
    // (position beyond the last char), adjusting for the offset of the
    // stringRange relative to the textRun.
    int32_t baseCharIndex, endCharIndex;
    if (isRightToLeft) {
      while (charEnd >= 0 && charToGlyph[charEnd] == NO_GLYPH) {
        charEnd--;
      }
      baseCharIndex = charEnd + stringRange.location - aStringOffset + 1;
      endCharIndex = charStart + stringRange.location - aStringOffset + 1;
    } else {
      while (charEnd < stringRange.length && charToGlyph[charEnd] == NO_GLYPH) {
        charEnd++;
      }
      baseCharIndex = charStart + stringRange.location - aStringOffset;
      endCharIndex = charEnd + stringRange.location - aStringOffset;
    }

    // Then we check if the clump falls outside our actual string range;
    // if so, just go to the next.
    if (endCharIndex <= 0 || baseCharIndex >= wordLength) {
      glyphStart = glyphEnd;
      charStart = charEnd;
      continue;
    }
    // Ensure we won't try to go beyond the valid length of the word's text
    baseCharIndex = std::max(baseCharIndex, 0);
    endCharIndex = std::min(endCharIndex, wordLength);

    // Now we're ready to set the glyph info in the textRun; measure the glyph
    // width of the first (perhaps only) glyph, to see if it is "Simple"
    int32_t appUnitsPerDevUnit = aShapedText->GetAppUnitsPerDevUnit();
    double toNextGlyph;
    if (glyphStart < numGlyphs-1) {
      toNextGlyph = positions[glyphStart+1].x - positions[glyphStart].x;
    } else {
      toNextGlyph = positions[0].x + runWidth - positions[glyphStart].x;
    }
    int32_t advance = int32_t(toNextGlyph * appUnitsPerDevUnit);

    // Check if it's a simple one-to-one mapping
    int32_t glyphsInClump = glyphEnd - glyphStart;
    if (glyphsInClump == 1 &&
        gfxTextRun::CompressedGlyph::IsSimpleGlyphID(glyphs[glyphStart]) &&
        gfxTextRun::CompressedGlyph::IsSimpleAdvance(advance) &&
        charGlyphs[baseCharIndex].IsClusterStart() &&
        positions[glyphStart].y == 0.0) {
      charGlyphs[baseCharIndex].SetSimpleGlyph(advance, glyphs[glyphStart]);
    } else {
      // collect all glyphs in a list to be assigned to the first char;
      // there must be at least one in the clump, and we already measured
      // its advance, hence the placement of the loop-exit test and the
      // measurement of the next glyph
      while (1) {
        gfxTextRun::DetailedGlyph *details = detailedGlyphs.AppendElement();
        details->mGlyphID = glyphs[glyphStart];
        details->mXOffset = 0;
        details->mYOffset = -positions[glyphStart].y * appUnitsPerDevUnit;
        details->mAdvance = advance;
        if (++glyphStart >= glyphEnd) {
          break;
        }
        if (glyphStart < numGlyphs-1) {
          toNextGlyph = positions[glyphStart+1].x - positions[glyphStart].x;
        } else {
          toNextGlyph = positions[0].x + runWidth - positions[glyphStart].x;
        }
        advance = int32_t(toNextGlyph * appUnitsPerDevUnit);
      }

      gfxTextRun::CompressedGlyph textRunGlyph;
      textRunGlyph.SetComplex(charGlyphs[baseCharIndex].IsClusterStart(),
                              true, detailedGlyphs.Length());
      aShapedText->SetGlyphs(aOffset + baseCharIndex, textRunGlyph,
                             detailedGlyphs.Elements());

      detailedGlyphs.Clear();
    }

    // the rest of the chars in the group are ligature continuations,
    // no associated glyphs
    while (++baseCharIndex != endCharIndex && baseCharIndex < wordLength) {
      gfxShapedText::CompressedGlyph &shapedTextGlyph = charGlyphs[baseCharIndex];
      NS_ASSERTION(!shapedTextGlyph.IsSimpleGlyph(), "overwriting a simple glyph");
      shapedTextGlyph.SetComplex(inOrder && shapedTextGlyph.IsClusterStart(),
                                 false, 0);
    }

    glyphStart = glyphEnd;
    charStart = charEnd;
  }

  return NS_OK;
}