// Propagates/refines the output type of a single instruction based on the
// kinds and types of its operands. Dispatches on the instruction class and
// returns as soon as one case has been handled; instructions of other kinds
// fall through to the default, which re-applies their current output type.
void PropagateTypeTasklet::EditInstruction(Instruction* const inst_ptr) {
  auto& inst = *inst_ptr;

  // FieldPtrI: when the base register is defined by a VarDefI of a
  // constructed (closed-cell) class, retarget the field operand to the
  // cell's value field and type the result as pointer-to-type-argument.
  if (auto const field_ptr = inst.DynamicCast<FieldPtrI>()) {
    if (auto const base_reg = field_ptr->GetRx()) {
      if (auto const var_def = base_reg->GetDefI()->DynamicCast<VarDefI>()) {
        auto& cell_class =
            *var_def->output_type().StaticCast<ConstructedClass>();
        field_ptr->GetOperandBox(1)->Replace(*cell_class.Find(*Q_value));
        UpdateOutputType(inst, PointerType::Intern(cell_class.GetTypeArg()));
      }
    }
    return;
  }

  // LoadI: the result type is the pointee of the source pointer; anything
  // other than a pointer operand is a malformed instruction.
  if (inst.Is<LoadI>()) {
    if (auto const pointer_ty = inst.GetSx()->GetTy().DynamicCast<PointerType>()) {
      UpdateOutputType(inst, pointer_ty->pointee_type());
    } else {
      AddErrorInfo(VmErrorInfo(
          inst.source_info(), VmError_Instruction_Invalid, inst,
          String::Format("Expect pointer type: %s", inst.op0().GetTy())));
    }
    return;
  }

  // SelectI: project the nth component out of the defining ValuesType,
  // rejecting out-of-range indices.
  if (auto const select = inst.DynamicCast<SelectI>()) {
    auto const index = select->GetIy();
    auto& values_ty =
        *select->GetVx()->GetDefI()->output_type().StaticCast<ValuesType>();
    if (index >= values_ty.Count()) {
      AddErrorInfo(VmErrorInfo(
          inst.source_info(), VmError_Instruction_Invalid, inst,
          String::Format("Too large index(%d) for %s", index, values_ty)));
      return;
    }
    UpdateOutputType(inst, values_ty.Get(index));
    return;
  }

  // VarDefI: the variable's type is a closed cell over its initializer type.
  if (auto const var_def = inst.DynamicCast<VarDefI>()) {
    auto& cell_ty = var_def->GetSy()->GetTy();
    UpdateOutputType(inst, Ty_ClosedCell->Construct(cell_ty));
    return;
  }

  // Default: keep the instruction's current output type.
  UpdateOutputType(inst, inst.output_type());
}
// Pulls one decoded audio sample from the WMF transform and converts it to
// an AudioData. Returns MF_E_TRANSFORM_NEED_MORE_INPUT when the decoder
// wants more input; S_OK with aOutData possibly still null when a chunk
// produced no frames.
//
// @param aStreamOffset byte offset of this sample in the stream (passed
//                      through to the AudioData we create).
// @param aOutData      receives the decoded AudioData, or stays null.
HRESULT
WMFAudioMFTManager::Output(int64_t aStreamOffset,
                           nsRefPtr<MediaData>& aOutData)
{
  aOutData = nullptr;
  RefPtr<IMFSample> sample;
  HRESULT hr;
  int typeChangeCount = 0;
  while (true) {
    hr = mDecoder->Output(&sample);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      return hr;
    }
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      hr = UpdateOutputType();
      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
      // Catch infinite loops, but some decoders perform at least 2 stream
      // changes on consecutive calls, so be permissive.
      // 100 is arbitrarily > 2.
      NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
      ++typeChangeCount;
      continue;
    }
    break;
  }

  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  RefPtr<IMFMediaBuffer> buffer;
  hr = sample->ConvertToContiguousBuffer(byRef(buffer));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  BYTE* data = nullptr; // Note: *data will be owned by the IMFMediaBuffer, we don't need to free it.
  DWORD maxLength = 0, currentLength = 0;
  // BUG FIX: "&currentLength" had been mangled into "¤tLength" by an
  // HTML-entity ("&curren;") round trip; restored the address-of expression.
  hr = buffer->Lock(&data, &maxLength, &currentLength);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Sometimes when starting decoding, the AAC decoder gives us samples
  // with a negative timestamp. AAC does usually have preroll (or encoder
  // delay) encoded into its bitstream, but the amount encoded to the stream
  // is variable, and it not signalled in-bitstream. There is sometimes
  // signalling in the MP4 container what the preroll amount, but it's
  // inconsistent. It looks like WMF's AAC encoder may take this into
  // account, so strip off samples with a negative timestamp to get us
  // to a 0-timestamp start. This seems to maintain A/V sync, so we can run
  // with this until someone complains...

  // We calculate the timestamp and the duration based on the number of audio
  // frames we've already played. We don't trust the timestamp stored on the
  // IMFSample, as sometimes it's wrong, possibly due to buggy encoders?

  // If this sample block comes after a discontinuity (i.e. a gap or seek)
  // reset the frame counters, and capture the timestamp. Future timestamps
  // will be offset from this block's timestamp.
  UINT32 discontinuity = false;
  sample->GetUINT32(MFSampleExtension_Discontinuity, &discontinuity);
  if (mMustRecaptureAudioPosition || discontinuity) {
    // Update the output type, in case this segment has a different
    // rate. This also triggers on the first sample, which can have a
    // different rate than is advertised in the container, and sometimes we
    // don't get a MF_E_TRANSFORM_STREAM_CHANGE when the rate changes.
    hr = UpdateOutputType();
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    mAudioFrameSum = 0;
    LONGLONG timestampHns = 0;
    // BUG FIX: "&timestampHns" had been mangled into "×tampHns" by an
    // HTML-entity ("&times;") round trip; restored the address-of expression.
    hr = sample->GetSampleTime(&timestampHns);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    mAudioTimeOffset = media::TimeUnit::FromMicroseconds(timestampHns / 10);
    mMustRecaptureAudioPosition = false;
  }

  // We can assume PCM 16 output.
  int32_t numSamples = currentLength / 2;
  int32_t numFrames = numSamples / mAudioChannels;
  MOZ_ASSERT(numFrames >= 0);
  MOZ_ASSERT(numSamples >= 0);
  if (numFrames == 0) {
    // All data from this chunk stripped, loop back and try to output the next
    // frame, if possible.
    return S_OK;
  }

  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[numSamples]);

  // Convert 16-bit PCM to the engine's AudioDataValue format.
  int16_t* pcm = (int16_t*)data;
  for (int32_t i = 0; i < numSamples; ++i) {
    audioData[i] = AudioSampleToFloat(pcm[i]);
  }

  buffer->Unlock();

  media::TimeUnit timestamp =
    mAudioTimeOffset + FramesToTimeUnit(mAudioFrameSum, mAudioRate);
  NS_ENSURE_TRUE(timestamp.IsValid(), E_FAIL);

  mAudioFrameSum += numFrames;

  media::TimeUnit duration = FramesToTimeUnit(numFrames, mAudioRate);
  NS_ENSURE_TRUE(duration.IsValid(), E_FAIL);

  aOutData = new AudioData(aStreamOffset,
                           timestamp.ToMicroseconds(),
                           duration.ToMicroseconds(),
                           numFrames,
                           audioData.forget(),
                           mAudioChannels,
                           mAudioRate);

#ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp.ToMicroseconds(), duration.ToMicroseconds(), currentLength);
#endif

  return S_OK;
}
// Pulls one decoded audio sample from the WMF transform, strips any
// negative-timestamp preroll frames, and converts the rest to an AudioData.
// Returns MF_E_TRANSFORM_NEED_MORE_INPUT when the decoder wants more input;
// S_OK with aOutData possibly still null when a chunk produced no frames.
//
// @param aStreamOffset byte offset of this sample in the stream (passed
//                      through to the AudioData we create).
// @param aOutData      receives the decoded AudioData, or stays null.
HRESULT
WMFAudioMFTManager::Output(int64_t aStreamOffset,
                           nsAutoPtr<MediaData>& aOutData)
{
  aOutData = nullptr;
  RefPtr<IMFSample> sample;
  HRESULT hr;
  int typeChangeCount = 0;
  while (true) {
    hr = mDecoder->Output(&sample);
    if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) {
      return hr;
    }
    if (hr == MF_E_TRANSFORM_STREAM_CHANGE) {
      hr = UpdateOutputType();
      NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
      // BUG FIX: bound the stream-change retries so a decoder that keeps
      // reporting MF_E_TRANSFORM_STREAM_CHANGE can't spin this loop forever
      // (matches the guard in the nsRefPtr overload). Some decoders perform
      // at least 2 stream changes on consecutive calls, so be permissive;
      // 100 is arbitrarily > 2.
      NS_ENSURE_TRUE(typeChangeCount < 100, MF_E_TRANSFORM_STREAM_CHANGE);
      ++typeChangeCount;
      continue;
    }
    break;
  }

  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  RefPtr<IMFMediaBuffer> buffer;
  hr = sample->ConvertToContiguousBuffer(byRef(buffer));
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  BYTE* data = nullptr; // Note: *data will be owned by the IMFMediaBuffer, we don't need to free it.
  DWORD maxLength = 0, currentLength = 0;
  // BUG FIX: "&currentLength" had been mangled into "¤tLength" by an
  // HTML-entity ("&curren;") round trip; restored the address-of expression.
  hr = buffer->Lock(&data, &maxLength, &currentLength);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  // Sometimes when starting decoding, the AAC decoder gives us samples
  // with a negative timestamp. AAC does usually have preroll (or encoder
  // delay) encoded into its bitstream, but the amount encoded to the stream
  // is variable, and it not signalled in-bitstream. There is sometimes
  // signalling in the MP4 container what the preroll amount, but it's
  // inconsistent. It looks like WMF's AAC encoder may take this into
  // account, so strip off samples with a negative timestamp to get us
  // to a 0-timestamp start. This seems to maintain A/V sync, so we can run
  // with this until someone complains...

  // We calculate the timestamp and the duration based on the number of audio
  // frames we've already played. We don't trust the timestamp stored on the
  // IMFSample, as sometimes it's wrong, possibly due to buggy encoders?

  // If this sample block comes after a discontinuity (i.e. a gap or seek)
  // reset the frame counters, and capture the timestamp. Future timestamps
  // will be offset from this block's timestamp.
  UINT32 discontinuity = false;
  int32_t numFramesToStrip = 0;
  sample->GetUINT32(MFSampleExtension_Discontinuity, &discontinuity);
  if (mMustRecaptureAudioPosition || discontinuity) {
    // Update the output type, in case this segment has a different
    // rate. This also triggers on the first sample, which can have a
    // different rate than is advertised in the container, and sometimes we
    // don't get a MF_E_TRANSFORM_STREAM_CHANGE when the rate changes.
    hr = UpdateOutputType();
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    mAudioFrameSum = 0;
    LONGLONG timestampHns = 0;
    // BUG FIX: "&timestampHns" had been mangled into "×tampHns" by an
    // HTML-entity ("&times;") round trip; restored the address-of expression.
    hr = sample->GetSampleTime(&timestampHns);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    hr = HNsToFrames(timestampHns, mAudioRate, &mAudioFrameOffset);
    NS_ENSURE_TRUE(SUCCEEDED(hr), hr);
    if (mAudioFrameOffset < 0) {
      // First sample has a negative timestamp. Strip off the samples until
      // we reach positive territory.
      numFramesToStrip = -mAudioFrameOffset;
      mAudioFrameOffset = 0;
    }
    mMustRecaptureAudioPosition = false;
  }
  MOZ_ASSERT(numFramesToStrip >= 0);

  int32_t numSamples = currentLength / mAudioBytesPerSample;
  int32_t numFrames = numSamples / mAudioChannels;
  int32_t offset = std::min<int32_t>(numFramesToStrip, numFrames);
  numFrames -= offset;
  numSamples -= offset * mAudioChannels;
  MOZ_ASSERT(numFrames >= 0);
  MOZ_ASSERT(numSamples >= 0);
  if (numFrames == 0) {
    // All data from this chunk stripped, loop back and try to output the next
    // frame, if possible.
    return S_OK;
  }

  nsAutoArrayPtr<AudioDataValue> audioData(new AudioDataValue[numSamples]);

  // Just assume PCM output for now...
  MOZ_ASSERT(mAudioBytesPerSample == 2);
  int16_t* pcm = ((int16_t*)data) + (offset * mAudioChannels);
  MOZ_ASSERT(pcm >= (int16_t*)data);
  MOZ_ASSERT(pcm <= (int16_t*)(data + currentLength));
  MOZ_ASSERT(pcm + numSamples <= (int16_t*)(data + currentLength));
  for (int32_t i = 0; i < numSamples; ++i) {
    audioData[i] = AudioSampleToFloat(pcm[i]);
  }

  buffer->Unlock();

  int64_t timestamp;
  hr = FramesToUsecs(mAudioFrameOffset + mAudioFrameSum, mAudioRate, &timestamp);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  mAudioFrameSum += numFrames;

  int64_t duration;
  hr = FramesToUsecs(numFrames, mAudioRate, &duration);
  NS_ENSURE_TRUE(SUCCEEDED(hr), hr);

  aOutData = new AudioData(aStreamOffset,
                           timestamp,
                           duration,
                           numFrames,
                           audioData.forget(),
                           mAudioChannels,
                           mAudioRate);

#ifdef LOG_SAMPLE_DECODE
  LOG("Decoded audio sample! timestamp=%lld duration=%lld currentLength=%u",
      timestamp, duration, currentLength);
#endif

  return S_OK;
}