void UseSeg (const SegDef* D) /* Use the segment with the given name */ { Segment* Seg = SegmentList; while (Seg) { if (strcmp (Seg->Def->Name, D->Name) == 0) { /* We found this segment. Check if the type is identical */ if (D->AddrSize != ADDR_SIZE_DEFAULT && Seg->Def->AddrSize != D->AddrSize) { Error ("Segment attribute mismatch"); /* Use the new attribute to avoid errors */ Seg->Def->AddrSize = D->AddrSize; } ActiveSeg = Seg; return; } /* Check next segment */ Seg = Seg->List; } /* Segment is not in list, create a new one */ if (D->AddrSize == ADDR_SIZE_DEFAULT) { Seg = NewSegment (D->Name, ADDR_SIZE_ABS); } else { Seg = NewSegment (D->Name, D->AddrSize); } ActiveSeg = Seg; }
void UseSeg (const SegDef* D) /* Use the segment with the given name */ { unsigned I; for (I = 0; I < CollCount (&SegmentList); ++I) { Segment* Seg = CollAtUnchecked (&SegmentList, I); if (strcmp (Seg->Def->Name, D->Name) == 0) { /* We found this segment. Check if the type is identical */ if (D->AddrSize != ADDR_SIZE_DEFAULT && Seg->Def->AddrSize != D->AddrSize) { Error ("Segment attribute mismatch"); /* Use the new attribute to avoid errors */ Seg->Def->AddrSize = D->AddrSize; } ActiveSeg = Seg; return; } } /* Segment is not in list, create a new one */ if (D->AddrSize == ADDR_SIZE_DEFAULT) { ActiveSeg = NewSegment (D->Name, ADDR_SIZE_ABS); } else { ActiveSeg = NewSegment (D->Name, D->AddrSize); } }
// Reconstruct a shared-memory segment from a "shmem created" IPC message.
// On success, *aId and *aNBytes are filled from the message info and the
// returned segment is already mapped (the OS handle is closed after mapping).
// Returns nullptr for a wrong message type, malformed payload, invalid
// handle, or mapping failure.
static already_AddRefed<SharedMemory> ReadSegment(const IPC::Message& aDescriptor, Shmem::id_t* aId, size_t* aNBytes, size_t aExtraSize) {
  // Reject anything that is not the expected message type.
  if (SHMEM_CREATED_MESSAGE_TYPE != aDescriptor.type()) {
    NS_ERROR("expected 'shmem created' message");
    return nullptr;
  }
  SharedMemory::SharedMemoryType type;
  PickleIterator iter(aDescriptor);
  // Pull id, size and memory type out of the message payload.
  if (!ShmemCreated::ReadInfo(&aDescriptor, &iter, aId, aNBytes, &type)) {
    return nullptr;
  }
  RefPtr<SharedMemory> segment = NewSegment(type);
  if (!segment) {
    return nullptr;
  }
  // The platform-specific handle follows the info in the same message.
  if (!segment->ReadHandle(&aDescriptor, &iter)) {
    NS_ERROR("trying to open invalid handle");
    return nullptr;
  }
  aDescriptor.EndRead(iter);
  // Extra bytes (aExtraSize) are reserved beyond the payload size; the total
  // is rounded up to a whole number of pages before mapping.
  size_t size = SharedMemory::PageAlignedSize(*aNBytes + aExtraSize);
  if (!segment->Map(size)) {
    return nullptr;
  }
  // close the handle to the segment after it is mapped
  segment->CloseHandle();
  return segment.forget();
}
// Initialize/reset the decoder state for DXVA MPEG-2 decoding.
// Zeroes all DXVA parameter structures, then fills in the picture-parameter
// fields whose values are fixed constants for MPEG-1/MPEG-2 bitstreams.
void CDXVADecoderMpeg2::Init()
{
    TRACE(_T("CDXVADecoderMpeg2::Init()\n"));

    memset (&m_PictureParams, 0, sizeof(m_PictureParams));
    memset (&m_SliceInfo, 0, sizeof(m_SliceInfo));
    memset (&m_QMatrixData, 0, sizeof(m_QMatrixData));

    m_PictureParams.bMacroblockWidthMinus1 = 15;  // This is equal to "15" for MPEG-1, MPEG-2, H.263, and MPEG-4
    m_PictureParams.bMacroblockHeightMinus1 = 15; // This is equal to "15" for MPEG-1, MPEG-2, H.261, H.263, and MPEG-4
    m_PictureParams.bBlockWidthMinus1 = 7;        // This is equal to "7" for MPEG-1, MPEG-2, H.261, H.263, and MPEG-4
    m_PictureParams.bBlockHeightMinus1 = 7;       // This is equal to "7" for MPEG-1, MPEG-2, H.261, H.263, and MPEG-4
    m_PictureParams.bBPPminus1 = 7;               // It is equal to "7" for MPEG-1, MPEG-2, H.261, and H.263
    m_PictureParams.bChromaFormat = 0x01;         // For MPEG-1, MPEG-2 "Main Profile," H.261 and H.263 bitstreams, this value shall always be set to "01", indicating "4:2:0" format

    m_nMaxWaiting = 5;  // NOTE(review): presumably the max number of pending output pictures -- confirm against the base class

    // No reference pictures are available until the first frames decode.
    m_wRefPictureIndex[0] = NO_REF_FRAME;
    m_wRefPictureIndex[1] = NO_REF_FRAME;
    m_nSliceCount = 0;

    // Only the VLD (variable-length decoding) DXVA mode is supported here.
    switch (GetMode()) {
    case MPEG2_VLD :
        AllocExecuteParams (4);
        break;
    default :
        ASSERT(FALSE);
    }

    m_pMPEG2Buffer = NULL;
    m_nMPEG2BufferSize = 0;

    // Reset the per-segment decoding state.
    NewSegment();
}
Segment* GetSegment (unsigned Name, unsigned char AddrSize, const char* ObjName) /* Search for a segment and return an existing one. If the segment does not ** exist, create a new one and return that. ObjName is only used for the error ** message and may be NULL if the segment is linker generated. */ { /* Try to locate the segment in the table */ Segment* S = SegFind (Name); /* If we don't have that segment already, allocate it using the type of ** the first section. */ if (S == 0) { /* Create a new segment */ S = NewSegment (Name, AddrSize); } else { /* Check if the existing segment has the requested address size */ if (S->AddrSize != AddrSize) { /* Allow an empty object name */ if (ObjName == 0) { ObjName = "[linker generated]"; } Error ("Module '%s': Type mismatch for segment '%s'", ObjName, GetString (Name)); } } /* Return the segment */ return S; }
//this is where the list of segments is created //returns t which is an object whose head points to the first segment of the track track *NewTrack(int maxx, int maxy){ int i, random; track *t = (track *) malloc(sizeof(track)); t->maxx = maxx; t->maxy = maxy; t->tail = NewSegment(10, t, maxx, maxy); drawSegment(t->tail); for(i = 0; i < maxy-2; i++){ t->head = NewSegment(10, t, maxx, maxy); //drawSegment(t->head); } drawSegment(t->head); refresh(); return t; }
/* Translate one ORL object-file section into an internal 'segment' record:
** copies name/size, encodes alignment and combine attributes into seg->attr,
** copies the section contents, registers the section->segment mapping, and
** queues the section's relocation table (if any) for later processing.
*/
static void registerSegment( orl_sec_handle o_shnd )
//**************************************************
{
    orl_sec_flags       sec_flags;
    orl_sec_handle      reloc_section;
    orl_sec_alignment   alignment;
    char                *content;
    int                 ctr;
    segment             *seg;

    seg = NewSegment();
    seg->name = ORLSecGetName( o_shnd );
    seg->size = ORLSecGetSize( o_shnd );
    seg->start = 0;
    seg->use_32 = 1;            // only 32-bit object files use ORL
    seg->attr = ( 2 << 2 );     // (?) combine public

    alignment = ORLSecGetAlignment( o_shnd );
    // FIXME: Need better alignment translation.
    // Map the ORL alignment value to the attribute's alignment field
    // (bits 5..7). The mapping is ad hoc -- see the FIXME above.
    switch( alignment ) {
    case 0:
        seg->attr |= ( 1 << 5 );
        break;
    case 1:
        seg->attr |= ( 2 << 5 );
        break;
    case 3:
    case 4:
        seg->attr |= ( 3 << 5 );
        break;
    case 8:
        seg->attr |= ( 4 << 5 );
        break;
    case 2:
        seg->attr |= ( 5 << 5 );
        break;
    case 12:
        seg->attr |= ( 6 << 5 );
        break;
    default:
//      fprintf( stderr, "NOTE! 'Strange' alignment (%d) found. Using byte alignment.\n", alignment );
        seg->attr |= ( 1 << 5 );
        break;
    }

    // Non-executable sections are treated as data segments.
    sec_flags = ORLSecGetFlags( o_shnd );
    if( !( sec_flags & ORL_SEC_FLAG_EXEC ) ) {
        seg->data_seg = true;
    }

    if( seg->size > 0 && ORLSecGetContents( o_shnd, &content ) == ORL_OKAY ) {
        // NOTE(review): 'Segment' is a global that PutSegByte() presumably
        // writes through -- confirm before reordering these statements.
        Segment = seg;          // Putting contents into segment struct.
        for( ctr = 0; ctr < seg->size; ctr++ ) {
            PutSegByte( ctr, content[ctr] );
        }
    }

    // Remember which segment this section produced, keyed by section handle.
    if( !HashTableInsert( SectionToSegmentTable, (hash_value)o_shnd, (hash_data)seg ) ) {
        SysError( ERR_OUT_OF_MEM, false );
    }

    // Queue the section's relocations, if it has any.
    reloc_section = ORLSecGetRelocTable( o_shnd );
    if( reloc_section ) {
        if( !addRelocSection( reloc_section ) ) {
            SysError( ERR_OUT_OF_MEM, false );
        }
    }
}
// Public entry point: start a new recording segment.
// The audio state is reset first under the audio lock; that lock is released
// (scope ends) before the shared lock is taken, preserving the lock ordering
// used elsewhere in this class.
void Synchronizer::NewSegment() {
    if(m_output_format->m_audio_enabled) {
        AudioLock audiolock(&m_audio_data);
        InitAudioSegment(audiolock.get());
    }
    SharedLock lock(&m_shared_data);
    // Delegates to the private overload that works on already-locked data.
    NewSegment(lock.get());
}
// Public entry point: start a new recording segment.
// The audio segment state is reset inside its own block so the audio lock is
// released before the shared lock is acquired (lock-ordering discipline).
void Synchronizer::NewSegment() {
    {
        AudioLock audiolock(&m_audio_data);
        InitAudioSegment(audiolock.get());
    }
    SharedLock lock(&m_shared_data);
    // Delegates to the private overload that works on already-locked data.
    NewSegment(lock.get());
}
// Called when the audio source reports a gap (hole) in the sample stream.
// Starts a new segment so audio and video stay in sync afterwards; does
// nothing if audio for the current segment has not started yet.
void Synchronizer::ReadAudioHole() {
    Q_ASSERT(m_audio_encoder != NULL);
    SharedLock lock(&m_shared_data);
    // if the audio has not been started, ignore it
    if(!lock->m_segment_audio_started)
        return;
    Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Received hole in audio stream, starting new segment to keep the audio in sync with the video (some video and/or audio may be lost).");
    NewSegment(lock.get());
}
// Allocate, create and map a fresh shared-memory segment of the given type.
// The usable size is aNBytes plus aExtraSize reserved bytes, rounded up to a
// whole number of pages. Returns nullptr on any failure.
static already_AddRefed<SharedMemory>
CreateSegment(SharedMemory::SharedMemoryType aType, size_t aNBytes,
              size_t aExtraSize) {
  // Compute the page-aligned size up front; it is needed for both the
  // creation and the mapping step.
  size_t size = SharedMemory::PageAlignedSize(aNBytes + aExtraSize);
  RefPtr<SharedMemory> segment = NewSegment(aType);
  if (!segment || !segment->Create(size) || !segment->Map(size)) {
    return nullptr;
  }
  return segment.forget();
}
//---------------------------------------------------------------------------- //! @brief フィルタをポーズする //! @return エラーコード //---------------------------------------------------------------------------- STDMETHODIMP CDemuxSource::Pause() { { CAutoLock lock(&m_crtFilterLock); if( m_State == State_Stopped ) { HRESULT hr = Reader()->OnStart(); if( FAILED(hr) ) return hr; NewSegment(); } } return CSource::Pause(); }
//-------------------------------------------------------------------------------------------------- static dstr_Ref_t NewOrFirstSegmentRef ( dstr_Ref_t headRef ///< [IN] The head of the string. ) //-------------------------------------------------------------------------------------------------- { dstr_Ref_t segmentRef = FirstSegmentRef(headRef); if (segmentRef != NULL) { return segmentRef; } segmentRef = NewSegment(); le_sls_Stack(&headRef->head.list, &segmentRef->body.link); return segmentRef; }
//-------------------------------------------------------------------------------------------------- static dstr_Ref_t NewOrNextSegmentRef ( dstr_Ref_t headRef, ///< [IN] The head of the string. dstr_Ref_t currentPtr ///< [IN] The current sub-section of the string. ) //-------------------------------------------------------------------------------------------------- { dstr_Ref_t segmentRef = NextSegmentRef(headRef, currentPtr); if (segmentRef != NULL) { return segmentRef; } segmentRef = NewSegment(); le_sls_AddAfter(&headRef->head.list, ¤tPtr->body.link, &segmentRef->body.link); return segmentRef; }
// Forward a NewSegment notification downstream through the output queue.
// CallQueue is a macro that dispatches the call and contains the 'return'
// statement itself, which is why there is no explicit return here.
// NOTE(review): confirm the macro returns an HRESULT on every path.
HRESULT CBufferFilterOutputPin::DeliverNewSegment(REFERENCE_TIME tStart, REFERENCE_TIME tStop, double dRate)
{
    CallQueue(NewSegment(tStart, tStop, dRate));
}
// Feed a block of captured audio samples into the synchronizer.
// The samples are appended to the shared audio buffer; along the way this
// detects non-monotonic timestamps, audio buffer overflow, and excessive
// audio/video desynchronization, starting a new segment when sync cannot be
// recovered. Timestamps are in microseconds (hence the 1.0e-6 conversions).
// A PI controller (m_av_desync / m_av_desync_i) tracks the drift between
// sample count and wall-clock time so the video can synchronize to it.
void Synchronizer::ReadAudioSamples(unsigned int sample_rate, unsigned int channels, unsigned int sample_count, const uint8_t* data, AVSampleFormat format, int64_t timestamp) {
    Q_ASSERT(m_audio_encoder != NULL);
    SharedLock lock(&m_shared_data);
    if(lock->m_sync_diagram != NULL) {
        lock->m_sync_diagram->AddBlock(1, (double) timestamp * 1.0e-6, (double) timestamp * 1.0e-6 + (double) sample_count / (double) m_audio_sample_rate, QColor(0, 255, 0));
    }
    if(sample_count == 0)
        return;
    // check the timestamp: clamp small backward jumps, warn on large ones
    if(lock->m_segment_audio_started && timestamp < lock->m_segment_audio_last_timestamp) {
        if(timestamp < lock->m_segment_audio_last_timestamp - 10000)
            Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Received audio samples with non-monotonic timestamp.");
        timestamp = lock->m_segment_audio_last_timestamp;
    }
    Q_ASSERT(sample_rate == m_audio_sample_rate); // resampling isn't supported
    Q_ASSERT(channels == m_audio_channels); // remixing isn't supported
    Q_ASSERT(format == AV_SAMPLE_FMT_S16); // only S16 is currently supported
    // avoid memory problems by limiting the audio buffer size
    if(lock->m_audio_buffer.GetSize() / m_audio_sample_size >= MAX_AUDIO_SAMPLES_BUFFERED) {
        if(lock->m_segment_video_started) {
            Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Audio buffer overflow, starting new segment to keep the audio in sync with the video "
                               "(some video and/or audio may be lost). The video input seems to be too slow.");
            NewSegment(lock.get());
        } else {
            // If the video hasn't started yet, it makes more sense to drop the oldest samples.
            // Shifting the start time like this isn't completely accurate, but this shouldn't happen often anyway.
            // The number of samples dropped is calculated so that the buffer will be 90% full after this.
            size_t n = lock->m_audio_buffer.GetSize() / m_audio_sample_size - (MAX_AUDIO_SAMPLES_BUFFERED - MAX_AUDIO_SAMPLES_BUFFERED / 10);
            lock->m_audio_buffer.Drop(n * m_audio_sample_size);
            lock->m_segment_audio_start_time += (int64_t) round((double) n / (double) m_audio_sample_rate * 1.0e6);
        }
    }
    // do speed correction (i.e. do the calculations so the video can synchronize to it)
    // The point of speed correction is to keep video and audio in sync even when the clocks are not running at exactly the same speed.
    // This can happen because the sample rate of the sound card is not always 100% accurate. Even a 0.1% error will result in audio that is
    // seconds too early or too late at the end of a one hour video. This problem doesn't occur on all computers though (I'm not sure why).
    // Another cause of desynchronization is problems/glitches with PulseAudio (e.g. jumps in time when switching between sources).
    if(lock->m_segment_audio_started) {
        // Compare how much audio has been received (in seconds) with how much
        // wall-clock time has passed, minus the correction already applied.
        double sample_length = (double) (lock->m_segment_audio_samples_read + lock->m_audio_buffer.GetSize() / m_audio_sample_size) / (double) m_audio_sample_rate;
        double time_length = (double) (timestamp - lock->m_segment_audio_start_time) * 1.0e-6;
        double current_error = (sample_length - time_length) - lock->m_av_desync;
        if(fabs(current_error) > DESYNC_ERROR_THRESHOLD) {
            // Too far gone to correct smoothly: restart the segment.
            Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Desynchronization is too high, starting new segment to keep the audio "
                               "in sync with the video (some video and/or audio may be lost).");
            NewSegment(lock.get());
        } else {
            // PI controller update; dt is capped at 0.5s to bound the step.
            double dt = std::min((double) (timestamp - lock->m_segment_audio_last_timestamp) * 1.0e-6, 0.5);
            lock->m_av_desync_i = clamp(lock->m_av_desync_i + DESYNC_CORRECTION_I * current_error * dt, -1.0, 1.0);
            lock->m_av_desync += (DESYNC_CORRECTION_P * current_error + lock->m_av_desync_i) * dt;
            // Warn (once) when the integrated drift exceeds 5% either way.
            if(lock->m_av_desync_i < -0.05 && lock->m_warn_desync) {
                lock->m_warn_desync = false;
                Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Audio input is more than 5% too slow!");
            }
            if(lock->m_av_desync_i > 0.05 && lock->m_warn_desync) {
                lock->m_warn_desync = false;
                Logger::LogWarning("[Synchronizer::ReadAudioSamples] Warning: Audio input is more than 5% too fast!");
            }
        }
    }
    // start audio
    if(!lock->m_segment_audio_started) {
        lock->m_segment_audio_started = true;
        lock->m_segment_audio_start_time = timestamp;
        lock->m_segment_audio_stop_time = timestamp;
    }
    // store the samples
    lock->m_segment_audio_last_timestamp = timestamp;
    lock->m_audio_buffer.Write((const char*) data, sample_count * m_audio_sample_size);
    // increase segment stop time
    double sample_length = (double) (lock->m_segment_audio_samples_read + lock->m_audio_buffer.GetSize() / m_audio_sample_size) / (double) m_audio_sample_rate;
    lock->m_segment_audio_stop_time = lock->m_segment_audio_start_time + (int64_t) round(sample_length * 1.0e6);
    //Logger::LogInfo("[Synchronizer::ReadAudioSamples] Added audio samples at " + QString::number(timestamp) + ".");
}
// Public entry point: start a new recording segment.
// Acquires the shared lock and delegates to the private overload that
// operates on the already-locked data.
void Synchronizer::NewSegment() {
    SharedLock lock(&m_shared_data);
    NewSegment(lock.get());
}
// Feed a block of captured audio samples into the synchronizer.
// Pipeline: clamp non-monotonic timestamps -> low-pass-filter the timestamp
// -> measure drift vs. the sample count -> drop samples or insert silence on
// large drift -> apply continuous drift correction via the resampler ratio
// -> convert/remix to float -> resample -> push into the shared audio buffer
// (starting a new segment on buffer overflow once video has started).
// Timestamps are in microseconds (hence the 1.0e-6 conversions). Only
// AV_SAMPLE_FMT_FLT and AV_SAMPLE_FMT_S16 inputs are accepted.
void Synchronizer::ReadAudioSamples(unsigned int channels, unsigned int sample_rate, AVSampleFormat format, unsigned int sample_count, const uint8_t* data, int64_t timestamp) {
    assert(m_output_format->m_audio_enabled);
    // sanity check
    if(sample_count == 0)
        return;
    // add new block to sync diagram
    if(m_sync_diagram != NULL)
        m_sync_diagram->AddBlock(1, (double) timestamp * 1.0e-6, (double) timestamp * 1.0e-6 + (double) sample_count / (double) sample_rate, QColor(0, 255, 0));
    AudioLock audiolock(&m_audio_data);
    // check the timestamp: clamp small backward jumps, warn on large ones
    if(timestamp < audiolock->m_last_timestamp) {
        if(timestamp < audiolock->m_last_timestamp - 10000)
            Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Received audio samples with non-monotonic timestamp."));
        timestamp = audiolock->m_last_timestamp;
    }
    // update the timestamps (first block initializes the filter state)
    int64_t previous_timestamp;
    if(audiolock->m_first_timestamp == (int64_t) AV_NOPTS_VALUE) {
        audiolock->m_filtered_timestamp = timestamp;
        audiolock->m_first_timestamp = timestamp;
        previous_timestamp = timestamp;
    } else {
        previous_timestamp = audiolock->m_last_timestamp;
    }
    audiolock->m_last_timestamp = timestamp;
    // filter the timestamp (simple exponential smoothing)
    int64_t timestamp_delta = (int64_t) sample_count * (int64_t) 1000000 / (int64_t) sample_rate;
    audiolock->m_filtered_timestamp += (timestamp - audiolock->m_filtered_timestamp) / AUDIO_TIMESTAMP_FILTER;
    // calculate drift
    double current_drift = GetAudioDrift(audiolock.get());
    // if there are too many audio samples, drop some of them (unlikely unless you use PulseAudio)
    if(current_drift > DRIFT_ERROR_THRESHOLD && !audiolock->m_drop_samples) {
        audiolock->m_drop_samples = true;
        Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Too many audio samples, dropping samples to keep the audio in sync with the video."));
    }
    // if there are not enough audio samples, insert zeros
    if(current_drift < -DRIFT_ERROR_THRESHOLD && !audiolock->m_insert_samples) {
        audiolock->m_insert_samples = true;
        Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Not enough audio samples, inserting silence to keep the audio in sync with the video."));
    }
    // reset filter and recalculate drift if necessary
    if(audiolock->m_drop_samples || audiolock->m_insert_samples) {
        audiolock->m_filtered_timestamp = timestamp;
        current_drift = GetAudioDrift(audiolock.get());
    }
    // drop samples
    if(audiolock->m_drop_samples) {
        audiolock->m_drop_samples = false;
        // drop samples
        int n = (int) round(current_drift * (double) sample_rate);
        if(n > 0) {
            if(n >= (int) sample_count) {
                audiolock->m_drop_samples = true;
                return; // drop all samples
            }
            // skip the first n frames by advancing the data pointer
            if(format == AV_SAMPLE_FMT_FLT) {
                data += n * channels * sizeof(float);
            } else if(format == AV_SAMPLE_FMT_S16) {
                data += n * channels * sizeof(int16_t);
            } else {
                assert(false);
            }
            sample_count -= n;
        }
    }
    // insert zeros
    unsigned int sample_count_out = 0;
    if(audiolock->m_insert_samples) {
        audiolock->m_insert_samples = false;
        // how many samples should be inserted?
        int n = (int) round(-current_drift * (double) sample_rate);
        if(n > 0) {
            // insert zeros (silence is resampled like normal input)
            audiolock->m_temp_input_buffer.Alloc(n * m_output_format->m_audio_channels);
            std::fill_n(audiolock->m_temp_input_buffer.GetData(), n * m_output_format->m_audio_channels, 0.0f);
            sample_count_out = audiolock->m_fast_resampler->Resample((double) sample_rate / (double) m_output_format->m_audio_sample_rate, 1.0,
                    audiolock->m_temp_input_buffer.GetData(), n, &audiolock->m_temp_output_buffer, sample_count_out);
            // recalculate drift
            current_drift = GetAudioDrift(audiolock.get(), sample_count_out);
        }
    }
    // increase filtered timestamp
    audiolock->m_filtered_timestamp += timestamp_delta;
    // do drift correction
    // The point of drift correction is to keep video and audio in sync even when the clocks are not running at exactly the same speed.
    // This can happen because the sample rate of the sound card is not always 100% accurate. Even a 0.1% error will result in audio that is
    // seconds too early or too late at the end of a one hour video. This problem doesn't occur on all computers though (I'm not sure why).
    // Another cause of desynchronization is problems/glitches with PulseAudio (e.g. jumps in time when switching between sources).
    double drift_correction_dt = fmin((double) (timestamp - previous_timestamp) * 1.0e-6, DRIFT_MAX_BLOCK);
    audiolock->m_average_drift = clamp(audiolock->m_average_drift + DRIFT_CORRECTION_I * current_drift * drift_correction_dt, -0.5, 0.5);
    // Warn (once) when the average drift exceeds 2% either way.
    if(audiolock->m_average_drift < -0.02 && audiolock->m_warn_desync) {
        audiolock->m_warn_desync = false;
        Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio input is more than 2% too slow!"));
    }
    if(audiolock->m_average_drift > 0.02 && audiolock->m_warn_desync) {
        audiolock->m_warn_desync = false;
        Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio input is more than 2% too fast!"));
    }
    double length = (double) sample_count / (double) sample_rate;
    double drift_correction = clamp(DRIFT_CORRECTION_P * current_drift + audiolock->m_average_drift, -0.5, 0.5) * fmin(1.0, DRIFT_MAX_BLOCK / length);
    //qDebug() << "current_drift" << current_drift << "average_drift" << audiolock->m_average_drift << "drift_correction" << drift_correction;
    // convert the samples (to float, remixed to the output channel count)
    const float *data_float = NULL; // to keep GCC happy
    if(format == AV_SAMPLE_FMT_FLT) {
        if(channels == m_output_format->m_audio_channels) {
            data_float = (const float*) data;
        } else {
            audiolock->m_temp_input_buffer.Alloc(sample_count * m_output_format->m_audio_channels);
            data_float = audiolock->m_temp_input_buffer.GetData();
            SampleChannelRemap(sample_count, (const float*) data, channels, audiolock->m_temp_input_buffer.GetData(), m_output_format->m_audio_channels);
        }
    } else if(format == AV_SAMPLE_FMT_S16) {
        audiolock->m_temp_input_buffer.Alloc(sample_count * m_output_format->m_audio_channels);
        data_float = audiolock->m_temp_input_buffer.GetData();
        SampleChannelRemap(sample_count, (const int16_t*) data, channels, audiolock->m_temp_input_buffer.GetData(), m_output_format->m_audio_channels);
    } else {
        assert(false);
    }
    // resample (the drift correction is applied through the resampling ratio)
    sample_count_out = audiolock->m_fast_resampler->Resample((double) sample_rate / (double) m_output_format->m_audio_sample_rate, 1.0 / (1.0 - drift_correction),
            data_float, sample_count, &audiolock->m_temp_output_buffer, sample_count_out);
    audiolock->m_samples_written += sample_count_out;
    SharedLock lock(&m_shared_data);
    // avoid memory problems by limiting the audio buffer size
    if(lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels >= MAX_AUDIO_SAMPLES_BUFFERED) {
        if(lock->m_segment_video_started) {
            Logger::LogWarning("[Synchronizer::ReadAudioSamples] " + Logger::tr("Warning: Audio buffer overflow, starting new segment to keep the audio in sync with the video "
                    "(some video and/or audio may be lost). The video input seems to be too slow."));
            NewSegment(lock.get());
        } else {
            // If the video hasn't started yet, it makes more sense to drop the oldest samples.
            // Shifting the start time like this isn't completely accurate, but this shouldn't happen often anyway.
            // The number of samples dropped is calculated so that the buffer will be 90% full after this.
            size_t n = lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels - MAX_AUDIO_SAMPLES_BUFFERED * 9 / 10;
            lock->m_audio_buffer.Pop(n * m_output_format->m_audio_channels);
            lock->m_segment_audio_start_time += (int64_t) round((double) n / (double) m_output_format->m_audio_sample_rate * 1.0e6);
        }
    }
    // start audio
    if(!lock->m_segment_audio_started) {
        lock->m_segment_audio_started = true;
        lock->m_segment_audio_start_time = timestamp;
        lock->m_segment_audio_stop_time = timestamp;
    }
    // store the samples
    lock->m_audio_buffer.Push(audiolock->m_temp_output_buffer.GetData(), sample_count_out * m_output_format->m_audio_channels);
    // increase segment stop time
    double new_sample_length = (double) (lock->m_segment_audio_samples_read + lock->m_audio_buffer.GetSize() / m_output_format->m_audio_channels) / (double) m_output_format->m_audio_sample_rate;
    lock->m_segment_audio_stop_time = lock->m_segment_audio_start_time + (int64_t) round(new_sample_length * 1.0e6);
}
// Destructor: resets the decoder via NewSegment().
// NOTE(review): presumably NewSegment() also releases m_pMPEG2Buffer that
// was allocated during decoding -- confirm against its definition.
CDXVADecoderMpeg2::~CDXVADecoderMpeg2(void)
{
    TRACE(_T("CDXVADecoderMpeg2::Destroy()\n"));
    NewSegment();
}