Пример #1
0
/* Sink-pad chain function: feed one buffer into the Vorbis decoder,
 * resetting decoder state first when the buffer is flagged DISCONT. */
static GstFlowReturn
vorbis_dec_chain (GstPad * pad, GstBuffer * buffer)
{
  GstVorbisDec *dec = GST_VORBIS_DEC (gst_pad_get_parent (pad));
  gboolean is_discont = GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT);
  GstFlowReturn ret;

  if (G_UNLIKELY (is_discont)) {
    /* resync the decoder on DISCONT */
    GST_DEBUG_OBJECT (dec, "received DISCONT buffer");
    dec->last_timestamp = GST_CLOCK_TIME_NONE;
#ifdef HAVE_VORBIS_SYNTHESIS_RESTART
    vorbis_synthesis_restart (&dec->vd);
#endif
    dec->discont = TRUE;
  }

  /* a negative segment rate means reverse playback */
  ret = (dec->segment.rate >= 0.0)
      ? vorbis_dec_chain_forward (dec, is_discont, buffer)
      : vorbis_dec_chain_reverse (dec, is_discont, buffer);

  /* drop the ref taken by gst_pad_get_parent() */
  gst_object_unref (dec);

  return ret;
}
Пример #2
0
/* Initialize a DSP state for decode (synthesis) against 'vi'.
 * Returns 0 on success, 1 on failure (state is cleared on failure). */
int vorbis_synthesis_init(vorbis_dsp_state *v,vorbis_info *vi){
  /* third argument 0 selects decode-side shared initialization */
  int err = _vds_shared_init(v, vi, 0);

  if (err) {
    /* release anything the partial init set up */
    vorbis_dsp_clear(v);
    return 1;
  }

  /* reset packet/PCM sequencing to the "before first packet" state */
  vorbis_synthesis_restart(v);
  return 0;
}
Пример #3
0
/* GstAudioDecoder flush vfunc: reset the libvorbis synthesis state so
 * decoding resumes cleanly after a seek or discontinuity.  The 'hard'
 * flag is ignored here; the reset is unconditional (and a no-op when
 * vorbis_synthesis_restart is unavailable at build time). */
static void
vorbis_dec_flush (GstAudioDecoder * dec, gboolean hard)
{
#ifdef HAVE_VORBIS_SYNTHESIS_RESTART
  GstVorbisDec *vd = GST_VORBIS_DEC (dec);

  vorbis_synthesis_restart (&vd->vd);
#endif
}
Пример #4
0
/* Initialize a DSP state for decode (synthesis) against 'vi'.
 * Returns 0 on success, -1 when shared initialization fails. */
int vorbis_synthesis_init(vorbis_dsp_state *v,vorbis_info *vi)
{
  int status = _vds_shared_init(v, vi);

  if (status < 0)
    return -1;

  /* this fork's restart variant takes the info struct as well */
  vorbis_synthesis_restart(v, vi);
  return 0;
}
// Try to flush the buffer
// unsuccessfully :(
  /* Flush the decoder at end of stream.
   * NOTE(review): the pcmout result is ignored, so any samples still
   * buffered in the DSP state are discarded rather than emitted; the
   * original author's comment ("unsuccessfully :(") suggests this is a
   * known limitation.  Always returns 1.
   *
   * Fixes vs. original: removed the unused local `ogg_packet packet;`
   * and the dead commented-out vorbis_synthesis_blockin() call, and
   * normalized the mixed tab/space indentation. */
  uint8_t ADM_vorbis::endDecompress( void )
  {
    float **sample_pcm;

    /* Drain whatever PCM the synthesis state has buffered (discarded). */
    vorbis_synthesis_pcmout(&STRUCT->vdsp, &sample_pcm);
    /* Reset the decoder so a subsequent decode starts from clean state. */
    vorbis_synthesis_restart(&STRUCT->vdsp);
    return 1;
  }
Пример #6
0
// Flush pending decode work and reset the Vorbis decoder state.
// Always reports success; a failed restart is deliberately ignored
// (see comment below).
nsresult
VorbisDataDecoder::Flush()
{
  // Drop any queued decode tasks before touching decoder state.
  mTaskQueue->Flush();
  // Ignore failed results from vorbis_synthesis_restart. They
  // aren't fatal and it fails when ResetDecode is called at a
  // time when no vorbis data has been read.
  vorbis_synthesis_restart(&mVorbisDsp);
  mLastFrameTime.reset();
  return NS_OK;
}
Пример #7
0
// Reset decoder state (e.g. for a seek).  The base-class reset always
// runs, even when the libvorbis restart fails; either failure yields
// NS_ERROR_FAILURE.
nsresult nsVorbisState::Reset()
{
  // Remember a restart failure, but don't let it skip the base reset.
  bool restartFailed = mActive && vorbis_synthesis_restart(&mDsp) != 0;

  if (NS_FAILED(nsOggCodecState::Reset())) {
    return NS_ERROR_FAILURE;
  }
  return restartFailed ? NS_ERROR_FAILURE : NS_OK;
}
Пример #8
0
// Asynchronously reset the decoder on its task queue; resolves the
// returned promise with true once the reset has run.
RefPtr<MediaDataDecoder::FlushPromise>
VorbisDataDecoder::Flush()
{
  // Hold a strong ref so the decoder outlives the queued lambda.
  RefPtr<VorbisDataDecoder> self = this;
  return InvokeAsync(mTaskQueue, __func__, [self]() {
    // Ignore failed results from vorbis_synthesis_restart. They
    // aren't fatal and it fails when ResetDecode is called at a
    // time when no vorbis data has been read.
    vorbis_synthesis_restart(&self->mVorbisDsp);
    self->mLastFrameTime.reset();
    return FlushPromise::CreateAndResolve(true, __func__);
  });
}
Пример #9
0
// Synchronously flush the decoder: must be called on the reader task
// queue; blocks until the reset has run on the decoder's task queue.
void
VorbisDataDecoder::Flush()
{
  MOZ_ASSERT(mCallback->OnReaderTaskQueue());
  // NOTE(review): mIsFlushing presumably signals in-flight work to bail
  // out while the flush runs — confirm against the Decode path.
  mIsFlushing = true;
  nsCOMPtr<nsIRunnable> r = NS_NewRunnableFunction([this] () {
    // Ignore failed results from vorbis_synthesis_restart. They
    // aren't fatal and it fails when ResetDecode is called at a
    // time when no vorbis data has been read.
    vorbis_synthesis_restart(&mVorbisDsp);
    mLastFrameTime.reset();
  });
  // Blocks the calling thread until the runnable completes.
  SyncRunnable::DispatchToThread(mTaskQueue, r);
  mIsFlushing = false;
}
Пример #10
0
// Reset decoder state (e.g. for a seek).  The base-class reset always
// runs even if the libvorbis restart fails; granule bookkeeping is only
// cleared when the base reset succeeds (matching the original flow).
nsresult VorbisState::Reset()
{
  // Remember a restart failure, but don't let it skip the base reset.
  bool restartFailed = mActive && vorbis_synthesis_restart(&mDsp) != 0;

  if (NS_FAILED(OggCodecState::Reset())) {
    return NS_ERROR_FAILURE;
  }

  mGranulepos = 0;
  mPrevVorbisBlockSize = 0;

  return restartFailed ? NS_ERROR_FAILURE : NS_OK;
}
Пример #11
0
/* Reset the Vorbis decoder and drop any buffered sample count. */
static int Flush(vorbis* state)
{
    vorbis_synthesis_restart(&state->DSP);
    state->Samples = 0;
    return ERR_NONE;
}
Пример #12
0
/* Initialize a DSP state for decode (synthesis) against 'vi'.
 * Returns 0 on success, 1 when the underlying init fails. */
int vorbis_synthesis_init(vorbis_dsp_state *v,vorbis_info *vi){
  if (_vds_init(v, vi) != 0)
    return 1;

  /* reset sequencing state to "before first packet" */
  vorbis_synthesis_restart(v);
  return 0;
}
Пример #13
0
/* Initialize a DSP state for decode (synthesis) against 'vi'.
 * Always reports success. */
int vorbis_synthesis_init(vorbis_dsp_state *v,vorbis_info *vi){
  /* NOTE(review): _vds_init's return value is ignored here, unlike
   * sibling implementations that propagate an error — confirm that
   * _vds_init cannot fail (or returns void) in this fork. */
  _vds_init(v,vi);
  vorbis_synthesis_restart(v);

  return(0);
}
Пример #14
0
/* Sink-pad event handler for the Vorbis decoder element.
 * Resets decoder state on FLUSH_STOP, configures the segment on
 * NEWSEGMENT (TIME format required), and queues NEWSEGMENT/TAG events
 * until the decoder is initialized so they can be sent later.
 * Returns TRUE when the event was handled/forwarded. */
static gboolean
vorbis_dec_sink_event (GstPad * pad, GstEvent * event)
{
  gboolean ret = FALSE;
  GstVorbisDec *dec;

  /* takes a ref on the parent element; released at 'done' */
  dec = GST_VORBIS_DEC (gst_pad_get_parent (pad));

  GST_LOG_OBJECT (dec, "handling event");
  switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_EOS:
      /* reverse playback: drain buffers accumulated for reversal */
      if (dec->segment.rate < 0.0)
        vorbis_dec_chain_reverse (dec, TRUE, NULL);
      ret = gst_pad_push_event (dec->srcpad, event);
      break;
    case GST_EVENT_FLUSH_START:
      ret = gst_pad_push_event (dec->srcpad, event);
      break;
    case GST_EVENT_FLUSH_STOP:
      /* here we must clean any state in the decoder */
#ifdef HAVE_VORBIS_SYNTHESIS_RESTART
      vorbis_synthesis_restart (&dec->vd);
#endif
      gst_vorbis_dec_reset (dec);
      ret = gst_pad_push_event (dec->srcpad, event);
      break;
    case GST_EVENT_NEWSEGMENT:
    {
      GstFormat format;
      gdouble rate, arate;
      gint64 start, stop, time;
      gboolean update;

      gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
          &start, &stop, &time);

      /* we need time for now */
      if (format != GST_FORMAT_TIME)
        goto newseg_wrong_format;

      GST_DEBUG_OBJECT (dec,
          "newsegment: update %d, rate %g, arate %g, start %" GST_TIME_FORMAT
          ", stop %" GST_TIME_FORMAT ", time %" GST_TIME_FORMAT,
          update, rate, arate, GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
          GST_TIME_ARGS (time));

      /* now configure the values */
      gst_segment_set_newsegment_full (&dec->segment, update,
          rate, arate, format, start, stop, time);
      dec->seqnum = gst_event_get_seqnum (event);

      if (dec->initialized)
        /* and forward */
        ret = gst_pad_push_event (dec->srcpad, event);
      else {
        /* store it to send once we're initialized */
        dec->pendingevents = g_list_append (dec->pendingevents, event);
        ret = TRUE;
      }
      break;
    }
    case GST_EVENT_TAG:
    {
      if (dec->initialized)
        /* and forward */
        ret = gst_pad_push_event (dec->srcpad, event);
      else {
        /* store it to send once we're initialized */
        dec->pendingevents = g_list_append (dec->pendingevents, event);
        ret = TRUE;
      }
      break;
    }
    default:
      ret = gst_pad_push_event (dec->srcpad, event);
      break;
  }
done:
  /* common exit: drop the parent ref taken above */
  gst_object_unref (dec);

  return ret;

  /* ERRORS */
newseg_wrong_format:
  {
    GST_DEBUG_OBJECT (dec, "received non TIME newsegment");
    /* ret stays FALSE for the rejected event */
    goto done;
  }
}
Пример #15
0
// Deliver one compressed sample from upstream into the Vorbis decoder.
// Under the filter lock: validates connection/filter/stream state,
// latches the first media reference time (restarting the synthesis
// state when it is first seen), decodes the sample, then (lock
// released) pushes any produced output downstream via PopulateSamples.
HRESULT Inpin::Receive(IMediaSample* pInSample)
{
    if (pInSample == 0)
        return E_INVALIDARG;

    HRESULT hr;

//#define DEBUG_RECEIVE
#undef DEBUG_RECEIVE

#ifdef DEBUG_RECEIVE
    __int64 start_reftime_, stop_reftime_;
    hr = pInSample->GetTime(&start_reftime_, &stop_reftime_);
#endif

    // Serialize against state changes (Stop/Flush/EndOfStream).
    Filter::Lock lock;

    hr = lock.Seize(m_pFilter);

    if (FAILED(hr))
        return hr;

//#ifdef DEBUG_RECEIVE
//    wodbgstream os;
//    os << L"vp8dec::inpin::Receive: THREAD=0x"
//       << std::hex << GetCurrentThreadId() << std::dec
//       << endl;
//#endif

    // Guard ladder: reject samples we cannot or should not process.
    if (!bool(m_pPinConnection))
        return VFW_E_NOT_CONNECTED;

    Outpin& outpin = m_pFilter->m_outpin;

    if (!bool(outpin.m_pPinConnection))
        return S_FALSE;

    if (!bool(outpin.m_pAllocator))  //should never happen
        return VFW_E_NO_ALLOCATOR;

    if (m_pFilter->m_state == State_Stopped)
        return VFW_E_NOT_RUNNING;

    if (m_bEndOfStream)
        return VFW_E_SAMPLE_REJECTED_EOS;

    if (m_bFlush)
        return S_FALSE;

    if (m_bDone)
        return S_FALSE;

    // First sample with a usable timestamp: capture the stream's start
    // time and reset the decoder so output timing begins from it.
    if (m_first_reftime < 0)
    {
        LONGLONG sp;

        hr = pInSample->GetTime(&m_first_reftime, &sp);

        // Samples without a (non-negative) start time are silently
        // accepted but not decoded until a timed one arrives.
        if (FAILED(hr))
            return S_OK;

        if (m_first_reftime < 0)
            return S_OK;

        m_start_reftime = m_first_reftime;
        m_samples = 0;

#ifdef DEBUG_RECEIVE
        odbgstream os;
        os << std::fixed << std::setprecision(3);

        os << "\nwebmvorbisdec::Inpin::Receive: RESET FIRST REFTIME;"
           << " st=" << m_start_reftime
           << " st[sec]=" << (double(m_start_reftime) / 10000000)
           << endl;
#endif

        const int status = vorbis_synthesis_restart(&m_dsp_state);
        status;  // referenced so release builds (assert compiled out) don't warn
        assert(status == 0);  //success

        m_bDiscontinuity = true;
    }

#ifdef DEBUG_RECEIVE
    {
        odbgstream os;
        os << "webmvorbisdec::inpin::receive: ";

        os << std::fixed << std::setprecision(3);

        if (hr == S_OK)
            os << "start[sec]="
               << double(start_reftime_) / 10000000
               << "; stop[sec]="
               << double(stop_reftime_) / 10000000
               << "; dt[ms]="
               << double(stop_reftime_ - start_reftime_) / 10000;

        else if (hr == VFW_S_NO_STOP_TIME)
            os << "start[sec]=" << double(start_reftime_) / 10000000;

        os << endl;
    }
#endif

    Decode(pInSample);

    // Release the filter lock before delivering output downstream.
    hr = lock.Release();
    assert(SUCCEEDED(hr));

    if (FAILED(hr))
        return hr;

    return PopulateSamples();
}
Пример #16
0
/* Submit one decoded block to the DSP state: overlap-add it against the
 * previous block's lapped tail, advance the PCM read/write cursors, and
 * track/reconcile the granule position.  Returns 0.
 * NOTE(review): this is an MPXPLAY-modified fork of libvorbis'
 * vorbis_synthesis_blockin (float windows, optional 32768x scaling). */
int vorbis_synthesis_blockin(vorbis_dsp_state *v,vorbis_block *vb)
{
 vorbis_info *vi=v->vi;
 codec_setup_info *ci=vi->codec_setup;
 backend_lookup_state *b=v->backend_state;
 unsigned int ch;
 unsigned int thisCenter;
 unsigned int prevCenter;
 unsigned int n,n0,n1;

 /* Caller hasn't consumed all previously returned PCM: this fork
  * restarts the state instead of failing (stock libvorbis would
  * return OV_EINVAL, see the commented-out line). */
 if(v->pcm_current>v->pcm_returned && v->pcm_returned!=-1){
  vorbis_synthesis_restart(v,vi);
  //return(OV_EINVAL);
 }

 /* shift window-size history: lW = last block's size flag, W = this one */
 v->lW=v->W;
 v->W=vb->W;
 v->nW=-1;

 /* a sequence gap invalidates the running granule position */
 if(v->sequence+1 != vb->sequence)
  v->granulepos=-1;

 v->sequence=vb->sequence;

 /* half-block sample counts: current, short and long block */
 n=ci->blocksizes[v->W]/2;
 n0=ci->blocksizes[0]/2;
 n1=ci->blocksizes[1]/2;

 /* ping-pong the lap buffer center between 0 and n1 */
 if(v->centerW){
  thisCenter=n1;
  prevCenter=0;
  v->centerW=0;
 }else{
  thisCenter=0;
  prevCenter=n1;
  v->centerW=n1;
 }

 /* overlap-add each channel: window choice depends on the long/short
  * combination of the previous (lW) and current (W) blocks */
 for(ch=0;ch<vi->outchannels;ch++){
  ogg_double_t *vbpcmch=vb->pcm[ch];
  if(v->lW){
   if(v->W)
    vorbis_fmulwin_add_block(v->pcm[ch]+prevCenter,vbpcmch,b->window[1],n1);
   else{
#ifdef MPXPLAY
    vorbis_fscale_block(v->pcm[ch]+prevCenter,n1/2-n0/2,32768.0F);
#endif
    vorbis_fmulwin_add_block(v->pcm[ch]+prevCenter+n1/2-n0/2,vbpcmch,b->window[0],n0);
#ifdef MPXPLAY
    vorbis_fscale_block(v->pcm[ch]+prevCenter+n1/2+n0/2,n1/2-n0/2,32768.0F);
#endif
   }
  }else{
   if(v->W){
    ogg_double_t *pcm=v->pcm[ch]+prevCenter;
    const unsigned int i=n1/2-n0/2;
    ogg_double_t *p=vbpcmch+i;
    vorbis_fmulwin_add_block(pcm,p,b->window[0],n0);
#ifdef MPXPLAY
    vorbis_fcopy_and_scale_block(pcm+n0,p+n0,i,32768.0F);
#else
    vorbis_fcopy_block(pcm+n0,p+n0,i);
#endif
   }else
    vorbis_fmulwin_add_block(v->pcm[ch]+prevCenter,vbpcmch,b->window[0],n0);
  }
  /* stash this block's second half as the lap for the next block */
  vorbis_fcopy_block(v->pcm[ch]+thisCenter,vbpcmch+n,n);
 }

 /* advance the PCM cursors; -1 means "no PCM returned yet" */
 if(v->pcm_returned==-1){
  v->pcm_returned=thisCenter;
  v->pcm_current=thisCenter;
 }else{
  v->pcm_returned=prevCenter;
  v->pcm_current=prevCenter+ci->blocksizes[v->lW]/4+ci->blocksizes[v->W]/4;
 }

 /* granule position tracking: adopt the block's granulepos when ours is
  * unknown, otherwise advance ours and reconcile mismatches (trimming
  * output at end-of-stream or at the stream start). */
 if(v->granulepos==-1){
  if(vb->granulepos!=-1){
   v->granulepos=vb->granulepos;
  }
 }else{
  v->granulepos+=ci->blocksizes[v->lW]/4+ci->blocksizes[v->W]/4;
  if(vb->granulepos!=-1 && v->granulepos!=vb->granulepos){
   if(v->granulepos>vb->granulepos){
    long extra=v->granulepos-vb->granulepos;

    if(vb->eofflag){
     /* final block: drop the samples past the stream's true length */
     v->pcm_current-=extra;
    }else
     if(vb->sequence == 1){
      /* stream start: skip samples preceding the declared position */
      v->pcm_returned+=extra;
      if(v->pcm_returned>v->pcm_current)
       v->pcm_returned=v->pcm_current;
     }
   }
   v->granulepos=vb->granulepos;
  }
 }

 if(vb->eofflag)
  v->eofflag=1;

 return(0);
}
Пример #17
0
	// Perform a pending seek to this->seekFrame: reset the Theora (and,
	// if present, Vorbis) decoder state, page-seek to the nearest key
	// frame, decode forward to the target frame, then re-align the audio
	// packet queue with the video position (trimming excess samples or
	// padding the gap with silence).  Clears seekFrame when done.
	void VideoClip_Theora::_executeSeek()
	{
// NOTE(review): "#if _DEBUG" here vs "#ifdef _DEBUG" below — confirm
// which form is intended; they differ when _DEBUG is defined as 0.
#if _DEBUG
		log(this->name + " [seek]: seeking to frame " + str(this->seekFrame));
#endif
		int frame = 0;
		float time = this->seekFrame / getFps();
		this->timer->seek(time);
		bool paused = this->timer->isPaused();
		if (!paused)
		{
			this->timer->pause(); // pause until seeking is done
		}
		this->endOfFile = false;
		this->restarted = false;
		this->_resetFrameQueue();
		// reset the video decoder.
		ogg_stream_reset(&this->info.TheoraStreamState);
		th_decode_free(this->info.TheoraDecoder);
		this->info.TheoraDecoder = th_decode_alloc(&this->info.TheoraInfo, this->info.TheoraSetup);
		Mutex::ScopeLock audioMutexLock;
		if (this->audioInterface != NULL)
		{
			// hold the audio mutex while resetting the audio decoder state
			audioMutexLock.acquire(this->audioMutex);
			ogg_stream_reset(&this->info.VorbisStreamState);
			vorbis_synthesis_restart(&this->info.VorbisDSPState);
			this->destroyAllAudioPackets();
		}
		// first seek to desired frame, then figure out the location of the
		// previous key frame and seek to it.
		// then by setting the correct time, the decoder will skip N frames untill
		// we get the frame we want.
		frame = (int)this->_seekPage(this->seekFrame, 1); // find the key frame nearest to the target frame
#ifdef _DEBUG
	//		log(mName + " [seek]: nearest key frame for frame " + str(mSeekFrame) + " is frame: " + str(frame));
#endif
		this->_seekPage(std::max(0, frame - 1), 0);

		ogg_packet opTheora;
		ogg_int64_t granulePos;
		bool granuleSet = false;
		if (frame <= 1)
		{
			if (this->info.TheoraInfo.version_major == 3 && this->info.TheoraInfo.version_minor == 2 && this->info.TheoraInfo.version_subminor == 0)
			{
				granulePos = 0;
			}
			else
			{
				granulePos = 1; // because of difference in granule interpretation in theora streams 3.2.0 and newer ones
			}
			th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &granulePos, sizeof(granulePos));
			granuleSet = true;
		}
		// now that we've found the key frame that preceeds our desired frame, lets keep on decoding frames until we
		// reach our target frame.
		int status = 0;
		while (this->seekFrame != 0)
		{
			if (ogg_stream_packetout(&this->info.TheoraStreamState, &opTheora) > 0)
			{
				if (!granuleSet)
				{
					// theora decoder requires to set the granule pos after seek to be able to determine the current frame
					if (opTheora.granulepos < 0)
					{
						continue; // ignore prev delta frames until we hit a key frame
					}
					th_decode_ctl(this->info.TheoraDecoder, TH_DECCTL_SET_GRANPOS, &opTheora.granulepos, sizeof(opTheora.granulepos));
					granuleSet = true;
				}
				status = th_decode_packetin(this->info.TheoraDecoder, &opTheora, &granulePos);
				// skip packets the decoder rejects (duplicates are fine)
				if (status != 0 && status != TH_DUPFRAME)
				{
					continue;
				}
				frame = (int)th_granule_frame(this->info.TheoraDecoder, granulePos);
				if (frame >= this->seekFrame - 1)
				{
					break;
				}
			}
			else if (!this->_readData())
			{
				log(this->name + " [seek]: fineseeking failed, _readData failed!");
				return;
			}
		}
#ifdef _DEBUG
		//	log(mName + " [seek]: fineseeked to frame " + str(frame + 1) + ", requested: " + str(mSeekFrame));
#endif
		if (this->audioInterface != NULL)
		{
			// read audio data until we reach a timeStamp. this usually takes only one iteration, but just in case let's
			// wrap it in a loop
			float timeStamp = 0.0f;
			while (true)
			{
				timeStamp = this->_decodeAudio();
				if (timeStamp >= 0)
				{
					break;
				}
				this->_readData();
			}
			float rate = (float)this->audioFrequency * this->audioChannelsCount;
			float queuedTime = this->getAudioPacketQueueLength();
			int trimmedCount = 0;
			// at this point there are only 2 possibilities: either we have too much packets and we have to delete
			// the first N ones, or we don't have enough, so let's fill the gap with silence.
			if (time > timeStamp - queuedTime)
			{
				while (this->audioPacketQueue != NULL)
				{
					if (time <= timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate)
					{
						trimmedCount = (int)((timeStamp - queuedTime + this->audioPacketQueue->samplesCount / rate - time) * rate);
						if (this->audioPacketQueue->samplesCount - trimmedCount <= 0)
						{
							this->destroyAudioPacket(this->popAudioPacket()); // if there's no data to be left, just destroy it
						}
						else
						{
							// shift the surviving samples to the front of the packet
							for (int i = trimmedCount, j = 0; i < this->audioPacketQueue->samplesCount; ++i, ++j)
							{
								this->audioPacketQueue->pcmData[j] = this->audioPacketQueue->pcmData[i];
							}
							this->audioPacketQueue->samplesCount -= trimmedCount;
						}
						break;
					}
					queuedTime -= this->audioPacketQueue->samplesCount / rate;
					this->destroyAudioPacket(this->popAudioPacket());
				}
			}
			// expand the first packet with silence.
			else if (this->audioPacketQueue != NULL)
			{
				int i = 0;
				int j = 0;
				int missingCount = (int)((timeStamp - queuedTime - time) * rate);
				if (missingCount > 0)
				{
					// prepend missingCount zero samples before the existing data
					float* samples = new float[missingCount + this->audioPacketQueue->samplesCount];
					if (missingCount > 0)
					{
						memset(samples, 0, missingCount * sizeof(float));
					}
					for (j = 0; i < missingCount + this->audioPacketQueue->samplesCount; ++i, ++j)
					{
						samples[i] = this->audioPacketQueue->pcmData[j];
					}
					delete[] this->audioPacketQueue->pcmData;
					this->audioPacketQueue->pcmData = samples;
				}
			}
			this->lastDecodedFrameNumber = this->seekFrame;
			this->readAudioSamples = (unsigned int)(timeStamp * this->audioFrequency);
			audioMutexLock.release();
		}
		if (!paused)
		{
			this->timer->play();
		}
		// mark the seek request as completed
		this->seekFrame = -1;
	}