Example #1
int fetch_and_process_audio(FILE *audio,ogg_page *audiopage,
                            ogg_stream_state *vo,
                            vorbis_dsp_state *vd,
                            vorbis_block *vb,
                            int audioflag){
  ogg_packet op;
  int i,j;

  while(audio && !audioflag){
    /* process any audio already buffered */
    spinnit();
    if(ogg_stream_pageout(vo,audiopage)>0) return 1;
    if(ogg_stream_eos(vo))return 0;

    {
      /* read and process more audio */
      signed char readbuffer[4096];
      int toread=4096/2/audio_ch;
      int bytesread=fread(readbuffer,1,toread*2*audio_ch,audio);
      int sampread=bytesread/2/audio_ch;
      float **vorbis_buffer;
      int count=0;

      if(bytesread<=0){
        /* end of file.  this can be done implicitly, but it's
           easier to see here in non-clever fashion.  Tell the
           library we're at end of stream so that it can handle the
           last frame and mark end of stream in the output properly */
        vorbis_analysis_wrote(vd,0);
      }else{
        vorbis_buffer=vorbis_analysis_buffer(vd,sampread);
        /* uninterleave samples */
        for(i=0;i<sampread;i++){
          for(j=0;j<audio_ch;j++){
            vorbis_buffer[j][i]=((readbuffer[count+1]<<8)|
                                 (0x00ff&(int)readbuffer[count]))/32768.f;
            count+=2;
          }
        }
        
        vorbis_analysis_wrote(vd,sampread);
        
      }

      while(vorbis_analysis_blockout(vd,vb)==1){
        
        /* analysis, assume we want to use bitrate management */
        vorbis_analysis(vb,NULL);
        vorbis_bitrate_addblock(vb);
        
        /* weld packets into the bitstream */
        while(vorbis_bitrate_flushpacket(vd,&op))
          ogg_stream_packetin(vo,&op);
        
      }
    }
  }

  return audioflag;
}
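The core of the example is the uninterleave step: raw 16-bit little-endian PCM bytes are reassembled into signed samples and scaled into the per-channel float buffers that vorbis_analysis_buffer() hands back. A minimal standalone sketch of just that conversion (the function name and signature are ours, not part of the example):

#include <stddef.h>

/* Deinterleave 16-bit little-endian PCM into per-channel float arrays,
   scaling each sample by 1/32768 as in the loop above. */
static void deinterleave_s16le(const unsigned char *pcm,size_t samples,
                               int channels,float **out){
  size_t i,count=0;
  int j;
  for(i=0;i<samples;i++){
    for(j=0;j<channels;j++){
      /* the high byte carries the sign, the low byte is unsigned */
      int v=((signed char)pcm[count+1]<<8)|pcm[count];
      out[j][i]=v/32768.f;
      count+=2;
    }
  }
}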
Example #2
/*
 * Class:     org_tritonus_lowlevel_ogg_StreamState
 * Method:    isEOSReached
 * Signature: ()Z
 */
JNIEXPORT jboolean JNICALL
Java_org_tritonus_lowlevel_ogg_StreamState_isEOSReached
(JNIEnv* env, jobject obj)
{
	ogg_stream_state*	handle;
	int			nReturn;

	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_ogg_StreamState_isEOSReached(): begin\n"); }
	handle = getHandle(env, obj);
	nReturn = ogg_stream_eos(handle);
	if (debug_flag) { fprintf(debug_file, "Java_org_tritonus_lowlevel_ogg_StreamState_isEOSReached(): end\n"); }
	return nReturn;
}
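The getHandle() helper is defined elsewhere in Tritonus. A plausible sketch, assuming the Java object stores the native pointer in a long field; the field name "m_lNativeHandle" is a guess, not Tritonus' actual layout:

#include <stdint.h>
#include <jni.h>
#include <ogg/ogg.h>

/* Hypothetical getHandle(): fetch the native ogg_stream_state pointer
   stored in a long field of the Java object. */
static ogg_stream_state* getHandle(JNIEnv* env, jobject obj)
{
	jclass		cls = (*env)->GetObjectClass(env, obj);
	jfieldID	fid = (*env)->GetFieldID(env, cls, "m_lNativeHandle", "J");
	return (ogg_stream_state*) (intptr_t) (*env)->GetLongField(env, obj, fid);
}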
Example #3
int fetch_and_process_video(av_input *_avin,ogg_page *_page,
 ogg_stream_state *_vo,daala_enc_ctx *_dd,int _video_ready){
  ogg_packet op;
  while(!_video_ready){
    size_t ret;
    char   frame[6];
    char   c;
    int    last;
    if(ogg_stream_pageout(_vo,_page)>0)return 1;
    else if(ogg_stream_eos(_vo))return 0;
    ret=fread(frame,1,6,_avin->video_infile);
    if(ret==6){
      od_img *img;
      int     pli;
      if(memcmp(frame,"FRAME",5)!=0){
        fprintf(stderr,"Loss of framing in YUV input data.\n");
        exit(1);
      }
      if(frame[5]!='\n'){
        int bi;
        for(bi=0;bi<121;bi++){
          if(fread(&c,1,1,_avin->video_infile)==1&&c=='\n')break;
        }
        if(bi>=121){
          fprintf(stderr,"Error parsing YUV frame header.\n");
          exit(1);
        }
      }
      /*Read the frame data.*/
      img=&_avin->video_img;
      for(pli=0;pli<img->nplanes;pli++){
        od_img_plane *iplane;
        size_t        plane_sz;
        iplane=img->planes+pli;
        plane_sz=((_avin->video_pic_w+(1<<iplane->xdec)-1)>>iplane->xdec)*
         ((_avin->video_pic_h+(1<<iplane->ydec)-1)>>iplane->ydec);
        ret=fread(iplane->data/*+(_avin->video_pic_y>>iplane->ydec)*iplane->ystride+
         (_avin->video_pic_x>>iplane->xdec)*/,1,plane_sz,_avin->video_infile);
        if(ret!=plane_sz){
          fprintf(stderr,"Error reading YUV frame data.\n");
          exit(1);
        }
      }
      last=0;
    }
    else last=1;
    /*The source listing cuts the example off here. A minimal, assumed
       completion: submit the image and drain the encoder, passing last
       so the final packet is flagged e_o_s (daala_encode_img_in() and
       daala_encode_packet_out() per daala's encoder API).*/
    if(!last)daala_encode_img_in(_dd,&_avin->video_img,0);
    while(daala_encode_packet_out(_dd,last,&op)>0){
      ogg_stream_packetin(_vo,&op);
    }
  }
  return _video_ready;
}
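The plane_sz arithmetic above rounds up before shifting so that odd picture dimensions still cover a whole chroma sample; with 4:2:0 subsampling (xdec=ydec=1), a 1279-pixel-wide picture needs 640 chroma columns, not 639. The rounding isolated as a helper (our naming, same math as the example):

/* Dimension of a subsampled plane: ceil(luma_dim / 2^dec), computed
   with the same round-up-then-shift trick as plane_sz above.
   subsampled_dim(1279,1) returns 640. */
static int subsampled_dim(int luma_dim,int dec){
  return (luma_dim+(1<<dec)-1)>>dec;
}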
Example #4
nsresult
OggWriter::WriteEncodedData(const nsTArray<uint8_t>& aBuffer, int aDuration,
                            uint32_t aFlags)
{
  if (!mInitialized) {
    LOG("[OggWriter] OggWriter has not initialized!");
    return NS_ERROR_FAILURE;
  }

  MOZ_ASSERT(!ogg_stream_eos(&mOggStreamState),
             "No data can be written after e_o_s has been marked.");

  // Set the e_o_s flag to true; once e_o_s has been written to a packet,
  // no more pages may follow the page marked end-of-stream.
  if (aFlags & ContainerWriter::END_OF_STREAM) {
    LOG("[OggWriter] Set e_o_s flag to true.");
    mPacket.e_o_s = 1;
  }

  mPacket.packet = const_cast<uint8_t*>(aBuffer.Elements());
  mPacket.bytes = aBuffer.Length();
  mPacket.granulepos += aDuration;

  // 0 returned on success. -1 returned in the event of internal error.
  // The data in the packet is copied into the internal storage managed by the
  // mOggStreamState, so we are free to alter the contents of mPacket after
  // this call has returned.
  int rc = ogg_stream_packetin(&mOggStreamState, &mPacket);
  if (rc < 0) {
    LOG("[OggWriter] Failed in ogg_stream_packetin! (%d).", rc);
    return NS_ERROR_FAILURE;
  }

  if (mPacket.b_o_s) {
    mPacket.b_o_s = 0;
  }
  mPacket.packetno++;
  mPacket.packet = nullptr;

  return NS_OK;
}
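ogg_stream_packetin() only buffers the packet; the caller still has to pull finished pages back out, which OggWriter does elsewhere. A minimal sketch of that draining loop against plain libogg, where write_page() is a hypothetical sink for the page bytes:

#include <ogg/ogg.h>

extern void write_page(const unsigned char *data, long len);

/* Pull complete pages out of the stream; pass flush != 0 to force out a
   partially filled page, e.g. right after the e_o_s packet goes in. */
static void drain_pages(ogg_stream_state *os, int flush)
{
  ogg_page og;
  while ((flush ? ogg_stream_flush(os, &og)
                : ogg_stream_pageout(os, &og)) > 0) {
    write_page(og.header, og.header_len);
    write_page(og.body, og.body_len);
  }
}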
Example #5
int fetch_and_process_video(FILE *video,ogg_page *videopage,
                            ogg_stream_state *to,
                            theora_state *td,
                            int videoflag){
  /* You'll go to Hell for using static variables */
  static int          state=-1;
  static unsigned char *yuvframe[2];
  unsigned char        *line;
  yuv_buffer          yuv;
  ogg_packet          op;
  int i, e;

  if(state==-1){
    /* initialize the double frame buffer */
    yuvframe[0]=malloc(video_x*video_y*3/2);
    yuvframe[1]=malloc(video_x*video_y*3/2);

    /* clear initial frames as they may be larger than actual video data */
    /* fill Y plane with 0x10 and UV planes with 0x80, for black data */
    memset(yuvframe[0],0x10,video_x*video_y);
    memset(yuvframe[0]+video_x*video_y,0x80,video_x*video_y/2);
    memset(yuvframe[1],0x10,video_x*video_y);
    memset(yuvframe[1]+video_x*video_y,0x80,video_x*video_y/2);

    state=0;
  }

  /* is there a video page flushed?  If not, work until there is. */
  while(!videoflag){
    spinnit();

    if(ogg_stream_pageout(to,videopage)>0) return 1;
    if(ogg_stream_eos(to)) return 0;

    {
      /* read and process more video */
      /* video strategy reads one frame ahead so we know when we're
         at end of stream and can mark the last video frame as such
         (vorbis audio has to flush one frame past the last video frame
         due to overlap and thus doesn't need this extra work) */

      /* have two frame buffers full (if possible) before
         proceeding.  after first pass and until eos, one will
         always be full when we get here */

      for(i=state;i<2;i++){
        char c,frame[6];
        int ret=fread(frame,1,6,video);
        
        /* match and skip the frame header */
        if(ret<6)break;
        if(memcmp(frame,"FRAME",5)){
          fprintf(stderr,"Loss of framing in YUV input data\n");
          exit(1);
        }
        if(frame[5]!='\n'){
          int j;
          for(j=0;j<79;j++)
            if(fread(&c,1,1,video)&&c=='\n')break;
          if(j==79){
            fprintf(stderr,"Error parsing YUV frame header\n");
            exit(1);
          }
        }

        /* read the Y plane into our frame buffer with centering */
        line=yuvframe[i]+video_x*frame_y_offset+frame_x_offset;
        for(e=0;e<frame_y;e++){
          ret=fread(line,1,frame_x,video);
          if(ret!=frame_x)break;
          line+=video_x;
        }
        /* now get U plane*/
        line=yuvframe[i]+(video_x*video_y)
          +(video_x/2)*(frame_y_offset/2)+frame_x_offset/2;
        for(e=0;e<frame_y/2;e++){
          ret=fread(line,1,frame_x/2,video);
          if(ret!=frame_x/2)break;
          line+=video_x/2;
        }
        /* and the V plane*/
        line=yuvframe[i]+(video_x*video_y*5/4)
                  +(video_x/2)*(frame_y_offset/2)+frame_x_offset/2;
        for(e=0;e<frame_y/2;e++){
          ret=fread(line,1,frame_x/2,video);
          if(ret!=frame_x/2)break;
          line+=video_x/2;
        }
        state++;
      }

      if(state<1){
        /* can't get here unless YUV4MPEG stream has no video */
        fprintf(stderr,"Video input contains no frames.\n");
        exit(1);
      }

      /* Theora is a one-frame-in,one-frame-out system; submit a frame
         for compression and pull out the packet */

      {
        yuv.y_width=video_x;
        yuv.y_height=video_y;
        yuv.y_stride=video_x;

        yuv.uv_width=video_x/2;
        yuv.uv_height=video_y/2;
        yuv.uv_stride=video_x/2;

        yuv.y= yuvframe[0];
        yuv.u= yuvframe[0]+ video_x*video_y;
        yuv.v= yuvframe[0]+ video_x*video_y*5/4 ;
      }

      theora_encode_YUVin(td,&yuv);

      /* if there's only one frame, it's the last in the stream */
      if(state<2)
        theora_encode_packetout(td,1,&op);
      else
        theora_encode_packetout(td,0,&op);

      ogg_stream_packetin(to,&op);

      {
        unsigned char *temp=yuvframe[0];
        yuvframe[0]=yuvframe[1];
        yuvframe[1]=temp;
        state--;
      }

    }
  }
  return videoflag;
}
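The buffer arithmetic above is the packed I420 layout: one allocation of video_x*video_y*3/2 bytes with the full-size Y plane first, then the quarter-size U and V planes. The offsets isolated as a helper (our naming, same math as the example):

#include <stddef.h>

/* Plane offsets inside a packed I420 buffer of w*h*3/2 bytes:
   Y at 0 (w*h bytes), U right after it, V after w*h*5/4. */
static void i420_offsets(int w, int h,
                         size_t *y_off, size_t *u_off, size_t *v_off)
{
  *y_off = 0;
  *u_off = (size_t)w*h;        /* U plane starts where Y ends */
  *v_off = (size_t)w*h*5/4;    /* V plane starts where U ends */
}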