Example No. 1
S32 encode_vorbis_file(const std::string& in_fname, const std::string& out_fname)
{
#define READ_BUFFER 1024
	unsigned char readbuffer[READ_BUFFER*4+44];   /* local read buffer */	/*Flawfinder: ignore*/

	ogg_stream_state os; /* take physical pages, weld into a logical stream of packets */
	ogg_page         og; /* one Ogg bitstream page.  Vorbis packets are inside */
	ogg_packet       op; /* one raw packet of data for decode */
	
	vorbis_info      vi; /* struct that stores all the static vorbis bitstream settings */
	vorbis_comment   vc; /* struct that stores all the user comments */
	
	vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
	vorbis_block     vb; /* local working space for packet->PCM decode */
	
	int eos=0;
	int result;

	U16 num_channels = 0;
	U32 sample_rate = 0;
	U32 bits_per_sample = 0;

	S32 format_error = 0;
	std::string error_msg;
	if ((format_error = check_for_invalid_wav_formats(in_fname, error_msg)))
	{
		llwarns << error_msg << ": " << in_fname << llendl;
		return(format_error);
	}

#if 1
	unsigned char wav_header[44];	/*Flawfinder: ignore*/

	S32 data_left = 0;

	LLAPRFile infile ;
	infile.open(in_fname,LL_APR_RB, LLAPRFile::global);
	if (!infile.getFileHandle())
	{
		llwarns << "Couldn't open temporary ogg file for writing: " << in_fname
			<< llendl;
		return(LLVORBISENC_SOURCE_OPEN_ERR);
	}

	LLAPRFile outfile ;
	outfile.open(out_fname,LL_APR_WPB, LLAPRFile::global);
	if (!outfile.getFileHandle())
	{
		llwarns << "Couldn't open upload sound file for reading: " << in_fname
			<< llendl;
		return(LLVORBISENC_DEST_OPEN_ERR);
	}
	
	 // parse the chunks
	 U32 chunk_length = 0;
	 U32 file_pos = 12;  // start at the first chunk (usually fmt but not always)
	 
	 while (infile.eof() != APR_EOF)
	 {
		 infile.seek(APR_SET,file_pos);
		 infile.read(wav_header, 44);
		 
		 chunk_length = ((U32) wav_header[7] << 24) 
			 + ((U32) wav_header[6] << 16) 
			 + ((U32) wav_header[5] << 8) 
			 + wav_header[4];
		 
//		 llinfos << "chunk found: '" << wav_header[0] << wav_header[1] << wav_header[2] << wav_header[3] << "'" << llendl;
		 
		 if (!(strncmp((char *)&(wav_header[0]),"fmt ",4)))
		 {
			 num_channels = ((U16) wav_header[11] << 8) + wav_header[10];
			 sample_rate = ((U32) wav_header[15] << 24) 
				 + ((U32) wav_header[14] << 16) 
				 + ((U32) wav_header[13] << 8) 
				 + wav_header[12];
			 bits_per_sample = ((U16) wav_header[23] << 8) + wav_header[22];
		 }
	 	 else if (!(strncmp((char *)&(wav_header[0]),"data",4)))
		 {
			 infile.seek(APR_SET,file_pos+8);
			 // leave the file pointer at the beginning of the data chunk data
			 data_left = chunk_length;			
			 break;
		 }
		 file_pos += (chunk_length + 8);
		 chunk_length = 0;
	 } 
	 

	 /********** Encode setup ************/
	 
	 /* choose an encoding mode */
	 /* (mode 0: 44kHz stereo uncoupled, roughly 128kbps VBR) */
	 vorbis_info_init(&vi);

	 // always encode to mono

	 // SL-52913 & SL-53779 determined this quality level to be our 'good
	 // enough' general-purpose quality level with a nice low bitrate.
	 // Equivalent to oggenc -q0.5
	 F32 quality = 0.05f;
//	 quality = (bitrate==128000 ? 0.4f : 0.1);

//	 if (vorbis_encode_init(&vi, /* num_channels */ 1 ,sample_rate, -1, bitrate, -1))
	 if (vorbis_encode_init_vbr(&vi, /* num_channels */ 1 ,sample_rate, quality))
//	 if (vorbis_encode_setup_managed(&vi,1,sample_rate,-1,bitrate,-1) ||
//		vorbis_encode_ctl(&vi,OV_ECTL_RATEMANAGE_AVG,NULL) ||
//		vorbis_encode_setup_init(&vi))
	{
		llwarns << "unable to initialize vorbis codec at quality " << quality << llendl;
		//		llwarns << "unable to initialize vorbis codec at bitrate " << bitrate << llendl;
		return(LLVORBISENC_DEST_OPEN_ERR);
	}
	 
	 /* add a comment */
	 vorbis_comment_init(&vc);
//	 vorbis_comment_add(&vc,"Linden");
	 
	 /* set up the analysis state and auxiliary encoding storage */
	 vorbis_analysis_init(&vd,&vi);
	 vorbis_block_init(&vd,&vb);
	 
	 /* set up our packet->stream encoder */
	 /* pick a random serial number; that way we can more likely build
		chained streams just by concatenation */
	 ogg_stream_init(&os, ll_rand());
	 
	 /* Vorbis streams begin with three headers; the initial header (with
		most of the codec setup parameters) which is mandated by the Ogg
		bitstream spec.  The second header holds any comment fields.  The
		third header holds the bitstream codebook.  We merely need to
		make the headers, then pass them to libvorbis one at a time;
		libvorbis handles the additional Ogg bitstream constraints */
	 
	 {
		 ogg_packet header;
		 ogg_packet header_comm;
		 ogg_packet header_code;
		 
		 vorbis_analysis_headerout(&vd,&vc,&header,&header_comm,&header_code);
		 ogg_stream_packetin(&os,&header); /* automatically placed in its own
											  page */
		 ogg_stream_packetin(&os,&header_comm);
		 ogg_stream_packetin(&os,&header_code);
		 
		 /* We don't have to write out here, but doing so makes streaming 
		  * much easier, so we do, flushing ALL pages. This ensures the actual
		  * audio data will start on a new page
		  */
		 while(!eos){
			 int result=ogg_stream_flush(&os,&og);
			 if(result==0)break;
			 outfile.write(og.header, og.header_len);
			 outfile.write(og.body, og.body_len);
		 }
		 
	 }
	 
	 
	 while(!eos)
	 {
		 long bytes_per_sample = bits_per_sample/8;

		 long bytes=(long)infile.read(readbuffer,llclamp((S32)(READ_BUFFER*num_channels*bytes_per_sample),0,data_left)); /* stereo hardwired here */
		 
		 if (bytes==0)
		 {
			 /* end of file.  this can be done implicitly in the mainline,
				but it's easier to see here in non-clever fashion.
				Tell the library we're at end of stream so that it can handle
				the last frame and mark end of stream in the output properly */

			 vorbis_analysis_wrote(&vd,0);
//			 eos = 1;
			 
		 }
		 else
		 {
			 long i;
			 long samples;
			 int temp;

			 data_left -= bytes;
             /* data to encode */
			 
			 /* expose the buffer to submit data */
			 float **buffer=vorbis_analysis_buffer(&vd,READ_BUFFER);
			
			 i = 0;
			 samples = bytes / (num_channels * bytes_per_sample);

			 if (num_channels == 2)
			 {
				 if (bytes_per_sample == 2)
				 {
					 /* uninterleave samples */
					 for(i=0; i<samples ;i++)
					 {
					 	 temp =  ((signed char *)readbuffer)[i*4+1];	/*Flawfinder: ignore*/
						 temp += ((signed char *)readbuffer)[i*4+3];	/*Flawfinder: ignore*/
						 temp <<= 8;
						 temp += readbuffer[i*4];
						 temp += readbuffer[i*4+2];

						 buffer[0][i] = ((float)temp) / 65536.f;
					 }
				 }
				 else // presume it's 1 byte per which is unsigned (F#@%ing wav "standard")
				 {
					 /* uninterleave samples */
					 for(i=0; i<samples ;i++)
					 {
					 	 temp  = readbuffer[i*2+0];
						 temp += readbuffer[i*2+1];
						 temp -= 256;
						 buffer[0][i] = ((float)temp) / 256.f;
					 }
				 } 
			 }
			 else if (num_channels == 1)
			 {
				 if (bytes_per_sample == 2)
				 {
					 for(i=0; i < samples ;i++)
					 {
					 	 temp = ((signed char*)readbuffer)[i*2+1];
						 temp <<= 8;
						 temp += readbuffer[i*2];
						 buffer[0][i] = ((float)temp) / 32768.f;
					 }
				 }
				 else // presume it's 1 byte per which is unsigned (F#@%ing wav "standard")
				 {
					 for(i=0; i < samples ;i++)
					 {
						 temp = readbuffer[i];
						 temp -= 128;
						 buffer[0][i] = ((float)temp) / 128.f;
					 }
				 }
			 }
				
			 /* tell the library how much we actually submitted */
			 vorbis_analysis_wrote(&vd,i);
		 }
			 
		 /* vorbis does some data preanalysis, then divvies up blocks for
			more involved (potentially parallel) processing.  Get a single
			block for encoding now */
		 while(vorbis_analysis_blockout(&vd,&vb)==1)
		 {
			 
			 /* analysis */
			/* Do the main analysis, creating a packet */
			vorbis_analysis(&vb, NULL);
			vorbis_bitrate_addblock(&vb);

			while(vorbis_bitrate_flushpacket(&vd, &op)) 
			{
			 
			 /* weld the packet into the bitstream */
			 ogg_stream_packetin(&os,&op);
			 
			 /* write out pages (if any) */
			 while(!eos)
			 {
				 result = ogg_stream_pageout(&os,&og);

				 if(result==0)
				 	break;

				 outfile.write(og.header, og.header_len);
				 outfile.write(og.body, og.body_len);
				 
				 /* this could be set above, but for illustrative purposes, I do
					it here (to show that vorbis does know where the stream ends) */
				 
				 if(ogg_page_eos(&og))
				 	eos=1;
				 
			 }
			}
		 }
	 }
	 
	 
	 
	 /* clean up and exit.  vorbis_info_clear() must be called last */
	 
	 ogg_stream_clear(&os);
	 vorbis_block_clear(&vb);
	 vorbis_dsp_clear(&vd);
	 vorbis_comment_clear(&vc);
	 vorbis_info_clear(&vi);
	 
	 /* ogg_page and ogg_packet structs always point to storage in
		libvorbis.  They're never freed or manipulated directly */
	 
//	 fprintf(stderr,"Vorbis encoding: Done.\n");
	 llinfos << "Vorbis encoding: Done." << llendl;
	 
#endif
	 return(LLVORBISENC_NOERR);
	 
}
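The stereo branches above fold both channels into one mono sample: the sign-extended high bytes are summed and shifted, the unsigned low bytes are added, and the total is divided by 65536 so the sum of two 16-bit samples lands in roughly [-1, 1). A minimal standalone sketch of that little-endian downmix, with a hypothetical helper name and an explicit left/right split that is arithmetically equivalent to the loop above:

/* Hypothetical helper: mix interleaved 16-bit little-endian stereo PCM down
   to mono floats, matching the arithmetic of the 2-channel, 2-byte path above. */
static void downmix_s16le_stereo_to_mono(const unsigned char *pcm,
                                         long frames, float *out)
{
	long i;
	for (i = 0; i < frames; i++)
	{
		int left  = ((int)(signed char)pcm[i*4+1] << 8) + pcm[i*4+0];
		int right = ((int)(signed char)pcm[i*4+3] << 8) + pcm[i*4+2];
		out[i] = ((float)(left + right)) / 65536.f;  /* average of the two samples */
	}
}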
Example No. 2
static void *ucil_theora_encode_thread( ucil_theora_video_file_object_t *vobj )
{
   yuv_buffer yuv;
   double videopos = 0;
   double audiopos = 0;
   int gotpage = 0;
   unsigned char *ds_y_buffer = NULL;
   unsigned char *ds_u_buffer = NULL;
   unsigned char *ds_v_buffer = NULL;
   
   
   yuv.y_width = vobj->ti.width;
   yuv.y_height = vobj->ti.height;
   yuv.y_stride = vobj->ti.width;
   yuv.uv_width = vobj->ti.width / 2;
   yuv.uv_height = vobj->ti.height / 2;
   yuv.uv_stride = vobj->ti.width / 2;

   if( vobj->downsize > 1 || vobj->requires_resizing_frames )
   {
      ds_y_buffer = malloc( yuv.y_width * yuv.y_height );
      ds_u_buffer = malloc( yuv.uv_width * yuv.uv_height );
      ds_v_buffer = malloc( yuv.uv_width * yuv.uv_height );
   }

   vobj->last_frame = NULL;

   while( !vobj->quit_thread )
   {
      unicap_data_buffer_t *data_buffer;
      ogg_page og;

      sem_wait( &vobj->lock );
      data_buffer = ( unicap_data_buffer_t *)g_queue_pop_head( vobj->full_queue );
      sem_post( &vobj->lock );
      if( !data_buffer )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
	 usleep( 1000 );
	 continue;
      }

      if( vobj->frame_count == 0 )
      {
	 memcpy( &vobj->recording_start_time, &data_buffer->fill_time, sizeof( struct timeval ) );
      }

      audiopos = fetch_and_process_audio( vobj, audiopos );

/*       printf( "v: %f   a: %f\n", videopos, audiopos ); */

      if( vobj->audio && ( videopos > audiopos ) )
      {
	 data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	 sem_wait( &vobj->lock );
	 g_queue_push_head( vobj->empty_queue, data_buffer );
	 sem_post( &vobj->lock );
	 continue;
      }


      if( vobj->fill_frames )
      {

	 if( vobj->audio )
	 {
	    if( vobj->last_frame )
	    {
	       unicap_data_buffer_t *last_data_buffer;
	       double streampos;
	       struct timeval streamtime;
	       
	       last_data_buffer = vobj->last_frame;
	       if( vobj->downsize > 1 || vobj->requires_resizing_frames )
	       {
		  yuv.y = ds_y_buffer;
		  yuv.u = ds_u_buffer;
		  yuv.v = ds_v_buffer;
	       }
	       else
	       {
		  yuv.y = last_data_buffer->data;
		  yuv.u = last_data_buffer->data + ( yuv.y_stride * yuv.y_height );
		  yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
	       }

	       streamtime.tv_sec = data_buffer->fill_time.tv_sec - vobj->recording_start_time.tv_sec;
	       streamtime.tv_usec = data_buffer->fill_time.tv_usec;
	       if( data_buffer->fill_time.tv_usec < vobj->recording_start_time.tv_usec )
	       {
		  streamtime.tv_sec--;
		  streamtime.tv_usec += 1000000;
	       }
	       streamtime.tv_usec -= vobj->recording_start_time.tv_usec;
	       streampos = streamtime.tv_sec + ( (double)streamtime.tv_usec / 1000000.0f );
	       
		  
	       // If the streampos is ahead, this means that we get
	       // less than 30 frames per seconds.
	       // --> Fill up the stream 
	       while( streampos > videopos )
	       {
/* 		  printf( "s v: %f   a: %f\n", videopos, audiopos ); */
		  gotpage = 0;
		  if( theora_encode_YUVin( &vobj->th, &yuv ) )
		  {
		     TRACE( "theora_encode_YUVin FAILED!\n" );
		  }
		  theora_encode_packetout( &vobj->th, 0, &vobj->op );
		  ogg_stream_packetin( &vobj->os, &vobj->op );
		  while( ogg_stream_pageout( &vobj->os, &og ) )
		  {
		     double gt;
		     fwrite( og.header, og.header_len, 1, vobj->f );
		     fwrite( og.body, og.body_len, 1, vobj->f );
		     
		     gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
		     if( gt < 0 )
		     {
			continue;
		     }

		     gotpage = 1;
		     videopos = gt;
/* 		     printf( "THEORA: %f\n", videopos ); */
		  }
		  if( !gotpage )
		  {
		     videopos += vobj->frame_interval / 1000000.0f;
		  }
		  
		  vobj->frame_count++;
		  audiopos = fetch_and_process_audio( vobj, audiopos );
	       }

	       // 
	       while( ( videopos + ( vobj->frame_interval / 1000000.0f ) ) < audiopos ) 
	       {
/* 		  printf( "a v: %f   a: %f\n", videopos, audiopos ); */
		  gotpage = 0;
		  if( theora_encode_YUVin( &vobj->th, &yuv ) )
		  {
		     TRACE( "theora_encode_YUVin FAILED!\n" );
		  }
		  theora_encode_packetout( &vobj->th, 0, &vobj->op );
		  ogg_stream_packetin( &vobj->os, &vobj->op );
		  while( ogg_stream_pageout( &vobj->os, &og ) )
		  {
		     double gt;
		     fwrite( og.header, og.header_len, 1, vobj->f );
		     fwrite( og.body, og.body_len, 1, vobj->f );

		     gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
		     if( gt < 0 )
		     {
			continue;
		     }
		     
		     gotpage = 1;
		     videopos = gt;
/* 		     printf( "THEORA: %f\n", videopos ); */
		  }
		  if( !gotpage )
		  {
		     videopos += vobj->frame_interval / 1000000.0f;
		  }
		  
		  vobj->frame_count++;
		  audiopos = fetch_and_process_audio( vobj, audiopos );
	       }
	       last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	       sem_wait( &vobj->lock );
	       g_queue_push_head( vobj->empty_queue, vobj->last_frame );
	       sem_post( &vobj->lock );
	       vobj->last_frame = NULL;
	    }
	 }
	 else
	 {
	    fill_frames( vobj, data_buffer, &yuv, ds_y_buffer, ds_u_buffer, ds_v_buffer );
	 }
	 
      }
      else// ( !vobj->fill_frames )
      {
	 if( vobj->last_frame )
	 {
	    unicap_data_buffer_t *last_data_buffer;
	    
	    last_data_buffer = vobj->last_frame;
	    last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
	    sem_wait( &vobj->lock );
	    g_queue_push_head( vobj->empty_queue, vobj->last_frame );
	    sem_post( &vobj->lock );
	 }
	 vobj->last_frame = NULL;
      }
      
      //
      // Encode the new buffer
      //
      if( vobj->encode_frame_cb )
      {
	 vobj->encode_frame_cb( UNICAP_EVENT_NEW_FRAME, NULL, data_buffer, vobj->encode_frame_cb_data );
      }
      vobj->frame_count++;

      if( vobj->downsize > 1 || vobj->requires_resizing_frames )
      {
	 downsize_yuv420p( vobj->format.size.width, vobj->format.size.height, vobj->downsize, 
			   vobj->ti.width, vobj->ti.height, 
			   ds_y_buffer, ds_u_buffer, ds_v_buffer, 
			   data_buffer->data, data_buffer->data + ( vobj->format.size.width * vobj->format.size.height ), 
			   data_buffer->data + ( vobj->format.size.width * vobj->format.size.height ) + 
			   ( ( vobj->format.size.width * vobj->format.size.height ) / 4 ) );
	 yuv.y = ds_y_buffer;
	 yuv.u = ds_u_buffer;
	 yuv.v = ds_v_buffer;
      }
      else
      {
	 yuv.y = data_buffer->data;
	 yuv.u = data_buffer->data + ( yuv.y_stride * yuv.y_height );
	 yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
      }
      
      if( theora_encode_YUVin( &vobj->th, &yuv ) )
      {
	 TRACE( "theora_encode_YUVin FAILED!\n" );
      }
      memcpy( &vobj->last_frame_time, &data_buffer->fill_time, sizeof( struct timeval ) );

      vobj->last_frame = data_buffer;
      
      theora_encode_packetout( &vobj->th, 0, &vobj->op );
      ogg_stream_packetin( &vobj->os, &vobj->op );
/*       printf( "= v: %f   a: %f\n", videopos, audiopos ); */
      gotpage = 0;
      while( ogg_stream_pageout( &vobj->os, &og ) )
      {
	 double gt;
	 fwrite( og.header, og.header_len, 1, vobj->f );
	 fwrite( og.body, og.body_len, 1, vobj->f );

	 gt = theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) );
	 if( gt < 0 )
	 {
	    continue;
	 }
	 
	 gotpage = 1;
	 videopos = gt;
/* 	 printf( "THEORA: %f\n", videopos ); */
      }
      if( !gotpage )
      {
	 videopos += vobj->frame_interval / 1000000.0f;
      }
   }

   if( vobj->last_frame )
   {
      // encode again to set eos
      unicap_data_buffer_t *last_data_buffer;
      ogg_page og;
      ogg_packet op;

#if HAVE_ALSA      
      if( vobj->audio && !vobj->async_audio_encoding )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
	 vorbis_analysis_wrote( &vobj->vd, 0 );
	 while( vorbis_analysis_blockout( &vobj->vd, &vobj->vb ) == 1 )
	 {
	    vorbis_analysis( &vobj->vb, NULL );
	    vorbis_bitrate_addblock( &vobj->vb );
	    while( vorbis_bitrate_flushpacket( &vobj->vd, &op ) )
	    {
	       ogg_stream_packetin( &vobj->vo, &op );
	    }
	 }
	 while( ogg_stream_pageout( &vobj->vo, &og ) )
	 {
	    fwrite( og.header, og.header_len, 1, vobj->f );
	    fwrite( og.body, og.body_len, 1, vobj->f );
	 }
      }
      else if( vobj->audio )
      {
	 audiopos = fetch_and_process_audio( vobj, audiopos );
      }
#endif

      last_data_buffer = vobj->last_frame;
      if( vobj->downsize > 1 || vobj->requires_resizing_frames )
      {
	 yuv.y = ds_y_buffer;
	 yuv.u = ds_u_buffer;
	 yuv.v = ds_v_buffer;
      }
      else
      {
         yuv.y = last_data_buffer->data;
         yuv.u = last_data_buffer->data + ( yuv.y_stride * yuv.y_height );
         yuv.v = yuv.u + ( yuv.uv_stride * yuv.uv_height );
      }
      if( theora_encode_YUVin( &vobj->th, &yuv ) )
      {
	 TRACE( "theora_encode_YUVin FAILED!\n" );
      }
      theora_encode_packetout( &vobj->th, 1, &vobj->op );
      ogg_stream_packetin( &vobj->os, &vobj->op );
      while( ogg_stream_pageout( &vobj->os, &og ) )
      {
/* 	 printf( "THEORA: %f\n", theora_granule_time( &vobj->th, ogg_page_granulepos( &og ) ) ); */
	 fwrite( og.header, og.header_len, 1, vobj->f );
	 fwrite( og.body, og.body_len, 1, vobj->f );
      }
      last_data_buffer->flags &= ~UNICAP_FLAGS_BUFFER_LOCKED;
      sem_wait( &vobj->lock );
      g_queue_push_head( vobj->empty_queue, vobj->last_frame );
      sem_post( &vobj->lock );

      vobj->last_frame = NULL;
   }

   return NULL;
}
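The fill-frames branch above derives the capture position in seconds by subtracting the recording start time from the buffer's fill time with a manual microsecond borrow, then compares it against the granule-time based videopos to decide how many duplicate frames to submit. A standalone sketch of just that struct timeval subtraction (the helper name is illustrative, not part of the original file):

#include <sys/time.h>

/* Hypothetical helper: elapsed seconds between 'start' and 'now', using the
   same microsecond-borrow logic as the encode thread above. */
static double stream_position_seconds( const struct timeval *start,
                                       const struct timeval *now )
{
   struct timeval diff;
   diff.tv_sec = now->tv_sec - start->tv_sec;
   diff.tv_usec = now->tv_usec;
   if( now->tv_usec < start->tv_usec )
   {
      diff.tv_sec--;
      diff.tv_usec += 1000000;
   }
   diff.tv_usec -= start->tv_usec;
   return diff.tv_sec + ( (double)diff.tv_usec / 1000000.0 );
}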
Example No. 3
static int oggvorbis_encode_frame(AVCodecContext *avccontext,
                                  unsigned char *packets,
                                  int buf_size, void *data)
{
    OggVorbisContext *context = avccontext->priv_data ;
    ogg_packet op ;
    signed short *audio = data ;
    int l;

    if(data) {
        int samples = OGGVORBIS_FRAME_SIZE;
        float **buffer ;

        buffer = vorbis_analysis_buffer(&context->vd, samples) ;
        if(context->vi.channels == 1) {
            for(l = 0 ; l < samples ; l++)
                buffer[0][l]=audio[l]/32768.f;
        } else {
            for(l = 0 ; l < samples ; l++){
                buffer[0][l]=audio[l*2]/32768.f;
                buffer[1][l]=audio[l*2+1]/32768.f;
            }
        }
        vorbis_analysis_wrote(&context->vd, samples) ;
    } else {
        if(!context->eof)
            vorbis_analysis_wrote(&context->vd, 0) ;
        context->eof = 1;
    }

    while(vorbis_analysis_blockout(&context->vd, &context->vb) == 1) {
        vorbis_analysis(&context->vb, NULL);
        vorbis_bitrate_addblock(&context->vb) ;

        while(vorbis_bitrate_flushpacket(&context->vd, &op)) {
            /* i'd love to say the following line is a hack, but sadly it's
             * not, apparently the end of stream decision is in libogg. */
            if(op.bytes==1)
                continue;
            memcpy(context->buffer + context->buffer_index, &op, sizeof(ogg_packet));
            context->buffer_index += sizeof(ogg_packet);
            memcpy(context->buffer + context->buffer_index, op.packet, op.bytes);
            context->buffer_index += op.bytes;
//            av_log(avccontext, AV_LOG_DEBUG, "e%d / %d\n", context->buffer_index, op.bytes);
        }
    }

    l=0;
    if(context->buffer_index){
        ogg_packet *op2= (ogg_packet*)context->buffer;
        op2->packet = context->buffer + sizeof(ogg_packet);

        l=  op2->bytes;
        avccontext->coded_frame->pts= av_rescale_q(op2->granulepos, (AVRational){1, avccontext->sample_rate}, avccontext->time_base);
        //FIXME we should reorder the user supplied pts and not assume that they are spaced by 1/sample_rate

        memcpy(packets, op2->packet, l);
        context->buffer_index -= l + sizeof(ogg_packet);
        memmove(context->buffer, context->buffer + l + sizeof(ogg_packet), context->buffer_index);
//        av_log(avccontext, AV_LOG_DEBUG, "E%d\n", l);
    }

    return l;
}
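Each packet flushed by libvorbis is appended to context->buffer as a raw ogg_packet struct immediately followed by its payload bytes, and the block at the end peels exactly one packet off per call. A minimal sketch of that append step in isolation (no bounds checking; the helper name is illustrative):

#include <string.h>
#include <ogg/ogg.h>

/* Hypothetical helper: append one ogg_packet header plus its payload to a flat
 * byte buffer, mirroring the layout used by oggvorbis_encode_frame() above. */
static int append_packet(unsigned char *buf, int index, const ogg_packet *op)
{
    memcpy(buf + index, op, sizeof(ogg_packet));
    index += sizeof(ogg_packet);
    memcpy(buf + index, op->packet, op->bytes);
    index += op->bytes;
    return index; /* new write offset */
}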
Example No. 4
HOTSPOT PUBLIC SW32 vorbis_encode( const char *filename, void *data, W32 size, W32 in_channels, W32 in_samplesize,
			   W32 rate, W32 quality, W32 max_bitrate, W32 min_bitrate  )
{
	FILE			*fp;
	ogg_stream_state	os;
	ogg_page 		og;
	ogg_packet 		op;

	vorbis_dsp_state	vd;
	vorbis_block		vb;
	vorbis_info		vi;

	ogg_packet		header_main;
	ogg_packet		header_comments;
	ogg_packet		header_codebooks;
	SW32			result;
	W32			serialno = 0;

	vorbis_comment		comments;

	SW32			ret = 0;
	SW32			eos;
	W32			samplesdone = 0;
	W32			packetsdone = 0;
	W32			bytes_written = 0;



	fp = fopen( filename, "wb" );
	if( fp == NULL )
	{
		return 0;
	}

	memset( &comments, 0, sizeof( comments ) );

	channels = in_channels;
	samplesize = in_samplesize;
	ptrCurrent = (PW8)data;
	ptrEnd = (PW8)data + size;


	vorbis_info_init( &vi );

	if( vorbis_encode_setup_vbr( &vi, channels, rate, quality ) )
	{
		fprintf( stderr, "Mode initialisation failed: invalid parameters for quality\n" );
		vorbis_info_clear( &vi );
// Added on cppcheck's advice.
		fclose(fp);
		return 1;
	}

	/* do we have optional hard quality restrictions? */
	if( max_bitrate > 0 || min_bitrate > 0 )
	{
		struct ovectl_ratemanage_arg ai;

		vorbis_encode_ctl( &vi, OV_ECTL_RATEMANAGE_GET, &ai );

		ai.bitrate_hard_min = min_bitrate;
		ai.bitrate_hard_max = max_bitrate;
		ai.management_active = 1;

		vorbis_encode_ctl( &vi, OV_ECTL_RATEMANAGE_SET, &ai );
	}

	/* Turn off management entirely (if it was turned on). */
	vorbis_encode_ctl( &vi, OV_ECTL_RATEMANAGE_SET, NULL );


	vorbis_encode_setup_init( &vi );

	vorbis_analysis_init( &vd, &vi );
	vorbis_block_init( &vd, &vb );

	ogg_stream_init( &os, serialno );

	/* Now, build the three header packets and send through to the stream
	   output stage (but defer actual file output until the main encode loop) */


	/* Build the packets */
	ret = vorbis_analysis_headerout( &vd, &comments,
			&header_main, &header_comments, &header_codebooks );

	/* And stream them out */
	ogg_stream_packetin( &os, &header_main );
	ogg_stream_packetin( &os, &header_comments );
	ogg_stream_packetin( &os, &header_codebooks );

	while( (result = ogg_stream_flush( &os, &og )) )
	{
		ret = fwrite( og.header, 1, og.header_len, fp );
		ret += fwrite( og.body, 1, og.body_len, fp );

		if(ret != og.header_len + og.body_len)
		{
			fprintf( stderr, "[vorbis_encode]: Failed writing header to output stream\n") ;
			ret = 1;

			goto cleanup; /* Bail and try to clean up stuff */
		}
	}


	eos = 0;

	/* Main encode loop - continue until end of file */
	while( ! eos )
	{
		float **buffer = vorbis_analysis_buffer( &vd, READSIZE );
		SW32 samples_read = read_samples( buffer, READSIZE );

		if( samples_read == 0 )
		{
			/* Tell the library that we wrote 0 bytes - signalling the end */
			vorbis_analysis_wrote( &vd, 0 );
		}
		else
		{
			samplesdone += samples_read;

			/* Call progress update every 40 pages */
			if( packetsdone >= 40 )
			{
				packetsdone = 0;

				// progress bar here
			}

			/* Tell the library how many samples (per channel) we wrote
			   into the supplied buffer */
			vorbis_analysis_wrote( &vd, samples_read );
		}

		/* While we can get enough data from the library to analyse, one
		   block at a time... */
		while( vorbis_analysis_blockout( &vd, &vb ) == 1 )
		{

			/* Do the main analysis, creating a packet */
			vorbis_analysis( &vb, NULL );
			vorbis_bitrate_addblock( &vb );

			while( vorbis_bitrate_flushpacket( &vd, &op ) )
			{
				/* Add packet to bitstream */
				ogg_stream_packetin( &os, &op );
				packetsdone++;

				/* If we've gone over a page boundary, we can do actual output,
				   so do so (for however many pages are available) */

				while( ! eos )
				{
					SW32 result = ogg_stream_pageout( &os, &og );
					if( ! result )
					{
						break;
					}

					ret = fwrite( og.header, 1, og.header_len, fp );
					ret += fwrite( og.body, 1, og.body_len, fp );

					if(ret != og.header_len + og.body_len)
					{
						fprintf( stderr, "[vorbis_encode]: Failed writing data to output stream\n" );
						ret = 1;

						goto cleanup; /* Bail */
					}
					else
					{
						bytes_written += ret;
					}

					if( ogg_page_eos( &og ) )
					{
						eos = 1;
					}
				}
			}
		}
	}


cleanup:

	fclose( fp );

	ogg_stream_clear( &os );

	vorbis_block_clear( &vb );
	vorbis_dsp_clear( &vd );
	vorbis_info_clear( &vi );

	return 0;
}
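Example No. 5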
const char *
_edje_multisense_encode_to_ogg_vorbis(char *snd_path, double quality, SF_INFO sfinfo)
{
   ogg_stream_state os; /* take physical pages, weld into a logical stream of packets */
   ogg_page og; /* one Ogg bitstream page.  Vorbis packets are inside */
   ogg_packet op; /* one raw packet of data for decode */
   vorbis_info vi; /* struct that stores all the static vorbis bitstream settings */
   vorbis_comment vc; /* struct that stores all the user comments */
   vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
   vorbis_block vb; /* local working space for packet->PCM decode */
   int eos = 0, ret;
   char *tmp;
   SNDFILE *sfile;
   FILE *fout;

   sfile = sf_open(snd_path, SFM_READ, &sfinfo);
   if (!sfile) return NULL;
   if (!sf_format_check(&sfinfo))
     {
        sf_close(sfile);
        return NULL;
     }
   tmp = malloc(strlen(snd_path) + 1 + 4);
   if (!tmp)
     {
        sf_close(sfile);
        return NULL;
     }
   strcpy(tmp, snd_path);
   snd_path = tmp;
   strcat(snd_path, ".ogg");
   fout = fopen(snd_path, "wb");
   if (!fout)
     {
        free(snd_path);
        sf_close(sfile);
        return NULL;
     }

   /********** Encode setup ************/
   vorbis_info_init(&vi);
   ret = vorbis_encode_init(&vi, sfinfo.channels, sfinfo.samplerate, 
                            -1, (long)(quality * 1000), -1);
   if (ret == OV_EFAULT) printf("OV_EFAULT\n");
   if (ret == OV_EINVAL) printf("OV_EINVAL\n");
   if (ret == OV_EIMPL) printf("OV_EIMPL\n");

   if (ret)
     {
        fclose(fout);
        free(snd_path);
        sf_close(sfile);
        return NULL;
     }

   /* add a comment */
   vorbis_comment_init(&vc);
   vorbis_comment_add_tag(&vc, "", "");

   /* set up the analysis state and auxiliary encoding storage */
   vorbis_analysis_init(&vd, &vi);
   vorbis_block_init(&vd, &vb);

   srand(time(NULL));
   ogg_stream_init(&os, rand());

   ogg_packet header;
   ogg_packet header_comm;
   ogg_packet header_code;

   vorbis_analysis_headerout(&vd, &vc, &header, &header_comm, &header_code);
   ogg_stream_packetin(&os, &header); /* automatically placed in its own page */
   ogg_stream_packetin(&os, &header_comm);
   ogg_stream_packetin(&os, &header_code);

   while (!eos)
     {
        int result = ogg_stream_flush(&os, &og);
        if (!result) break;
        fwrite(og.header, 1, og.header_len, fout);
        fwrite(og.body, 1, og.body_len, fout);
     }

   while (!eos)
     {
        int i, ch;
        float readbuffer[READBUF * 2];
        sf_count_t count;
        
        count = sf_readf_float(sfile, readbuffer, READBUF);

        if (!count)
          vorbis_analysis_wrote(&vd, 0);
        else
          {
             float **buffer = vorbis_analysis_buffer(&vd, count);
             
             /* uninterleave samples */
             for (i = 0; i < count; i++)
               {
                  for (ch = 0; ch < sfinfo.channels; ch++)
                    buffer[ch][i]= readbuffer[(i * sfinfo.channels) + ch];
               }
             vorbis_analysis_wrote(&vd, i);
          }
        while (vorbis_analysis_blockout(&vd, &vb) == 1)
          {
             vorbis_analysis(&vb, NULL);
             vorbis_bitrate_addblock(&vb);

             while (vorbis_bitrate_flushpacket(&vd, &op))
               {
                  ogg_stream_packetin(&os, &op);
                  while (!eos)
                    {
                       int result = ogg_stream_pageout(&os, &og);
                       if (!result) break;
                       fwrite(og.header, 1, og.header_len, fout);
                       fwrite(og.body, 1, og.body_len, fout);
                       if (ogg_page_eos(&og)) eos = 1;
                    }
               }
          }
     }
   ogg_stream_clear(&os);
   vorbis_block_clear(&vb);
   vorbis_dsp_clear(&vd);
   vorbis_comment_clear(&vc);
   vorbis_info_clear(&vi);
   sf_close(sfile);
   fclose (fout);
   return snd_path;
}
Example No. 6
	int StreamEncoder::EncodeBuffer(void* buffer, int length)
	{
		/*
		While there is more audio to encode:

		Submit a chunk of audio data using vorbis_analysis_buffer and vorbis_analysis_wrote.
		Obtain all available blocks using vorbis_analysis_blockout in a loop. For each block obtained:
			Encode the block into a packet (or prepare it for bitrate management) using vorbis_analysis. (It's a good idea to always pass the blocks through the bitrate management mechanism; more information is on the vorbis_analysis page. It does not affect the resulting packets unless you are actually using a bitrate-managed mode.)
			If you are using bitrate management, submit the block using vorbis_bitrate_addblock and obtain packets using vorbis_bitrate_flushpacket.
			Output any obtained packets.
		*/

		//Assume 'length' counts interleaved float samples across two channels,
		//so each channel receives length / 2 frames
		int frames = length / 2;

		float** allocatedChannels = vorbis_analysis_buffer(&mVorbisDspState, frames);

		//I'm not sure of the way that the samples inside of buffer are laid out
		float* bufferChannels = static_cast<float*>(buffer);

		Write("Casted the buffer channels");

		for (int i = 0; i < frames; i++)
		{
			//Assume two interleaved channels here
			allocatedChannels[0][i] = bufferChannels[i*2];
			allocatedChannels[1][i] = bufferChannels[i*2+1];
		}

		if (vorbis_analysis_wrote(&mVorbisDspState, frames) != 0)
		{
			//Error
			return 0;
		}

		int processedSamplesCount = 0;

		//Pull blocks out of the analysis engine; 1 means a block is ready,
		//0 means no more blocks for now, negative values indicate an error
		while (vorbis_analysis_blockout(&mVorbisDspState, &mVorbisBlock) == 1)
		{
			//No Error: run the main analysis on the block we just obtained
			if (vorbis_analysis(&mVorbisBlock, NULL) == 0)
			{
				vorbis_bitrate_addblock(&mVorbisBlock);

				ogg_packet samplePacket;

				while (vorbis_bitrate_flushpacket(&mVorbisDspState, &samplePacket) == 1)
				{
					//Store the bytes inside of buffer
					//memcpy(buffer, samplePacket.packet, samplePacket.bytes)
					//processedSamplesCount += samplePacket.bytes;
				}
			}
		}

		return processedSamplesCount;
	}
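Example No. 7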
	virtual bool Cook(FName Format, const TArray<uint8>& SrcBuffer, FSoundQualityInfo& QualityInfo, TArray<uint8>& CompressedDataStore) const override
	{
		check(Format == NAME_OGG);
#if WITH_OGGVORBIS
		{

			short				ReadBuffer[SAMPLES_TO_READ * SAMPLE_SIZE * 2];

			ogg_stream_state	os;		// take physical pages, weld into a logical stream of packets 
			ogg_page			og;		// one ogg bitstream page.  Vorbis packets are inside
			ogg_packet			op;		// one raw packet of data for decode
			vorbis_info			vi;		// struct that stores all the static vorbis bitstream settings
			vorbis_comment		vc;		// struct that stores all the user comments
			vorbis_dsp_state	vd;		// central working state for the packet->PCM decoder
			vorbis_block		vb;		// local working space for packet->PCM decode
			uint32				i;
			bool				eos;

			// Create a buffer to store compressed data
			CompressedDataStore.Empty();
			FMemoryWriter CompressedData( CompressedDataStore );
			uint32 BufferOffset = 0;

			float CompressionQuality = ( float )( QualityInfo.Quality * VORBIS_QUALITY_MODIFIER ) / 100.0f;
			CompressionQuality = FMath::Clamp( CompressionQuality, -0.1f, 1.0f );

			vorbis_info_init( &vi );

			if( vorbis_encode_init_vbr( &vi, QualityInfo.NumChannels, QualityInfo.SampleRate, CompressionQuality ) )
			{
				return false;
			}

			// add a comment
			vorbis_comment_init( &vc );
			vorbis_comment_add_tag( &vc, "ENCODER", "UnrealEngine4" );

			// set up the analysis state and auxiliary encoding storage
			vorbis_analysis_init( &vd, &vi );
			vorbis_block_init( &vd, &vb );

			// set up our packet->stream encoder
			ogg_stream_init( &os, 0 );

			ogg_packet header;
			ogg_packet header_comm;
			ogg_packet header_code;

			vorbis_analysis_headerout( &vd, &vc, &header, &header_comm, &header_code);
			ogg_stream_packetin( &os, &header );
			ogg_stream_packetin( &os, &header_comm );
			ogg_stream_packetin( &os, &header_code );

			// This ensures the actual audio data will start on a new page, as per spec
			while( true )
			{
				int result = ogg_stream_flush( &os, &og );
				if( result == 0 )
				{
					break;
				}

				CompressedData.Serialize( og.header, og.header_len );
				CompressedData.Serialize( og.body, og.body_len );
			}

			eos = false;
			while( !eos )
			{
				// Read samples
				uint32 BytesToRead = FMath::Min( SAMPLES_TO_READ * QualityInfo.NumChannels * SAMPLE_SIZE, QualityInfo.SampleDataSize - BufferOffset );
				FMemory::Memcpy( ReadBuffer, SrcBuffer.GetData() + BufferOffset, BytesToRead );
				BufferOffset += BytesToRead;

				if( BytesToRead == 0)
				{
					// end of file
					vorbis_analysis_wrote( &vd, 0 );
				}
				else
				{
					// expose the buffer to submit data
					float **buffer = vorbis_analysis_buffer( &vd, SAMPLES_TO_READ );

					if( QualityInfo.NumChannels == 1 )
					{
						for( i = 0; i < BytesToRead / SAMPLE_SIZE; i++ )
						{
							buffer[0][i] = ( ReadBuffer[i] ) / 32768.0f;
						}
					}
					else
					{
						for( i = 0; i < BytesToRead / ( SAMPLE_SIZE * 2 ); i++ )
						{
							buffer[0][i] = ( ReadBuffer[i * 2] ) / 32768.0f;
							buffer[1][i] = ( ReadBuffer[i * 2 + 1] ) / 32768.0f;
						}
					}

					// tell the library how many samples we actually submitted
					vorbis_analysis_wrote( &vd, i );
				}

				// vorbis does some data preanalysis, then divvies up blocks for more involved (potentially parallel) processing.
				while( vorbis_analysis_blockout( &vd, &vb ) == 1 )
				{
					// analysis, assume we want to use bitrate management
					vorbis_analysis( &vb, NULL );
					vorbis_bitrate_addblock( &vb );

					while( vorbis_bitrate_flushpacket( &vd, &op ) )
					{
						// weld the packet into the bitstream
						ogg_stream_packetin( &os, &op );

						// write out pages (if any)
						while( !eos )
						{
							int result = ogg_stream_pageout( &os, &og );
							if( result == 0 )
							{
								break;
							}
							CompressedData.Serialize( og.header, og.header_len );
							CompressedData.Serialize( og.body, og.body_len );

							// this could be set above, but for illustrative purposes, I do	it here (to show that vorbis does know where the stream ends)
							if( ogg_page_eos( &og ) )
							{
								eos = true;
							}
						}
					}
				}
			}

			// clean up and exit.  vorbis_info_clear() must be called last
			ogg_stream_clear( &os );
			vorbis_block_clear( &vb );
			vorbis_dsp_clear( &vd );
			vorbis_comment_clear( &vc );
			vorbis_info_clear( &vi );
			// ogg_page and ogg_packet structs always point to storage in libvorbis.  They're never freed or manipulated directly
		}
		return CompressedDataStore.Num() > 0;
#else
		return false;
#endif		// WITH_OGGVORBIS
	}
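The cooker above maps the engine's 0-100 quality setting onto libvorbis' VBR quality range of roughly -0.1 to 1.0 before calling vorbis_encode_init_vbr. A standalone restatement of that mapping in plain C (the 'modifier' parameter stands in for whatever VORBIS_QUALITY_MODIFIER is defined as; the function name is illustrative):

/* Hypothetical restatement of the quality mapping used by Cook() above. */
static float to_vorbis_quality( int game_quality /* 0..100 */, float modifier )
{
	float quality = ( (float)game_quality * modifier ) / 100.0f;

	/* libvorbis accepts VBR quality in roughly [-0.1, 1.0] */
	if( quality < -0.1f ) quality = -0.1f;
	if( quality > 1.0f )  quality = 1.0f;

	return quality;
}

Example No. 8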
/**
    \fn encode

*/
bool	AUDMEncoder_Vorbis::encode(uint8_t *dest, uint32_t *len, uint32_t *samples)
{
  uint32_t nbout;
  uint32_t consumed=0;
  float **float_samples;
    int channels=wavheader.channels;
  ogg_packet op ;

  *len = 0;
  _chunk=1024*channels;
  int count=ROUNDMAX;
// Check that we have packet from previous pass
  while(count--)
  {
    if(!refillBuffer(_chunk ))
    {
      return 0;
    }

    if(tmptail-tmphead<_chunk)
    {
      return 0;
    }

	//printf("Round %d\n",ROUNDMAX-count);
    if(vorbis_analysis_blockout(&VD, &VB) == 1)
    {
      vorbis_analysis(&VB, NULL);
      vorbis_bitrate_addblock(&VB) ;
	//printf("Blockout\n");

      if(vorbis_bitrate_flushpacket(&VD, &op))
      {
        memcpy(dest, op.packet,op.bytes);
        *len=op.bytes;
        *samples=op.granulepos-_oldpos;
        _oldpos=op.granulepos;
        //  aprintf("1st packet :sampl:%lu len :%lu sample:%lu abs:%llu\n",*samples,op.bytes,total,op.granulepos);
        return 1;
      }
    }


    uint32_t nbSample=(tmptail-tmphead)/channels;
    if(nbSample>1024) nbSample=1024;
    float_samples=vorbis_analysis_buffer(&VD, nbSample) ;
    int index=tmphead;
    // Put our samples in incoming buffer
    //reorderChannels(&(tmpbuffer[tmphead]), nbSample,_incoming->getChannelMapping(),outputChannelMapping);
    reorderToPlanar2(&(tmpbuffer[tmphead]),float_samples,nbSample,_incoming->getChannelMapping(),outputChannelMapping);
#if 0
    for (int i = 0; i < nbSample; i++)
      for (int j = 0; j < channels; j++) {
      float_samples[j][i] = tmpbuffer[index++];
      if (float_samples[j][i] > 1) float_samples[j][i] = 1;
      if (float_samples[j][i] < -1) float_samples[j][i] = -1;
      }
#endif
      // Buffer full, go go go
      vorbis_analysis_wrote(&VD, nbSample) ;
      tmphead+=nbSample*channels;
  }
  return 0;

}
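The sample count reported for each emitted packet above is simply the difference between consecutive granule positions (op.granulepos minus the previously stored _oldpos). A minimal sketch of that bookkeeping in isolation (names illustrative; state kept in a static for brevity):

#include <ogg/ogg.h>

/* Hypothetical granulepos bookkeeping, matching the _oldpos handling above. */
static ogg_int64_t last_granulepos = 0;

static ogg_int64_t samples_in_packet(const ogg_packet *op)
{
  ogg_int64_t samples = op->granulepos - last_granulepos;
  last_granulepos = op->granulepos;
  return samples;
}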
Example No. 9
// @todo, nicely set e_o_s flag
void VorbisWriter::onAudioIn(const void* input, unsigned long nframes) {
  if(!fp) {
    return;
  }
  if(!is_setup) {
    printf("ERROR: onAudioIn(), first call open().\n");
    return;
  }

  float** buffer = vorbis_analysis_buffer(&vd, nframes);

  // check if we need to convert the input.
  if(format == VW_INT16) {
    short int* input_ptr = (short int*)input;
    float* out_channel_ptr = NULL;
    short int* in_channel_ptr = NULL;
    for(int i = 0; i < num_channels; ++i) {
      in_channel_ptr = input_ptr + i;
      out_channel_ptr = buffer[i];
      for(int j = 0; j < nframes; ++j) {
        out_channel_ptr[j] = (float(*(in_channel_ptr))) / 32768.0f;
        in_channel_ptr += num_channels;
      }
    }
  }
  else if(format == VW_FLOAT32) {
    int src_dx = 0;
    int dest_dx = 0;
    float* input_ptr = (float*)input;
    float* out_channel_ptr = NULL;
    float* in_channel_ptr = NULL;
    for(int i = 0; i < num_channels; ++i) {
      src_dx = 0;
      out_channel_ptr = buffer[i];
      in_channel_ptr = input_ptr + i;
      for(int j = 0; j < nframes; ++j) {
        out_channel_ptr[j] = *in_channel_ptr;
        in_channel_ptr += num_channels;
      }
    }
  }

  int r = vorbis_analysis_wrote(&vd, nframes);
  if (r != 0) {
    printf("ERROR: error with vorbis_analysis_wrote\n");
  }

  // write out 
  while(vorbis_analysis_blockout(&vd, &vb) == 1) {
    vorbis_analysis(&vb, NULL);
    vorbis_bitrate_addblock(&vb);
    while(vorbis_bitrate_flushpacket(&vd, &op)) {
      ogg_stream_packetin(&os, &op);
      while(true) {
        int result = ogg_stream_pageout(&os, &og);
        if(result == 0) {
          break;
        }
        fwrite(og.header, 1, og.header_len, fp);
        fwrite(og.body, 1, og.body_len, fp);
      }
    }
  }
}
Example No. 10
/****************************************************************************
 * Encode: the whole thing
 ****************************************************************************
 * This function spits out ogg packets.
 ****************************************************************************/
static block_t *Encode( encoder_t *p_enc, block_t *p_aout_buf )
{
    encoder_sys_t *p_sys = p_enc->p_sys;
    ogg_packet oggpacket;
    block_t *p_block, *p_chain = NULL;
    float **buffer;

    /* FIXME: flush buffers in here */
    if( unlikely( !p_aout_buf ) ) return NULL;

    mtime_t i_pts = p_aout_buf->i_pts -
                (mtime_t)1000000 * (mtime_t)p_sys->i_samples_delay /
                (mtime_t)p_enc->fmt_in.audio.i_rate;

    p_sys->i_samples_delay += p_aout_buf->i_nb_samples;

    buffer = vorbis_analysis_buffer( &p_sys->vd, p_aout_buf->i_nb_samples );

    /* convert samples to float and uninterleave */
    for( unsigned int i = 0; i < p_sys->i_channels; i++ )
    {
        for( unsigned int j = 0 ; j < p_aout_buf->i_nb_samples ; j++ )
        {
            buffer[i][j]= ((float *)p_aout_buf->p_buffer)
                                    [j * p_sys->i_channels + p_sys->pi_chan_table[i]];
        }
    }

    vorbis_analysis_wrote( &p_sys->vd, p_aout_buf->i_nb_samples );

    while( vorbis_analysis_blockout( &p_sys->vd, &p_sys->vb ) == 1 )
    {
        int i_samples;

        vorbis_analysis( &p_sys->vb, NULL );
        vorbis_bitrate_addblock( &p_sys->vb );

        while( vorbis_bitrate_flushpacket( &p_sys->vd, &oggpacket ) )
        {
            int i_block_size;
            p_block = block_Alloc( oggpacket.bytes );
            memcpy( p_block->p_buffer, oggpacket.packet, oggpacket.bytes );

            i_block_size = vorbis_packet_blocksize( &p_sys->vi, &oggpacket );

            if( i_block_size < 0 ) i_block_size = 0;
            i_samples = ( p_sys->i_last_block_size + i_block_size ) >> 2;
            p_sys->i_last_block_size = i_block_size;

            p_block->i_length = (mtime_t)1000000 *
                (mtime_t)i_samples / (mtime_t)p_enc->fmt_in.audio.i_rate;

            p_block->i_dts = p_block->i_pts = i_pts;

            p_sys->i_samples_delay -= i_samples;

            /* Update pts */
            i_pts += p_block->i_length;
            block_ChainAppend( &p_chain, p_block );
        }
    }

    return p_chain;
}
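The duration assigned to each output block above follows the standard Vorbis overlap-add rule: a packet contributes (previous blocksize + current blocksize) / 4 samples, which is why i_samples is computed from vorbis_packet_blocksize of the current packet and the stored size of the previous one. For instance, assuming two consecutive long blocks of 2048 samples at 44.1 kHz, that gives (2048 + 2048) / 4 = 1024 samples, or roughly 23.2 ms per packet.

Example No. 11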
void *EncodeSoundBuffer(ProgData *pdata){
    int sampread=pdata->periodsize;
#ifdef HAVE_LIBJACK
    void *jackbuf=NULL;
    if(pdata->args.use_jack){
        jackbuf=malloc(pdata->sound_framesize*pdata->jdata->buffersize);
    }
#endif
    pdata->v_encoding_clean=0;
    while((pdata->running)){
        float **vorbis_buffer;
        int count=0,i,j;
        SndBuffer *buff=NULL;

        if (pdata->paused) {
            pthread_mutex_lock(&pdata->pause_mutex);
            pthread_cond_wait(&pdata->pause_cond, &pdata->pause_mutex);
            pthread_mutex_unlock(&pdata->pause_mutex);
        }
        if(!pdata->args.use_jack){
            if(pdata->sound_buffer==NULL){
                pdata->v_enc_thread_waiting=1;
                pthread_mutex_lock(&pdata->snd_buff_ready_mutex);
                pthread_cond_wait(&pdata->sound_data_read,
                                &pdata->snd_buff_ready_mutex);
                pthread_mutex_unlock(&pdata->snd_buff_ready_mutex);
                pdata->v_enc_thread_waiting=0;
            }
            if(pdata->sound_buffer==NULL || !pdata->running)
                break;
            pthread_mutex_lock(&pdata->sound_buffer_mutex);
            buff=pdata->sound_buffer;
            //advance the list
            pdata->sound_buffer=pdata->sound_buffer->next;
            pthread_mutex_unlock(&pdata->sound_buffer_mutex);

            vorbis_buffer=vorbis_analysis_buffer(&pdata->enc_data->m_vo_dsp,
                                                 sampread);

            for(i=0;i<sampread;i++){
                for(j=0;j<pdata->args.channels;j++){
                    vorbis_buffer[j][i]=((buff->data[count+1]<<8)|
                                        (0x00ff&(int)buff->data[count]))/
                                        32768.f;
                    count+=2;
                }
            }
            free(buff->data);
            free(buff);
        }
        else{
#ifdef HAVE_LIBJACK
            if((*jack_ringbuffer_read_space)(pdata->jdata->sound_buffer)>=
               (pdata->sound_framesize*pdata->jdata->buffersize)){
                (*jack_ringbuffer_read)(pdata->jdata->sound_buffer,
                                          jackbuf,
                                          (pdata->sound_framesize*
                                           pdata->jdata->buffersize));
                vorbis_buffer=vorbis_analysis_buffer(&pdata->enc_data->m_vo_dsp,
                                                    sampread);
                for(j=0;j<pdata->args.channels;j++){
                    for(i=0;i<sampread;i++){
                        vorbis_buffer[j][i]=((float*)jackbuf)[count];
                        count++;
                    }
                }
            }
            else{
                pdata->v_enc_thread_waiting=1;
                pthread_mutex_lock(&pdata->snd_buff_ready_mutex);
                pthread_cond_wait(&pdata->sound_data_read,
                                &pdata->snd_buff_ready_mutex);
                pthread_mutex_unlock(&pdata->snd_buff_ready_mutex);
                pdata->v_enc_thread_waiting=0;
                continue;
            }
#endif
        }
        vorbis_analysis_wrote(&pdata->enc_data->m_vo_dsp,sampread);

        pthread_mutex_lock(&pdata->libogg_mutex);
        while(vorbis_analysis_blockout(&pdata->enc_data->m_vo_dsp,
                                       &pdata->enc_data->m_vo_block)==1){

            vorbis_analysis(&pdata->enc_data->m_vo_block,NULL);
            vorbis_bitrate_addblock(&pdata->enc_data->m_vo_block);

            while(vorbis_bitrate_flushpacket(&pdata->enc_data->m_vo_dsp,
                                             &pdata->enc_data->m_ogg_pckt2)){
                ogg_stream_packetin(&pdata->enc_data->m_ogg_vs,
                                    &pdata->enc_data->m_ogg_pckt2);
            }
        }
        pthread_mutex_unlock(&pdata->libogg_mutex);

        pdata->avd-=pdata->periodtime;


    }

    pdata->v_encoding_clean=1;
    pthread_mutex_lock(&pdata->vorbis_lib_mutex);
    pthread_cond_signal(&pdata->vorbis_lib_clean);
    pthread_mutex_unlock(&pdata->vorbis_lib_mutex);
    pthread_exit(&errno);
}
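Example No. 12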
//Method to start encoding
int startEncoding(JNIEnv *env, jclass *cls_ptr, jlong *sampleRate_ptr, jlong *channels_ptr, jfloat *quality_ptr, jlong *bitrate_ptr, jobject *encoderDataFeed_ptr, int type) {
    //Dereference our variables
    jclass cls = (*cls_ptr);
    jlong sampleRate = (*sampleRate_ptr);
    jlong channels = (*channels_ptr);
    jfloat quality = (*quality_ptr);
    jlong bitrate = (*bitrate_ptr);
    jobject encoderDataFeed = (*encoderDataFeed_ptr);

    //Create our PCM data buffer
    signed char readbuffer[READ*4+44];

    //Create a new java byte array to pass to the data feed method
    jbyteArray jByteArrayBuffer = (*env)->NewByteArray(env, READ*4);

    //Create a new java byte buffer to write to
    jbyteArray jByteArrayWriteBuffer = (*env)->NewByteArray(env, READ*8);

    //Find our java classes we'll be calling
    jclass encoderDataFeedClass = (*env)->FindClass(env, "org/xiph/vorbis/encoder/EncodeFeed");

    //Find our java method id's we'll be calling
    jmethodID writeVorbisDataMethodId = (*env)->GetMethodID(env, encoderDataFeedClass, "writeVorbisData", "([BI)I");
    jmethodID readPCMDataMethodId = (*env)->GetMethodID(env, encoderDataFeedClass, "readPCMData", "([BI)J");
    jmethodID startMethodId = (*env)->GetMethodID(env, encoderDataFeedClass, "start", "()V");
    jmethodID stopMethodId = (*env)->GetMethodID(env, encoderDataFeedClass, "stop", "()V");

    ogg_stream_state os; /* take physical pages, weld into a logical
                            stream of packets */
    ogg_page         og; /* one Ogg bitstream page.  Vorbis packets are inside */
    ogg_packet       op; /* one raw packet of data for decode */

    vorbis_info      vi; /* struct that stores all the static vorbis bitstream
                            settings */
    vorbis_comment   vc; /* struct that stores all the user comments */

    vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
    vorbis_block     vb; /* local working space for packet->PCM decode */

    int eos=0,ret;
    int i, founddata;

    /********** Encode setup ************/
    __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Setting up encoding");
    vorbis_info_init(&vi);

    /* choose an encoding mode.  A few possibilities commented out, one
       actually used: */

    /*********************************************************************
     Encoding using a VBR quality mode.  The usable range is -.1
     (lowest quality, smallest file) to 1. (highest quality, largest file).
     Example quality mode .4: 44kHz stereo coupled, roughly 128kbps VBR

     ret = vorbis_encode_init_vbr(&vi,2,44100,.4);

     ---------------------------------------------------------------------

     Encoding using an average bitrate mode (ABR).
     example: 44kHz stereo coupled, average 128kbps VBR

     ret = vorbis_encode_init(&vi,2,44100,-1,128000,-1);

     ---------------------------------------------------------------------

     Encode using a quality mode, but select that quality mode by asking for
     an approximate bitrate.  This is not ABR, it is true VBR, but selected
     using the bitrate interface, and then turning bitrate management off:

     ret = ( vorbis_encode_setup_managed(&vi,2,44100,-1,128000,-1) ||
             vorbis_encode_ctl(&vi,OV_ECTL_RATEMANAGE2_SET,NULL) ||
             vorbis_encode_setup_init(&vi));

     *********************************************************************/
     switch(type) {
        case WITH_BITRATE:
            __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Initializing with %lld channels %lldHz sample rate and %lld bitrate", channels, sampleRate, bitrate);
            ret=vorbis_encode_init(&vi, (long)channels, (long)sampleRate, (long)-1, (long)bitrate, (long)-1);
            break;
        case WITH_QUALITY:
            __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Initializing with %lld channels %lldHz sample rate and %f quality", channels, sampleRate, quality);
            ret=vorbis_encode_init_vbr(&vi, (long)channels, (long)sampleRate, (float)quality);
            break;
        default:
            __android_log_print(ANDROID_LOG_ERROR, "VorbisEncoder", "Failed to initialize");
            stopEncodeFeed(env, &encoderDataFeed, &stopMethodId);
            return ERROR_INITIALIZING;
     }


    /* do not continue if setup failed; this can happen if we ask for a
       mode that libVorbis does not support (eg, too low a bitrate, etc,
       will return 'OV_EIMPL') */

    if(ret) {
      __android_log_print(ANDROID_LOG_ERROR, "VorbisEncoder", "Failed to initialize");
      stopEncodeFeed(env, &encoderDataFeed, &stopMethodId);
      return ERROR_INITIALIZING;
    }

    startEncodeFeed(env, &encoderDataFeed, &startMethodId);

    /* add a comment */
    __android_log_print(ANDROID_LOG_DEBUG, "VorbisEncoder", "Adding comments");
    vorbis_comment_init(&vc);
    vorbis_comment_add_tag(&vc,"ENCODER","JNIVorbisEncoder");

    /* set up the analysis state and auxiliary encoding storage */
    vorbis_analysis_init(&vd,&vi);
    vorbis_block_init(&vd,&vb);

    /* set up our packet->stream encoder */
    /* pick a random serial number; that way we can more likely build
       chained streams just by concatenation */
    srand(time(NULL));
    ogg_stream_init(&os,rand());

    /* Vorbis streams begin with three headers; the initial header (with
       most of the codec setup parameters) which is mandated by the Ogg
       bitstream spec.  The second header holds any comment fields.  The
       third header holds the bitstream codebook.  We merely need to
       make the headers, then pass them to libvorbis one at a time;
       libvorbis handles the additional Ogg bitstream constraints */

    {
      ogg_packet header;
      ogg_packet header_comm;
      ogg_packet header_code;

      vorbis_analysis_headerout(&vd,&vc,&header,&header_comm,&header_code);
      ogg_stream_packetin(&os,&header); /* automatically placed in its own
                                           page */
      ogg_stream_packetin(&os,&header_comm);
      ogg_stream_packetin(&os,&header_code);

      /* This ensures the actual
       * audio data will start on a new page, as per spec
       */
      __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Writing header");
      while(!eos){
        int result=ogg_stream_flush(&os,&og);
        if(result==0)break;
        writeVorbisDataToEncoderDataFeed(env, &encoderDataFeed, &writeVorbisDataMethodId, og.header, og.header_len, &jByteArrayWriteBuffer);
        writeVorbisDataToEncoderDataFeed(env, &encoderDataFeed, &writeVorbisDataMethodId, og.body, og.body_len, &jByteArrayWriteBuffer);
      }

    }

    __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Starting to read from pcm callback");
    while(!eos){
      long i;
      long bytes = readPCMDataFromEncoderDataFeed(env, &encoderDataFeed, &readPCMDataMethodId, readbuffer, READ*4, &jByteArrayBuffer);

      if(bytes==0){
        /* end of file.  this can be done implicitly in the mainline,
           but it's easier to see here in non-clever fashion.
           Tell the library we're at end of stream so that it can handle
           the last frame and mark end of stream in the output properly */
        __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "End of file");
        vorbis_analysis_wrote(&vd,0);

      }else{
        /* data to encode */

        /* expose the buffer to submit data */
        float **buffer=vorbis_analysis_buffer(&vd,bytes/(2*channels));

        /* uninterleave samples */
        int channel;
        for(i=0;i<bytes/(2*channels);i++) {
            for(channel = 0; channel < channels; channel++) {
                buffer[channel][i]=((readbuffer[i*(2*channels)+(channel*2+1)]<<8)|
                              (0x00ff&(int)readbuffer[i*(2*channels)+(channel*2)]))/32768.f;
            }
        }

        /* tell the library how much we actually submitted */
        vorbis_analysis_wrote(&vd,i);
      }

      /* vorbis does some data preanalysis, then divvies up blocks for
         more involved (potentially parallel) processing.  Get a single
         block for encoding now */
      while(vorbis_analysis_blockout(&vd,&vb)==1){

        /* analysis, assume we want to use bitrate management */
        vorbis_analysis(&vb,NULL);
        vorbis_bitrate_addblock(&vb);

        while(vorbis_bitrate_flushpacket(&vd,&op)){

          /* weld the packet into the bitstream */
          ogg_stream_packetin(&os,&op);

          /* write out pages (if any) */
          while(!eos){
            int result=ogg_stream_pageout(&os,&og);
            if(result==0)break;
            writeVorbisDataToEncoderDataFeed(env, &encoderDataFeed, &writeVorbisDataMethodId, og.header, og.header_len, &jByteArrayWriteBuffer);
            writeVorbisDataToEncoderDataFeed(env, &encoderDataFeed, &writeVorbisDataMethodId, og.body, og.body_len, &jByteArrayWriteBuffer);

            /* this could be set above, but for illustrative purposes, I do
               it here (to show that vorbis does know where the stream ends) */

            if(ogg_page_eos(&og))eos=1;
          }
        }
      }
    }

    /* clean up and exit.  vorbis_info_clear() must be called last */
    __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Cleaning up encoder");
    ogg_stream_clear(&os);
    vorbis_block_clear(&vb);
    vorbis_dsp_clear(&vd);
    vorbis_comment_clear(&vc);
    vorbis_info_clear(&vi);

    /* ogg_page and ogg_packet structs always point to storage in
       libvorbis.  They're never freed or manipulated directly */
    __android_log_print(ANDROID_LOG_INFO, "VorbisEncoder", "Completed encoding.");
    stopEncodeFeed(env, &encoderDataFeed, &stopMethodId);

    //Clean up encode buffers
    (*env)->DeleteLocalRef(env, jByteArrayBuffer);
    (*env)->DeleteLocalRef(env, jByteArrayWriteBuffer);

    return SUCCESS;
}
Example No. 13
int main(){
  ogg_stream_state os; /* take physical pages, weld into a logical
                          stream of packets */
  ogg_page         og; /* one Ogg bitstream page.  Vorbis packets are inside */
  ogg_packet       op; /* one raw packet of data for decode */

  vorbis_info      vi; /* struct that stores all the static vorbis bitstream
                          settings */
  vorbis_comment   vc; /* struct that stores all the user comments */

  vorbis_dsp_state vd; /* central working state for the packet->PCM decoder */
  vorbis_block     vb; /* local working space for packet->PCM decode */

  int eos=0,ret;
  int numReads=0;

  FILE *outFile = fopen("../test.ogg","wb");
  if(!outFile){
    fprintf(stderr,"could not open ../test.ogg for writing\n");
    exit(1);
  }

  /* encoder_example.c skips a 44-byte WAV header here and reads 44.1kHz,
     stereo, 16 bit little endian pcm samples from stdin; this example
     synthesizes its own samples instead, so no input file is involved. */

  /********** Encode setup ************/

  vorbis_info_init(&vi);

  /* choose an encoding mode.  A few possibilities commented out, one
     actually used: */

  /*********************************************************************
   Encoding using a VBR quality mode.  The usable range is -.1
   (lowest quality, smallest file) to 1. (highest quality, largest file).
   Example quality mode .4: 44kHz stereo coupled, roughly 128kbps VBR

   ret = vorbis_encode_init_vbr(&vi,2,44100,.4);

   ---------------------------------------------------------------------

   Encoding using an average bitrate mode (ABR).
   example: 44kHz stereo coupled, average 128kbps VBR

   ret = vorbis_encode_init(&vi,2,44100,-1,128000,-1);

   ---------------------------------------------------------------------

   Encode using a quality mode, but select that quality mode by asking for
   an approximate bitrate.  This is not ABR, it is true VBR, but selected
   using the bitrate interface, and then turning bitrate management off:

   ret = ( vorbis_encode_setup_managed(&vi,2,44100,-1,128000,-1) ||
           vorbis_encode_ctl(&vi,OV_ECTL_RATEMANAGE2_SET,NULL) ||
           vorbis_encode_setup_init(&vi));

   *********************************************************************/

  ret=vorbis_encode_init_vbr(&vi,2,44100,1.0);

  /* do not continue if setup failed; this can happen if we ask for a
     mode that libVorbis does not support (eg, too low a bitrate, etc,
     will return 'OV_EIMPL') */

  if(ret)exit(1);

  /* add a comment */
  vorbis_comment_init(&vc);
  vorbis_comment_add_tag(&vc,"ENCODER","encoder_example.c");

  /* set up the analysis state and auxiliary encoding storage */
  vorbis_analysis_init(&vd,&vi);
  vorbis_block_init(&vd,&vb);

  /* set up our packet->stream encoder */
  /* pick a random serial number; that way we can more likely build
     chained streams just by concatenation */
  srand(time(NULL));
  ogg_stream_init(&os,rand());

  /* Vorbis streams begin with three headers; the initial header (with
     most of the codec setup parameters) which is mandated by the Ogg
     bitstream spec.  The second header holds any comment fields.  The
     third header holds the bitstream codebook.  We merely need to
     make the headers, then pass them to libvorbis one at a time;
     libvorbis handles the additional Ogg bitstream constraints */

  {
    ogg_packet header;
    ogg_packet header_comm;
    ogg_packet header_code;

    vorbis_analysis_headerout(&vd,&vc,&header,&header_comm,&header_code);
    ogg_stream_packetin(&os,&header); /* automatically placed in its own
                                         page */
    ogg_stream_packetin(&os,&header_comm);
    ogg_stream_packetin(&os,&header_code);

    /* This ensures the actual
     * audio data will start on a new page, as per spec
     */
    while(!eos){
      int result=ogg_stream_flush(&os,&og);
      if(result==0)break;
      fwrite(og.header,1,og.header_len,outFile);
      fwrite(og.body,1,og.body_len,outFile);
    }

  }

  while(!eos){
    long i;
    long bytes=READ*4; /* READ frames of 16-bit stereo; only used to derive the frame count below */
    numReads++;

    if(numReads>10000) bytes=0;

    if(bytes==0){
      /* end of file.  this can be done implicitly in the mainline,
         but it's easier to see here in non-clever fashion.
         Tell the library we're at end of stream so that it can handle
         the last frame and mark end of stream in the output properly */
      vorbis_analysis_wrote(&vd,0);

    }else{
      /* data to encode */

      /* expose the buffer to submit data */
      float **buffer=vorbis_analysis_buffer(&vd,READ);

      /* synthesize samples: a fixed-frequency sine tone (roughly 1 kHz at
         44.1kHz) written to both channels; the original example
         uninterleaved 16-bit PCM read from stdin here */
      for(i=0;i<bytes/4;i++){
        static float curamp = 0;
        curamp += 0.15f;
        buffer[0][i] = sinf(curamp);
        buffer[1][i] = sinf(curamp);
      }

      /* tell the library how much we actually submitted */
      vorbis_analysis_wrote(&vd,i);
    }

    /* vorbis does some data preanalysis, then divvies up blocks for
       more involved (potentially parallel) processing.  Get a single
       block for encoding now */
    while(vorbis_analysis_blockout(&vd,&vb)==1){

      /* analysis, assume we want to use bitrate management */
      vorbis_analysis(&vb,NULL);
      vorbis_bitrate_addblock(&vb);

      while(vorbis_bitrate_flushpacket(&vd,&op)){

        /* weld the packet into the bitstream */
        ogg_stream_packetin(&os,&op);

        /* write out pages (if any) */
        while(!eos){
          int result=ogg_stream_pageout(&os,&og);
          if(result==0)break;
          fwrite(og.header,1,og.header_len,outFile);
          fwrite(og.body,1,og.body_len,outFile);

          /* this could be set above, but for illustrative purposes, I do
             it here (to show that vorbis does know where the stream ends) */

          if(ogg_page_eos(&og))eos=1;
        }
      }
    }
  }

  /* clean up and exit.  vorbis_info_clear() must be called last */

  ogg_stream_clear(&os);
  vorbis_block_clear(&vb);
  vorbis_dsp_clear(&vd);
  vorbis_comment_clear(&vc);
  vorbis_info_clear(&vi);

  /* ogg_page and ogg_packet structs always point to storage in
     libvorbis.  They're never freed or manipulated directly */

  fprintf(stderr,"Done.\n");
  return(0);
}
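A quick way to confirm that the example produced a playable stream is to open the result back up with libvorbisfile. The fragment below is a minimal check, not part of the example above; it assumes the output path used there (../test.ogg) and a link against libvorbisfile (-lvorbisfile).

#include <stdio.h>
#include <vorbis/vorbisfile.h>

int main(void){
  OggVorbis_File vf;
  vorbis_info *vi;

  /* ov_fopen opens the file and parses the three Vorbis headers */
  if(ov_fopen("../test.ogg", &vf) < 0){
    fprintf(stderr, "not a valid Ogg Vorbis file\n");
    return 1;
  }

  vi = ov_info(&vf, -1);   /* stream parameters from the first header */
  printf("%d channel(s), %ld Hz, %lld PCM samples\n",
         vi->channels, vi->rate, (long long)ov_pcm_total(&vf, -1));

  ov_clear(&vf);
  return 0;
}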
Ejemplo n.º 14
0
int ExportOGG::Export(AudacityProject *project,
                       int numChannels,
                       wxString fName,
                       bool selectionOnly,
                       double t0,
                       double t1,
                       MixerSpec *mixerSpec,
                       Tags *metadata,
                       int WXUNUSED(subformat))
{
   double    rate    = project->GetRate();
   TrackList *tracks = project->GetTracks();
   double    quality = (gPrefs->Read(wxT("/FileFormats/OggExportQuality"), 50)/(float)100.0);

   wxLogNull logNo;            // temporarily disable wxWidgets error messages
   int updateResult = eProgressSuccess;
   int       eos = 0;

   FileIO outFile(fName, FileIO::Output);

   if (!outFile.IsOpened()) {
      wxMessageBox(_("Unable to open target file for writing"));
      return false;
   }

   // All the Ogg and Vorbis encoding data
   ogg_stream_state stream;
   ogg_page         page;
   ogg_packet       packet;

   vorbis_info      info;
   vorbis_comment   comment;
   vorbis_dsp_state dsp;
   vorbis_block     block;

   // Encoding setup
   vorbis_info_init(&info);
   vorbis_encode_init_vbr(&info, numChannels, int(rate + 0.5), quality);

   // Retrieve tags
   if (!FillComment(project, &comment, metadata)) {
      return false;
   }

   // Set up analysis state and auxiliary encoding storage
   vorbis_analysis_init(&dsp, &info);
   vorbis_block_init(&dsp, &block);

   // Set up packet->stream encoder.  According to encoder example,
   // a random serial number makes it more likely that you can make
   // chained streams with concatenation.
   srand(time(NULL));
   ogg_stream_init(&stream, rand());

   // First we need to write the required headers:
   //    1. The Ogg bitstream header, which contains codec setup params
   //    2. The Vorbis comment header
   //    3. The bitstream codebook.
   //
   // After we create those our responsibility is complete, libvorbis will
   // take care of any other ogg bitstream constraints (again, according
   // to the example encoder source)
   ogg_packet bitstream_header;
   ogg_packet comment_header;
   ogg_packet codebook_header;

   vorbis_analysis_headerout(&dsp, &comment, &bitstream_header, &comment_header,
         &codebook_header);

   // Place these headers into the stream
   ogg_stream_packetin(&stream, &bitstream_header);
   ogg_stream_packetin(&stream, &comment_header);
   ogg_stream_packetin(&stream, &codebook_header);

   // Flushing these headers now guarantees that audio data will
   // start on a new page, which apparently makes streaming easier
   while (ogg_stream_flush(&stream, &page)) {
      outFile.Write(page.header, page.header_len);
      outFile.Write(page.body, page.body_len);
   }

   int numWaveTracks;
   WaveTrack **waveTracks;
   tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
   Mixer *mixer = CreateMixer(numWaveTracks, waveTracks,
                            tracks->GetTimeTrack(),
                            t0, t1,
                            numChannels, SAMPLES_PER_RUN, false,
                            rate, floatSample, true, mixerSpec);
   delete [] waveTracks;

   ProgressDialog *progress = new ProgressDialog(wxFileName(fName).GetName(),
      selectionOnly ?
      _("Exporting the selected audio as Ogg Vorbis") :
      _("Exporting the entire project as Ogg Vorbis"));

   while (updateResult == eProgressSuccess && !eos) {
      float **vorbis_buffer = vorbis_analysis_buffer(&dsp, SAMPLES_PER_RUN);
      sampleCount samplesThisRun = mixer->Process(SAMPLES_PER_RUN);

      if (samplesThisRun == 0) {
         // Tell the library that we wrote 0 bytes - signalling the end.
         vorbis_analysis_wrote(&dsp, 0);
      }
      else {

         for (int i = 0; i < numChannels; i++) {
            float *temp = (float *)mixer->GetBuffer(i);
            memcpy(vorbis_buffer[i], temp, sizeof(float)*SAMPLES_PER_RUN);
         }

         // tell the encoder how many samples we have
         vorbis_analysis_wrote(&dsp, samplesThisRun);
      }

      // I don't understand what this call does, so here is the comment
      // from the example, verbatim:
      //
      //    vorbis does some data preanalysis, then divvies up blocks
      //    for more involved (potentially parallel) processing. Get
      //    a single block for encoding now
      while (vorbis_analysis_blockout(&dsp, &block) == 1) {

         // analysis, assume we want to use bitrate management
         vorbis_analysis(&block, NULL);
         vorbis_bitrate_addblock(&block);

         while (vorbis_bitrate_flushpacket(&dsp, &packet)) {

            // add the packet to the bitstream
            ogg_stream_packetin(&stream, &packet);

            // From vorbis-tools-1.0/oggenc/encode.c:
            //   If we've gone over a page boundary, we can do actual output,
            //   so do so (for however many pages are available).

            while (!eos) {
               int result = ogg_stream_pageout(&stream, &page);
               if (!result) {
                  break;
               }

               outFile.Write(page.header, page.header_len);
               outFile.Write(page.body, page.body_len);

               if (ogg_page_eos(&page)) {
                  eos = 1;
               }
            }
         }
      }

      updateResult = progress->Update(mixer->MixGetCurrentTime()-t0, t1-t0);
   }

   delete progress;

   delete mixer;

   ogg_stream_clear(&stream);

   vorbis_block_clear(&block);
   vorbis_dsp_clear(&dsp);
   vorbis_info_clear(&info);
   vorbis_comment_clear(&comment);

   outFile.Close();

   return updateResult;
}
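The export above maps the 0-100 preference straight onto a 0.0-1.0 quality value and never checks the return code of vorbis_encode_init_vbr, which can fail (for example with OV_EIMPL or OV_EINVAL) on unsupported settings. The helper below is a hedged sketch of a more defensive setup; the function name and the clamping policy are illustrative, though the -0.1..1.0 quality range is the one libvorbis documents.

#include <vorbis/vorbisenc.h>

/* Illustrative helper: convert a 0-100 UI preference to a Vorbis VBR quality
   in the documented range and report encoder setup failures to the caller. */
static int init_vbr_from_percent(vorbis_info *vi, int channels, long rate,
                                 int percent)
{
   float quality = percent / 100.0f;
   if (quality < -0.1f) quality = -0.1f;
   if (quality > 1.0f)  quality = 1.0f;

   vorbis_info_init(vi);
   /* 0 on success; a negative OV_* error code otherwise */
   return vorbis_encode_init_vbr(vi, channels, rate, quality);
}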
Ejemplo n.º 15
0
/* The following function is basically a hacked version of the code in
 * examples/encoder_example.c */
void
write_vorbis_data_or_die (const char *filename, int srate, float q, const float * data, int count, int ch)
{
  FILE * file ;
  ogg_stream_state os;
  ogg_page         og;
  ogg_packet       op;
  vorbis_info      vi;
  vorbis_comment   vc;
  vorbis_dsp_state vd;
  vorbis_block     vb;

  int eos = 0, ret;

  if ((file = fopen (filename, "wb")) == NULL) {
    printf("\n\nError : fopen failed : %s\n", strerror (errno)) ;
    exit (1) ;
  }

  /********** Encode setup ************/

  vorbis_info_init (&vi);

  ret = vorbis_encode_init_vbr (&vi,ch,srate,q);
  if (ret) {
    printf ("vorbis_encode_init_vbr return %d\n", ret) ;
    exit (1) ;
  }

  vorbis_comment_init (&vc);
  vorbis_comment_add_tag (&vc,"ENCODER","test/util.c");
  vorbis_analysis_init (&vd,&vi);
  vorbis_block_init (&vd,&vb);

  ogg_stream_init (&os,12345678);

  {
    ogg_packet header;
    ogg_packet header_comm;
    ogg_packet header_code;

    vorbis_analysis_headerout (&vd,&vc,&header,&header_comm,&header_code);
    ogg_stream_packetin (&os,&header);
    ogg_stream_packetin (&os,&header_comm);
    ogg_stream_packetin (&os,&header_code);

    /* Ensures the audio data will start on a new page. */
    while (!eos){
        int result = ogg_stream_flush (&os,&og);
        if (result == 0)
            break;
        fwrite (og.header,1,og.header_len,file);
        fwrite (og.body,1,og.body_len,file);
    }

  }

  {
    /* expose the buffer to submit data */
    float **buffer = vorbis_analysis_buffer (&vd,count);
    int i;

    for(i=0;i<ch;i++)
      memcpy (buffer [i], data, count * sizeof (float)) ;

    /* tell the library how much we actually submitted, then immediately
       signal end of stream */
    vorbis_analysis_wrote (&vd,count);
    vorbis_analysis_wrote (&vd,0);
  }

  while (vorbis_analysis_blockout (&vd,&vb) == 1) {
    vorbis_analysis (&vb,NULL);
    vorbis_bitrate_addblock (&vb);

    while (vorbis_bitrate_flushpacket (&vd,&op)) {
      ogg_stream_packetin (&os,&op);

      while (!eos) {
          int result = ogg_stream_pageout (&os,&og);
          if (result == 0)
              break;
          fwrite (og.header,1,og.header_len,file);
          fwrite (og.body,1,og.body_len,file);

          if (ogg_page_eos (&og))
              eos = 1;
      }
    }
  }

  ogg_stream_clear (&os);
  vorbis_block_clear (&vb);
  vorbis_dsp_clear (&vd);
  vorbis_comment_clear (&vc);
  vorbis_info_clear (&vi);

  fclose (file) ;
}
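Because write_vorbis_data_or_die submits the whole buffer and the end-of-stream marker in one call, using it only requires sample data up front. The fragment below is a minimal usage sketch, not part of the test utility; the sine generation, output name and quality value are arbitrary choices for illustration.

#include <math.h>
#include <stdio.h>
#include <stdlib.h>

int main (void)
{
  const int srate = 44100 ;
  const int count = 44100 ;              /* one second of audio */
  float *data = malloc (count * sizeof (float)) ;
  int i ;

  if (data == NULL) {
    printf ("malloc failed\n") ;
    exit (1) ;
  }

  /* 440 Hz sine tone, mono, at half amplitude */
  for (i = 0 ; i < count ; i++)
    data [i] = 0.5f * sinf (2.0f * 3.14159265f * 440.0f * i / srate) ;

  write_vorbis_data_or_die ("sine.ogg", srate, 0.5f, data, count, 1) ;

  free (data) ;
  return 0 ;
}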