Code Example #1
void audio_callback(void *userdata, Uint8 *stream, int len)
{
    VideoState *is = (VideoState*)userdata;
    int len1, audio_size;

    static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    double pts;
    while(len > 0) {
        if(audio_buf_index >= audio_buf_size) {
            /* We have already sent all our data; get more */
            audio_size = audio_decode_frame(is, audio_buf, sizeof(audio_buf), &pts);  /* pass the buffer capacity, not the (initially zero) fill size */
            if(audio_size < 0) {
                /* If error, output silence */
                audio_buf_size = 1024; // arbitrary?
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = audio_size;
            }
            audio_buf_index = 0;
        }
        len1 = audio_buf_size - audio_buf_index;
        if(len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}
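
All of the callbacks collected on this page are pull-style: SDL invokes them from its own audio thread whenever the device needs more bytes, and whatever pointer was registered as userdata comes back as the first argument. As a rough illustration of how such a callback gets hooked up (a minimal sketch against the SDL1 API these examples target, not code from any of the projects quoted here; open_audio is a hypothetical helper):

/* Sketch: register the pull-style callback with SDL.  SDL then calls
 * audio_callback() from its audio thread each time it needs `samples`
 * more sample frames. */
#include <SDL.h>
#include <libavcodec/avcodec.h>

void audio_callback(void *userdata, Uint8 *stream, int len);

static int open_audio(AVCodecContext *aCodecCtx)
{
    SDL_AudioSpec wanted_spec, spec;

    wanted_spec.freq     = aCodecCtx->sample_rate;  /* decoder's sample rate */
    wanted_spec.format   = AUDIO_S16SYS;            /* signed 16-bit, native byte order */
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence  = 0;
    wanted_spec.samples  = 1024;                    /* SDL buffer size, in sample frames */
    wanted_spec.callback = audio_callback;          /* the function shown above */
    wanted_spec.userdata = aCodecCtx;               /* delivered back as userdata */

    if (SDL_OpenAudio(&wanted_spec, &spec) < 0)
        return -1;                                  /* SDL_GetError() describes the failure */

    SDL_PauseAudio(0);                              /* unpause the device; callbacks start firing */
    return 0;
}

With AUDIO_S16SYS, the len handed to the callback is roughly samples * channels * 2 bytes per call, which is why every example here loops until len reaches zero.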
Code Example #2
File: tutorial03.c Project: chenbk85/ffmpeg_tutorial
void audio_callback(void *userdata, Uint8 *stream, int len) {

    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;

	// The audio_buf_size produced by one audio_decode_frame call may be larger than len,
	// so it cannot all be copied at once; the next audio_callback keeps memcpy'ing the
	// remaining data without calling audio_decode_frame again.
    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    while(len > 0) {
        if(audio_buf_index >= audio_buf_size) {
            /* We have already sent all our data; get more */
            audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
            if(audio_size < 0) {
                /* If error, output silence */
                audio_buf_size = 1024; // arbitrary?
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = audio_size;
            }
            audio_buf_index = 0;
        }
        
        len1 = audio_buf_size - audio_buf_index;
        if(len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}
Code Example #3
void audio_callback(void *userdata, Uint8 *stream, int len){
	//VideoState *is = (VideoState *) userdata;
	SDL_LockMutex(global_mutex_lock);
	VideoState *is = global_video_state[global_audioIndex];
	SDL_UnlockMutex(global_mutex_lock);
	int len1, audio_size;
	double pts;

	while (len > 0){
		if (is->audio_buf_index >= is->audio_buf_size){
			/* We have already sent all our data; get more */
			audio_size = audio_decode_frame(is, &pts);
			if (audio_size < 0){
				/* If error, output silence */
				is->audio_buf_size = 1024;
				memset(is->audio_buf, 0, is->audio_buf_size);
			}else{
				audio_size = synchronize_audio(is, (int16_t *) is->audio_buf,audio_size, pts);
				is->audio_buf_size = audio_size;
			}
			is->audio_buf_index = 0;
		}
		len1 = is->audio_buf_size - is->audio_buf_index;
		if (len1 > len){
			len1 = len;
		}
		memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
		len -= len1;
		stream += len1;
		is->audio_buf_index += len1;
	}
}
Code Example #4
File: ffmpeg.c Project: Emisense/eTracks
/*
 * Read up to len samples of type sox_sample_t from file into buf[].
 * Return number of samples read.
 */
static size_t read_samples(sox_format_t * ft, sox_sample_t *buf, size_t len)
{
  priv_t * ffmpeg = (priv_t *)ft->priv;
  AVPacket *pkt = &ffmpeg->audio_pkt;
  int ret;
  size_t nsamp = 0, nextra;

  /* Read data repeatedly until buf is full or no more can be read */
  do {
    /* If input buffer empty, read more data */
    if (ffmpeg->audio_buf_index * 2 >= ffmpeg->audio_buf_size) {
      if ((ret = av_read_frame(ffmpeg->ctxt, pkt)) < 0)
        break;
      ffmpeg->audio_buf_size = audio_decode_frame(ffmpeg, ffmpeg->audio_buf, AVCODEC_MAX_AUDIO_FRAME_SIZE);
      ffmpeg->audio_buf_index = 0;
    }

    /* Convert data into SoX samples up to size of buffer */
    nextra = min((ffmpeg->audio_buf_size - ffmpeg->audio_buf_index) / 2, (int)(len - nsamp));
    for (; nextra > 0; nextra--)
      buf[nsamp++] = SOX_SIGNED_16BIT_TO_SAMPLE(((int16_t *)ffmpeg->audio_buf)[ffmpeg->audio_buf_index++], ft->clips);
  } while (nsamp < len && nextra > 0);

  return nsamp;
}
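
The SoX reader above is also pull-style, but at the library level: SoX calls read_samples to fetch decoded samples on demand. Since the snippet only shows how priv_t is used, the following is a plausible reconstruction of the fields it depends on, offered purely as a reading aid (an assumption, not the actual definition in SoX's ffmpeg.c). Note that audio_buf_index counts int16_t samples while audio_buf_size counts bytes, which is why the refill test multiplies the index by 2.

/* Hypothetical layout implied by read_samples() above -- reconstructed from
 * the field accesses in the snippet, not copied from SoX. */
#include <stdint.h>
#include <libavformat/avformat.h>

typedef struct {
    AVFormatContext *ctxt;             /* demuxer context, read with av_read_frame() */
    AVPacket         audio_pkt;        /* most recently read packet */
    uint8_t         *audio_buf;        /* decoded PCM, interleaved int16_t samples */
    int              audio_buf_size;   /* valid bytes in audio_buf */
    int              audio_buf_index;  /* consumed int16_t samples (hence the "* 2") */
} priv_t;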
Code Example #5
File: videoplayer.cpp Project: qyvlik/VideoItem
void audio_callback(void *userdata, Uint8 *stream, int len)
{
    AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int len1, audio_size;
    static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;
    while(len > 0)
    {
        if(audio_buf_index >= audio_buf_size)
        {
            audio_size = audio_decode_frame(aCodecCtx, audio_buf,sizeof(audio_buf));
            if(audio_size < 0)
            {
                audio_buf_size = 1024;
                memset(audio_buf, 0, audio_buf_size);
            }
            else
            {
                audio_buf_size = audio_size;
            }
            audio_buf_index = 0;
        }
        len1 = audio_buf_size - audio_buf_index;
        if(len1 > len)
            len1 = len;
        SDL_MixAudio(stream, (uint8_t * )audio_buf + audio_buf_index, len1, volume);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}
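
This variant replaces the usual memcpy with SDL_MixAudio, which lets it scale the output by the externally defined volume (0 to SDL_MIX_MAXVOLUME, i.e. 128). Because SDL hands the callback an uninitialized buffer and SDL_MixAudio adds its source into the destination with clipping, each region should be zeroed before mixing. A minimal sketch of just that detail (an assumption, not part of the quoted project; mix_chunk is a hypothetical helper):

/* Sketch: silence the destination region, then mix with volume scaling.
 * SDL_MixAudio performs addition, so un-cleared memory would corrupt the
 * output. */
#include <SDL.h>
#include <stdint.h>
#include <string.h>

static void mix_chunk(Uint8 *stream, const uint8_t *audio_buf, int len1, int volume)
{
    memset(stream, 0, (size_t)len1);                        /* SDL's stream arrives uninitialized */
    SDL_MixAudio(stream, audio_buf, (Uint32)len1, volume);  /* volume: 0..SDL_MIX_MAXVOLUME */
}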
Code Example #6
File: tutorial05.c Project: huamulan/ffmpeg-tutor
void audio_callback(void *userdata, Uint8 *stream, int len) {

    VideoState *is = (VideoState *)userdata;
    int len1, audio_size;
    double pts;

    while(len > 0) {
        if(is->audio_buf_index >= is->audio_buf_size) {
            /* We have already sent all our data; get more */
            audio_size = audio_decode_frame(is, &pts);
            if(audio_size < 0) {
                /* If error, output silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if(len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
Code Example #7
File: ffmovie.c Project: Abstak/Mastermind_Python
/* prepare a new audio buffer */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
/*SDL AUDIO THREAD*/
    FFMovie *movie = opaque;
    int audio_size, len1;
    double pts;

    while (len > 0) {
        if (movie->audio_buf_index >= movie->audio_buf_size) {
           audio_size = audio_decode_frame(movie, movie->audio_buf, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               movie->audio_buf_size = 1024;
               memset(movie->audio_buf, 0, movie->audio_buf_size);
           } else {
               audio_size = synchronize_audio(movie, (int16_t*)movie->audio_buf, audio_size, pts);
               movie->audio_buf_size = audio_size;
           }
           movie->audio_buf_index = 0;
        }
        len1 = movie->audio_buf_size - movie->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)movie->audio_buf + movie->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        movie->audio_buf_index += len1;
    }
}
Code Example #8
void audio_callback(void *userdata, Uint8 *stream, int len) {
	FFmpegState *st = (FFmpegState *)userdata;
	int len1, audio_data_size;

	while (len > 0) {
		if (st->audio_buf_index >= st->audio_buf_size) {
			audio_data_size = audio_decode_frame(st);

			if (audio_data_size < 0) {
				/* silence */
				st->audio_buf_size = 1024;
				memset(st->audio_buf, 0, st->audio_buf_size);
			}
			else {
				st->audio_buf_size = audio_data_size;
			}
			st->audio_buf_index = 0;
		}

		len1 = st->audio_buf_size - st->audio_buf_index;
		if (len1 > len) {
			len1 = len;
		}

		memcpy(stream, (uint8_t *)st->audio_buf + st->audio_buf_index, len1);
		len -= len1;
		stream += len1;
		st->audio_buf_index += len1;
	}
}
Code Example #9
File: myplay.c Project: zsirGitHub/helloffmpeg
// SDL audio callback: to get decoded audio frame data.
// stream: [out] decoded audio frame data.
// len   : [in ] data length that SDL needs.
void audio_callback(void *userdata, Uint8 *stream, int len) 
{
	AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
	int len1, audio_size;  // Decoded audio data size

	static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_size = 0;
	static unsigned int audio_buf_index = 0;

	printf("\r\n ==============>audio_callback  start\r\n");
	while(len > 0) {                                                // SDL still needs more data; keep pulling from the decoder.
		if(audio_buf_index >= audio_buf_size) {                     // The previously decoded data has all been pushed out; decode more.
			/* We have already sent all our data; get more */
			audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));  // decoded audio frame data is stored in audio_buf
			printf("\r\n audio_size:%d!\r\n", audio_size);
			if(audio_size < 0) {
				/* If error, output silence */
				audio_buf_size = 1024; // arbitrary?
				memset(audio_buf, 0, audio_buf_size);
			} else {
				audio_buf_size = audio_size;
			}
			audio_buf_index = 0;
		}
		len1 = audio_buf_size - audio_buf_index;
//		printf("\r\n len1:%d!\r\n", len1);
//		printf("\r\n len:%d!\r\n", len);
		if(len1 > len)
			len1 = len;
		memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
		len -= len1;
		stream += len1;
		audio_buf_index += len1;
	}
}
Code Example #10
File: tutorial04_1.cpp Project: shileiz/notes
void sdl_audio_callback(void *userdata, Uint8 *stream, int len) {
	
	VideoState *is = (VideoState *)userdata;
	int len1, audio_size;
	while (len > 0) {
		if (is->audio_buf_index >= is->audio_buf_size) {
			/* We have already sent all our data; get more */
			audio_size = audio_decode_frame(is);
			if (audio_size < 0) {
				//TODO
				/* if error, just output silence */
			}
			else {
				is->audio_buf_size = audio_size;
			}
			is->audio_buf_index = 0;
		}
		len1 = is->audio_buf_size - is->audio_buf_index;
		if (len1 > len)
			len1 = len;
		memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
		len -= len1;
		stream += len1;
		is->audio_buf_index += len1;
	}
}
Code Example #11
File: Test7.cpp Project: soffio/FFmpegTutorial
void audio_callback(void* userdata, Uint8* stream, int len) {
	VideoState *is = (VideoState*) userdata;
	int len1, audio_size;
	double pts;

	printf("audio_callback len:%d\n", len);
	fflush(stdout);
	while (len > 0) {
		if (is->audio_buf_index >= is->audio_buf_size) {
			/* We have already sent all our data; get more */
			audio_size = audio_decode_frame(is, is->audio_buf,
					sizeof(is->audio_buf), &pts);
			printf("audio_decode_frame audio_size=%d\n", audio_size);
			fflush(stdout);
			if (audio_size < 0) {
				is->audio_buf_size = 1024;
				memset(is->audio_buf, 0, is->audio_buf_size);
			} else {
				audio_size = synchronize_audio(is, (int16_t *) is->audio_buf,
						audio_size, pts);
				is->audio_buf_size = audio_size;
			}
			is->audio_buf_index = 0;
		}
		len1 = is->audio_buf_size - is->audio_buf_index;
		if (len1 > len)
			len1 = len;
		memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
		len -= len1;
		stream += len1;
		is->audio_buf_index += len1;
	}
}
Code Example #12
void audio_callback(void *userdata, Uint8 *stream, int len) {
    AVCodecContext *aCodeCtx = (AVCodecContext *)userdata;
    int len1, audio_size;

    static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
    static unsigned int audio_buf_size = 0;
    static unsigned int audio_buf_index = 0;

    while (len > 0) {
        if (audio_buf_index >= audio_buf_size) {
            // we have already sent all our data; get more
            audio_size = audio_decode_frame(aCodeCtx, audio_buf, sizeof(audio_buf)); // pass the buffer capacity (audio_buf_size is 0 on the first call)
            if (audio_size < 0) {
                // If error, output silence
                audio_buf_size = 1024; // arbitrary?
                memset(audio_buf, 0, audio_buf_size);
            } else {
                audio_buf_size = audio_size;
            }
            audio_buf_index = 0;
        }
        len1 = audio_buf_size - audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }
        memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
        len -= len1;
        stream += len1;
        audio_buf_index += len1;
    }
}
Code Example #13
File: audiofuncs.c Project: super11/JSPlayer
void audio_callback( void *userdata, uint8_t *stream, int len )
{
        //printf( "In Audio callback : Thread\n" );
        //printf("Length is %d\n",len);
        static int stream_buf_index = 0;
        static int audio_data_size = 0;
        static int audio_buf_index = 0;
        int free_flag = 0;

        while ( len > 0 && gMedia->quit[2] == no ) {
                if ( audio_data_size == 0 ) {
                        audio_buf_index = 0;
                        audio_data_size = audio_decode_frame();
                        //printf("%d\n",audio_data_size);
                        free_flag = 1;
                }

                if ( audio_data_size == -1 ) {
                        return;
                }

                if ( len <= audio_data_size ) {
                        memcpy( stream + stream_buf_index,
                                *( gMedia->audio_buf ) + audio_buf_index,
                                len );
                        //printf("%d bytes copied : %d bytes left in buffer\t",len,audio_data_size);
                        audio_data_size -= len;
                        audio_buf_index += len;
                        //     printf("%d\n",audio_buf_index);
                        len = 0;
                        stream_buf_index = 0;
                        break;
                }

                if ( len > audio_data_size ) {
                        memcpy( stream + stream_buf_index,
                                *( gMedia->audio_buf ) + audio_buf_index,
                                audio_data_size );
                        len -= audio_data_size;
                        stream_buf_index += audio_data_size;
                        audio_data_size = 0;
                        audio_buf_index = 0;
                        continue;
                }
        }

        if ( free_flag && !audio_data_size ) {
                av_free( gMedia->audio_buf[0] );
        }
}
Code Example #14
File: zwvideothread.cpp Project: weinkym/src_miao
static void audio_callback(void *userdata, Uint8 *stream, int len) {
    VideoState *is = (VideoState *) userdata;

    int len1, audio_data_size;

    double pts;

    /* len is the size of the SDL buffer handed to us by SDL; as long as it is not full, we keep filling it with data */
    while (len > 0) {
        /* audio_buf_index and audio_buf_size describe our own buffer holding the decoded
           data that is waiting to be copied into the SDL buffer. When
           audio_buf_index >= audio_buf_size our buffer is empty and there is nothing left
           to copy, so audio_decode_frame must be called to decode more frames. */
        if (is->audio_buf_index >= is->audio_buf_size) {

            audio_data_size = audio_decode_frame(is, &pts);

            /* audio_data_size < 0 means no data could be decoded; play silence by default */
            if (audio_data_size < 0) {
                /* silence */
                is->audio_buf_size = 1024;
                /* zero it out: silence */
                if (is->audio_buf == NULL) return;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                is->audio_buf_size = audio_data_size;
            }
            is->audio_buf_index = 0;
        }
        /* check how much space stream has left to decide how much to copy this time; the rest is copied next time */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len) {
            len1 = len;
        }

        if (is->audio_buf == NULL) return;

        memcpy(stream, (uint8_t *) is->audio_buf + is->audio_buf_index, len1);
//        SDL_MixAudio(stream, (uint8_t * )is->audio_buf + is->audio_buf_index, len1, 50);

//        SDL_MixAudioFormat(stream, (uint8_t * )is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, 50);

        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }

}
Code Example #15
void audio_callback(void *userdata, Uint8 *stream, int len) 
{
	//ffplay_info("Start.\n");

	AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
	int len1, audio_size;

	static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_size = 0;
	static unsigned int audio_buf_index = 0;
	
	ffplay_info("len = %d, audio_buf_index = %d, audio_buf_size = %d\n",len,audio_buf_index,audio_buf_size);
	while(len > 0) 
	{
		ffplay_info("len = %d, audio_buf_index = %d, audio_buf_size = %d\n",len,audio_buf_index,audio_buf_size);
		if(audio_buf_index >= audio_buf_size) 
		{
			/* We have already sent all our data; get more */
			audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
			ffplay_info("audio_size = %d\n",audio_size);
			if(audio_size < 0) 
			{
				/* If error, output silence */
				audio_buf_size = 1024; // arbitrary?
				memset(audio_buf, 0, audio_buf_size);
			} 
			else 
			{
				audio_buf_size = audio_size;
			}
			audio_buf_index = 0;
		}
		
		len1 = audio_buf_size - audio_buf_index;
		
		if(len1 > len)
		{
			len1 = len;
		}
		memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
		len -= len1;
		stream += len1;
		audio_buf_index += len1;
		ffplay_info("len = %d\n",len);
	}
	//ffplay_info("end.\n");
}
Code Example #16
static int audio_thread(void *data)
{
    audio_context *audio_ctx = (audio_context *)data;
    int i, j;

    while(!quit_video)
    {
        if(musicchannel.paused)
        {
            continue;
        }

        // Just to be sure: check if all goes well...
        for(i = 0; i < MUSIC_NUM_BUFFERS; i++)
        {
            if(musicchannel.fp_playto[i] > INT_TO_FIX(MUSIC_BUF_SIZE))
            {
                musicchannel.fp_playto[i] = 0;
            }
        }

        // Need to update?
        for(j = 0, i = musicchannel.playing_buffer + 1; j < MUSIC_NUM_BUFFERS; j++, i++)
        {
            i %= MUSIC_NUM_BUFFERS;
            if(musicchannel.fp_playto[i] == 0)
            {
                // Buffer needs to be filled
                if (audio_decode_frame(audio_ctx, (uint8_t*)musicchannel.buf[i], MUSIC_BUF_SIZE * sizeof(short)) < 0)
                    return 0;
                musicchannel.fp_playto[i] = INT_TO_FIX(MUSIC_BUF_SIZE);
                if(!musicchannel.active)
                {
                    musicchannel.playing_buffer = i;
                    musicchannel.active = 1;
                }
            }
        }

        // Sleep for 1 ms so that this thread doesn't waste CPU cycles busywaiting
        usleep(1000);
    }

    return 0;
}
Code Example #17
static void audio_callback(void *userdata, uint8_t * stream, int len)
{
   VideoState *is = (VideoState *) userdata;
   int len1, audio_size;
   double pts;
   
   if (!is->first) return;

   while (len > 0) {
      if (is->audio_buf_index >= is->audio_buf_size) {

         /* We have already sent all our data; get more */
         audio_size = -1;
         if (!is->paused) {
            audio_size = audio_decode_frame(is, AUDIO_BUF(is),
               AUDIO_BUF_SIZE, &pts);
         }
            
         if (audio_size < 0) {
            /* If error, output silence */
            is->audio_buf_size = 1024;
            memset(AUDIO_BUF(is), 0, is->audio_buf_size);
         }
         else {
            audio_size = synchronize_audio(is, (int16_t *) AUDIO_BUF(is),
                                           audio_size, pts);
            is->audio_buf_size = audio_size;
         }
         is->audio_buf_index = 0;
      }
      len1 = is->audio_buf_size - is->audio_buf_index;
      if (len1 > len)
         len1 = len;
      memcpy(stream, (uint8_t *) AUDIO_BUF(is) + is->audio_buf_index, len1);
      len -= len1;
      stream += len1;
      is->audio_buf_index += len1;
   }
}
Code Example #18
void audio_callback(void *userdata, Uint8 *stream, int len) {

  AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
  int len1 = 0;
  int audio_size = 0;

  static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
  static unsigned int audio_buf_size = (MAX_AUDIO_FRAME_SIZE * 3) / 2;
  static unsigned int audio_buf_index = 0;
  // int bytes_per_sec = 2 * aCodecCtx->channels * aCodecCtx->sample_rate;

  while(len > 0)
  {
    if(audio_buf_index >= audio_buf_size)
    {
      /* We have already sent all our data; get more */
      audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf)); /* use the buffer capacity; audio_buf_size shrinks after the first decode */
      if(audio_size < 0)
      {
        /* If error, output silence */
        audio_buf_size = 1024; // arbitrary?
        memset(audio_buf, 0, audio_buf_size);
      }
      else
      {
          audio_buf_size = audio_size;
      }
      audio_buf_index = 0;
    }
    len1 = audio_buf_size - audio_buf_index;
    if(len1 > len)
      len1 = len;
    memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
    len -= len1;
    stream += len1;
    audio_buf_index += len1;
  }
}
Code Example #19
File: main.c Project: elmagroud00/Experiments
void audio_callback(void *userdata, uint8_t *stream, int len)
{
	AVCodecContext *aCodecCtx;
	aCodecCtx = (AVCodecContext*)userdata;
	int len1, audio_size;
	static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_size = 0;
	static unsigned int audio_buf_index = 0;
	AVPacket *pkt = av_mallocz(sizeof(AVPacket));
	AVPacket *pkt_temp = av_mallocz(sizeof(AVPacket));
	AVFrame *frame = NULL;
	while(len > 0)
	{
		if(audio_buf_index >= audio_buf_size)
		{
			/*we have already sent all our data; get more */
			audio_size = audio_decode_frame(aCodecCtx, pkt, pkt_temp, frame, audio_buf);
			if(audio_size < 0)
			{
				/*if error, output silence*/
				audio_buf_size = 1024;
				memset(audio_buf, 0, audio_buf_size);
			}
			else
			{
				audio_buf_size = audio_size;	
			}
			audio_buf_index = 0;
		}
		len1 = audio_buf_size - audio_buf_index;
		if(len1 > len)
			len1 = len;
		memcpy(stream, (uint8_t*)audio_buf + audio_buf_index, len1);
		len -= len1;
		stream += len1;
		audio_buf_index += len1;
	}
}
Code Example #20
File: te04.c Project: send2young/Samples
void audio_callback(void *userdata, Uint8 *stream, int len) {

	AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
	int len1, audio_size;

	static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_size = 0;
	static unsigned int audio_buf_index = 0;

	printf("audio_callback-len:%d\n", len);

	while(len > 0) {
		if(audio_buf_index >= audio_buf_size) {
			// We have already sent all our data; get more
			audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf)); // pass the buffer capacity (audio_buf_size is 0 on the first call)
			if(audio_size < 0) {
				// If error, output silence
				audio_buf_size = 1024;	//
				memset(audio_buf, 0, audio_buf_size);
			} else {
				audio_buf_size = audio_size;
			}
			audio_buf_index = 0;
		}
		len1 = audio_buf_size - audio_buf_index;
		if(len1 > len)
			len1 = len;

		memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);

		len -= len1;	// if len > 0, it means decode something again

		stream += len1;		// update stream location(destination buffer)
		audio_buf_index += len1; // update audio_buf location(source buffer)

	}
}
Code Example #21
File: playlist.c Project: kallisti5/libgroove
static int decode_one_frame(struct GroovePlaylist *playlist, struct GrooveFile *file) {
    struct GroovePlaylistPrivate *p = (struct GroovePlaylistPrivate *) playlist;
    struct GrooveFilePrivate *f = (struct GrooveFilePrivate *) file;
    AVPacket *pkt = &f->audio_pkt;

    // might need to rebuild the filter graph if certain things changed
    if (maybe_init_filter_graph(playlist, file) < 0)
        return -1;

    // abort_request is set if we are destroying the file
    if (f->abort_request)
        return -1;

    // handle pause requests
    // only read p->paused once so that we don't need a mutex
    int paused = p->paused;
    if (paused != p->last_paused) {
        p->last_paused = paused;
        if (paused) {
            av_read_pause(f->ic);
        } else {
            av_read_play(f->ic);
        }
    }

    // handle seek requests
    pthread_mutex_lock(&f->seek_mutex);
    if (f->seek_pos >= 0) {
        if (av_seek_frame(f->ic, f->audio_stream_index, f->seek_pos, 0) < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s: error while seeking\n", f->ic->filename);
        } else if (f->seek_flush) {
            every_sink_flush(playlist);
        }
        avcodec_flush_buffers(f->audio_st->codec);
        f->seek_pos = -1;
        f->eof = 0;
    }
    pthread_mutex_unlock(&f->seek_mutex);

    if (f->eof) {
        if (f->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
            av_init_packet(pkt);
            pkt->data = NULL;
            pkt->size = 0;
            pkt->stream_index = f->audio_stream_index;
            if (audio_decode_frame(playlist, file) > 0) {
                // keep flushing
                return 0;
            }
        }
        // this file is complete. move on
        return -1;
    }
    int err = av_read_frame(f->ic, pkt);
    if (err < 0) {
        // treat all errors as EOF, but log non-EOF errors.
        if (err != AVERROR_EOF) {
            av_log(NULL, AV_LOG_WARNING, "error reading frames\n");
        }
        f->eof = 1;
        return 0;
    }
    if (pkt->stream_index != f->audio_stream_index) {
        // we're only interested in the One True Audio Stream
        av_free_packet(pkt);
        return 0;
    }
    audio_decode_frame(playlist, file);
    av_free_packet(pkt);
    return 0;
}
Code Example #22
File: ffmpeg-jni.c Project: dalvik/Drovik
void *audio_thread(void *arg) {
	JNIEnv* env; 
	if((*g_jvm)->AttachCurrentThread(g_jvm, (void**)&env, NULL) != JNI_OK) 
	{
		LOGE("%s: AttachCurrentThread() failed", __FUNCTION__);
		return ((void *)-1);
	}
	VideoState *is = (VideoState*)arg;
	int remain, audio_size; // remain: bytes left in the decoded audio buffer
	int pcmBufferLen;       // length of the buffer that audio data is written into
	jclass audio_track_cls = (*env)->FindClass(env,"android/media/AudioTrack");
	jmethodID min_buff_size_id = (*env)->GetStaticMethodID(
										 env,
										 audio_track_cls,
										"getMinBufferSize",
										"(III)I");
	int buffer_size = (*env)->CallStaticIntMethod(env,audio_track_cls,min_buff_size_id, 		frequency,
			    12,			/*CHANNEL_IN_STEREO*/
				2);         /*ENCODING_PCM_16BIT*/
	LOGI(10,"buffer_size=%i",buffer_size);	
	pcmBufferLen = AVCODEC_MAX_AUDIO_FRAME_SIZE * 3/2;
	jbyteArray buffer = (*env)->NewByteArray(env,pcmBufferLen);
	jmethodID constructor_id = (*env)->GetMethodID(env,audio_track_cls, "<init>",
			"(IIIIII)V");
	jobject audio_track = (*env)->NewObject(env,audio_track_cls,
			constructor_id,
			3, 			  /*AudioManager.STREAM_MUSIC*/
			frequency,        /*sampleRateInHz*/
			12,			  /*CHANNEL_IN_STEREO*/
			2,			  /*ENCODING_PCM_16BIT*/
			buffer_size*10,  /*bufferSizeInBytes*/
			1			  /*AudioTrack.MODE_STREAM*/
	);	
	//setvolume
	jmethodID setStereoVolume = (*env)->GetMethodID(env,audio_track_cls,"setStereoVolume","(FF)I");
	(*env)->CallIntMethod(env,audio_track,setStereoVolume,1.0,1.0);
	//play
    jmethodID method_play = (*env)->GetMethodID(env,audio_track_cls, "play",
			"()V");
    (*env)->CallVoidMethod(env,audio_track, method_play);
    //write
    jmethodID method_write = (*env)->GetMethodID(env,audio_track_cls,"write","([BII)I");
	//release
	jmethodID method_release = (*env)->GetMethodID(env,audio_track_cls,"release","()V");
	//double ref_clock, sync_threshold, diff;
	double pts;
	LOGI(1, "pcmBufferLen = %d, AVCODEC_MAX_AUDIO_FRAME_SIZE = %d", pcmBufferLen, AVCODEC_MAX_AUDIO_FRAME_SIZE);
	while(!is->quit) {
		if(is->audio_buf_index >= is->audio_buf_size) { // all data in audio_buf has been transferred
		    audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
		    if (audio_size <= 0) {
				is->audio_buf_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 3/2;
				memset(is->audio_buf, 0, is->audio_buf_size);
		    }else {
				audio_size = synchronize_audio(is, (int16_t *)is->audio_buf,audio_size,pts);
				is->audio_buf_size = audio_size;
				//fwrite(is->audio_buf, 1, audio_size, fp);
		    } 
		    // after each decode, reset audio_buf_index to 0 and start indexing from the beginning
		    is->audio_buf_index = 0;	
		}
		// if the remaining data is longer than the write buffer, clamp it to pcmBufferLen
		remain = is->audio_buf_size - is->audio_buf_index;
		if(remain > pcmBufferLen) {
		  remain = pcmBufferLen;
		}
		(*env)->SetByteArrayRegion(env,buffer, 0, remain, (jbyte *)(is->audio_buf + is->audio_buf_index)); // copy from the current read position, not always the start
		(*env)->CallIntMethod(env,audio_track,method_write,buffer,0,remain);
		is->audio_buf_index += remain;	
	}
	(*env)->CallVoidMethod(env,audio_track, method_release);
	if(debug) LOGI(1, "### decode audio thread exit.");
	if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) {
		LOGE(1,"### detach audio thread error");
	} 
	pthread_exit(0);
	return ((void *)0);
}