/*
 * FFmpegDecode - decode one audio packet through libavcodec.
 *
 * The original body was entirely disabled with "#if 0" (and, if re-enabled,
 * would not compile: ctxCodec was declared twice and `i` was read
 * uninitialized).  Worse, the live path fell through to "return ret;" with
 * ret never initialized — undefined behavior.  Until the libavcodec path is
 * restored, fail explicitly instead of returning garbage.
 *
 * Returns: -1 (decode path currently disabled); *outlen is set to 0.
 */
static int FFmpegDecode(audio_decoder_operations_t *adec_ops, char *outbuf, int *outlen, char *inbuf, int inlen){
	(void)adec_ops;
	(void)outbuf;
	(void)inbuf;
	(void)inlen;
	if (outlen)
		*outlen = 0;	/* no samples produced */
	return -1;		/* decoder disabled; explicit error instead of uninitialized ret */
}
예제 #2
0
/* Dispatch one demuxed packet to the matching decoder.
 * Returns VIDEO_DATA_ID / AUDIO_DATA_ID on success, a negative value on
 * decode failure, or 0 when the packet belongs to neither stream. */
int decodeFrameFromPacket(AVPacket* aPacket)
    {
    const int streamIdx = aPacket->stream_index;

    if (streamIdx == gVideoStreamIdx)
        {
        int frameFinished = 0;
        int rc = avcodec_decode_video2(gVideoCodecCtx, gVideoFrame, &frameFinished, aPacket);
        if (rc <= 0)
            {
            __android_log_print(ANDROID_LOG_ERROR, "com.example.ffmpegav", "avcodec_decode_video2() decoded no frame");
            return -1;
            }
        return VIDEO_DATA_ID;
        }

    if (streamIdx == gAudioStreamIdx)
        {
        int dataLength = gAudioFrameRefBufferMaxSize;
        int rc = avcodec_decode_audio3(gAudioCodecCtx, (int16_t*)gAudioFrameRefBuffer, &dataLength, aPacket);
        if (rc <= 0)
            {
            __android_log_print(ANDROID_LOG_ERROR, "com.example.ffmpegav", "avcodec_decode_audio3() decoded no frame");
            gAudioFrameDataLengthRefBuffer[0] = 0;
            return -2;
            }
        gAudioFrameDataLengthRefBuffer[0] = dataLength;
        return AUDIO_DATA_ID;
        }

    return 0;
    }
예제 #3
0
/*
 * Decode every audio frame contained in one packet and append the samples
 * to the AudioStream.  Returns 0 on success, 1 if the decoder errored.
 *
 * Fix: the partial-consumption path allocated the replacement buffer with
 * malloc() but released the previous one with av_free().  Buffers passed to
 * av_free() must come from av_malloc(); mixing the two allocators is
 * undefined.  Both sides now use FFmpeg's allocator, so the final buffer
 * can also be released safely by the caller's packet cleanup.
 */
int LibAvDecoder::decodePacket(AVCodecContext* cCtx, AVPacket* avpkt, AudioStream* ab) throw (Exception){
	int16_t outputBuffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];
	int16_t *samples = (int16_t*)outputBuffer;
	int outputBufferSize, bytesConsumed;
	while (avpkt->size > 0) {
		outputBufferSize = sizeof(outputBuffer);
		bytesConsumed = avcodec_decode_audio3(cCtx, samples, &outputBufferSize, avpkt);
		if(bytesConsumed <= 0){ // < 0 for an error, == 0 for no frame data decompressed
			avpkt->size = 0;
			return 1;
		}else{
			int newSamplesDecoded = outputBufferSize/sizeof(int16_t);
			int oldSampleCount = ab->getSampleCount();
			try{
				ab->addToSampleCount(newSamplesDecoded);
			}catch(Exception& e){
				throw e;
			}
			for(int i=0; i<newSamplesDecoded; i++)
				ab->setSample(oldSampleCount+i,(float)samples[i]); // can divide samples[i] by 32768 if you want unity values. Makes no difference.
		}
		if(bytesConsumed < avpkt->size){
			// Shift the unconsumed tail into a fresh buffer.
			int newLength = avpkt->size - bytesConsumed;
			uint8_t* datacopy = avpkt->data;
			avpkt->data = (uint8_t*)av_malloc(newLength);	// was malloc(): allocator mismatch with av_free() below
			memcpy(avpkt->data,datacopy + bytesConsumed,newLength);
			av_free(datacopy);
		}
		avpkt->size -= bytesConsumed;
	}
	return 0;
}
예제 #4
0
/*
 * Decode audio into buf until at least minlen bytes are produced (or the
 * stream runs out / errors).  Returns the number of bytes written, or -1
 * if nothing could be decoded.  buf must hold at least maxlen bytes.
 */
static int decode_audio(sh_audio_t *sh_audio,unsigned char *buf,int minlen,int maxlen)
{
    unsigned char *start=NULL;
    int y,len=-1;
    while(len<minlen){
	AVPacket pkt;
	int len2=maxlen;
	double pts;
	/* Pull the next demuxed chunk; its pts comes back with it. */
	int x=ds_get_packet_pts(sh_audio->ds,&start, &pts);
	if(x<=0) {
	    /* No whole packet available: ask the parser to flush what it has. */
	    start = NULL;
	    x = 0;
	    ds_parse(sh_audio->ds, &start, &x, MP_NOPTS_VALUE, 0);
	    if (x <= 0)
	        break; // error
	} else {
	    /* Feed the raw packet through the parser; rewind the demuxer by
	     * whatever the parser did not consume. */
	    int in_size = x;
	    int consumed = ds_parse(sh_audio->ds, &start, &x, pts, 0);
	    sh_audio->ds->buffer_pos -= in_size - consumed;
	}

	av_init_packet(&pkt);
	pkt.data = start;
	pkt.size = x;
	if (pts != MP_NOPTS_VALUE) {
	    /* New timestamp: reset the byte counter used for pts interpolation. */
	    sh_audio->pts = pts;
	    sh_audio->pts_bytes = 0;
	}
	y=avcodec_decode_audio3(sh_audio->context,(int16_t*)buf,&len2,&pkt);
//printf("return:%d samples_out:%d bitstream_in:%d sample_sum:%d\n", y, len2, x, len); fflush(stdout);
	// LATM may need many packets to find mux info
	if (y == AVERROR(EAGAIN))
	    continue;
	if(y<0){ mp_msg(MSGT_DECAUDIO,MSGL_V,"lavc_audio: error\n");break; }
	/* Without a parser the codec may leave input unconsumed; push the
	 * remainder back into the demuxer buffer. */
	if(!sh_audio->parser && y<x)
	    sh_audio->ds->buffer_pos+=y-x;  // put back data (HACK!)
	if(len2>0){
	  /* >= 5 channels: convert lavc's channel order to MPlayer's. */
	  if (((AVCodecContext *)sh_audio->context)->channels >= 5) {
            int samplesize = av_get_bytes_per_sample(((AVCodecContext *)
                                    sh_audio->context)->sample_fmt);
            reorder_channel_nch(buf, AF_CHANNEL_LAYOUT_LAVC_DEFAULT,
                                AF_CHANNEL_LAYOUT_MPLAYER_DEFAULT,
                                ((AVCodecContext *)sh_audio->context)->channels,
                                len2 / samplesize, samplesize);
	  }
	  //len=len2;break;
	  if(len<0) len=len2; else len+=len2;	/* first chunk vs. append */
	  buf+=len2;
	  maxlen -= len2;
	  sh_audio->pts_bytes += len2;
	}
        mp_dbg(MSGT_DECAUDIO,MSGL_DBG2,"Decoded %d -> %d  \n",y,len2);

        /* Output format may change mid-stream (e.g. after LATM mux info). */
        if (setup_format(sh_audio, sh_audio->context))
            break;
    }
  return len;
}
예제 #5
0
파일: player.c 프로젝트: 26597925/SmileTime
/* Decode one chunk of audio from the stream's packet queue into audio_buf.
 * Returns the number of bytes produced, or -1 on quit/queue failure.
 * *pts_ptr receives the presentation time of the decoded data. */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {

  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio3(is->audio_ctx, 
				  (int16_t *)audio_buf, &data_size, pkt);
				  //is->audio_pkt_data, is->audio_pkt_size);

      //len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, 
			//	  audio_pkt_data, audio_pkt_size);
      if(len1 < 0) {
        // if error, skip frame
        is->audio_pkt_size = 0;
        break;
      }
      /* NOTE(review): only the shadow cursor advances here; pkt->data is
       * left untouched, so a packet holding several frames would be
       * re-decoded from its start — confirm packets are single-frame. */
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        // No data yet, get more frames
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      //n = 2 * is->audio_ctx->channels;
      /* NOTE(review): n is hard-coded to 2 (bytes per S16 sample) although
       * the commented-out line accounted for channel count — verify the
       * clock advances correctly for stereo. */
      n = 2;
      is->audio_clock += (double)data_size /
      (double)(n * is->audio_ctx->sample_rate);

      // We have data, return it and come back for more later
      return data_size;
    }
    /* Done with the previous packet; release it before fetching another. */
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }

    // next packet
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }

    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;

    // if update, update the audio clock w/pts
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_ctx->time_base)*pkt->pts;
    }

  }
}
예제 #6
0
파일: main.c 프로젝트: simon-r/dr_meter
// Decode one frame of audio
/* Decode one chunk of audio into self->buf (length in self->buf_size).
 * Returns 0 on success, AVERROR_EOF once closed, or a negative AVERROR.
 * Drives a small state machine: OPEN -> VALID_PACKET / NEED_FLUSH -> CLOSED. */
int sc_get_next_frame(struct stream_context *self) {
	int err;

	if (self->state == STATE_CLOSED) {
		return AVERROR_EOF;
	}

	// Grab a new packet, if necessary
	while (self->state == STATE_OPEN) {
		err = av_read_frame(self->format_ctx, &self->real_pkt);
		if (err == AVERROR_EOF || url_feof(self->format_ctx->pb)) {
			/* End of input: queue an empty packet so the decoder flushes
			 * its internal buffers. */
			av_init_packet(&self->pkt);
			self->pkt.data = NULL;
			self->pkt.size = 0;
			self->state = STATE_NEED_FLUSH;
		} else if (err < 0) {
			return err;
		} else if (self->real_pkt.stream_index == self->stream_index) {
			/* pkt aliases real_pkt; real_pkt retains ownership so it can
			 * be freed once fully consumed. */
			self->pkt = self->real_pkt;
			self->state = STATE_VALID_PACKET;
		} else {
			// we don't care about this frame; try another
			av_free_packet(&self->real_pkt);
		}
	}

	AVCodecContext *codec_ctx = sc_get_codec(self);

	// Decode the audio.

	// The third parameter gives the size of the output buffer, and is set
	// to the number of bytes used of the output buffer.
	// The return value is the number of bytes read from the packet.
	// The codec is not required to read the entire packet, so we may need
	// to keep it around for a while.
	int buf_size = self->buf_alloc_size;
	err = avcodec_decode_audio3(codec_ctx, self->buf, &buf_size, &self->pkt);
	if (err < 0) { return err; }
	int bytes_used = err;

	self->buf_size = buf_size;

	if (self->state == STATE_VALID_PACKET) {
		if (0 < bytes_used && bytes_used < self->pkt.size) {
			/* Partially consumed: advance the cursor and keep the packet. */
			self->pkt.data += bytes_used;
			self->pkt.size -= bytes_used;
		} else  {
			/* Fully consumed (or no progress): release and read another. */
			self->state = STATE_OPEN;
			av_free_packet(&self->real_pkt);
		}
	} else if (self->state == STATE_NEED_FLUSH) {
		/* The flush packet was submitted; shut the codec down. */
		avcodec_close(codec_ctx);
		self->state = STATE_CLOSED;
	}

	return 0;
}
/* Decode audio from the queue into audio_buf.
 * Returns the decoded byte count, or -1 on quit / queue failure.
 *
 * Fix: the original passed &pkt to avcodec_decode_audio3() while advancing
 * only the shadow audio_pkt_data/audio_pkt_size cursor, so a packet holding
 * several frames was re-decoded from its start on every pass.  Decode via a
 * temporary packet built from the cursor instead; pkt.data stays intact so
 * av_free_packet() still frees the right pointer. */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) 
{
	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;

	int len1, data_size;

	for(;;) 
	{
		while(audio_pkt_size > 0) 
		{
			AVPacket avp;

			data_size = buf_size;

			/* Temporary cursor packet over the unconsumed portion. */
			av_init_packet(&avp);
			avp.data = audio_pkt_data;
			avp.size = audio_pkt_size;

			len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size, 
					  &avp);
			ffplay_info("audio_buf = 0x%8x, data_size = %d, pkt = 0x%8x\n",audio_buf,data_size,&pkt);
			if(len1 < 0) 
			{
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if(data_size <= 0) 
			{
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			return data_size;
		}
		if(pkt.data)
		{
			ffplay_info("Here.\n");
			av_free_packet(&pkt);
		}
		if(quit) 
		{
			ffplay_info("Here.\n");
			return -1;
		}

		if(packet_queue_get(&audioq, &pkt, 1) < 0) 
		{
			ffplay_info("Here.\n");
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
}
예제 #8
0
/* Decode audio from the global packet queue into the global audio_buffer.
 * Returns the decoded byte count, or -1 when stopping / the queue fails.
 * NOTE(review): the `buffer` parameter is unused; only buffer_size bounds
 * the decode — confirm callers expect output in the global audio_buffer. */
int video_decode_audio_frame( AVCodecContext *context, uint8_t *buffer, int buffer_size ) {
    static AVPacket packet;
    int used, data_size;

    for(;;) {
        while( audio_packet_size > 0 ) {
            data_size = buffer_size;

            /* Wrap the current read position in a temporary packet so the
             * original packet stays intact for av_free_packet(). */
            AVPacket avp;
            av_init_packet( &avp );
            avp.data = audio_packet_data;
            avp.size = audio_packet_size;

            used = avcodec_decode_audio3( context, (int16_t *)audio_buffer, &data_size,
                                          &avp );
            if( used < 0 ) {
                /* if error, skip frame */
                audio_packet_size = 0;
                break;
            }
            audio_packet_data += used;
            audio_packet_size -= used;

            if( data_size <= 0 ) {
                /* No data yet, get more frames */
                continue;
            }

            /* Advance the audio clock by the duration of the decoded PCM:
             * bytes / (rate * 2 bytes-per-sample * channels). */
            audio_clock += (double)data_size /
                           (double)(format_context->streams[audio_stream]->codec->sample_rate *
                                    (2 * format_context->streams[audio_stream]->codec->channels));

            /* We have data, return it and come back for more later */
            return data_size;
        }
        /* Release the finished packet before pulling the next one. */
        if( packet.data )
            av_free_packet( &packet );

        if( stop ) {
            audio_running = 0;
            return -1;
        }

        if( packet_queue_get( &audio_queue, &packet, 1 ) < 0 )
            return -1;

        audio_packet_data = packet.data;
        audio_packet_size = packet.size;

        /* Resync the audio clock to the demuxer timestamp when present. */
        if( packet.pts != AV_NOPTS_VALUE ) {
            audio_clock = packet.pts * av_q2d( format_context->streams[audio_stream]->time_base );
        }
    }
}
예제 #9
0
// Decode the pending packet into the shared audio buffer.
// Returns &audioBuf on success, NULL on decode error or when no frame
// was produced.  Advances audioPtsSec by the decoded duration.
SimpleBuf* FFMpegDecoder::decodeAudio()
{	
	int decodedBytes = audioBuf.totalSize;
	const int consumed = avcodec_decode_audio3(pAudioCodecCtx, (int16_t*)audioBuf.data, &decodedBytes, &encodedPacket);

	if (consumed <= 0)
		return NULL;		// decoder error, or nothing left to decode
	if (decodedBytes < 0)
		return NULL;		// no frame produced

	audioBuf.dataSize = decodedBytes;
	audioPtsSec += (double)decodedBytes / audioBytesPerSec;

	return &audioBuf;
}
예제 #10
0
/* Decode audio from the queue into audio_buf.
 * Returns the decoded byte count, or -1 on quit / queue failure. */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) 
{
	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;
	/* pFrame is only consumed by the disabled avcodec_decode_audio4() path
	 * below; it is freed on every exit path. */
	AVFrame *pFrame = av_frame_alloc();
    int got_frame;
	
	int len1, data_size;

	for(;;) {
		while(audio_pkt_size > 0) {
			data_size = buf_size;
			#if 0
			len1 = avcodec_decode_audio4(aCodecCtx, pFrame, &got_frame, &pkt);
			#else
			/* NOTE(review): pkt.data/pkt.size are never advanced, only the
			 * shadow audio_pkt_* counters; a packet containing several
			 * frames would be re-decoded from its start — confirm. */
			len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size, &pkt);
			#endif
			if(len1 < 0) {
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if(data_size <= 0) {
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			av_frame_free(&pFrame);
			return data_size;
		}
		/* Release the finished packet before pulling the next one. */
		if(pkt.data)
			av_free_packet(&pkt);

		if(quit) {
			av_frame_free(&pFrame);
			return -1;
		}

		if(packet_queue_get(&audioq, &pkt, 1) < 0) {
			av_frame_free(&pFrame);
			return -1;
		}
		audio_pkt_data = pkt.data;
    		audio_pkt_size = pkt.size;
		printf("\r\n audio_pkt_size:%d!!\r\n",audio_pkt_size);
	}
}
            /* Decode the next chunk of the pending packet into the aligned
             * sample buffer and update the stream bookkeeping
             * (sampleBufferStart/Size, streamSize).
             * Throws PacketDecodeException when no packet is pending or the
             * codec reports an error. */
            void decodeFrame() throw(PacketDecodeException)
            {
                if (!hasPacket())
                    throw PacketDecodeException();
                
                // In: output capacity; out: decoded bytes.
                int dataSize = sampleBufferAlloc;
                int used = 
                #if (LIBAVCODEC_VERSION_MAJOR >= 53)
                    avcodec_decode_audio3(
                        pCodecCtx,
                        (int16_t*)sampleBufferAligned, &dataSize,
                        &packet);
                #else
                    avcodec_decode_audio2(
                        pCodecCtx,
                        (int16_t*)sampleBufferAligned, &dataSize,
                        packetBuffer, packetBufferSize);
                #endif
                
                if (used < 0)
                    throw PacketDecodeException();
                
                #if (LIBAVCODEC_VERSION_MAJOR >= 53)
                
                // Clamp, then advance the packet cursor past the consumed bytes.
                if ((size_t)used > packet.size)
                    used = packet.size;

                (char*&)(packet.data) += used;
                packet.size -= used;
                
                #else
                
                if ((size_t)used > packetBufferSize)
                    used = packetBufferSize;

                (char*&)packetBuffer += used;
                packetBufferSize -= used;
                
                #endif
                
                // Negative size means "no frame produced"; treat as empty.
                if (dataSize < 0)
                    dataSize = 0;

                sampleBuffer = sampleBufferAligned;
                sampleBufferStart += sampleBufferSize;
                sampleBufferSize = dataSize / sampleSize;
                
                // Grow the known stream length if we decoded past it.
                if (sampleBufferStart + sampleBufferSize > streamSize)
                    streamSize = sampleBufferStart + sampleBufferSize;
            }
예제 #12
0
/* Decode audio from the queue into audio_buf.
 * Returns the decoded byte count, or -1 on quit / queue exhaustion.
 * *pts_ptr receives the presentation time of the returned data.
 *
 * Fix: the `break` taken when get_from_queue() fails previously fell off
 * the end of a non-void function — undefined behavior; the caller received
 * garbage.  The exhaustion case now returns -1 explicitly. */
int decode_audio(AudioState *audio, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
  int len1, data_size, n;
  AVPacket *pkt = &audio->audio_pkt;
  double pts;

  for(;;) {
    while(audio->audioPktSize > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio3(audio->pCodecCtx, 
			 (int16_t *)audio_buf, &data_size, pkt);
      if(len1 < 0) {
	/* if error, skip frame */
	audio->audioPktSize = 0;
	break;
      }
      audio->audioPktData += len1;
      audio->audioPktSize -= len1;
      if(data_size <= 0) {
	/* No data yet, get more frames */
	continue;
      }
      pts = audio->audioClk;
      *pts_ptr = pts;
      n = 2;//* audio->pCodecCtx->channels;
      audio->audioClk += (double)data_size /
	(double)(n * audio->pCodecCtx->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(audio->quit) {
      return -1;
    }
    /* next packet */
    if(get_from_queue(&audio->audioq, pkt) < 0) {
      break;
    }
    audio->audioPktData = pkt->data;
    audio->audioPktSize = pkt->size;
    /* if update, update the audio clock w/pts */
    if(pkt->pts != AV_NOPTS_VALUE) {
      audio->audioClk = av_q2d(audio->pCodecCtx->time_base)*pkt->pts;
    }

  }
  return -1;	/* queue exhausted */
}
예제 #13
0
/* Decode all frames of one packet into buffer, growing the buffer in
 * AVCODEC_MAX_AUDIO_FRAME_SIZE steps as needed.
 * Returns the number of bytes written to the buffer. */
int AUD_FFMPEGReader::decode(AVPacket* packet, AUD_Buffer& buffer)
{
	// save packet parameters
	uint8_t *audio_pkg_data = packet->data;
	int audio_pkg_size = packet->size;

	int buf_size = buffer.getSize();
	int buf_pos = 0;

	int read_length, data_size;

	// tmp_pkt acts as a movable cursor over the original packet's data,
	// so the caller's packet stays intact for freeing.
	AVPacket tmp_pkt;
	
	av_init_packet(&tmp_pkt);

	// as long as there is still data in the package
	while(audio_pkg_size > 0)
	{
		// resize buffer if needed
		if(buf_size - buf_pos < AVCODEC_MAX_AUDIO_FRAME_SIZE)
		{
			buffer.resize(buf_size + AVCODEC_MAX_AUDIO_FRAME_SIZE, true);
			buf_size += AVCODEC_MAX_AUDIO_FRAME_SIZE;
		}

		// read samples from the packet
		data_size = buf_size - buf_pos;

		tmp_pkt.data = audio_pkg_data;
		tmp_pkt.size = audio_pkg_size;

		read_length = avcodec_decode_audio3(
			m_codecCtx,
			(int16_t*)(((data_t*)buffer.getBuffer()) + buf_pos),
			&data_size, &tmp_pkt);

		// read error, next packet!
		if(read_length < 0)
			break;

		buf_pos += data_size;

		// move packet parameters
		audio_pkg_data += read_length;
		audio_pkg_size -= read_length;
	}

	return buf_pos;
}
/* Decode audio from the queue into audio_buf.
 * Returns the decoded byte count, or -1 on quit / queue failure.
 *
 * Fix: the original refreshed audio_pkt_data/audio_pkt_size from pkt on
 * every pass of the inner loop (before even checking the decode result),
 * so the consumption cursor never advanced — the same packet head was
 * decoded forever and the packet was never released.  The cursor is now
 * set once per packet and decoding goes through a temporary packet, so
 * pkt.data stays valid for av_free_packet(). */
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {

  static AVPacket pkt;
  static uint8_t *audio_pkt_data = NULL;
  static int audio_pkt_size = 0;

  int len1, data_size;

  for(;;) {
    while(audio_pkt_size > 0) {
      AVPacket avp;

      data_size = buf_size;

      /* Temporary cursor packet over the unconsumed portion. */
      av_init_packet(&avp);
      avp.data = audio_pkt_data;
      avp.size = audio_pkt_size;

      len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size, 
				  &avp);

      if(len1 < 0) {
	/* if error, skip frame */
	audio_pkt_size = 0;
	break;
      }
      audio_pkt_data += len1;
      audio_pkt_size -= len1;
      if(data_size <= 0) {
	/* No data yet, get more frames */
	continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt.data)
      av_free_packet(&pkt);

    if(quit) {
      return -1;
    }

    if(packet_queue_get(&audioq, &pkt, 1) < 0) {
      return -1;
    }
    audio_pkt_data = pkt.data;
    audio_pkt_size = pkt.size;
  }
}
예제 #15
0
/* Decode one compressed audio buffer and enqueue the PCM on the OpenSL ES
 * output.  Returns 1 while the output reports running, 0 otherwise, and
 * -1 on error or when the player is stopped.  Serialized by tvh->mutex. */
int tvh_audio_enqueue(tvh_object_t *tvh, uint8_t *buf, size_t len, int64_t pts, int64_t dts, int64_t dur) {
  uint8_t *ptr;
  AVPacket packet;
  int length;
  acodec_sys_t *cs = tvh->acs;
  aout_sys_t *ao = tvh->ao;
  int running = 0;

  pthread_mutex_lock(&tvh->mutex);

  if(!tvh->running) {
    pthread_mutex_unlock(&tvh->mutex);
    return -1;
  }
  
  av_init_packet(&packet);
  packet.data = ptr = buf;
  packet.size = len;
  
  /* cs->len is in/out: output capacity before the call, decoded bytes after. */
  cs->len = AVCODEC_MAX_AUDIO_FRAME_SIZE*2;
  length = avcodec_decode_audio3(cs->ctx, cs->buf, &cs->len, &packet);
  if(length <= 0) {
    ERROR("Unable to decode audio stream (%d)", length);
    pthread_mutex_unlock(&tvh->mutex);
    return -1;
  }

  /* Prefer the packet's own timestamp over the caller-supplied one. */
  if(packet.pts != AV_NOPTS_VALUE) {
    pts = packet.pts;
  }

  /* Hand a copy of the decoded PCM to the audio output.
   * NOTE(review): malloc/av_malloc results are unchecked — confirm OOM
   * can be ignored here. */
  aout_buffer_t *ab = (aout_buffer_t *) malloc(sizeof(aout_buffer_t));
  ab->ptr = av_malloc(cs->len);
  ab->len = cs->len;
  ab->pts = pts;
  memcpy(ab->ptr, cs->buf, cs->len);

  if(!opensles_is_open(tvh->ao)) {
    /* NOTE(review): sample_rate*1000 suggests the API expects mHz — verify. */
    opensles_open(tvh->ao, cs->ctx->channels , cs->ctx->sample_rate*1000);
  }

  running = opensles_enqueue(ao, ab) > 0; 

  pthread_mutex_unlock(&tvh->mutex);

  return running;
}
/* Decode one audio packet and push the resulting PCM to the Android audio
 * track (initializing the track on the first call).
 * Returns 0 on success, -1 on failure (a Java exception is thrown).
 *
 * Fix: the decode result was previously ignored; on a decoder error
 * samples_size is meaningless and garbage would have been written to the
 * audio track.  Errors are now reported to the caller. */
static int FFMpegPlayerAndroid_processAudio(JNIEnv *env, AVPacket *packet, int16_t *samples, int samples_size) {
	static bool called = false;
	static int temp_size = 0;
	/* samples_size is in/out: capacity before the call, PCM bytes after. */
	int len = avcodec_decode_audio3(ffmpeg_audio.codec_ctx, samples, &samples_size, packet);
	if(len < 0) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Couldn't decode audio frame");
		return -1;
	}
	if(!called) {
		__android_log_print(ANDROID_LOG_INFO, TAG, "starting android audio track");
		if(AudioDriver_set(MUSIC, 
						   ffmpeg_audio.codec_ctx->sample_rate,
						   PCM_16_BIT,
						   (ffmpeg_audio.codec_ctx->channels == 2) ? CHANNEL_OUT_STEREO : CHANNEL_OUT_MONO,
						   samples_size) != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't set audio track parametres");
			return -1;
		}
		if(AudioDriver_start() != ANDROID_AUDIOTRACK_RESULT_SUCCESS) {
			jniThrowException(env,
							  "java/io/IOException",
							  "Couldn't start audio track");
			return -1;
		}
		temp_size = samples_size;
		called = true;
	}

	if(AudioDriver_write(samples, samples_size) <= 0) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Couldn't write bytes to audio track");
		return -1;
	}
	
	AudioDriver_flush();
	return 0;
}
예제 #17
0
// Decode audio from the packet queue into audioBuffer.
// Returns the decoded byte count, or -1 on quit / queue failure.
//
// Fix: pk_temp was av_malloc()'ed on every call and never released,
// leaking one AVPacket per invocation.  A stack packet serves the same
// purpose (a movable cursor over pk's data) with no allocation.
int EJOAudio::DecodeFrame(AVCodecContext *aCodecCtx, uint8_t *audioBuffer, int bufferSize){
	static AVPacket pk;
	AVPacket pk_temp;
	av_init_packet(&pk_temp);
	pk_temp.size = 0;
	pk_temp.data = NULL;

	int len1, data_size;

	for(;;){
		while(pk_temp.size > 0){

			data_size = bufferSize;
			len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *) audioBuffer, &data_size, &pk_temp);

			std::cerr << "Length: " << len1 << std::endl;
			if(len1 < 0){
				pk_temp.size = 0;
				break;
			}
			// Advance the cursor; pk still owns the buffer for av_free_packet().
			pk_temp.data += len1;
			pk_temp.size -= len1;

			if(data_size <= 0){
				continue;
			}

			return data_size;
		}
		if(pk.data)
			av_free_packet(&pk);

		if(this->quit)
			return -1;

		if(this->packetQueue->Get(&pk, 1) < 0)
			return -1;

		pk_temp.data = pk.data;
		pk_temp.size = pk.size;
	}
	return 0;
}
예제 #18
0
파일: ffmpeg-jni.c 프로젝트: dalvik/Drovik
/* Decode audio from the queue into audio_buf; returns the decoded byte
 * count or -1 on quit / queue failure.  *pts_ptr receives the pts. */
int audio_decode_frame(VideoState*is, int16_t *audio_buf, int buf_size, double *pts_ptr) {
  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;  
  int index = 0;	/* NOTE(review): accumulated but never used */
  for (;;) {
    while (is->audio_pkt_size > 0) {
      /* NOTE(review): buf_size is ignored; the decoder is told this fixed
       * capacity instead — confirm audio_buf really is that large. */
      data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;
      len1 = avcodec_decode_audio3(is->audio_st->codec,
                                   audio_buf, 
                                  &data_size,
                                   pkt);
      if (len1 < 0) {
		is->audio_pkt_size = 0;
		break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      /* NOTE(review): sibling implementations test data_size <= 0 here; a
       * zero-byte frame falls through and returns 0 — confirm intended. */
      if (data_size < 0) {
		continue;
      }
	  index +=data_size;
      pts = is->audio_clock;
      *pts_ptr = pts;
      /* 2 bytes per sample (S16) per channel. */
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size / (double)(n*is->audio_st->codec->sample_rate);
      return data_size;
    }
    /* Release the finished packet before pulling the next one. */
    if (pkt->data) {
      av_free_packet(pkt);
    }
    if (is->quit) {
      return -1;
    }
    if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* Resync the audio clock to the demuxer timestamp when present. */
    if (pkt->pts != AV_NOPTS_VALUE) {
		is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
	}
  }
}
예제 #19
0
	/* Read up to `count` samples into buffer starting at boffset.
	 * Serves leftover samples from the cache first, then decodes new
	 * packets; decoded bytes beyond `count` are stashed in the cache
	 * for the next call.  Returns the number of samples delivered. */
	u32 FFmpegAudioDecoder::Read(AudioBuffer<s16>& buffer, u32 count, u32 boffset)
	{
		int offset = ReadCache(buffer, count, boffset);
		count -= offset;
		while (count > 0) {
			AVPacket originalPacket;
			if (!decoder->ReadPacket(&originalPacket, streamIndex))
				break;

			/* packet is a cursor copy; originalPacket stays intact so
			 * FreePacket() below releases the right pointer. */
			AVPacket packet = originalPacket;

			cacheLength = 0;

			while (packet.size > 0) {
				int datasize = bufferSize;
				int used = avcodec_decode_audio3(stream->codec, (s16*)this->buffer, &datasize, &packet);
				if (used < 0)
					break;
				packet.size -= used;
				packet.data += used;

				if (datasize <= 0)
					break;

				/* Deliver what fits; the remainder goes to the cache. */
				int read = Math::Min((u32)datasize, count * 2 * channels);
				int left = datasize - read;
				if (read > 0) {
					int samples = read / sizeof(AudioBuffer<s16>::AudioSample) / channels;
					buffer.DeinterlaceFrom((AudioBuffer<s16>::AudioSample*)this->buffer, samples, boffset + offset);
					offset += samples;
					count -= samples;
				}

				if (left > 0) {
					memcpy((u8*)cache + cacheLength, (u8*)this->buffer + read, left);
					cacheLength += left;
				}
			}

			decoder->FreePacket(&originalPacket);
		}
		AudioCodec<s16>::Read(buffer, offset);
		return offset;
	}
예제 #20
0
/* Version-portable wrapper around libavcodec's audio decode entry point.
 * Newer libavcodec (>= 52.32) takes an AVPacket; older releases take a raw
 * buffer + size.  Semantics match avcodec_decode_audio2() in both cases. */
static int decode_audio(AVCodecContext *avctx, int16_t *samples,
                         int *frame_size_ptr,
                         const uint8_t *buf, int buf_size)
{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR==52 && LIBAVCODEC_VERSION_MINOR>=32)

    // following code segment copied from ffmpeg's avcodec_decode_audio2()
    // implementation to avoid warnings about deprecated function usage.
    AVPacket avpkt;
    av_init_packet(&avpkt);
    // const_cast is safe here: decoding only reads the input buffer.
    avpkt.data = const_cast<uint8_t *>(buf);
    avpkt.size = buf_size;

    return avcodec_decode_audio3(avctx, samples, frame_size_ptr, &avpkt);
#else
    // fallback for older versions of ffmpeg that don't have avcodec_decode_audio3.
    return avcodec_decode_audio2(avctx, samples, frame_size_ptr, buf, buf_size);
#endif
}
예제 #21
0
/* Decode audio from the stream's packet queue into audio_buf.
 * Returns the decoded byte count, or -1 on quit / queue failure. */
int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size) {

  int len1, data_size;
  AVPacket *pkt = &is->audio_pkt;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;
      //len1 = avcodec_decode_audio2(is->audio_st->codec, 
				  //(int16_t *)audio_buf, &data_size, 
				  //is->audio_pkt_data, is->audio_pkt_size);
		/* NOTE(review): only the shadow is->audio_pkt_* cursor advances;
		 * pkt->data never moves, so a packet holding several frames would
		 * be re-decoded from its start — confirm packets are single-frame. */
		len1 = avcodec_decode_audio3(is->audio_st->codec, (int16_t *)audio_buf, &data_size, pkt);
			
      if(len1 < 0) {
	/* if error, skip frame */
	is->audio_pkt_size = 0;
	break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
	/* No data yet, get more frames */
	continue;
      }
      /* We have data, return it and come back for more later */
      return data_size;
    }
    /* Release the finished packet before pulling the next one. */
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
  }
}
/*
 * Decode one compressed audio buffer with libavcodec.
 * Only CODEC_ID_AAC is currently routed to the decoder; outlen is in/out
 * (capacity before, decoded bytes after).
 *
 * Fix: `ret` was left uninitialized when codec_id was anything other than
 * CODEC_ID_AAC, so the function returned garbage (undefined behavior).
 * It now defaults to -1 for unsupported codecs.  The unused `int i` was
 * also removed.
 */
static int decode_audio(AVCodecContext *ctxCodec, char *outbuf, int *outlen, char *inbuf, int inlen){
	AVPacket avpkt;
	int ret = -1;	/* default: unsupported codec */

	av_init_packet(&avpkt);
	avpkt.data = inbuf;
	avpkt.size = inlen;

	switch (ctxCodec->codec_id) {
	case CODEC_ID_AAC:
		ret = avcodec_decode_audio3(ctxCodec, (int16_t *)outbuf, outlen, &avpkt);
		//adec_print("ape samplerate=%d,channels=%d,framesize=%d,ret=%d,outlen=%d,inlen=%d,-------------------------------------------\n",
	    //		ctxCodec->sample_rate,ctxCodec->channels,ctxCodec->frame_size,ret,*outlen,inlen);
		break;
	default: 
		break;
	}

	return ret;
}
예제 #23
0
   /* Decode one audio packet, write the PCM to the audio output, and
    * update the shared audio clock (audio_pts / audio_pts_ts).
    * pkt's data/size are restored before returning so the caller can free
    * the packet normally.  Decoder access is serialized by audio_lock. */
   void Scheduler::process_audio(AVPacket& pkt, AlignedBuffer<int16_t>& buf)
   {
      if (!has_audio)
         return;

      /* Remember the original cursor so it can be restored after decoding. */
      uint8_t *data = pkt.data;
      size_t size = pkt.size;

      size_t written = 0;
      while (pkt.size > 0)
      {
         /* Remaining capacity of buf in bytes. */
         int out_size = buf.size() * sizeof(int16_t) - written;
         audio_lock.lock();
         int ret = avcodec_decode_audio3(file->audio().ctx, &buf[written / sizeof(int16_t)], &out_size, &pkt);
         audio_lock.unlock();
         if (ret <= 0)
            break;

         pkt.size -= ret;
         pkt.data += ret;
         written += out_size;
      }
      /* Restore the cursor moved during decoding. */
      pkt.data = data;
      pkt.size = size;

      audio_lock.lock();
      audio->write(&buf[0], written / 2);
      audio_lock.unlock();

      avlock.lock();
      audio_written += written;

      /* Derive the audio clock from pts (preferred) or dts, compensating
       * for the output device's buffering delay. */
      if (pkt.pts != (int64_t)AV_NOPTS_VALUE)
         audio_pts = pkt.pts * av_q2d(file->audio().time_base) - audio->delay();
      else if (pkt.dts != (int64_t)AV_NOPTS_VALUE)
         audio_pts = pkt.dts * av_q2d(file->audio().time_base) - audio->delay();

      audio_pts_ts = get_time();
      avlock.unlock();
   }
예제 #24
0
/* Decode the current packet (member `pkt`) into audio_buf.
 * Returns the number of input bytes consumed, or 0 on decoder error
 * (in which case pkt is marked consumed).  If data_size is non-NULL it
 * receives the decoded PCM byte count. */
int VideoLayer::decode_audio_packet(int *data_size) {
  int datasize, res;

  datasize = AVCODEC_MAX_AUDIO_FRAME_SIZE; //+ FF_INPUT_BUFFER_PADDING_SIZE;
#if LIBAVCODEC_VERSION_MAJOR < 53
  res = avcodec_decode_audio2(audio_codec_ctx, (int16_t *)audio_buf,
			      &datasize, pkt.data, pkt.size);
#else
  res = avcodec_decode_audio3(audio_codec_ctx, (int16_t *)audio_buf,
			      &datasize, &pkt);
#endif

  /* NOTE(review): datasize is propagated even when res < 0, i.e. its value
   * after a failed decode — confirm callers tolerate that. */
  if (data_size) *data_size = datasize; 
  
  if(res < 0) {
    /* if error, skip frame */
    pkt.size = 0;
    return 0;
  }
  /* We have data, return it and come back for more later */
  return res;
}
예제 #25
0
/* Asynchronously decode one audio frame and attach the PCM to the frame
 * as a driver binary.
 *
 * Fixes: (1) avpkt was used with uninitialized fields — only data/size
 * were set, leaving pts/dts/flags/side-data as stack garbage; it is now
 * initialized with av_init_packet().  (2) the error check only caught -1;
 * libavcodec returns arbitrary negative AVERROR codes, so test `< 0`.
 * (3) the av_malloc() result is now checked before use. */
static void audio_async_decode(void *async_data){
  H264Frame *frame = (H264Frame *)async_data;
  H264Decoder *handle = frame->decoder;
  int16_t *outbuf=NULL;
  int len;
  int size_out;
  AVPacket avpkt;  

  outbuf=av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE*2);
  if(!outbuf)
    return;	/* allocation failure: nothing to decode into */

  av_init_packet(&avpkt);
  avpkt.data = (uint8_t *)frame->h264->orig_bytes;
  avpkt.size = frame->h264->orig_size;
  size_out = AVCODEC_MAX_AUDIO_FRAME_SIZE*2;
  len = avcodec_decode_audio3(handle->dec, outbuf, &size_out,&avpkt);  
  if(len < 0) {
    av_free(outbuf);
    return;
  }
  frame->sample = driver_alloc_binary(size_out);    
  memcpy(frame->sample->orig_bytes,outbuf,size_out);
  frame->sample->orig_size = size_out; 
  av_free(outbuf);
  driver_free_binary(frame->h264);
}
예제 #26
0
/* Decode every frame of one packet, resample to S16 if needed, and append
 * the samples (as floats, raw S16 range) to the KeyFinder audio object.
 * Returns 0 on success, 1 when the decoder reported an error. */
int LibAvDecoder::decodePacket(AVCodecContext* cCtx, ReSampleContext* rsCtx, AVPacket* originalPacket, KeyFinder::AudioData* audio){
  // copy packet so we can shift data pointer about without endangering garbage collection
  AVPacket tempPacket;
  tempPacket.size = originalPacket->size;
  tempPacket.data = originalPacket->data;
  // loop in case audio packet contains multiple frames
  while(tempPacket.size > 0){
    int dataSize = frameBufferSize;
    int16_t* dataBuffer = (int16_t*)frameBuffer;
    int bytesConsumed = avcodec_decode_audio3(cCtx, dataBuffer, &dataSize, &tempPacket);
    if(bytesConsumed < 0){ // error
      tempPacket.size = 0;
      return 1;
    }
    tempPacket.data += bytesConsumed;
    tempPacket.size -= bytesConsumed;
    if(dataSize <= 0)
      continue; // nothing decoded
    /* Bytes decoded -> sample count for the codec's native sample format. */
    int newSamplesDecoded = dataSize / av_get_bytes_per_sample(cCtx->sample_fmt);
    // Resample if necessary
    if(cCtx->sample_fmt != AV_SAMPLE_FMT_S16){
      int resampleResult = audio_resample(rsCtx, (short*)frameBufferConverted, (short*)frameBuffer, newSamplesDecoded);
      if(resampleResult < 0){
        throw KeyFinder::Exception(GuiStrings::getInstance()->libavCouldNotResample().toLocal8Bit().constData());
      }
      dataBuffer = (int16_t*)frameBufferConverted;
    }
    int oldSampleCount = audio->getSampleCount();
    try{
      audio->addToSampleCount(newSamplesDecoded);
    }catch(KeyFinder::Exception& e){
      throw e;
    }
    /* Append as floats; no normalisation is applied. */
    for(int i = 0; i < newSamplesDecoded; i++){
      audio->setSample(oldSampleCount+i, (float)dataBuffer[i]);
    }
  }
  return 0;
}
예제 #27
0
파일: playvid.c 프로젝트: UIKit0/openlase-1
/* Fill lb/rb with `samples` resampled stereo float samples, reading and
 * decoding packets from the audio format context on demand.  On EOF the
 * remainder of both buffers is zero-filled.
 *
 * Fix: packets were never released — neither those belonging to other
 * streams (skipped in the read loop) nor the decoded audio packets —
 * leaking every packet buffer.  av_free_packet() is now called in both
 * places; decoding copies into iabuf, so freeing after the decode is safe. */
void moreaudio(float *lb, float *rb, int samples)
{
	AVPacket packet;
	int bytes, bytesDecoded;
	int input_samples;
	while (samples)
	{
		if (!buffered_samples) {
			do {
				if(av_read_frame(pAFormatCtx, &packet)<0) {
					fprintf(stderr, "Audio EOF!\n");
					memset(lb, 0, samples*sizeof(float));
					memset(rb, 0, samples*sizeof(float));
					return;
				}
				/* Not our stream: release the packet before reading another. */
				if (packet.stream_index != audioStream)
					av_free_packet(&packet);
			} while(packet.stream_index!=audioStream);

			bytes = AUDIO_BUF * sizeof(short);

			bytesDecoded = avcodec_decode_audio3(pACodecCtx, iabuf, &bytes, &packet);
			av_free_packet(&packet);	/* decoded PCM now lives in iabuf */
			if(bytesDecoded < 0)
			{
				fprintf(stderr, "Error while decoding audio frame\n");
				return;
			}

			input_samples = bytes / (sizeof(short)*pACodecCtx->channels);

			buffered_samples = audio_resample(resampler, (void*)oabuf, iabuf, input_samples);
			poabuf = oabuf;
		}

		*lb++ = *poabuf++ * volume;
		*rb++ = *poabuf++ * volume;
		buffered_samples--;
		samples--;
	}
}
예제 #28
0
/* Decode one audio frame and returns its uncompressed size */
static int audio_decode_frame(priv_t * ffmpeg, uint8_t *audio_buf, int buf_size)
{
  AVPacket *pkt = &ffmpeg->audio_pkt;
  int len1, data_size;

  for (;;) {
    /* NOTE: the audio packet can contain several frames */
    while (ffmpeg->audio_pkt.size > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio3(ffmpeg->audio_st->codec,
				   (int16_t *)audio_buf, &data_size,
				   pkt);
      if (len1 < 0) /* if error, we skip the rest of the packet */
	return 0;

      ffmpeg->audio_pkt.data += len1;
      ffmpeg->audio_pkt.size -= len1;
      if (data_size <= 0)
	continue;
      return data_size;
    }
  }
}
예제 #29
0
/* Decode one or more audio frames from a single packet into outputBuf.
 * Returns the number of PCM bytes written, 0 if the output buffer is too
 * small for a frame, or -1 on a decoder error.
 *
 * Fix: the original passed the same unmodified packet to
 * avcodec_decode_audio3() on every loop pass, so the packet's first frame
 * was decoded repeatedly until the output buffer filled (and the loop never
 * terminated for zero-progress decodes).  A local cursor packet now tracks
 * consumed input, and the loop ends once the packet is exhausted. */
int FFMpegDecoder::decodeAudio(AVPacket *pkt,unsigned char* outputBuf,int outputBufSize)
{
	AVPacket cursor = *pkt;		// local view; caller keeps ownership
	unsigned char *bufPtr = outputBuf;
	int availableSize = outputBufSize;

	while (cursor.size > 0)
	{
		int bufSize = availableSize;
		int bytesDecoded = avcodec_decode_audio3(pAudioCodecCtx,(int16_t*)bufPtr,&bufSize,&cursor);
		if (bytesDecoded < 0)
			return -1;
		if (bytesDecoded == 0)
			break;		// no progress; avoid an infinite loop

		if (bufSize > availableSize)
		{
			printf("warning! audio buffer too small!\n");
			return 0;
		}

		cursor.data += bytesDecoded;
		cursor.size -= bytesDecoded;

		if (bufSize > 0)
		{
			availableSize -= bufSize;
			bufPtr += bufSize;
		}
		if (availableSize <= 0)
			break;
	}
	return (outputBufSize - availableSize);
}
예제 #30
0
/* Read and decode the next frame; *buffer is pointed at the internal PCM
 * buffer.  Returns the decoded byte count, 0 on decode error or non-audio
 * packet, and -1 on EOF / unopened input.  Also refreshes m_position,
 * m_rate and m_channels as side effects. */
int FFMpegDecoder::read(unsigned char **buffer)
{
  AVPacket packet;
  int bytes_read;
  *buffer = m_buffer;

  if (!m_format) return -1;

  if (av_read_frame(m_format, &packet) < 0)
    return -1;

  /* Track a rough playback position for UI purposes. */
  if (m_audio_stream->duration > 1) {
    m_position = packet.pts * 1.0 / m_audio_stream->duration;
  }
  else {
    /* NOTE(review): fallback divides by m_file_size — confirm it is
     * guaranteed non-zero here. */
    m_position = m_audio_stream->parser->frame_offset * 1.0 / m_file_size;
  }

  debug("pos: %f\n", m_position);

  /* NOTE(review): assumes the audio stream is index 0. */
  if (packet.stream_index == 0) {
    bytes_read = m_buffer_size;
    /* NOTE(review): this early return skips av_free_packet() below,
     * leaking the packet on decode errors — verify. */
    if (avcodec_decode_audio3(m_codec_ctx, (int16_t *)m_buffer, &bytes_read, &packet) < 0) {
      debug("error decoding frame\n");
      return 0;
    }
  }
  else
    bytes_read = 0;

  av_free_packet(&packet);
    
  if (m_codec_ctx->sample_rate) m_rate = m_codec_ctx->sample_rate;
  if (m_codec_ctx->channels) m_channels = m_codec_ctx->channels;
  return bytes_read;
}