Example #1
int main(int argc, char **argv)
{
    int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
    int src_rate = 48000, dst_rate = 44100;
    uint8_t **src_data = NULL, **dst_data = NULL;
    int src_nb_channels = 0, dst_nb_channels = 0;
    int src_linesize, dst_linesize;
    int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
    enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
    const char *dst_filename = NULL;
    FILE *dst_file;
    int dst_bufsize;
    const char *fmt;
    struct SwrContext *swr_ctx;
    double t;
    int ret;

    if (argc != 2) {
        fprintf(stderr, "Usage: %s output_file\n"
                "API example program to show how to resample an audio stream with libswresample.\n"
                "This program generates a series of audio frames, resamples them to a specified "
                "output format and rate and saves them to an output file named output_file.\n",
            argv[0]);
        exit(1);
    }
    dst_filename = argv[1];

    dst_file = fopen(dst_filename, "wb");
    if (!dst_file) {
        fprintf(stderr, "Could not open destination file %s\n", dst_filename);
        exit(1);
    }

    /* create resampler context */
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* set options */
    av_opt_set_int(swr_ctx, "in_channel_layout",    src_ch_layout, 0);
    av_opt_set_int(swr_ctx, "in_sample_rate",       src_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);

    av_opt_set_int(swr_ctx, "out_channel_layout",    dst_ch_layout, 0);
    av_opt_set_int(swr_ctx, "out_sample_rate",       dst_rate, 0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        goto end;
    }

    /* allocate source and destination samples buffers */

    src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
    ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
                                             src_nb_samples, src_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate source samples\n");
        goto end;
    }

    /* compute the number of converted samples: buffering is avoided
     * ensuring that the output buffer will contain at least all the
     * converted input samples */
    max_dst_nb_samples = dst_nb_samples =
        av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

    /* buffer is going to be directly written to a rawaudio file, no alignment */
    dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
    ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
                                             dst_nb_samples, dst_sample_fmt, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate destination samples\n");
        goto end;
    }

    t = 0;
    do {
        /* generate synthetic audio */
        fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);

        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
                                        src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_data[0]);
            ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
                                   dst_nb_samples, dst_sample_fmt, 1);
            if (ret < 0)
                break;
            max_dst_nb_samples = dst_nb_samples;
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            goto end;
        }
        dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
                                                 ret, dst_sample_fmt, 1);
        if (dst_bufsize < 0) {
            fprintf(stderr, "Could not get sample buffer size\n");
            goto end;
        }
        printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
        fwrite(dst_data[0], 1, dst_bufsize, dst_file);
    } while (t < 10);

    if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
        goto end;
    fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
            "ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
            fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);

end:
    if (dst_file)
        fclose(dst_file);

    if (src_data)
        av_freep(&src_data[0]);
    av_freep(&src_data);

    if (dst_data)
        av_freep(&dst_data[0]);
    av_freep(&dst_data);

    swr_free(&swr_ctx);
    return ret < 0;
}
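
Example #1 is FFmpeg's doc/examples/resampling_audio.c and calls two helpers that are not shown above. They look roughly like this (a sketch after the FFmpeg example; the exact format table may vary between versions):

/* Map an AVSampleFormat to the raw-format name ffplay expects (sketch). */
static int get_format_from_sample_fmt(const char **fmt,
                                      enum AVSampleFormat sample_fmt)
{
    struct sample_fmt_entry {
        enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
    } sample_fmt_entries[] = {
        { AV_SAMPLE_FMT_U8,  "u8",    "u8"    },
        { AV_SAMPLE_FMT_S16, "s16be", "s16le" },
        { AV_SAMPLE_FMT_S32, "s32be", "s32le" },
        { AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
        { AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
    };
    *fmt = NULL;
    for (size_t i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
        if (sample_fmt == sample_fmt_entries[i].sample_fmt) {
            *fmt = AV_NE(sample_fmt_entries[i].fmt_be,
                         sample_fmt_entries[i].fmt_le);
            return 0;
        }
    }
    return AVERROR(EINVAL);
}

/* Fill an interleaved double buffer with a 440 Hz sine tone, advancing *t. */
static void fill_samples(double *dst, int nb_samples, int nb_channels,
                         int sample_rate, double *t)
{
    const double tincr = 1.0 / sample_rate, c = 2 * M_PI * 440.0;
    for (int i = 0; i < nb_samples; i++) {
        dst[0] = sin(c * *t);
        for (int j = 1; j < nb_channels; j++)
            dst[j] = dst[0];          /* duplicate the tone on every channel */
        dst += nb_channels;
        *t += tincr;
    }
}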
Example #2
static BOOL tsmf_ffmpeg_decode_audio(ITSMFDecoder* decoder, const BYTE *data, UINT32 data_size, UINT32 extensions)
{
	TSMFFFmpegDecoder* mdecoder = (TSMFFFmpegDecoder*) decoder;
	int len;
	int frame_size;
	UINT32 src_size;
	const BYTE *src;
	BYTE *dst;
	int dst_offset;
#if 0
	WLog_DBG(TAG, ("tsmf_ffmpeg_decode_audio: data_size %d", data_size));
	int i;
	for(i = 0; i < data_size; i++)
	{
		WLog_DBG(TAG, ("%02X ", data[i]));
		if (i % 16 == 15)
			WLog_DBG(TAG, ("\n"));
	}
#endif
	if (mdecoder->decoded_size_max == 0)
		mdecoder->decoded_size_max = MAX_AUDIO_FRAME_SIZE + 16;

	mdecoder->decoded_data = calloc(1, mdecoder->decoded_size_max);
	if (!mdecoder->decoded_data)
		return FALSE;

	/* align the memory for SSE2 needs */
	dst = (BYTE *)(((uintptr_t) mdecoder->decoded_data + 15) & ~ 0x0F);
	dst_offset = dst - mdecoder->decoded_data;
	src = data;
	src_size = data_size;
	while(src_size > 0)
	{
		/* Ensure enough space for decoding */
		if (mdecoder->decoded_size_max - mdecoder->decoded_size < MAX_AUDIO_FRAME_SIZE)
		{
			BYTE *tmp_data;

			tmp_data = realloc(mdecoder->decoded_data, mdecoder->decoded_size_max * 2 + 16);
			if (!tmp_data)
				return FALSE;
			mdecoder->decoded_size_max = mdecoder->decoded_size_max * 2 + 16;
			mdecoder->decoded_data = tmp_data;

			dst = (BYTE *)(((uintptr_t)mdecoder->decoded_data + 15) & ~ 0x0F);
			if (dst - mdecoder->decoded_data != dst_offset)
			{
				/* re-align the memory if the alignment has changed after realloc */
				memmove(dst, mdecoder->decoded_data + dst_offset, mdecoder->decoded_size);
				dst_offset = dst - mdecoder->decoded_data;
			}
			dst += mdecoder->decoded_size;
		}
		frame_size = mdecoder->decoded_size_max - mdecoder->decoded_size;
#if LIBAVCODEC_VERSION_MAJOR < 52 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR <= 20)
		len = avcodec_decode_audio2(mdecoder->codec_context,
									(int16_t *) dst, &frame_size, src, src_size);
#else
		{
#if LIBAVCODEC_VERSION_MAJOR < 55
			AVFrame *decoded_frame = avcodec_alloc_frame();
#else
			AVFrame *decoded_frame = av_frame_alloc();
#endif
			int got_frame = 0;
			AVPacket pkt;
			av_init_packet(&pkt);
			pkt.data = (BYTE *) src;
			pkt.size = src_size;
			len = avcodec_decode_audio4(mdecoder->codec_context, decoded_frame, &got_frame, &pkt);
			if (len >= 0 && got_frame)
			{
				frame_size = av_samples_get_buffer_size(NULL, mdecoder->codec_context->channels,
														decoded_frame->nb_samples, mdecoder->codec_context->sample_fmt, 1);
				memcpy(dst, decoded_frame->data[0], frame_size);
			}
			av_free(decoded_frame);
		}
#endif
		if (len <= 0 || frame_size <= 0)
		{
			WLog_ERR(TAG, "error decoding");
			break;
		}
		src += len;
		src_size -= len;
		mdecoder->decoded_size += frame_size;
		dst += frame_size;
	}
	if (mdecoder->decoded_size == 0)
	{
		free(mdecoder->decoded_data);
		mdecoder->decoded_data = NULL;
	}
	else
		if (dst_offset)
		{
			/* move the aligned decoded data to original place */
			memmove(mdecoder->decoded_data, mdecoder->decoded_data + dst_offset, mdecoder->decoded_size);
		}
	DEBUG_TSMF("data_size %d decoded_size %d",
			   data_size, mdecoder->decoded_size);
	return TRUE;
}
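
The manual 16-byte rounding in Example #2 is needed because calloc()/realloc() make no SIMD alignment promises. An alternative sketch (a suggestion, not FreeRDP's actual code): grow the buffer with av_malloc(), which does return suitably aligned memory, and copy the old contents over:

/* Sketch: grow a buffer while keeping SIMD-friendly alignment. */
static BYTE *grow_aligned(BYTE *old, size_t used, size_t new_size)
{
	BYTE *buf = (BYTE *) av_malloc(new_size);   /* aligned for SIMD */
	if (!buf)
		return NULL;
	if (old)
	{
		memcpy(buf, old, used);
		av_free(old);
	}
	return buf;
}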
Example #3
int audio_decode_frame(FFmpegState *st) {
	int len1, len2, decoded_data_size;
	AVPacket *pkt = &st->audio_pkt;
	int got_frame = 0;
	int64_t dec_channel_layout;
	int wanted_nb_samples, resampled_data_size;

	for (;;) {
		while (st->audio_pkt_size > 0) {
			if (!st->aFrame) {
				if (!(st->aFrame = av_frame_alloc())) {
					return AVERROR(ENOMEM);
				}
			}
			//else
			//    avcodec_get_frame_defaults(st->aFrame);

			len1 = avcodec_decode_audio4(st->audio_st->codec, st->aFrame, &got_frame, pkt);
			if (len1 < 0) {
				// error, skip the frame
				st->audio_pkt_size = 0;
				break;
			}

			st->audio_pkt_data += len1;
			st->audio_pkt_size -= len1;

			if (!got_frame)
				continue;

			decoded_data_size = av_samples_get_buffer_size(NULL,
				st->aFrame->channels,
				st->aFrame->nb_samples,
				(AVSampleFormat)st->aFrame->format, 1);

			dec_channel_layout = (st->aFrame->channel_layout && st->aFrame->channels
				== av_get_channel_layout_nb_channels(st->aFrame->channel_layout))
				? st->aFrame->channel_layout
				: av_get_default_channel_layout(st->aFrame->channels);

			wanted_nb_samples = st->aFrame->nb_samples;

			//fprintf(stderr, "wanted_nb_samples = %d\n", wanted_nb_samples);
			if (st->swr) {
				// const uint8_t *in[] = { is->audio_frame->data[0] };
				const uint8_t **in = (const uint8_t **)st->aFrame->extended_data;
				uint8_t *out[] = { st->audio_buf2 };
				if (wanted_nb_samples != st->aFrame->nb_samples) {
					if (swr_set_compensation(st->swr, (wanted_nb_samples - st->aFrame->nb_samples)
						* st->audio_tgt_freq / st->aFrame->sample_rate,
						wanted_nb_samples * st->audio_tgt_freq / st->aFrame->sample_rate) < 0) {
						fprintf(stderr, "swr_set_compensation() failed\n");
						break;
					}
				}

				// SWR Convert
				len2 = swr_convert(st->swr, out,
					sizeof(st->audio_buf2)
					/ st->audio_tgt_channels
					/ av_get_bytes_per_sample(st->audio_tgt_fmt),
					in, st->aFrame->nb_samples);
				if (len2 < 0) {
					fprintf(stderr, "swr_convert() failed\n");
					break;
				}

				if (len2 == sizeof(st->audio_buf2) / st->audio_tgt_channels / av_get_bytes_per_sample(st->audio_tgt_fmt)) {
					fprintf(stderr, "warning: audio buffer is probably too small\n");
					swr_init(st->swr);
				}

				st->audio_buf = st->audio_buf2;
				resampled_data_size = len2 * st->audio_tgt_channels * av_get_bytes_per_sample(st->audio_tgt_fmt);
			}
			else {
				resampled_data_size = decoded_data_size;
				st->audio_buf = st->aFrame->data[0];
			}

			// Update the wave buffer
			if (st->wave) {
				update_wave_buffer(st->audio_buf, resampled_data_size);
			}

			// We have data, return it and come back for more later
			return resampled_data_size;
		}

		// Read packet from queue
		if (pkt->data) av_free_packet(pkt);
		memset(pkt, 0, sizeof(*pkt));
		if (st->quit) return -1;
		if (packet_queue_get(&st->audioq, pkt, 1) < 0) return -1;

		st->audio_pkt_data = pkt->data;
		st->audio_pkt_size = pkt->size;
	}
}
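
Example #3 assumes st->swr was created elsewhere. A minimal setup sketch; audio_tgt_channel_layout is an assumed field name, while the other audio_tgt_* fields appear in the code above:

/* Sketch: create the resampler that the loop above relies on. */
st->swr = swr_alloc_set_opts(NULL,
                             st->audio_tgt_channel_layout, /* assumed field */
                             st->audio_tgt_fmt,
                             st->audio_tgt_freq,
                             dec_channel_layout,
                             (enum AVSampleFormat) st->aFrame->format,
                             st->aFrame->sample_rate,
                             0, NULL);
if (!st->swr || swr_init(st->swr) < 0)
    fprintf(stderr, "could not create/init SwrContext\n");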
Example #4
long audio_decode_frame(VideoState *is, double *pts_ptr) {
    /* For example with wma audio package size can be
       like 100 000 bytes */
    long len1, data_size = 0;
    AVPacket *pkt = &is->audio_pkt;
    double pts;
    int n = 0;
#ifdef __RESAMPLER__
    long resample_size = 0;
#endif

    for(;;) {
        while(is->audio_pkt_size > 0) {
            int got_frame;
            len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);

            if(len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }

            if(got_frame) {
                data_size =
                    av_samples_get_buffer_size
                    (
                        NULL,
                        is->audio_st->codec->channels,
                        is->audio_frame.nb_samples,
                        is->audio_st->codec->sample_fmt,
                        1
                    );

                if(data_size <= 0) {
                    /* No data yet, get more frames */
                    continue;
                }

#ifdef __RESAMPLER__

                if(is->audio_need_resample == 1) {
                    resample_size = audio_tutorial_resample(is, &is->audio_frame);

                    if( resample_size > 0 ) {
                        memcpy(is->audio_buf, is->pResampledOut, resample_size);
                        memset(is->pResampledOut, 0x00, resample_size);
                    }

                } else {
#endif
                    memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
#ifdef __RESAMPLER__
                }

#endif
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;

            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * is->audio_st->codec->channels;

#ifdef __RESAMPLER__

            /* If you just return original data_size you will suffer
               for clicks because you don't have that much data in
               queue incoming so return resampled size. */
            if(is->audio_need_resample == 1) {
                is->audio_clock += (double)resample_size /
                                   (double)(n * is->audio_st->codec->sample_rate);
                return resample_size;

            } else {
#endif
                /* We have data, return it and come back for more later */
                is->audio_clock += (double)data_size /
                                   (double)(n * is->audio_st->codec->sample_rate);
                return data_size;
#ifdef __RESAMPLER__
            }

#endif
        }

        if(pkt->data) {
            av_free_packet(pkt);
        }

        if(is->quit) {
            return -1;
        }

        /* next packet */
        if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* if update, update the audio clock w/pts */
        if(pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
        }

    }
}
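audio_tutorial_resample() in Example #4 is defined elsewhere in that tutorial. Its core is an swr_convert() into the preallocated is->pResampledOut buffer, along these lines (a sketch; pSwrCtx, resample_size_max, audio_tgt_channels and audio_tgt_fmt are assumed field names):

/* Sketch: convert one decoded frame and return the resampled byte count. */
long audio_tutorial_resample(VideoState *is, AVFrame *frame)
{
    uint8_t *out[] = { is->pResampledOut };
    int out_samples = swr_convert(is->pSwrCtx, out,
                                  is->resample_size_max, /* capacity in samples */
                                  (const uint8_t **) frame->data,
                                  frame->nb_samples);
    if (out_samples < 0)
        return -1;
    return (long) out_samples * is->audio_tgt_channels *
           av_get_bytes_per_sample(is->audio_tgt_fmt);
}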
Example #5
 JNIEXPORT jint JNICALL Java_com_jiuan_it_ipc_utils_RtspFromFFMPEG_thread_1DecodePacket_1audio
   (JNIEnv *env, jobject obj, jbyteArray jarrRV, jdoubleArray pts_audio)
 {
	//LOGD("%s-------%d entering this function", __FUNCTION__, __LINE__);
	AVPacket *packet = NULL;
	AVFrame* pFrame = NULL;
	int ret = 0;
	int got_audio;
	double pts_arr[1];

	packet = (AVPacket *)cbuf_dequeue(&m_cbuf_audio);
	if (packet == NULL)
	{
		//LOGE("m_cbuf_audio is NULL\n");
		ret = -1;
		goto ErrLab;
	}

	//avcodec_decode_audio4
	pFrame = av_frame_alloc();
	ret = avcodec_decode_audio4(pCodecCtx_audio, pFrame, &got_audio, packet);
	if(ret <= 0){
		LOGE("Decode Error.(解码错误)\n");
		ret = -1;
		goto ErrLab;
	}

	if(packet->pts != AV_NOPTS_VALUE) {
		int64_t seek_conv_CurPktPts = av_rescale_q(packet->pts, pCodecCtx_audio->time_base, pCodecCtx_video->time_base);
		pts_arr[0] = seek_conv_CurPktPts * av_q2d(pCodecCtx_video->time_base);
		//pts_arr[0] = packet->pts * av_q2d(pCodecCtx_audio->time_base);
		(*env)->SetDoubleArrayRegion(env, pts_audio, 0, 1, pts_arr);
	} else {
		pts_arr[0] = 0;
		(*env)->SetDoubleArrayRegion(env, pts_audio, 0, 1, pts_arr);
	}

	if (got_audio > 0)
	{
		int error = swr_convert(aud_convert_ctx, &out_buffer_audio,
								AVCODEC_MAX_AUDIO_FRAME_SIZE,
								(const uint8_t **) pFrame->data,
								pFrame->nb_samples);

		int bufsize = av_samples_get_buffer_size(NULL,av_get_channel_layout_nb_channels(AV_CH_LAYOUT_MONO),pFrame->nb_samples,AV_SAMPLE_FMT_S16, 1);

		(*env)->SetByteArrayRegion(env, jarrRV, 0, bufsize, out_buffer_audio);
		ret = bufsize;

		LOGD("解码完成一帧,当前i值为:%d",bufsize);
		//ret = 0;
	}
	else
	{
		ret = -1;
		goto ErrLab;
	}

ErrLab:
	if(packet) av_free_packet(packet);
	if(pFrame) av_frame_free(&pFrame);
	return ret;
 }
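
Beware that the second size argument of swr_convert() in Example #5 is a capacity in samples per channel, not bytes, so passing AVCODEC_MAX_AUDIO_FRAME_SIZE can overstate the real capacity of out_buffer_audio. A safer computation (sketch; out_buffer_bytes stands for however many bytes were actually allocated):

/* out_count = output buffer capacity in samples per channel */
int out_channels    = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_MONO);
int max_out_samples = out_buffer_bytes /
                      (out_channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16));

int out_samples = swr_convert(aud_convert_ctx, &out_buffer_audio,
                              max_out_samples,
                              (const uint8_t **) pFrame->data,
                              pFrame->nb_samples);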
Example #6
static int encode_audio(AVCodecContext *avctx, AVPacket *pkt, int16_t *audio_samples, int nb_samples)
{
   int i, ch, buffer_size, ret, got_output = 0;
   void *samples = NULL;
   AVFrame *frame = NULL;

   if (audio_samples) {
      frame = av_frame_alloc();
      if (!frame)
         return AVERROR(ENOMEM);

      frame->nb_samples     = nb_samples;
      frame->format         = avctx->sample_fmt;
#if !defined(DISABLE_DYNAMIC_LOADING_FFMPEG) || (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(54, 13, 0))
      frame->channel_layout = avctx->channel_layout;
#endif

      buffer_size = av_samples_get_buffer_size(NULL, avctx->channels, frame->nb_samples,
                                              avctx->sample_fmt, 0);
      if (buffer_size < 0) {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not get sample buffer size")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return buffer_size;
      }
      samples = av_malloc(buffer_size);
      if (!samples) {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not allocate bytes for samples buffer")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return AVERROR(ENOMEM);
      }
      /* setup the data pointers in the AVFrame */
      ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
                                  (const uint8_t*)samples, buffer_size, 0);
      if (ret < 0) {
         wxMessageBox(wxString::Format(_("FFmpeg : ERROR - Could not setup audio frame")),
                      _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
         return ret;
      }

      for (ch = 0; ch < avctx->channels; ch++) {
         for (i = 0; i < frame->nb_samples; i++) {
            switch(avctx->sample_fmt) {
            case AV_SAMPLE_FMT_U8:
               ((uint8_t*)(frame->data[0]))[ch + i*avctx->channels] = audio_samples[ch + i*avctx->channels]/258 + 128;
               break;
            case AV_SAMPLE_FMT_U8P:
               ((uint8_t*)(frame->data[ch]))[i] = audio_samples[ch + i*avctx->channels]/258 + 128;
               break;
            case AV_SAMPLE_FMT_S16:
               ((int16_t*)(frame->data[0]))[ch + i*avctx->channels] = audio_samples[ch + i*avctx->channels];
               break;
            case AV_SAMPLE_FMT_S16P:
               ((int16_t*)(frame->data[ch]))[i] = audio_samples[ch + i*avctx->channels];
               break;
            case AV_SAMPLE_FMT_S32:
               ((int32_t*)(frame->data[0]))[ch + i*avctx->channels] = audio_samples[ch + i*avctx->channels]<<16;
               break;
            case AV_SAMPLE_FMT_S32P:
               ((int32_t*)(frame->data[ch]))[i] = audio_samples[ch + i*avctx->channels]<<16;
               break;
            case AV_SAMPLE_FMT_FLT:
               ((float*)(frame->data[0]))[ch + i*avctx->channels] = audio_samples[ch + i*avctx->channels] / 32767.0;
               break;
            case AV_SAMPLE_FMT_FLTP:
               ((float*)(frame->data[ch]))[i] = audio_samples[ch + i*avctx->channels] / 32767.;
               break;
            }
         }
      }
   }
   av_init_packet(pkt);
   pkt->data = NULL; // packet data will be allocated by the encoder
   pkt->size = 0;

   ret = avcodec_encode_audio2(avctx, pkt, frame, &got_output);
   if (ret < 0) {
      wxMessageBox(wxString::Format(_("FFmpeg : ERROR - encoding frame failed")),
                   _("FFmpeg Error"), wxOK|wxCENTER|wxICON_EXCLAMATION);
      return ret;
   }

   pkt->dts = pkt->pts = AV_NOPTS_VALUE; // we dont set frame.pts thus dont trust the AVPacket ts

   av_frame_free(&frame);
   av_freep(&samples);

   return got_output;
}
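
The switch in Example #6 handles both interleaved formats (all channels packed into frame->data[0]) and planar formats (the 'P' suffix, one channel per frame->data[ch]). The layout can also be queried at run time (sketch):

/* bps = bytes per sample of the format */
if (av_sample_fmt_is_planar(avctx->sample_fmt)) {
   /* planar: sample i of channel ch lives at frame->data[ch] + i * bps */
} else {
   /* interleaved: it lives at frame->data[0] + (i * avctx->channels + ch) * bps */
}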
Example #7
AkPacket ConvertAudioFFmpeg::convert(const AkAudioPacket &packet,
                                     const AkCaps &oCaps)
{
    AkAudioCaps oAudioCaps(oCaps);

    int64_t iSampleLayout = channelLayouts->value(packet.caps().layout(), 0);

    AVSampleFormat iSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(packet.caps().format())
                              .toStdString().c_str());

    int iSampleRate = packet.caps().rate();
    int iNChannels = packet.caps().channels();
    int iNSamples = packet.caps().samples();

    int64_t oSampleLayout = channelLayouts->value(oAudioCaps.layout(),
                                                  AV_CH_LAYOUT_STEREO);

    AVSampleFormat oSampleFormat =
            av_get_sample_fmt(AkAudioCaps::sampleFormatToString(oAudioCaps.format())
                              .toStdString().c_str());

    int oSampleRate = oAudioCaps.rate();
    int oNChannels = oAudioCaps.channels();

    this->m_resampleContext =
            swr_alloc_set_opts(this->m_resampleContext,
                               oSampleLayout,
                               oSampleFormat,
                               oSampleRate,
                               iSampleLayout,
                               iSampleFormat,
                               iSampleRate,
                               0,
                               NULL);

    if (!this->m_resampleContext)
        return AkPacket();

    // Create input audio frame.
    static AVFrame iFrame;
    memset(&iFrame, 0, sizeof(AVFrame));
    iFrame.format = iSampleFormat;
    iFrame.channels = iNChannels;
    iFrame.channel_layout = uint64_t(iSampleLayout);
    iFrame.sample_rate = iSampleRate;
    iFrame.nb_samples = iNSamples;
    iFrame.pts = iFrame.pkt_pts = packet.pts();

    if (avcodec_fill_audio_frame(&iFrame,
                                 iFrame.channels,
                                 iSampleFormat,
                                 reinterpret_cast<const uint8_t *>(packet.buffer().constData()),
                                 packet.buffer().size(),
                                 1) < 0) {
        return AkPacket();
    }

    // Fill output audio frame.
    AVFrame oFrame;
    memset(&oFrame, 0, sizeof(AVFrame));
    oFrame.format = oSampleFormat;
    oFrame.channels = oNChannels;
    oFrame.channel_layout = uint64_t(oSampleLayout);
    oFrame.sample_rate = oSampleRate;
    oFrame.nb_samples = int(swr_get_delay(this->m_resampleContext, oSampleRate))
                        + iFrame.nb_samples
                        * oSampleRate
                        / iSampleRate
                        + 3;
    oFrame.pts = oFrame.pkt_pts = iFrame.pts * oSampleRate / iSampleRate;

    // Calculate the size of the audio buffer.
    int frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                               oFrame.channels,
                                               oFrame.nb_samples,
                                               oSampleFormat,
                                               1);

    QByteArray oBuffer(frameSize, Qt::Uninitialized);

    if (avcodec_fill_audio_frame(&oFrame,
                                 oFrame.channels,
                                 oSampleFormat,
                                 reinterpret_cast<const uint8_t *>(oBuffer.constData()),
                                 oBuffer.size(),
                                 1) < 0) {
        return AkPacket();
    }

    // convert to destination format
    if (swr_convert_frame(this->m_resampleContext,
                          &oFrame,
                          &iFrame) < 0)
        return AkPacket();

    frameSize = av_samples_get_buffer_size(oFrame.linesize,
                                           oFrame.channels,
                                           oFrame.nb_samples,
                                           oSampleFormat,
                                           1);

    oBuffer.resize(frameSize);

    AkAudioPacket oAudioPacket;
    oAudioPacket.caps() = oAudioCaps;
    oAudioPacket.caps().samples() = oFrame.nb_samples;
    oAudioPacket.buffer() = oBuffer;
    oAudioPacket.pts() = oFrame.pts;
    oAudioPacket.timeBase() = AkFrac(1, oAudioCaps.rate());
    oAudioPacket.index() = packet.index();
    oAudioPacket.id() = packet.id();

    return oAudioPacket.toPacket();
}
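
The "+ 3" head-room in Example #7's nb_samples estimate is ad hoc. FFmpeg's own examples bound the output count with av_rescale_rnd() over the resampler delay plus the current input, e.g. (sketch):

// Upper bound: pending input (delay) plus this frame, rescaled and rounded up.
oFrame.nb_samples = int(av_rescale_rnd(
        swr_get_delay(this->m_resampleContext, iSampleRate) + iFrame.nb_samples,
        oSampleRate, iSampleRate, AV_ROUND_UP));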
Example #8
std::uint8_t*
AudioDecoderFfmpeg::decodeFrame(const std::uint8_t* input,
        std::uint32_t inputSize, std::uint32_t& outputSize)
{
    //GNASH_REPORT_FUNCTION;

    assert(inputSize);

    size_t outSize = MAX_AUDIO_FRAME_SIZE;

    // TODO: make this a private member, to reuse (see NetStreamFfmpeg in 0.8.3)
    std::unique_ptr<std::int16_t, decltype(av_free)*> output(
        reinterpret_cast<std::int16_t*>(av_malloc(outSize)), av_free );
    if (!output.get()) {
        log_error(_("failed to allocate audio buffer."));
        outputSize = 0;
        return nullptr;
    }

    std::int16_t* outPtr = output.get();


#ifdef GNASH_DEBUG_AUDIO_DECODING
    log_debug("AudioDecoderFfmpeg: about to decode %d bytes; "
        "ctx->channels:%d, ctx->frame_size:%d",
        inputSize, _audioCodecCtx->channels, _audioCodecCtx->frame_size);
#endif

    // older ffmpeg versions didn't accept a const input.
    AVPacket pkt;
    int got_frm = 0;
    av_init_packet(&pkt);
    pkt.data = const_cast<uint8_t*>(input);
    pkt.size = inputSize;
    std::unique_ptr<AVFrame, FrameDeleter> frm(FRAMEALLOC(), FrameDeleter());
    if (!frm.get()) {
        log_error(_("failed to allocate frame."));
        return nullptr;
    }
    int tmp = avcodec_decode_audio4(_audioCodecCtx, frm.get(), &got_frm, &pkt);

#ifdef GNASH_DEBUG_AUDIO_DECODING
    const char* fmtname = av_get_sample_fmt_name(_audioCodecCtx->sample_fmt);
    log_debug(" decodeFrame | frm->nb_samples: %d | &got_frm: %d | "
        "returned %d | inputSize: %d",
        frm->nb_samples, got_frm, tmp, inputSize);
#endif

    int plane_size;
    if (tmp >= 0 && got_frm) {
        int data_size = av_samples_get_buffer_size( &plane_size,
            _audioCodecCtx->channels, frm->nb_samples,
            _audioCodecCtx->sample_fmt, 1);
        if (static_cast<int>(outSize) < data_size) {
            log_error(_("output buffer size is too small for the current frame "
                "(%d < %d)"), outSize, data_size);
            return nullptr;
        }

        memcpy(outPtr, frm->extended_data[0], plane_size);

#if !(defined(HAVE_SWRESAMPLE_H) || defined(HAVE_AVRESAMPLE_H))
        int planar = av_sample_fmt_is_planar(_audioCodecCtx->sample_fmt);
        if (planar && _audioCodecCtx->channels > 1) {
            uint8_t *out = ((uint8_t *)outPtr) + plane_size;
            for (int ch = 1; ch < _audioCodecCtx->channels; ch++) {
                memcpy(out, frm->extended_data[ch], plane_size);
                out += plane_size;
            }
        }
#endif

        outSize = data_size;
#ifdef GNASH_DEBUG_AUDIO_DECODING
        log_debug(" decodeFrame | fmt: %d | fmt_name: %s | planar: %d | "
            "plane_size: %d | outSize: %d",
            _audioCodecCtx->sample_fmt, fmtname, planar, plane_size, outSize);
#endif
    } else {
        if (tmp < 0)
            log_error(_("avcodec_decode_audio returned %d."), tmp);
        if (outSize < 2)
            log_error(_("outputSize:%d after decoding %d bytes of input audio "
                "data."), outputSize, inputSize);
        log_error(_("Upgrading ffmpeg/libavcodec might fix this issue."));
        outputSize = 0;
        return nullptr;
    }

    // Resampling is needed.
    if (_resampler.init(_audioCodecCtx)) {
        // Resampling is needed.

        // Compute new size based on frame_size and
        // resampling configuration

        // Find out the needed sample rate scaling
        double resampleFactor = 44100.0/_audioCodecCtx->sample_rate;

        // Compute total number of input samples
        int inSamples = outSize;
        bool stereo = _audioCodecCtx->channels > 1 ? true : false;

        if (stereo) inSamples = inSamples >> 1;
        if (_audioCodecCtx->sample_fmt == AV_SAMPLE_FMT_S16 ||
            _audioCodecCtx->sample_fmt == AV_SAMPLE_FMT_S16P) {
            inSamples = inSamples >> 1;
        }
Example #9
int AudioLoader::decode_audio_frame(AVCodecContext* audioCtx,
                                    int16_t* output,
                                    int* outputSize,
                                    AVPacket* packet) {


#if LIBAVCODEC_VERSION_INT < AVCODEC_51_28_0

    int len = avcodec_decode_audio(audioCtx, output, outputSize,
                                 packet->data, packet->size);

#elif LIBAVCODEC_VERSION_INT < AVCODEC_52_47_0

    int len = avcodec_decode_audio2(audioCtx, output, outputSize,
                                    packet->data, packet->size);

#elif LIBAVCODEC_VERSION_INT < AVCODEC_AUDIO_DECODE4

    int len = avcodec_decode_audio3(audioCtx, output, outputSize,
                                    packet);

#else

    int gotFrame = 0;
    avcodec_get_frame_defaults(_decodedFrame);

    int len = avcodec_decode_audio4(audioCtx, _decodedFrame, &gotFrame, packet);

    if (len < 0) return len; // error handling should be done outside

    if (gotFrame) {
        int nsamples = _decodedFrame->nb_samples;
        int inputDataSize = av_samples_get_buffer_size(NULL, audioCtx->channels, nsamples,
                                                       audioCtx->sample_fmt, 1);

#  if HAVE_SWRESAMPLE
        if (_convertCtx) {
            int outputSamples = *outputSize / (2 /*sizeof(S16)*/ * _nChannels);
            //if (outputSamples < nsamples) { cout << "OOPS!!" << endl; }

            if (swr_convert(_convertCtx,
                            (uint8_t**) &output, outputSamples,
                            (const uint8_t**)_decodedFrame->data, nsamples) < 0) {
                ostringstream msg;
                msg << "AudioLoader: Error converting"
                    << " from " << av_get_sample_fmt_name(_audioCtx->sample_fmt)
                    << " to "   << av_get_sample_fmt_name(AV_SAMPLE_FMT_S16);
                throw EssentiaException(msg);
            }
            *outputSize = nsamples * (2 /*sizeof(S16)*/ * _nChannels);
        }
        else {
            // no conversion needed, make a direct copy
            // copy and convert data from our frame to our output audio buffer
            //E_WARNING("Should use swresample always!");
            memcpy(output, _decodedFrame->data[0], inputDataSize);
            *outputSize = inputDataSize;
        }
#  else
        // direct copy, we do the sample format conversion later if needed
        memcpy(output, _decodedFrame->data[0], inputDataSize);
        *outputSize = inputDataSize;
#  endif

    }
    else {
        E_DEBUG(EAlgorithm, "AudioLoader: tried to decode packet but didn't get any frame...");
        *outputSize = 0;
    }

#endif

    return len;
}
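
Every avcodec_decode_audio* generation gated above is deprecated in current FFmpeg; since FFmpeg 3.1 the replacement is the send/receive pair, where one packet can yield zero or more frames (sketch):

int ret = avcodec_send_packet(audioCtx, packet);
while (ret >= 0) {
    ret = avcodec_receive_frame(audioCtx, _decodedFrame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;              // needs more input, or fully drained
    if (ret < 0)
        return ret;         // real decoding error
    // _decodedFrame->nb_samples samples are now available
}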
Example #10
std::uint8_t*
AudioDecoderFfmpeg::decodeFrame(const std::uint8_t* input,
        std::uint32_t inputSize, std::uint32_t& outputSize)
{
    //GNASH_REPORT_FUNCTION;

    assert(inputSize);

    size_t outSize = MAX_AUDIO_FRAME_SIZE;

    // TODO: make this a private member, to reuse (see NetStreamFfmpeg in 0.8.3)
    std::unique_ptr<std::int16_t, decltype(av_free)*> output(
        reinterpret_cast<std::int16_t*>(av_malloc(outSize)), av_free );
    if (!output.get()) {
        log_error(_("failed to allocate audio buffer."));
        outputSize = 0;
        return nullptr;
    }

    std::int16_t* outPtr = output.get();


#ifdef GNASH_DEBUG_AUDIO_DECODING
    log_debug("AudioDecoderFfmpeg: about to decode %d bytes; "
        "ctx->channels:%d, ctx->frame_size:%d",
        inputSize, _audioCodecCtx->channels, _audioCodecCtx->frame_size);
#endif

    // older ffmpeg versions didn't accept a const input.
    AVPacket pkt;
    int got_frm = 0;
    av_init_packet(&pkt);
    pkt.data = const_cast<uint8_t*>(input);
    pkt.size = inputSize;
    std::unique_ptr<AVFrame, FrameDeleter> frm(FRAMEALLOC(), FrameDeleter());
    if (!frm.get()) {
        log_error(_("failed to allocate frame."));
        return nullptr;
    }
    int tmp = avcodec_decode_audio4(_audioCodecCtx, frm.get(), &got_frm, &pkt);

#ifdef GNASH_DEBUG_AUDIO_DECODING
    const char* fmtname = av_get_sample_fmt_name(_audioCodecCtx->sample_fmt);
    log_debug(" decodeFrame | frm->nb_samples: %d | &got_frm: %d | "
        "returned %d | inputSize: %d",
        frm->nb_samples, got_frm, tmp, inputSize);
#endif

    int plane_size;
    if (tmp >= 0 && got_frm) {
        int data_size = av_samples_get_buffer_size( &plane_size,
            _audioCodecCtx->channels, frm->nb_samples,
            _audioCodecCtx->sample_fmt, 1);
        if (static_cast<int>(outSize) < data_size) {
            log_error(_("output buffer size is too small for the current frame "
                "(%d < %d)"), outSize, data_size);
            return nullptr;
        }

        memcpy(outPtr, frm->extended_data[0], plane_size);

#if !(defined(HAVE_SWRESAMPLE_H) || defined(HAVE_AVRESAMPLE_H))
        int planar = av_sample_fmt_is_planar(_audioCodecCtx->sample_fmt);
        if (planar && _audioCodecCtx->channels > 1) {
            uint8_t *out = ((uint8_t *)outPtr) + plane_size;
            for (int ch = 1; ch < _audioCodecCtx->channels; ch++) {
                memcpy(out, frm->extended_data[ch], plane_size);
                out += plane_size;
            }
        }
#endif

        outSize = data_size;
#ifdef GNASH_DEBUG_AUDIO_DECODING
        log_debug(" decodeFrame | fmt: %d | fmt_name: %s | planar: %d | "
            "plane_size: %d | outSize: %d",
            _audioCodecCtx->sample_fmt, fmtname, planar, plane_size, outSize);
#endif
    } else {
        if (tmp < 0)
            log_error(_("avcodec_decode_audio returned %d."), tmp);
        if (outSize < 2)
            log_error(_("outputSize:%d after decoding %d bytes of input audio "
                "data."), outputSize, inputSize);
        log_error(_("Upgrading ffmpeg/libavcodec might fix this issue."));
        outputSize = 0;
        return nullptr;
    }

    // Resampling is needed.
    if (_resampler.init(_audioCodecCtx)) {
        // Resampling is needed.

        // Compute new size based on frame_size and
        // resampling configuration
        double resampleFactor = (44100.0/_audioCodecCtx->sample_rate) * (2.0/_audioCodecCtx->channels);
        bool stereo = _audioCodecCtx->channels > 1 ? true : false;
        int inSamples = stereo ? outSize >> 2 : outSize >> 1;

        int expectedMaxOutSamples = std::ceil(inSamples*resampleFactor);

        // *channels *sampleSize 
        int resampledFrameSize = expectedMaxOutSamples*2*2;

        // Allocate just the required amount of bytes
        std::uint8_t* resampledOutput = new std::uint8_t[resampledFrameSize];

#ifdef GNASH_DEBUG_AUDIO_DECODING
        log_debug(" decodeFrame | Calling the resampler, resampleFactor: %d | "
            "in %d hz %d ch %d bytes %d samples, %s fmt", resampleFactor,
            _audioCodecCtx->sample_rate, _audioCodecCtx->channels, outSize,
            inSamples, fmtname);
        log_debug(" decodeFrame | out 44100 hz 2 ch %d bytes",
            resampledFrameSize);
#endif

        int outSamples = _resampler.resample(frm->extended_data, // input
            plane_size, // input
            frm->nb_samples, // input
            &resampledOutput); // output

        // make sure to set outPtr *after* we use it as input to the resampler
        outPtr = reinterpret_cast<std::int16_t*>(resampledOutput);

#ifdef GNASH_DEBUG_AUDIO_DECODING
        log_debug("resampler returned %d samples ", outSamples);
#endif

        if (expectedMaxOutSamples < outSamples) {
            log_error(_(" --- Computation of resampled samples (%d) < then the actual returned samples (%d)"),
                expectedMaxOutSamples, outSamples);

            log_debug(" input frame size: %d", outSize);
            log_debug(" input sample rate: %d", _audioCodecCtx->sample_rate);
            log_debug(" input channels: %d", _audioCodecCtx->channels);
            log_debug(" input samples: %d", inSamples);

            log_debug(" output sample rate (assuming): %d", 44100);
            log_debug(" output channels (assuming): %d", 2);
            log_debug(" output samples: %d", outSamples);

            /// Memory errors...
            abort();
        }

        // Use the actual number of samples returned, multiplied
        // to get size in bytes (not two-byte samples) and for 
        // stereo?
        outSize = outSamples * 2 * 2;

    }
Example #11
vod_status_t 
audio_filter_process_frame(void* context, input_frame_t* frame, u_char* buffer)
{
	audio_filter_state_t* state = (audio_filter_state_t*)context;
	vod_status_t rc;
	AVPacket output_packet;
	AVPacket input_packet;
	int got_packet;
	int got_frame;
	int ret;
#ifdef AUDIO_FILTER_DEBUG
	size_t data_size;
#endif // AUDIO_FILTER_DEBUG
	
	if (frame == NULL)
	{
		return audio_filter_flush_encoder(state);
	}

#ifdef AUDIO_FILTER_DEBUG
	audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_INPUT, buffer, frame->size);
#endif // AUDIO_FILTER_DEBUG
	
	vod_memzero(&input_packet, sizeof(input_packet));
	input_packet.data = buffer;
	input_packet.size = frame->size;
	input_packet.dts = state->dts;
	input_packet.pts = (state->dts + frame->pts_delay);
	input_packet.duration = frame->duration;
	input_packet.flags = AV_PKT_FLAG_KEY;
	state->dts += frame->duration;
	
	avcodec_get_frame_defaults(state->decoded_frame);

	got_frame = 0;
	ret = avcodec_decode_audio4(state->decoder, state->decoded_frame, &got_frame, &input_packet);
	if (ret < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: avcodec_decode_audio4 failed %d", ret);
		return VOD_BAD_DATA;
	}

	if (!got_frame)
	{
		return VOD_OK;
	}

#ifdef AUDIO_FILTER_DEBUG
	data_size = av_samples_get_buffer_size(
		NULL, 
		state->decoder->channels,
		state->decoded_frame->nb_samples,
		state->decoder->sample_fmt, 
		1);
	audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_DECODED, state->decoded_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG
	
	ret = av_buffersrc_add_frame_flags(state->buffer_src, state->decoded_frame, AV_BUFFERSRC_FLAG_PUSH);
	if (ret < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: av_buffersrc_add_frame_flags failed %d", ret);
		return VOD_ALLOC_FAILED;
	}

	for (;;)
	{
		ret = av_buffersink_get_frame_flags(state->buffer_sink, state->filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			break;
		}
		
		if (ret < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_process_frame: av_buffersink_get_frame_flags failed %d", ret);
			return VOD_UNEXPECTED;
		}

#ifdef AUDIO_FILTER_DEBUG
		data_size = av_samples_get_buffer_size(
			NULL, 
			state->encoder->channels,
			state->filtered_frame->nb_samples,
			state->encoder->sample_fmt, 
			1);
		audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_FILTERED, state->filtered_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		got_packet = 0;
		ret = avcodec_encode_audio2(state->encoder, &output_packet, state->filtered_frame, &got_packet);
		if (ret < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_process_frame: avcodec_encode_audio2 failed %d", ret);
			return VOD_ALLOC_FAILED;
		}
		
		if (got_packet)
		{
			rc = audio_filter_write_frame(state, &output_packet);

			av_free_packet(&output_packet);
			
			if (rc != VOD_OK)
			{
				return rc;
			}
		}
		
		av_frame_unref(state->filtered_frame);
	}
	
	return VOD_OK;
}
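
audio_filter_flush_encoder(), invoked at the top of Example #11 when frame == NULL, is not shown; the usual drain loop feeds the encoder NULL frames until got_packet stays 0 (a sketch, not necessarily the project's exact implementation):

static vod_status_t audio_filter_flush_encoder(audio_filter_state_t* state)
{
	AVPacket output_packet;
	vod_status_t rc;
	int got_packet;

	for (;;)
	{
		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		got_packet = 0;
		if (avcodec_encode_audio2(state->encoder, &output_packet, NULL, &got_packet) < 0)
		{
			return VOD_UNEXPECTED;
		}

		if (!got_packet)
		{
			return VOD_OK; // encoder fully drained
		}

		rc = audio_filter_write_frame(state, &output_packet);
		av_free_packet(&output_packet);
		if (rc != VOD_OK)
		{
			return rc;
		}
	}
}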
Example #12
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	AVOutputFormat* fmt;
	AVStream* audio_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;

	uint8_t* frame_buf;
	AVFrame* pFrame;
	AVPacket pkt;

	int got_frame=0;
	int ret=0;
	int size=0;

	FILE *in_file=NULL;	                        //Raw PCM data
	int framenum=1000;                          //Audio frame number
	const char* out_file = "tdjm.aac";          //Output URL
	int i;

	in_file= fopen("tdjm.pcm", "rb");

	av_register_all();

	//Method 1.
	pFormatCtx = avformat_alloc_context();
	fmt = av_guess_format(NULL, out_file, NULL);
	pFormatCtx->oformat = fmt;


	//Method 2.
	//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	//fmt = pFormatCtx->oformat;

	//Open output URL
	if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file!\n");
		return -1;
	}

	audio_st = avformat_new_stream(pFormatCtx, 0);
	if (audio_st==NULL){
		return -1;
	}
	pCodecCtx = audio_st->codec;
	pCodecCtx->codec_id = fmt->audio_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
	pCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
	pCodecCtx->sample_rate= 44100;
	pCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
	pCodecCtx->channels = av_get_channel_layout_nb_channels(pCodecCtx->channel_layout);
	pCodecCtx->bit_rate = 64000;  

	//Show some information
	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec){
		printf("Can not find encoder!\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){
		printf("Failed to open encoder!\n");
		return -1;
	}
	pFrame = av_frame_alloc();
	pFrame->nb_samples= pCodecCtx->frame_size;
	pFrame->format= pCodecCtx->sample_fmt;
	
	size = av_samples_get_buffer_size(NULL, pCodecCtx->channels,pCodecCtx->frame_size,pCodecCtx->sample_fmt, 1);
	frame_buf = (uint8_t *)av_malloc(size);
	avcodec_fill_audio_frame(pFrame, pCodecCtx->channels, pCodecCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1);
	
	//Write Header
	avformat_write_header(pFormatCtx,NULL);

	av_new_packet(&pkt,size);

	for (i=0; i<framenum; i++){
		//Read PCM
		if (fread(frame_buf, 1, size, in_file) <= 0){
			printf("Failed to read raw data! \n");
			return -1;
		}else if(feof(in_file)){
			break;
		}
		pFrame->data[0] = frame_buf;  //PCM Data

		pFrame->pts=i*100;
		got_frame=0;
		//Encode
		ret = avcodec_encode_audio2(pCodecCtx, &pkt,pFrame, &got_frame);
		if(ret < 0){
			printf("Failed to encode!\n");
			return -1;
		}
		if (got_frame==1){
			printf("Succeed to encode 1 frame! \tsize:%5d\n",pkt.size);
			pkt.stream_index = audio_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pkt);
		}
	}
	
	//Flush Encoder
	ret = flush_encoder(pFormatCtx,0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write Trailer
	av_write_trailer(pFormatCtx);

	//Clean
	if (audio_st){
		avcodec_close(audio_st->codec);
		av_free(pFrame);
		av_free(frame_buf);
	}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);

	fclose(in_file);

	return 0;
}
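
The pFrame->pts = i*100 in Example #12 is an arbitrary increment. With the audio time base of 1/sample_rate, the natural pts is the running sample count (sketch):

/* each frame advances the clock by exactly frame_size samples */
pFrame->pts = (int64_t) i * pCodecCtx->frame_size;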
Example #13
/**
 *
 * buffer is a float array like: 
 * 
 * 	- float buffer[nsamples * nchannels];
 * 
 *
 */
bool AV::addAudioFrame(unsigned char* buffer, int nsamples, int nchannels) {
	if(!use_audio) {
		printf("Cannot add audio stream, we're not using audio.\n");
		return false;
	}
	
	AVCodecContext* c = ct.as->codec;
	
	// BUFFER HANDLING
	int samples_stored = av_audio_fifo_write(ct.afifo, (void**)&buffer, nsamples);
	if(samples_stored != nsamples) {
		return false;
	}
	int nstored = av_audio_fifo_size(ct.afifo);
	if(nstored < c->frame_size) {
		return false;
	}

	
	AVPacket packet = {0}; // data and size must be '0' (allocation is done for you :> )
	AVFrame* frame = avcodec_alloc_frame(); 
	int got_packet = 0;
	

	av_init_packet(&packet);
	packet.data = NULL;
	packet.size = 0;

	int use_nsamples = c->frame_size; 
	frame->nb_samples = use_nsamples; // <-- important, must be set  before avcodec_fill_audio_frame
	
	// GET DATA FROM BUFFER
	int num_bytes = av_samples_get_buffer_size(NULL, c->channels, use_nsamples, c->sample_fmt, 0);
	uint8_t* my_buffer = (uint8_t*)av_malloc(num_bytes);
	uint8_t** my_ptr = &my_buffer;
	int nread = av_audio_fifo_read(ct.afifo, (void**)my_ptr, use_nsamples);
	if(nread != use_nsamples) {
		printf("We only read: %d but we wanted to read %d samples.\n", nread, use_nsamples);
		av_free(my_buffer);
		return false;
	}
	
	// FILL
	int fill_result = avcodec_fill_audio_frame(
		 frame
		,c->channels
		,c->sample_fmt
		,(uint8_t*)my_buffer
		,num_bytes
		,1
	);
	
	if(fill_result != 0) {
		char buf[1024];
		av_strerror(fill_result, buf, 1024);
		printf("av error: %s\n",buf);
		av_free(my_buffer);
		return false;
	}

	// ENCODE
	int64_t now = av_gettime();
	AVRational my_time_base = (AVRational){1, 1000000}; // av_gettime() is in microseconds
	AVRational stream_time_base = ct.as->time_base; // stream time base
	AVRational codec_time_base = ct.as->codec->time_base; // codec time base
	int64_t now_frame_pts = av_rescale_q(now, my_time_base, codec_time_base);
	
	
	if(frame->pts == AV_NOPTS_VALUE) { 
		frame->pts = ct.acounter;
	}
	ct.acounter = frame->pts + use_nsamples;
	printf("frame->nb_samples: %d, counter: %d\n", frame->nb_samples, ct.acounter);
			
	int enc_result = avcodec_encode_audio2(c, &packet, frame, &got_packet);
	packet.stream_index = ct.as->index;
	if(!got_packet) {
		av_free(my_buffer);
		return false;
	}
	if(enc_result < 0) {
		char buf[1024];
		av_strerror(enc_result, buf, 1024);
		printf("av error: %s\n",buf);
	}

	// CORRECT THE PTS, FROM VIDEO_CODEC.time_base TO STREAM.time_base
	packet.pts = av_rescale_q(packet.pts, codec_time_base, stream_time_base);
	packet.dts = av_rescale_q(packet.dts, codec_time_base, stream_time_base);
	//packet.duration = av_rescale_q(packet.duration, codec_time_base, stream_time_base);
	
	//packet.dts = packet.pts;  // just a wild guess
	packet.duration = 0;
	/*
	printf("Audio: stream: %d\n", packet.stream_index);
	printf("Audio: ct.acounter: %d\n", ct.acounter);
	printf("Audio: packet.duration: %d\n", packet.duration);
	printf("Audio: stream.time_base, num=%d, den=%d\n", stream_time_base.num, stream_time_base.den);
	printf("Audio: codec.time_base, num=%d, den=%d\n", codec_time_base.num, codec_time_base.den);
	printf("Audio: coded_frame.pts: %lld\n", ct.as->codec->coded_frame->pts);
	printf("Audio: packet.pts: %lld\n" ,packet.pts);
	printf("-------------------\n");
	*/
	// WRITE
	if(av_interleaved_write_frame(ct.c, &packet) != 0) {
		printf("Cannot write audio frame.\n");
		av_free(my_buffer);
		return false;
	}

	av_free(my_buffer);
	return true;
}
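
Example #13 assumes ct.afifo already exists. A matching allocation sketch (c stands for the stream's AVCodecContext as in the function above; av_audio_fifo_write() grows the FIFO on demand anyway):

ct.afifo = av_audio_fifo_alloc(c->sample_fmt, c->channels, c->frame_size);
if(!ct.afifo) {
	printf("Cannot allocate audio FIFO.\n");
	return false;
}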
Example #14
int decode_frame_from_packet(VideoState *is, AVFrame decoded_frame)
{
	int64_t src_ch_layout, dst_ch_layout;
	int src_rate, dst_rate;
	uint8_t **src_data = NULL, **dst_data = NULL;
	int src_nb_channels = 0, dst_nb_channels = 0;
	int src_linesize, dst_linesize;
	int src_nb_samples, dst_nb_samples, max_dst_nb_samples;
	enum AVSampleFormat src_sample_fmt, dst_sample_fmt;
	int dst_bufsize;
	int ret;

	src_nb_samples = decoded_frame.nb_samples;
	src_linesize = decoded_frame.linesize[0];
	src_data = decoded_frame.data;

	if (decoded_frame.channel_layout == 0) {
		decoded_frame.channel_layout = av_get_default_channel_layout(decoded_frame.channels);
	}

	src_rate = decoded_frame.sample_rate;
	dst_rate = decoded_frame.sample_rate;
	src_ch_layout = decoded_frame.channel_layout;
	dst_ch_layout = decoded_frame.channel_layout;
	src_sample_fmt = decoded_frame.format;
	dst_sample_fmt = AV_SAMPLE_FMT_S16;

	av_opt_set_int(is->sws_ctx_audio, "in_channel_layout", src_ch_layout, 0);
	av_opt_set_int(is->sws_ctx_audio, "out_channel_layout", dst_ch_layout,  0);
	av_opt_set_int(is->sws_ctx_audio, "in_sample_rate", src_rate, 0);
	av_opt_set_int(is->sws_ctx_audio, "out_sample_rate", dst_rate, 0);
	av_opt_set_sample_fmt(is->sws_ctx_audio, "in_sample_fmt", src_sample_fmt, 0);
	av_opt_set_sample_fmt(is->sws_ctx_audio, "out_sample_fmt", dst_sample_fmt,  0);

	/* initialize the resampling context */
	if ((ret = swr_init(is->sws_ctx_audio)) < 0) {
		fprintf(stderr, "Failed to initialize the resampling context\n");
		return -1;
	}

	/* allocate source and destination samples buffers */
	src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
	ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels, src_nb_samples, src_sample_fmt, 0);
	if (ret < 0) {
		fprintf(stderr, "Could not allocate source samples\n");
		return -1;
	}

	/* compute the number of converted samples: buffering is avoided
	 * ensuring that the output buffer will contain at least all the
	 * converted input samples */
	max_dst_nb_samples = dst_nb_samples = av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);

	/* buffer is going to be directly written to a rawaudio file, no alignment */
	dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
	ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels, dst_nb_samples, dst_sample_fmt, 0);
	if (ret < 0) {
		fprintf(stderr, "Could not allocate destination samples\n");
		return -1;
	}

	/* compute destination number of samples */
	dst_nb_samples = av_rescale_rnd(swr_get_delay(is->sws_ctx_audio, src_rate) + src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
	/* convert to destination format */
	ret = swr_convert(is->sws_ctx_audio, dst_data, dst_nb_samples, (const uint8_t **)decoded_frame.data, src_nb_samples);
	if (ret < 0) {
		fprintf(stderr, "Error while converting\n");
		return -1;
	}

	dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels, ret, dst_sample_fmt, 1);
	if (dst_bufsize < 0) {
		fprintf(stderr, "Could not get sample buffer size\n");
		return -1;
	}

	memcpy(is->audio_buf, dst_data[0], dst_bufsize);

	if (src_data) {
		av_freep(&src_data[0]);
	}
	av_freep(&src_data);

	if (dst_data) {
		av_freep(&dst_data[0]);
	}
	av_freep(&dst_data);

	return dst_bufsize;
}
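
Example #14 reallocates buffers and reinitializes the resampler for every decoded frame even though the parameters are constant for the stream. The context only needs swr_init() once; a guard sketch:

/* configure and initialize the resampler only once, then reuse it */
if (!swr_is_initialized(is->sws_ctx_audio)) {
	/* ...the av_opt_set_int()/av_opt_set_sample_fmt() calls above... */
	if (swr_init(is->sws_ctx_audio) < 0)
		return -1;
}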
Example #15
bool FeMedia::onGetData( Chunk &data )
{
	int offset=0;

	data.samples = NULL;
	data.sampleCount = 0;

	if ( (!m_audio) || end_of_file() )
		return false;

	while ( offset < m_audio->codec_ctx->sample_rate )
	{
		AVPacket *packet = m_audio->pop_packet();
		while (( packet == NULL ) && ( !end_of_file() ))
		{
			read_packet();
			packet = m_audio->pop_packet();
		}

		if ( packet == NULL )
		{
			m_audio->at_end=true;
			if ( offset > 0 )
				return true;
			return false;
		}

#if (LIBAVCODEC_VERSION_INT < AV_VERSION_INT( 53, 25, 0 ))
		{
			sf::Lock l( m_audio->buffer_mutex );

			int bsize = MAX_AUDIO_FRAME_SIZE;
			if ( avcodec_decode_audio3(
						m_audio->codec_ctx,
						(m_audio->buffer + offset),
						&bsize, packet) < 0 )
			{
				std::cerr << "Error decoding audio." << std::endl;
				FeBaseStream::free_packet( packet );
				return false;
			}
			else
			{
				offset += bsize / sizeof( sf::Int16 );
				data.sampleCount += bsize / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
		}
#else
 #if (LIBAVCODEC_VERSION_INT >= AV_VERSION_INT( 55, 45, 0 ))
		AVFrame *frame = av_frame_alloc();
		m_audio->codec_ctx->refcounted_frames = 1;
 #else
		AVFrame *frame = avcodec_alloc_frame();
 #endif
		//
		// TODO: avcodec_decode_audio4() can return multiple frames per packet depending on the codec.
		// We don't deal with this appropriately...
		//
		int got_frame( 0 );
		int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, packet );
		if ( len < 0 )
		{
#ifdef FE_DEBUG
			char buff[256];
			av_strerror( len, buff, 256 );
			std::cerr << "Error decoding audio: " << buff << std::endl;
#endif
		}

		if ( got_frame )
		{
			int data_size = av_samples_get_buffer_size(
				NULL,
				m_audio->codec_ctx->channels,
				frame->nb_samples,
				m_audio->codec_ctx->sample_fmt, 1);

#ifdef DO_RESAMPLE
			if ( m_audio->codec_ctx->sample_fmt == AV_SAMPLE_FMT_S16 )
#endif
			{
				sf::Lock l( m_audio->buffer_mutex );

				memcpy( (m_audio->buffer + offset), frame->data[0], data_size );
				offset += data_size / sizeof( sf::Int16 );
				data.sampleCount += data_size / sizeof(sf::Int16);
				data.samples = m_audio->buffer;
			}
#ifdef DO_RESAMPLE
			else
			{
				sf::Lock l( m_audio->buffer_mutex );

				if ( !m_audio->resample_ctx )
				{
					m_audio->resample_ctx = resample_alloc();
					if ( !m_audio->resample_ctx )
					{
						std::cerr << "Error allocating audio format converter." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						return false;
					}

					int64_t channel_layout = frame->channel_layout;
					if ( !channel_layout )
					{
						channel_layout = av_get_default_channel_layout(
								m_audio->codec_ctx->channels );
					}

					av_opt_set_int( m_audio->resample_ctx, "in_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_fmt", frame->format, 0 );
					av_opt_set_int( m_audio->resample_ctx, "in_sample_rate", frame->sample_rate, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_channel_layout", channel_layout, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0 );
					av_opt_set_int( m_audio->resample_ctx, "out_sample_rate", frame->sample_rate, 0 );

#ifdef FE_DEBUG
					std::cout << "Initializing resampler: in_sample_fmt="
						<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
						<< ", in_sample_rate=" << frame->sample_rate
						<< ", out_sample_fmt=" << av_get_sample_fmt_name( AV_SAMPLE_FMT_S16 )
						<< ", out_sample_rate=" << frame->sample_rate << std::endl;
#endif
					if ( resample_init( m_audio->resample_ctx ) < 0 )
					{
						std::cerr << "Error initializing audio format converter, input format="
							<< av_get_sample_fmt_name( (AVSampleFormat)frame->format )
							<< ", input sample rate=" << frame->sample_rate << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						resample_free( &m_audio->resample_ctx );
						m_audio->resample_ctx = NULL;
						return false;
					}
				}
				if ( m_audio->resample_ctx )
				{
					int out_linesize;
					av_samples_get_buffer_size(
						&out_linesize,
						m_audio->codec_ctx->channels,
						frame->nb_samples,
						AV_SAMPLE_FMT_S16, 0 );

					uint8_t *tmp_ptr = (uint8_t *)(m_audio->buffer + offset);

#ifdef USE_SWRESAMPLE
					int out_samples = swr_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								frame->nb_samples,
								(const uint8_t **)frame->data,
								frame->nb_samples );
#else // USE_AVRESAMPLE
					int out_samples = avresample_convert(
								m_audio->resample_ctx,
								&tmp_ptr,
								out_linesize,
								frame->nb_samples,
								frame->data,
								frame->linesize[0],
								frame->nb_samples );
#endif
					if ( out_samples < 0 )
					{
						std::cerr << "Error performing audio conversion." << std::endl;
						FeBaseStream::free_packet( packet );
						FeBaseStream::free_frame( frame );
						break;
					}
					offset += out_samples * m_audio->codec_ctx->channels;
					data.sampleCount += out_samples * m_audio->codec_ctx->channels;
					data.samples = m_audio->buffer;
				}
			}
#endif
		}
		FeBaseStream::free_frame( frame );

#endif

		FeBaseStream::free_packet( packet );
	}

	return true;
}
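
The TODO in Example #15 about multiple frames per packet can be addressed by consuming avcodec_decode_audio4()'s return value (bytes eaten) until the packet is exhausted (sketch):

// Sketch: loop until the decoder has consumed the whole packet.
AVPacket tmp = *packet;
while ( tmp.size > 0 )
{
	int got_frame( 0 );
	int len = avcodec_decode_audio4( m_audio->codec_ctx, frame, &got_frame, &tmp );
	if ( len < 0 )
		break; // decode error: drop the rest of the packet
	tmp.data += len;
	tmp.size -= len;
	if ( got_frame )
	{
		// ...copy/convert frame->data as in the body above...
	}
}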
Example #16
int audio_decode_frame(VideoState *is, double *pts_ptr) {

  int len1, data_size = 0, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
      if(len1 < 0) {
	/* if error, skip frame */
	is->audio_pkt_size = 0;
	break;
      }
      if (got_frame)
      {
          data_size =
            av_samples_get_buffer_size
            (
                NULL,
                is->audio_st->codec->channels,
                is->audio_frame.nb_samples,
                is->audio_st->codec->sample_fmt,
                1
            );
          memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
	/* No data yet, get more frames */
	continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      n = 2 * is->audio_st->codec->channels;
      is->audio_clock += (double)data_size /
	(double)(n * is->audio_st->codec->sample_rate);

      /* We have data, return it and come back for more later */
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }
    /* next packet */
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
    /* if update, update the audio clock w/pts */
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    }
  }
}
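avcodec_decode_audio4(), used throughout these examples, was deprecated in FFmpeg 3.1 in favour of the send/receive API. A sketch of the equivalent decode step under the newer API (this is not a drop-in replacement for the tutorial's packet and clock bookkeeping):

#include <libavcodec/avcodec.h>

/* Sketch: decode one packet with the send/receive API (FFmpeg >= 3.1).
 * dec_ctx must be an opened audio AVCodecContext. */
static int decode_packet(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* need more input / fully drained */
        if (ret < 0)
            return ret;         /* decoding error */
        /* ... consume frame->nb_samples, frame->data[], frame->format ... */
        av_frame_unref(frame);
    }
}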
Ejemplo n.º 17
/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    AVFrame *frame;
    AVPacket pkt;
    int i, j, k, ret, got_output;
    int buffer_size;
    FILE *f;
    uint16_t *samples;
    float t, tincr;

    printf("Encode audio file %s\n", filename);

    /* find the MP2 encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 64000;

    /* check that the encoder supports s16 pcm input */
    c->sample_fmt = AV_SAMPLE_FMT_S16;
    if (!check_sample_fmt(codec, c->sample_fmt)) {
        fprintf(stderr, "Encoder does not support sample format %s",
                av_get_sample_fmt_name(c->sample_fmt));
        exit(1);
    }

    /* select other audio parameters supported by the encoder */
    c->sample_rate    = select_sample_rate(codec);
    c->channel_layout = select_channel_layout(codec);
    c->channels       = av_get_channel_layout_nb_channels(c->channel_layout);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        exit(1);
    }

    /* frame containing input raw audio */
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate audio frame\n");
        exit(1);
    }

    frame->nb_samples     = c->frame_size;
    frame->format         = c->sample_fmt;
    frame->channel_layout = c->channel_layout;

    /* the codec gives us the frame size, in samples,
     * we calculate the size of the samples buffer in bytes */
    buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
                                             c->sample_fmt, 0);
    if (buffer_size < 0) {
        fprintf(stderr, "Could not get sample buffer size\n");
        exit(1);
    }
    samples = av_malloc(buffer_size);
    if (!samples) {
        fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
                buffer_size);
        exit(1);
    }
    /* setup the data pointers in the AVFrame */
    ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                                   (const uint8_t*)samples, buffer_size, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not setup audio frame\n");
        exit(1);
    }

    /* encode a single tone sound */
    t = 0;
    tincr = 2 * M_PI * 440.0 / c->sample_rate;
    for (i = 0; i < 200; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;

        for (j = 0; j < c->frame_size; j++) {
            samples[2*j] = (int)(sin(t) * 10000);

            for (k = 1; k < c->channels; k++)
                samples[2*j + k] = samples[2*j];
            t += tincr;
        }
        /* encode the samples */
        ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding audio frame\n");
            exit(1);
        }
        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            fwrite(pkt.data, 1, pkt.size, f);
            av_packet_unref(&pkt);
        }
    }
    fclose(f);

    av_freep(&samples);
    av_frame_free(&frame);
    avcodec_close(c);
    av_free(c);
}
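This example calls check_sample_fmt(), select_sample_rate() and select_channel_layout() without defining them. A plausible check_sample_fmt(), modeled on the helper from the stock FFmpeg encoding example (an assumption, since the definition is not shown here):

#include <libavcodec/avcodec.h>

/* Sketch of the missing helper: returns 1 if the encoder advertises
 * support for the given sample format, 0 otherwise. */
static int check_sample_fmt(const AVCodec *codec, enum AVSampleFormat sample_fmt)
{
    const enum AVSampleFormat *p = codec->sample_fmts;

    while (p && *p != AV_SAMPLE_FMT_NONE) {
        if (*p == sample_fmt)
            return 1;
        p++;
    }
    return 0;
}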
Ejemplo n.º 18
static int run_function(void)
{
	s_MaicheState = FFMPEG_STATE_PLAYING;
	AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    av_init_packet(packet);
    packet->data = 0;
    packet->size = 0;
    
    int out_buffer_size = 0;
    gboolean finishedFlag = FALSE;
    int got_picture = 0;
    int ret = 0;
    while (av_read_frame(s_pFormatCtx, packet) >=0)
    {
    	if (s_Seek_Flag)
    	{
    		pthread_mutex_lock(&s_mutex);
    		s_Seek_Flag = FALSE;
    		pthread_mutex_unlock(&s_mutex);
    		long long timestamp = s_want_to_seek_position / s_one_sec_unit * AV_TIME_BASE;
    		ret = av_seek_frame(s_pFormatCtx, -1, timestamp, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY);
    		avcodec_flush_buffers(s_pFormatCtx->streams[s_audioStreamIndex]->codec);
    	}

        // handle pause
        if (get_current_player_state() == FFMPEG_STATE_PAUSED)
        {
            pthread_mutex_lock(&s_mutex);
            while (get_current_player_state() == FFMPEG_STATE_PAUSED)
            {
                pthread_cond_wait(&s_cond, &s_mutex);
            }
            pthread_mutex_unlock(&s_mutex);
        }

        // handle quit
        if (s_Exit_Flag)
        {
            break;
        }
        if (packet->stream_index == s_audioStreamIndex)
        {
            do
            {
                ret = avcodec_decode_audio4( s_pCodecCtx, s_pFrame, &got_picture, packet);
                if (ret < 0)
                {
                    av_free_packet(packet);
                    ret = -1;
                    goto free_point;
                }
                if (got_picture > 0)
                {
                    if (!finishedFlag)
                    {
                        finishedFlag = TRUE;
                        uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
                        uint64_t count = s_pCodecCtx->frame_size;                        
                        if (count == 0)
                        {
                            count = s_pFrame->nb_samples;
                        }
                        
                        enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
                        int out_nb_samples = count;
                        int out_sample_rate = 44100;
                        int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
                        out_buffer_size = av_samples_get_buffer_size(NULL, out_channels, out_nb_samples, out_sample_fmt, 1);
                        int64_t in_channel_layout = av_get_default_channel_layout(s_pCodecCtx->channels);
                        s_au_convert_ctx = swr_alloc_set_opts(s_au_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate, 
                        	in_channel_layout, s_pCodecCtx->sample_fmt, s_pCodecCtx->sample_rate, 0, NULL);
                        swr_init(s_au_convert_ctx);
                    }
                    swr_convert(s_au_convert_ctx, &s_out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)s_pFrame->data, s_pFrame->nb_samples);
                    AVRational *time_base = &s_pFormatCtx->streams[packet->stream_index]->time_base;
                    last_known_time_.position = av_q2d(* time_base) * packet->pts * s_one_sec_unit;
                    s_audio_device->play(s_out_buffer, out_buffer_size, AOPLAY_FINAL_CHUNK); 
                    break;
                }
                packet->data += ret;
                packet->size -= ret;
            }
            while(packet->size > 0);
        }
        av_free_packet(packet);
    }

free_point:
    if (play_trans_callback_)
    {
        if (!s_Exit_Flag)
        {
            //play_trans_callback_(PLAY_STOPPED);
        }   
    }
    avformat_close_input(&s_pFormatCtx);
    avcodec_close(s_pCodecCtx);
    av_free(packet);
    Log_error("ffmpeg", " run_function finished !");
    s_MaicheState = FFMPEG_STATE_READY;
    return ret;
}
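One weakness above: out_buffer_size is computed once, from the first decoded frame, while swr_convert() reports how many samples per channel it actually produced on each call. A safer pattern sizes every write from that return value; a hedged sketch (names are illustrative):

#include <libswresample/swresample.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>

/* Sketch: convert one frame to packed S16 and return the byte count
 * actually produced, or a negative AVERROR. 'out' must hold at least
 * 'out_capacity' samples per channel. */
static int convert_frame_bytes(struct SwrContext *swr, uint8_t *out,
                               int out_capacity, const AVFrame *in,
                               int out_channels)
{
    int converted = swr_convert(swr, &out, out_capacity,
                                (const uint8_t **)in->data, in->nb_samples);
    if (converted < 0)
        return converted;
    return av_samples_get_buffer_size(NULL, out_channels, converted,
                                      AV_SAMPLE_FMT_S16, 1);
}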
Ejemplo n.º 19
int COMXAudioCodecOMX::GetData(BYTE** dst)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);
  bool cont = !m_pFrame1->data[1] || (m_pFrame1->data[1] == m_pFrame1->data[0] + inLineSize && inLineSize == outLineSize && inLineSize * m_pCodecContext->channels == inputSize);

  if (m_iBufferOutputAlloced < outputSize)
  {
     av_free(m_pBufferOutput);
     m_pBufferOutput = (BYTE*)av_malloc(outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = outputSize;
  }
  *dst = m_pBufferOutput;

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat);
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat);
      outputSize = 0;
    }
  }
  else
  {
    /* if it is already contiguous, just return decoded frame */
    if (cont)
    {
      *dst = m_pFrame1->data[0];
    }
    else
    {
      /* copy to a contiguous buffer */
      uint8_t *out_planes[m_pCodecContext->channels];
      if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
        av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
      {
        outputSize = 0;
      }
    }
  }

  if (m_bFirstFrame)
  {
    CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d cont=%d buf=%p", inputSize, outputSize, inLineSize, outLineSize, cont, *dst);
    m_bFirstFrame = false;
  }
  return outputSize;
}
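The contiguity test above hinges on whether the decoder produced packed or planar samples: packed formats keep every channel interleaved in data[0], planar formats put one channel per plane. A small sketch showing how av_samples_get_buffer_size() reports the two layouts:

#include <stdio.h>
#include <libavutil/samplefmt.h>

/* Sketch: for packed formats linesize equals the whole buffer;
 * for planar formats it is the size of a single channel's plane. */
static void describe_layout(enum AVSampleFormat fmt, int channels, int nb_samples)
{
    int linesize;
    int total = av_samples_get_buffer_size(&linesize, channels,
                                           nb_samples, fmt, 1);

    printf("%s: planar=%d linesize=%d total=%d bytes\n",
           av_get_sample_fmt_name(fmt), av_sample_fmt_is_planar(fmt),
           linesize, total);
}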
Ejemplo n.º 20
int audio_decode_frame(VideoState *is, double *pts_ptr) {

  int len1, data_size = 0;
  AVPacket *pkt = &is->audio_pkt;

  for(;;) {

    while(is->audio_pkt_size > 0) {

      int got_frame = 0;
      len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);

      if(len1 < 0) {
        // if error, skip this frame
        is->audio_pkt_size = 0;
        break;
      }

      if (got_frame) {
        data_size =
        av_samples_get_buffer_size
        (
         NULL,
         is->audio_st->codec->channels,
         is->audio_frame.nb_samples,
         is->audio_st->codec->sample_fmt,
         1
         );
        memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
      }

      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;

      if(data_size <= 0) {
        // No data yet, get more frames
        continue;
      }

      // We have data, return it and come back for more later
      return data_size;
    }

    if(pkt->data) {
      av_free_packet(pkt);
    }

    if(is->quit) {
      return -1;
    }

    // read next packet
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }
    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;
  }

  return 0;
}
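As in example 16, the memcpy from audio_frame.data[0] assumes a packed sample format; with planar decoder output (AAC commonly decodes to fltp) data[0] holds only the first channel while data_size counts all of them. A hedged sketch of a copy that at least handles both layouts (planes are appended, not interleaved; true interleaving needs libswresample):

#include <string.h>
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>

/* Sketch: copy a decoded frame into 'dst' whether the format is
 * packed or planar. Returns bytes copied or a negative AVERROR. */
static int copy_frame_samples(uint8_t *dst, const AVFrame *frame, int channels)
{
    enum AVSampleFormat fmt = (enum AVSampleFormat)frame->format;
    int plane_size;
    int data_size = av_samples_get_buffer_size(&plane_size, channels,
                                               frame->nb_samples, fmt, 1);
    if (data_size < 0)
        return data_size;

    if (!av_sample_fmt_is_planar(fmt)) {
        memcpy(dst, frame->extended_data[0], data_size);
    } else {
        int ch;
        for (ch = 0; ch < channels; ch++)
            memcpy(dst + ch * plane_size, frame->extended_data[ch], plane_size);
    }
    return data_size;
}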
Ejemplo n.º 21
long audio_tutorial_resample(VideoState *is, struct AVFrame *inframe) {

#ifdef __RESAMPLER__

#ifdef __LIBAVRESAMPLE__

// Pre-1.0 libavresample expects void** buffers; later versions use uint8_t**.
#if LIBAVRESAMPLE_VERSION_MAJOR == 0
    void **resample_input_bytes = (void **)inframe->extended_data;
#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif

#else
    uint8_t **resample_input_bytes = (uint8_t **)inframe->extended_data;
#endif


    int resample_nblen = 0;
    long resample_long_bytes = 0;

    if( is->pResampledOut == NULL || inframe->nb_samples > is->resample_size) {
#ifdef __LIBAVRESAMPLE__
        is->resample_size = av_rescale_rnd(avresample_get_delay(is->pSwrCtx) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#else
        is->resample_size = av_rescale_rnd(swr_get_delay(is->pSwrCtx,
                                           44100) +
                                           inframe->nb_samples,
                                           44100,
                                           44100,
                                           AV_ROUND_UP);
#endif

        if(is->pResampledOut != NULL) {
            av_free(is->pResampledOut);
            is->pResampledOut = NULL;
        }

        av_samples_alloc(&is->pResampledOut, &is->resample_lines, 2, is->resample_size,
                         AV_SAMPLE_FMT_S16, 0);

    }


#ifdef __LIBAVRESAMPLE__

// The avresample_convert() signature changed between the old API (0.0.3,
// as shipped with FFmpeg 1.0) and the new API (1.0.0+, FFmpeg 1.1 / libav 9).
#if LIBAVRESAMPLE_VERSION_INT <= 3
    // AVResample OLD
    resample_nblen = avresample_convert(is->pSwrCtx, (void **)&is->pResampledOut, 0,
                                        is->resample_size,
                                        (void **)resample_input_bytes, 0, inframe->nb_samples);
#else
    //AVResample NEW
    resample_nblen = avresample_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                        0, is->resample_size,
                                        (uint8_t **)resample_input_bytes, 0, inframe->nb_samples);
#endif

#else
    // SWResample
    resample_nblen = swr_convert(is->pSwrCtx, (uint8_t **)&is->pResampledOut,
                                 is->resample_size,
                                 (const uint8_t **)resample_input_bytes, inframe->nb_samples);
#endif

    if (resample_nblen < 0) {
        fprintf(stderr, "reSample to another sample format failed!\n");
        return -1;
    }

    resample_long_bytes = av_samples_get_buffer_size(NULL, 2, resample_nblen,
                          AV_SAMPLE_FMT_S16, 1);

    return resample_long_bytes;

#else
    return -1;
#endif
}
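audio_tutorial_resample() assumes is->pSwrCtx was created and initialized elsewhere. A minimal sketch of a context matching the hard-coded S16/stereo/44100 output (the input parameters are assumptions; they must come from the decoder):

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>

/* Sketch: build the resampler this function expects. Returns NULL on
 * failure. in_layout/in_fmt/in_rate describe the decoded stream. */
static struct SwrContext *make_swr(int64_t in_layout,
                                   enum AVSampleFormat in_fmt, int in_rate)
{
    struct SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
            in_layout, in_fmt, in_rate, 0, NULL);
    if (swr && swr_init(swr) < 0)
        swr_free(&swr);
    return swr;
}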
Ejemplo n.º 22
int COMXAudioCodecOMX::GetData(BYTE** dst, double &dts, double &pts)
{
  if (!m_bGotFrame)
    return 0;
  int inLineSize, outLineSize;
  /* input audio is aligned */
  int inputSize = av_samples_get_buffer_size(&inLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_pCodecContext->sample_fmt, 0);
  /* output audio will be packed */
  int outputSize = av_samples_get_buffer_size(&outLineSize, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1);

  if (m_iBufferOutputAlloced < m_iBufferOutputUsed + outputSize)
  {
     m_pBufferOutput = (BYTE*)av_realloc(m_pBufferOutput, m_iBufferOutputUsed + outputSize + FF_INPUT_BUFFER_PADDING_SIZE);
     m_iBufferOutputAlloced = m_iBufferOutputUsed + outputSize;
  }
  *dst = m_pBufferOutput;

  /* need to convert format */
  if(m_pCodecContext->sample_fmt != m_desiredSampleFormat)
  {
    if(m_pConvert && (m_pCodecContext->sample_fmt != m_iSampleFormat || m_channels != m_pCodecContext->channels))
    {
      swr_free(&m_pConvert);
      m_channels = m_pCodecContext->channels;
    }

    if(!m_pConvert)
    {
      m_iSampleFormat = m_pCodecContext->sample_fmt;
      m_pConvert = swr_alloc_set_opts(NULL,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_desiredSampleFormat, m_pCodecContext->sample_rate,
                      av_get_default_channel_layout(m_pCodecContext->channels), 
                      m_pCodecContext->sample_fmt, m_pCodecContext->sample_rate,
                      0, NULL);

      if(!m_pConvert || swr_init(m_pConvert) < 0)
      {
        CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to initialise convert format %d to %d", m_pCodecContext->sample_fmt, m_desiredSampleFormat);
        return 0;
      }
    }

    /* use unaligned flag to keep output packed */
    uint8_t *out_planes[m_pCodecContext->channels];
    if(av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
       swr_convert(m_pConvert, out_planes, m_pFrame1->nb_samples, (const uint8_t **)m_pFrame1->data, m_pFrame1->nb_samples) < 0)
    {
      CLog::Log(LOGERROR, "COMXAudioCodecOMX::Decode - Unable to convert format %d to %d", (int)m_pCodecContext->sample_fmt, m_desiredSampleFormat);
      outputSize = 0;
    }
  }
  else
  {
    /* copy to a contiguous buffer */
    uint8_t *out_planes[m_pCodecContext->channels];
    if (av_samples_fill_arrays(out_planes, NULL, m_pBufferOutput + m_iBufferOutputUsed, m_pCodecContext->channels, m_pFrame1->nb_samples, m_desiredSampleFormat, 1) < 0 ||
      av_samples_copy(out_planes, m_pFrame1->data, 0, 0, m_pFrame1->nb_samples, m_pCodecContext->channels, m_desiredSampleFormat) < 0 )
    {
      outputSize = 0;
    }
  }
  int desired_size = AUDIO_DECODE_OUTPUT_BUFFER * (m_pCodecContext->channels * GetBitsPerSample()) >> (rounded_up_channels_shift[m_pCodecContext->channels] + 4);

  if (m_bFirstFrame)
  {
    CLog::Log(LOGDEBUG, "COMXAudioCodecOMX::GetData size=%d/%d line=%d/%d buf=%p, desired=%d", inputSize, outputSize, inLineSize, outLineSize, *dst, desired_size);
    m_bFirstFrame = false;
  }
  m_iBufferOutputUsed += outputSize;

  if (!m_bNoConcatenate && m_pCodecContext->sample_fmt == AV_SAMPLE_FMT_FLTP && m_frameSize && (int)m_frameSize != outputSize)
    CLog::Log(LOGERROR, "COMXAudioCodecOMX::GetData Unexpected change of size (%d->%d)", m_frameSize, outputSize);
  m_frameSize = outputSize;

  // if next buffer submitted won't fit then flush it out
  if (m_iBufferOutputUsed + outputSize > desired_size || m_bNoConcatenate)
  {
     int ret = m_iBufferOutputUsed;
     m_bGotFrame = false;
     m_iBufferOutputUsed = 0;
     dts = m_dts;
     pts = m_pts;
     return ret;
  }
  return 0;
}
Ejemplo n.º 23
BOOL DecodeFrame(char* buffer, DWORD size, DWORD& count, PLAYERDECODE* decode)
{
	DWORD p = 0;

	if (decode->left > 0)
	{	
		int len = decode->left;		
		uint8_t* a = decode->ptr + decode->total - decode->left;
		if (decode->left > size)
			len = size;
				
		CopyMemory(buffer, a, len);
		decode->left -= len;
		p += len;
		size -= len;
		count += len;
	}

	int ret = 0;
	int got = 0;
	BOOL err = FALSE;

	while (size > 0)
	{				
		if (decode->packet.size == 0)
		{
			ret = av_read_frame(decode->format, &decode->packet);
			if (ret != 0)
				return FALSE;

			decode->orgdata = decode->packet.data;
			decode->orgsize = decode->packet.size;			
		}	

		while (decode->packet.size > 0)
		{			
			if (decode->frame == NULL)
			{
				decode->frame = avcodec_alloc_frame();
				if (decode->frame == NULL)
					return FALSE;			
			} 
			else
				avcodec_get_frame_defaults(decode->frame);

			int len = avcodec_decode_audio4(decode->context, decode->frame, &got, &decode->packet);
			if (len < 0)
			{
				err = TRUE;
				break;
			}
						
			decode->packet.size -= len;
			decode->packet.data += len;

			if (got > 0) 
			{					
				int data_size = av_samples_get_buffer_size(NULL, decode->context->channels, decode->frame->nb_samples, decode->context->sample_fmt, 1);
				if (data_size > 0)
				{	
					//int src_layout = (decode->frame->channel_layout > 0 && decode->frame->channels == av_get_channel_layout_nb_channels(decode->frame->channel_layout)) ? decode->frame->channel_layout : av_get_default_channel_layout(decode->frame->channels);
					
					if (!decode->swred && decode->context->sample_fmt != decode->fmt)
					{
						swr_free(&decode->swr);
						decode->swr = swr_alloc_set_opts(NULL, decode->layout, decode->fmt, decode->context->sample_rate, decode->layout, decode->context->sample_fmt, decode->frame->sample_rate, 0, NULL);
						if (decode->swr == NULL || (ret = swr_init(decode->swr)) < 0)
							return FALSE;

						decode->buffer = new unsigned char[192000 * 4];
						decode->swred = TRUE;
					}

					if (decode->swr != NULL)
					{
						uint8_t* out = decode->buffer;
						int out_count = 192000 * 4 / decode->channels / av_get_bytes_per_sample(decode->fmt);
						
						len = swr_convert(decode->swr, &out, out_count, (const uint8_t**)decode->frame->extended_data, decode->frame->nb_samples);
						if (len < 0)
							return FALSE;

						if (len == out_count)
							swr_init(decode->swr);

						decode->ptr = decode->buffer;
						decode->total = len * decode->channels * av_get_bytes_per_sample(decode->fmt);
					}
					else
					{
						decode->ptr = decode->frame->data[0];
						decode->total = data_size;
					}

					len = decode->total;
					if (decode->total > size)
					{
						len = size;
						decode->left = decode->total - size;
					}
										
					CopyMemory(buffer + p, decode->ptr, len);
					p += len;
					size -= len;
					count += len;
				}
				else
					break;

				if (size == 0)
					break;
			}
		}

		if (decode->packet.size == 0 || err)
		{
			decode->packet.data = decode->orgdata;
			decode->packet.size = decode->orgsize;
			av_free_packet(&decode->packet);
			decode->orgdata = NULL;
			decode->orgsize = 0;
		}				
	}

	return TRUE;
}
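When the output buffer fills exactly (len == out_count above), this code re-initializes the context, which discards anything still buffered. libswresample can instead be drained explicitly by converting with no input; a sketch:

#include <libswresample/swresample.h>

/* Sketch: flush samples still buffered inside a SwrContext.
 * Returns samples per channel written to 'out', or a negative AVERROR. */
static int drain_swr(struct SwrContext *swr, uint8_t *out, int out_capacity)
{
    /* a NULL input with 0 samples asks the resampler to flush */
    return swr_convert(swr, &out, out_capacity, NULL, 0);
}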
Ejemplo n.º 24
  int AudioDecoder::decodeAudio(unsigned char* inBuff, int inBuffLen,
      unsigned char* outBuff, int outBuffLen, int* gotFrame){


    AVPacket avpkt;
    int outSize;
    int decSize = 0;
    int len = -1;
    uint8_t *decBuff = (uint8_t*) malloc(16000);

    av_init_packet(&avpkt);
    avpkt.data = (unsigned char*) inBuff;
    avpkt.size = inBuffLen;

    while (avpkt.size > 0) {

      outSize = 16000;

      // May fail. Taken from libavcodec/utils.c, from the migration from avcodec_decode_audio3 to avcodec_decode_audio4.
      //avcodec_decode_audio3(aDecoderContext, (short*)decBuff, &outSize, &avpkt);

      AVFrame frame;
      int got_frame = 0;

      //      aDecoderContext->get_buffer = avcodec_default_get_buffer;
      //      aDecoderContext->release_buffer = avcodec_default_release_buffer;

      len = avcodec_decode_audio4(aDecoderContext_, &frame, &got_frame,
          &avpkt);
      if (len >= 0 && got_frame) {
        int plane_size;
        //int planar = av_sample_fmt_is_planar(aDecoderContext->sample_fmt);
        int data_size = av_samples_get_buffer_size(&plane_size,
            aDecoderContext_->channels, frame.nb_samples,
            aDecoderContext_->sample_fmt, 1);
        if (outSize < data_size) {
          ELOG_DEBUG("output buffer size is too small for the current frame");
          free(decBuff);
          return AVERROR(EINVAL);
        }

        memcpy(decBuff, frame.extended_data[0], plane_size);

        /* If there is more than one channel
           if (planar && aDecoderContext->channels > 1) {
           uint8_t *out = ((uint8_t *)decBuff) + plane_size;
           for (int ch = 1; ch < aDecoderContext->channels; ch++) {
           memcpy(out, frame.extended_data[ch], plane_size);
           out += plane_size;
           }
           }
           */
        outSize = data_size;
      } else {
        outSize = 0;
      }

      if (len < 0) {
        ELOG_DEBUG("Error al decodificar audio");
        free(decBuff);
        return -1;
      }

      avpkt.size -= len;
      avpkt.data += len;

      if (outSize <= 0) {
        continue;
      }

      memcpy(outBuff, decBuff, outSize);
      outBuff += outSize;
      decSize += outSize;
    }

    free(decBuff);

    if (outSize <= 0) {
      ELOG_DEBUG("Error de decodificación de audio debido a tamaño incorrecto");
      return -1;
    }

    return decSize;
  }
Ejemplo n.º 25
uint8_t *getAVAudioData(StreamPtr stream, size_t *length)
{
    int got_frame;
    int len;

    if(length) *length = 0;

    if(!stream || stream->CodecCtx->codec_type != AVMEDIA_TYPE_AUDIO)
        return NULL;

next_packet:
    if(!stream->Packets && !getNextPacket(stream->parent, stream->StreamIdx))
        return NULL;

    /* Decode some data, and check for errors */
    avcodec_get_frame_defaults(stream->Frame);
    while((len=avcodec_decode_audio4(stream->CodecCtx, stream->Frame,
                                     &got_frame, &stream->Packets->pkt)) < 0)
    {
        struct PacketList *self;

        /* Error? Drop it and try the next, I guess... */
        self = stream->Packets;
        stream->Packets = self->next;

        av_free_packet(&self->pkt);
        av_free(self);

        if(!stream->Packets)
            goto next_packet;
    }

    if(len < stream->Packets->pkt.size)
    {
        /* Move the unread data to the front and clear the end bits */
        int remaining = stream->Packets->pkt.size - len;
        memmove(stream->Packets->pkt.data, &stream->Packets->pkt.data[len],
                remaining);
        memset(&stream->Packets->pkt.data[remaining], 0,
               stream->Packets->pkt.size - remaining);
        stream->Packets->pkt.size -= len;
    }
    else
    {
        struct PacketList *self;

        self = stream->Packets;
        stream->Packets = self->next;

        av_free_packet(&self->pkt);
        av_free(self);
    }

    if(!got_frame || stream->Frame->nb_samples == 0)
        goto next_packet;

    /* Set the output buffer size */
    *length = av_samples_get_buffer_size(NULL, stream->CodecCtx->channels,
                                               stream->Frame->nb_samples,
                                               stream->CodecCtx->sample_fmt, 1);

    return stream->Frame->data[0];
}
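A caveat here: *length is computed across all channels, but the returned pointer is Frame->data[0], which for planar decoder output holds just one channel. The per-plane byte count is the linesize reported by av_samples_get_buffer_size(); a hedged sketch:

#include <libavutil/samplefmt.h>

/* Sketch: byte count actually behind data[0]. For packed formats the
 * single plane is the whole buffer; for planar it is one channel. */
static int plane0_size(enum AVSampleFormat fmt, int channels, int nb_samples)
{
    int plane_size;
    if (av_samples_get_buffer_size(&plane_size, channels,
                                   nb_samples, fmt, 1) < 0)
        return -1;
    return plane_size;
}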
Ejemplo n.º 26
static vod_status_t
audio_filter_read_filter_sink(audio_filter_state_t* state)
{
	AVPacket output_packet;
	vod_status_t rc;
	int got_packet;
	int avrc;
#ifdef AUDIO_FILTER_DEBUG
	size_t data_size;
#endif // AUDIO_FILTER_DEBUG

	for (;;)
	{
		avrc = av_buffersink_get_frame_flags(state->sink.buffer_sink, state->filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
		if (avrc == AVERROR(EAGAIN) || avrc == AVERROR_EOF)
		{
			break;
		}

		if (avrc < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_read_filter_sink: av_buffersink_get_frame_flags failed %d", avrc);
			return VOD_UNEXPECTED;
		}

#ifdef AUDIO_FILTER_DEBUG
		data_size = av_samples_get_buffer_size(
			NULL,
			state->sink.encoder->channels,
			state->filtered_frame->nb_samples,
			state->sink.encoder->sample_fmt,
			1);
		audio_filter_append_debug_data("sink", "pcm", state->filtered_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		got_packet = 0;
		avrc = avcodec_encode_audio2(state->sink.encoder, &output_packet, state->filtered_frame, &got_packet);

		av_frame_unref(state->filtered_frame);

		if (avrc < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_read_filter_sink: avcodec_encode_audio2 failed %d", avrc);
			return VOD_ALLOC_FAILED;
		}

		if (got_packet)
		{
			rc = audio_filter_write_frame(state, &output_packet);

			av_free_packet(&output_packet);

			if (rc != VOD_OK)
			{
				return rc;
			}
		}
	}

	return VOD_OK;
}
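avcodec_encode_audio2() was deprecated alongside the decode call in FFmpeg 3.1. A sketch of the encoder-side send/receive equivalent of this loop (not a drop-in for the vod error mapping above):

#include <libavcodec/avcodec.h>

/* Sketch: encode one frame with the send/receive API (FFmpeg >= 3.1).
 * Pass frame = NULL once at the end to drain the encoder. */
static int encode_frame(AVCodecContext *enc, const AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    for (;;) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;           /* need more input / fully drained */
        if (ret < 0)
            return ret;
        /* ... hand pkt to the muxer or file writer here ... */
        av_packet_unref(pkt);
    }
}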
Ejemplo n.º 27
CBaseDec::RetCode CFfmpegDec::Decoder(FILE *_in, int /*OutputFd*/, State* state, CAudioMetaData* _meta_data, time_t* time_played, unsigned int* secondsToSkip)
{
	in = _in;
	RetCode Status=OK;
	is_stream = fseek((FILE *)in, 0, SEEK_SET);

	if (!SetMetaData((FILE *)in, _meta_data, true)) {
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	AVCodecContext *c = avc->streams[best_stream]->codec;

	mutex.lock();
	int r = avcodec_open2(c, codec, NULL);
	mutex.unlock();
	if (r)
	{
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	SwrContext *swr = swr_alloc();
	if (!swr) {
		mutex.lock();
		avcodec_close(c);
		mutex.unlock();
		DeInit();
		Status=DATA_ERR;
		return Status;
	}

	mSampleRate = samplerate;
	mChannels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
#if __BYTE_ORDER == __LITTLE_ENDIAN
	audioDecoder->PrepareClipPlay(mChannels, mSampleRate, 16, 1);
#else
	audioDecoder->PrepareClipPlay(mChannels, mSampleRate, 16, 0);
#endif

	AVFrame *frame = NULL;
	AVPacket rpacket;
	av_init_packet(&rpacket);

	av_opt_set_int(swr, "in_channel_layout",	c->channel_layout,	0);
	//av_opt_set_int(swr, "out_channel_layout",	c->channel_layout,	0);
	av_opt_set_int(swr, "out_channel_layout",	AV_CH_LAYOUT_STEREO,	0);
	av_opt_set_int(swr, "in_sample_rate",		c->sample_rate,		0);
	av_opt_set_int(swr, "out_sample_rate",		c->sample_rate,		0);
	av_opt_set_int(swr, "in_sample_fmt",		c->sample_fmt,		0);
	av_opt_set_int(swr, "out_sample_fmt",		AV_SAMPLE_FMT_S16,	0);

	swr_init(swr);

	uint8_t *outbuf = NULL;
	int outsamples = 0;
	int outsamples_max = 0;

	int64_t pts = 0, start_pts = 0, next_skip_pts = 0;
	uint64_t skip = 0;
	int seek_flags = 0;

	do
	{
		int actSecsToSkip = *secondsToSkip;
		if (!is_stream && (actSecsToSkip || *state==FF || *state==REV) && avc->streams[best_stream]->time_base.num) {
			if (!next_skip_pts || pts >= next_skip_pts) {
				skip = avc->streams[best_stream]->time_base.den / avc->streams[best_stream]->time_base.num;
				if (actSecsToSkip)
					skip *= actSecsToSkip;
				if (*state == REV) {
					next_skip_pts = pts - skip;
					pts = next_skip_pts - skip/4;
					seek_flags = AVSEEK_FLAG_BACKWARD;
					if (pts < start_pts) {
						pts = start_pts;
						*state = PAUSE; 
					}
				} else {
					pts += skip;
					next_skip_pts = pts + skip/4;
					seek_flags = 0;
				}
				av_seek_frame(avc, best_stream, pts, seek_flags);
				skip = 0;
				// if a custom value was set we only jump once
				if (actSecsToSkip != 0) {
					*state=PLAY;
					*secondsToSkip = 0;
				}
			}
		}

		while(*state==PAUSE && !is_stream)
			usleep(10000);

		if (av_read_frame(avc, &rpacket)) {
			Status=DATA_ERR;
			break;
		}

		if (rpacket.stream_index != best_stream) {
			av_free_packet(&rpacket);
			continue;
		}

		AVPacket packet = rpacket;
		while (packet.size > 0) {
			int got_frame = 0;
			if (!frame) {
				if (!(frame = avcodec_alloc_frame())) {
					Status=DATA_ERR;
					break;
				}
			} else
				avcodec_get_frame_defaults(frame);

			int len = avcodec_decode_audio4(c, frame, &got_frame, &packet);
			if (len < 0) {
				// skip frame
				packet.size = 0;
				avcodec_flush_buffers(c);
				mutex.lock();
				avcodec_close(c);
				avcodec_open2(c, codec, NULL);
				mutex.unlock();
				continue;
			}
			if (got_frame && *state!=PAUSE) {
				int out_linesize;
				outsamples = av_rescale_rnd(swr_get_delay(swr, c->sample_rate) + frame->nb_samples,
					c->sample_rate, c->sample_rate, AV_ROUND_UP);
				if (outsamples > outsamples_max) {
					av_free(outbuf);
					if (av_samples_alloc(&outbuf, &out_samples, mChannels, //c->channels,
								frame->nb_samples, AV_SAMPLE_FMT_S16, 1) < 0) {
						Status=WRITE_ERR;
						packet.size = 0;
						break;
					}
					outsamples_max = outsamples;
				}
				outsamples = swr_convert(swr, &outbuf, outsamples,
							(const uint8_t **) &frame->data[0], frame->nb_samples);
				int outbuf_size = av_samples_get_buffer_size(&out_linesize, mChannels, //c->channels,
									  outsamples, AV_SAMPLE_FMT_S16, 1);

				if(audioDecoder->WriteClip((unsigned char*) outbuf, outbuf_size) != outbuf_size)
				{
					fprintf(stderr,"%s: PCM write error (%s).\n", ProgName, strerror(errno));
					Status=WRITE_ERR;
				}
				pts = av_frame_get_best_effort_timestamp(frame);
				if (!start_pts)
					start_pts = pts;
			}
			packet.size -= len;
			packet.data += len;
		}
		if (time_played && avc->streams[best_stream]->time_base.den)
			*time_played = (pts - start_pts) * avc->streams[best_stream]->time_base.num / avc->streams[best_stream]->time_base.den;
		av_free_packet(&rpacket);
	} while (*state!=STOP_REQ && Status==OK);

	audioDecoder->StopClip();
	meta_data_valid = false;

	swr_free(&swr);
	av_free(outbuf);
	av_free_packet(&rpacket);
	avcodec_free_frame(&frame);
	avcodec_close(c);
	//av_free(avcc);

	DeInit();
	if (_meta_data->cover_temporary && !_meta_data->cover.empty()) {
		_meta_data->cover_temporary = false;
		unlink(_meta_data->cover.c_str());
	}
	return Status;
}
Ejemplo n.º 28
static vod_status_t 
audio_filter_process_frame(audio_filter_state_t* state, u_char* buffer)
{
	audio_filter_source_t* source = state->cur_source;
	input_frame_t* frame = source->cur_frame;
	AVPacket input_packet;
	int got_frame;
	int avrc;
#ifdef AUDIO_FILTER_DEBUG
	size_t data_size;
#endif // AUDIO_FILTER_DEBUG
	
#ifdef AUDIO_FILTER_DEBUG
	audio_filter_append_debug_data("input", "aac", buffer, frame->size);
#endif // AUDIO_FILTER_DEBUG
	
	vod_memzero(&input_packet, sizeof(input_packet));
	input_packet.data = buffer;
	input_packet.size = frame->size;
	input_packet.dts = state->dts;
	input_packet.pts = state->dts + frame->pts_delay;
	input_packet.duration = frame->duration;
	input_packet.flags = AV_PKT_FLAG_KEY;
	state->dts += frame->duration;
	
	av_frame_unref(state->decoded_frame);

	got_frame = 0;
	avrc = avcodec_decode_audio4(source->decoder, state->decoded_frame, &got_frame, &input_packet);
	if (avrc < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: avcodec_decode_audio4 failed %d", avrc);
		return VOD_BAD_DATA;
	}

	if (!got_frame)
	{
		return VOD_OK;
	}

#ifdef AUDIO_FILTER_DEBUG
	data_size = av_samples_get_buffer_size(
		NULL, 
		source->decoder->channels,
		state->decoded_frame->nb_samples,
		source->decoder->sample_fmt,
		1);
	audio_filter_append_debug_data(source->buffer_src->name, "pcm", state->decoded_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG
	
	avrc = av_buffersrc_add_frame_flags(source->buffer_src, state->decoded_frame, AV_BUFFERSRC_FLAG_PUSH);
	if (avrc < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: av_buffersrc_add_frame_flags failed %d", avrc);
		return VOD_ALLOC_FAILED;
	}

	return audio_filter_read_filter_sink(state);
}
Ejemplo n.º 29
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
  AVCodecContext *c;
  int ret;

  c = st->codec;

  /* open it */
  ret = avcodec_open2(c, codec, NULL);
  if (ret < 0) {
    fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
    exit(1);
  }

  /* init signal generator */
  t     = 0;
  tincr = 2 * M_PI * 110.0 / c->sample_rate;
  /* increment frequency by 110 Hz per second */
  tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

  src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
    10000 : c->frame_size;

  ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
                                           src_nb_samples, c->sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate source samples\n");
    exit(1);
  }

  /* create resampler context */
  if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
    swr_ctx = swr_alloc();
    if (!swr_ctx) {
      fprintf(stderr, "Could not allocate resampler context\n");
      exit(1);
    }

    /* set options */
    av_opt_set_int       (swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(swr_ctx)) < 0) {
      fprintf(stderr, "Failed to initialize the resampling context\n");
      exit(1);
    }
  }

  /* compute the number of converted samples: buffering is avoided
   * ensuring that the output buffer will contain at least all the
   * converted input samples */
  max_dst_nb_samples = src_nb_samples;
  ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
                                           max_dst_nb_samples, c->sample_fmt, 0);
  if (ret < 0) {
    fprintf(stderr, "Could not allocate destination samples\n");
    exit(1);
  }
  dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
                                                c->sample_fmt, 0);
}
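open_audio() only prepares the conversion; the matching step at encode time feeds the generated S16 samples through swr_ctx into the encoder's format. A sketch using the globals set up above (illustrative, assuming this follows the usual muxing-example structure):

/* Sketch: convert generated S16 audio into the encoder's sample format.
 * Returns converted samples per channel, or a negative AVERROR. */
static int convert_generated_audio(void)
{
    if (!swr_ctx)               /* encoder already takes S16 */
        return src_nb_samples;
    return swr_convert(swr_ctx, dst_samples_data, max_dst_nb_samples,
                       (const uint8_t **)src_samples_data, src_nb_samples);
}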
Ejemplo n.º 30
hb_buffer_t* hb_audio_resample(hb_audio_resample_t *resample,
                               uint8_t **samples, int nsamples)
{
    if (resample == NULL)
    {
        hb_error("hb_audio_resample: resample is NULL");
        return NULL;
    }
    if (resample->resample_needed && resample->avresample == NULL)
    {
        hb_error("hb_audio_resample: resample needed but libavresample context "
                 "is NULL");
        return NULL;
    }

    hb_buffer_t *out;
    int out_size, out_samples;

    if (resample->resample_needed)
    {
        int in_linesize, out_linesize;
        // set in/out linesize and out_size
        av_samples_get_buffer_size(&in_linesize,
                                   resample->resample.channels, nsamples,
                                   resample->resample.sample_fmt, 0);
        out_size = av_samples_get_buffer_size(&out_linesize,
                                              resample->out.channels, nsamples,
                                              resample->out.sample_fmt, 0);
        out = hb_buffer_init(out_size);

        out_samples = avresample_convert(resample->avresample,
                                         &out->data, out_linesize, nsamples,
                                         samples,     in_linesize, nsamples);

        if (out_samples <= 0)
        {
            if (out_samples < 0)
                hb_log("hb_audio_resample: avresample_convert() failed");
            // don't send empty buffers downstream (EOF)
            hb_buffer_close(&out);
            return NULL;
        }
        out->size = (out_samples *
                     resample->out.sample_size * resample->out.channels);
    }
    else
    {
        out_samples = nsamples;
        out_size = (out_samples *
                    resample->out.sample_size * resample->out.channels);
        out = hb_buffer_init(out_size);
        memcpy(out->data, samples[0], out_size);
    }

    /*
     * Dual Mono to Mono.
     *
     * Copy all left or right samples to the first half of the buffer and halve
     * the buffer size.
     */
    if (resample->dual_mono_downmix)
    {
        int ii, jj = !!resample->dual_mono_right_only;
        int sample_size = resample->out.sample_size;
        uint8_t *audio_samples = out->data;
        for (ii = 0; ii < out_samples; ii++)
        {
            memcpy(audio_samples + (ii * sample_size),
                   audio_samples + (jj * sample_size), sample_size);
            jj += 2;
        }
        out->size = out_samples * sample_size;
    }

    return out;
}