Example #1
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = avcodec_alloc_frame();
    int got_packet;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);
    frame->nb_samples = audio_input_frame_size;
    avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
                             (uint8_t *)samples,
                             audio_input_frame_size *
                             av_get_bytes_per_sample(c->sample_fmt) *
                             c->channels, 1);

    avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (!got_packet) {
        avcodec_free_frame(&frame); /* free the frame on the early-out path too */
        return;
    }

    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
    avcodec_free_frame(&frame);
}
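Note: avcodec_encode_audio2(), avcodec_alloc_frame()/avcodec_free_frame() and the st->codec field used above were deprecated and eventually removed from FFmpeg. On current releases the same encode-and-mux step uses the send/receive API. A minimal sketch, assuming a separate AVCodecContext *enc for the stream (the function name encode_and_mux is illustrative, not from the example above):

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

static int encode_and_mux(AVFormatContext *oc, AVStream *st,
                          AVCodecContext *enc, AVFrame *frame)
{
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    /* frame == NULL puts the encoder into flush mode */
    ret = avcodec_send_frame(enc, frame);

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0;   /* needs more input, or fully drained: not an error */
            break;
        }
        if (ret < 0)
            break;     /* genuine encoding error */

        /* codec time base -> stream time base, then hand off to the muxer */
        av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_interleaved_write_frame(oc, pkt);
    }

    av_packet_free(&pkt);
    return ret;
}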
Example #2
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;
    
    av_init_packet(&pkt);
    c = ost->st->codec;
    
    frame = get_audio_frame(ost);
    
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        /* source and destination rates are identical here, so the rescale is
         * a no-op and the assert below documents that expectation */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);
        
        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;
        
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }
    
    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    
    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
    
    return (frame || got_packet) ? 0 : 1;
}
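Examples #2 and #6 call a write_frame() helper that this listing does not show. A plausible definition, consistent with the timestamp handling done inline in Examples #5 and #7 (a sketch, not the project's verified source):

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base,
                       AVStream *st, AVPacket *pkt)
{
    /* rescale packet timestamps from the codec to the stream time base */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

    /* interleave and write the compressed frame to the media file */
    return av_interleaved_write_frame(fmt_ctx, pkt);
}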
Example #3
/*
 * Class:     com_chris_yanlong_player
 * Method:    get_audio_frame
 * Signature: (I)[B
 */
JNIEXPORT jbyteArray JNICALL Java_com_chris_yanlong_PlayerView_get_1audio_1frame
	(JNIEnv *env, jobject obj, jint handle , jobject audio_pts , jobject audio_size ,jobject byteBuffer){

	/* The Java side supplies a direct ByteBuffer; the decoded samples are
	 * written straight into its backing memory. */
	jbyte *directBuffer = (*env)->GetDirectBufferAddress(env, byteBuffer);

	media_handle_union_t * media_handle = (media_handle_union_t *) handle; /* currently unused */

	/* Return the PTS through the boxed java.lang.Double by writing its
	 * private "value" field directly. */
	jclass c;
	jfieldID id;
	c = (*env)->FindClass(env ,"java/lang/Double");
	if (c == NULL)
	{
	   __android_log_print(ANDROID_LOG_INFO,"chris_magic", "jclass is null");
//	   return -1;
	   exit(NULL_POINT);
	}

	id = (*env)->GetFieldID( env ,c, "value", "D");
	if (id==NULL)
	{
	   __android_log_print(ANDROID_LOG_INFO,"chris_magic", "id is null");
//	   return -1;
	   exit(NULL_POINT);
	}

	double audio_frame_pts = 0;
	int audio_frame_size = 0;
	get_audio_frame((int)handle ,&audio_frame_size ,&audio_frame_pts  ,directBuffer);

	(*env)->SetDoubleField(env ,audio_pts, id, audio_frame_pts);

	jclass c1;
	jfieldID id1;
	c1 = (*env)->FindClass(env ,"java/lang/Integer");
	if (c1 == NULL)
	{
	   __android_log_print(ANDROID_LOG_INFO,"chris_magic", "jclass is null");
//	   return -1;
	   exit(NULL_POINT);
	}

	id1 = (*env)->GetFieldID( env ,c1, "value", "I");
	if (id1==NULL)
	{
	   __android_log_print(ANDROID_LOG_INFO,"chris_magic", "id is null");
//	   return -1;
	   exit(NULL_POINT);
	}
	(*env)->SetIntField(env ,audio_size, id1, audio_frame_size);

	/* The byte[] copy below is disabled; the samples already went out through
	 * the direct ByteBuffer, so this function always returns NULL. */
	jbyteArray result=NULL;
//	result= (*env)->NewByteArray(env ,media_handle->audio_size);
//
//	(*env)->SetByteArrayRegion(env ,result ,0 ,media_handle->audio_size ,media_handle->audio_buf );

	return result;

}
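The example above returns its results by writing the private "value" fields of boxed java.lang.Double and java.lang.Integer objects; those fields are final and their layout is a VM implementation detail, so this is fragile. A safer sketch passes one-element arrays instead. Everything here is hypothetical: the _v2 entry point assumes the Java declaration is changed to double[]/int[] parameters, and the get_audio_frame() prototype is inferred from the call above.

#include <jni.h>

/* signature inferred from the call in the example above */
extern void get_audio_frame(int handle, int *size, double *pts, jbyte *buf);

JNIEXPORT void JNICALL Java_com_chris_yanlong_PlayerView_get_1audio_1frame_1v2
	(JNIEnv *env, jobject obj, jint handle,
	 jdoubleArray audio_pts, jintArray audio_size, jobject byteBuffer)
{
	jbyte *directBuffer = (*env)->GetDirectBufferAddress(env, byteBuffer);

	double frame_pts = 0;
	int frame_size = 0;
	get_audio_frame((int)handle, &frame_size, &frame_pts, directBuffer);

	/* copy the results into caller-supplied double[1] / int[1] arrays */
	jdouble pts_out = frame_pts;
	jint size_out = frame_size;
	(*env)->SetDoubleArrayRegion(env, audio_pts, 0, 1, &pts_out);
	(*env)->SetIntArrayRegion(env, audio_size, 0, 1, &size_out);
}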
Example #4
bool AV::addTestAudioFrame() {
	if(!use_audio) {
		printf("Cannot add audio stream, we're not using audio.\n");
		return false;
	}
	AVCodecContext* c = ct.as->codec;
	AVPacket packet = {0}; // data and size must be '0' (allocation is done for you :> )
	AVFrame* frame = avcodec_alloc_frame(); 
	int got_packet = 0;
	
	av_init_packet(&packet);
	get_audio_frame(ct.atest_samples, ct.atest_frame_size);
	
	frame->nb_samples = ct.atest_frame_size;
	
	avcodec_fill_audio_frame(
			frame
			,c->channels
			,c->sample_fmt
			,(uint8_t*)ct.atest_samples
			,ct.atest_frame_size * av_get_bytes_per_sample(c->sample_fmt) * c->channels
			,1
	);
	
	avcodec_encode_audio2(c, &packet, frame, &got_packet);
	avcodec_free_frame(&frame); // the encoder has consumed the samples; avoid leaking the frame
	if(!got_packet) {
		return false;
	}
	
	AVRational stream_time_base = ct.as->time_base; // stream time base
	AVRational codec_time_base = ct.as->codec->time_base; // codec time base
	
	packet.stream_index = ct.as->index;
	packet.pts = av_rescale_q(packet.pts, codec_time_base, stream_time_base);
	packet.dts = av_rescale_q(packet.dts, codec_time_base, stream_time_base);
	packet.duration  = av_rescale_q(packet.duration, codec_time_base, stream_time_base);
	
	printf("Audio (test): stream: %d\n", packet.stream_index);
	printf("Audio (test): ct.acounter: %d\n", ct.acounter);
	printf("Audio (test): packet.duration: %d\n", packet.duration);
	printf("Audio (test): stream.time_base, num=%d, den=%d\n", stream_time_base.num, stream_time_base.den);
	printf("Audio (test): codec.time_base, num=%d, den=%d\n", codec_time_base.num, codec_time_base.den);
	printf("Audio (test): coded_frame.pts: %lld\n", ct.as->codec->coded_frame->pts);
	printf("Audio (test): packet.pts: %lld\n" ,packet.pts);
	printf("-------------------\n");


	if(av_interleaved_write_frame(ct.c, &packet) != 0) {
		printf("Cannot write frame.\n");
		return false;
	}
	return true;
}
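Since FFmpeg 2.3, the three av_rescale_q() calls above collapse into a single av_packet_rescale_ts(), which additionally leaves pts/dts untouched when they are AV_NOPTS_VALUE instead of rescaling the sentinel into a bogus timestamp:

	// drop-in replacement for the three av_rescale_q() lines above
	packet.stream_index = ct.as->index;
	av_packet_rescale_ts(&packet, codec_time_base, stream_time_base);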
Example #5
BOOL CVideoLivRecord::write_audio_frame(AVStream *st, void* pBuffer, LONG len)
{
	AVCodecContext* avcc = st->codec;
	AVPacket pkt = {0};
	av_init_packet(&pkt);
	AVFrame* frame = get_audio_frame(st, pBuffer, len);
	int ret = 0;
	int dst_nb_samples = 0;
	if (frame){
		dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(m_pAudioSwrctx, avcc->sample_rate) + frame->nb_samples,
			avcc->sample_rate, avcc->sample_rate, AV_ROUND_UP);
		av_assert0(dst_nb_samples == frame->nb_samples);
		ret = av_frame_make_writable(m_pAudioFrame);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		ret = swr_convert(m_pAudioSwrctx, m_pAudioFrame->data, dst_nb_samples, 
			             (const uint8_t**)frame->data, frame->nb_samples);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- swr_convert() error");
			return FALSE;
		}
		frame = m_pAudioFrame;
		AVRational tmp = {1, avcc->sample_rate};

		frame->pts = av_rescale_q(m_AudioSamplesCount, tmp, avcc->time_base);
		m_AudioSamplesCount += dst_nb_samples;
	}
	int got_packet = 0;
	ret = avcodec_encode_audio2(avcc, &pkt, frame, &got_packet);
	if (ret < 0){
		log("[CVideoLivRecord::write_audio_frame] -- avcodec_encode_audio2() error");
		return FALSE;
	}
	if(got_packet){
		av_packet_rescale_ts(&pkt, avcc->time_base, st->time_base);
		pkt.stream_index = st->index;
		ret = av_interleaved_write_frame(m_pAVFormatContext, &pkt);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_interleaved_write_frame() error");
			return FALSE;
		}
	}
	/* Note: mirrors the 0/1 "finished" convention of the muxing.c sample, so
	 * TRUE here means "fully drained", not "success". */
	return (frame || got_packet)? FALSE : TRUE;
}
Example #6
static void write_audio_frame(AVFormatContext *oc, AVStream *st, int flush)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    if (!flush) {
        get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

        /* convert samples from native format to destination codec format, using the resampler */
        if (swr_ctx) {
            /* compute destination number of samples */
            dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                            c->sample_rate, c->sample_rate, AV_ROUND_UP);
            if (dst_nb_samples > max_dst_nb_samples) {
                av_free(dst_samples_data[0]);
                ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                       dst_nb_samples, c->sample_fmt, 0);
                if (ret < 0)
                    exit(1);
                max_dst_nb_samples = dst_nb_samples;
                dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                              c->sample_fmt, 0);
            }

            /* convert to destination format */
            ret = swr_convert(swr_ctx,
                              dst_samples_data, dst_nb_samples,
                              (const uint8_t **)src_samples_data, src_nb_samples);
            if (ret < 0) {
                fprintf(stderr, "Error while converting\n");
                exit(1);
            }
        } else {
            dst_nb_samples = src_nb_samples;
        }

        audio_frame->nb_samples = dst_nb_samples;
        audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                                 dst_samples_data[0], dst_samples_size, 0);
        samples_count += dst_nb_samples;
    }

    ret = avcodec_encode_audio2(c, &pkt, flush ? NULL : audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet) {
        if (flush)
            audio_is_eof = 1;
        return;
    }

    ret = write_frame(oc, &c->time_base, st, &pkt);
    if (ret < 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}
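The flush parameter drains the encoder: once the audio source runs dry, the caller keeps invoking this function with flush set, so a NULL frame reaches avcodec_encode_audio2() until no more packets come out and audio_is_eof flips. A plausible driver loop (audio_st stands in for the audio AVStream; the original project's variable names are not shown in this listing):

    /* after the last real audio frame has been submitted: */
    while (!audio_is_eof)
        write_audio_frame(oc, audio_st, 1 /* flush */);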
Example #7
File: muxing.c (project: vlakuc/algo)
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

    /* convert samples from native format to destination codec format, using the resampler */
    if (swr_ctx) {
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_samples_data[0]);
            ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                   dst_nb_samples, c->sample_fmt, 0);
            if (ret < 0)
                exit(1);
            max_dst_nb_samples = dst_nb_samples;
            dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                          c->sample_fmt, 0);
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx,
                          dst_samples_data, dst_nb_samples,
                          (const uint8_t **)src_samples_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
    } else {
        dst_nb_samples = src_nb_samples;
    }

    audio_frame->nb_samples = dst_nb_samples;
    audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
    avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                             dst_samples_data[0], dst_samples_size, 0);
    samples_count += dst_nb_samples;

    ret = avcodec_encode_audio2(c, &pkt, audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet)
        return;

    /* rescale output packet timestamp values from codec to stream timebase */
    pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}
Example #8
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame;
    int got_output = 0;
    int ret;

    frame = get_audio_frame(ost);
    got_output |= !!frame;

    /* feed the data to lavr */
    if (frame) {
        ret = avresample_convert(ost->avr, NULL, 0, 0,
                                 frame->extended_data, frame->linesize[0],
                                 frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error feeding audio data to the resampler\n");
            exit(1);
        }
    }

    while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
           (!frame && avresample_get_out_samples(ost->avr, 0))) {
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* the difference between the two avresample calls here is that the
         * first one just reads the already converted data that is buffered in
         * the lavr output buffer, while the second one also flushes the
         * resampler */
        if (frame) {
            ret = avresample_read(ost->avr, ost->frame->extended_data,
                                  ost->frame->nb_samples);
        } else {
            ret = avresample_convert(ost->avr, ost->frame->extended_data,
                                     ost->frame->linesize[0], ost->frame->nb_samples,
                                     NULL, 0, 0);
        }

        if (ret < 0) {
            fprintf(stderr, "Error while resampling\n");
            exit(1);
        } else if (frame && ret != ost->frame->nb_samples) {
            fprintf(stderr, "Too few samples returned from lavr\n");
            exit(1);
        }

        ost->frame->nb_samples = ret;

        ost->frame->pts        = ost->next_pts;
        ost->next_pts         += ost->frame->nb_samples;

        got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
    }

    return !got_output;
}
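Example #8 delegates the actual encoding to encode_audio_frame(), which this listing omits. A plausible definition in the same pre-3.x API style as the surrounding examples (a sketch, not the project's verified source; it returns nonzero when a packet was produced, matching how got_output is accumulated above):

static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,
                              AVFrame *frame)
{
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet = 0;
    int ret;

    av_init_packet(&pkt);
    ret = avcodec_encode_audio2(ost->st->codec, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame\n");
        exit(1);
    }

    if (got_packet) {
        pkt.stream_index = ost->st->index;
        av_packet_rescale_ts(&pkt, ost->st->codec->time_base, ost->st->time_base);

        /* write the compressed frame to the media file */
        if (av_interleaved_write_frame(oc, &pkt) < 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }

    return got_packet;
}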