Example #1
int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture ){
	CV_FUNCNAME("icv_av_write_frame_FFMPEG");

#if LIBAVFORMAT_BUILD > 4628
	AVCodecContext * c = video_st->codec;
#else
	AVCodecContext * c = &(video_st->codec);
#endif
	int out_size;
	int ret;

	__BEGIN__;

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= video_st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

#if LIBAVFORMAT_BUILD > 4752
            pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
			pkt.pts = c->coded_frame->pts;
#endif
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
		CV_ERROR(CV_StsError, "Error while writing video frame");
	}

	__END__;
	return CV_StsOk;
}
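
Note: avcodec_encode_video() was later removed from FFmpeg; since FFmpeg 3.1 encoding goes through avcodec_send_frame()/avcodec_receive_packet(). Below is a minimal sketch of the same encode-and-write step on that API, assuming oc, video_st, c and picture are set up as in the example above (the function name is invented for illustration):

/* Sketch only: requires libavcodec/avcodec.h and libavformat/avformat.h. */
static int write_frame_send_receive(AVFormatContext *oc, AVStream *video_st,
                                    AVCodecContext *c, AVFrame *picture)
{
    /* picture == NULL would put the encoder into draining (flush) mode */
    int ret = avcodec_send_frame(c, picture);
    if (ret < 0)
        return ret;

    AVPacket *pkt = av_packet_alloc();
    while ((ret = avcodec_receive_packet(c, pkt)) == 0) {
        /* convert encoder timestamps into the stream time base */
        av_packet_rescale_ts(pkt, c->time_base, video_st->time_base);
        pkt->stream_index = video_st->index;
        ret = av_interleaved_write_frame(oc, pkt); /* takes ownership of the packet data */
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    /* EAGAIN (encoder wants more input) and EOF (fully drained) are not errors here */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}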
Example #2
File: ffmpeg_impl.hpp Project: 119/vdc
inline  int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
{
#if LIBAVFORMAT_BUILD > 4628
    AVCodecContext * c = video_st->codec;
#else
    AVCodecContext * c = &(video_st->codec);
#endif
    int out_size;
    int ret = 0;

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

#ifndef PKT_FLAG_KEY
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= video_st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

#if LIBAVFORMAT_BUILD > 4752
            if(c->coded_frame->pts != (s64)AV_NOPTS_VALUE_)
                pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
            pkt.pts = c->coded_frame->pts;
#endif
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_write_frame(oc, &pkt);
        } else {
            ret = OPENCV_NO_FRAMES_WRITTEN_CODE;
        }
    }
    return ret;
}
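
Compared with Example #1, this later OpenCV revision guards the pts rescale against AV_NOPTS_VALUE and reports a buffered (not yet emitted) frame through OPENCV_NO_FRAMES_WRITTEN_CODE instead of silently returning success.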
Example #3
File: ffmpeg.c Project: Winddoing/motion
/* Encodes and writes a video frame using the av_write_frame API. This is
 * a helper function for ffmpeg_put_image and ffmpeg_put_other_image. 
 */
void ffmpeg_put_frame(struct ffmpeg *ffmpeg, AVFrame *pic)
{
    int out_size, ret;
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
    AVPacket pkt;

    av_init_packet(&pkt); /* init static structure */
    pkt.stream_index = ffmpeg->video_st->index;
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */

    if (ffmpeg->oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near future for that */
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
        pkt.flags |= PKT_FLAG_KEY;
        pkt.data = (uint8_t *)pic;
        pkt.size = sizeof(AVPicture);
        ret = av_write_frame(ffmpeg->oc, &pkt);
#else
        ret = av_write_frame(ffmpeg->oc, ffmpeg->video_st->index,
            (uint8_t *)pic, sizeof(AVPicture));
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(AVSTREAM_CODEC_PTR(ffmpeg->video_st),
                                        ffmpeg->video_outbuf, 
                                        ffmpeg->video_outbuf_size, pic);

        /* if zero size, it means the image was buffered */
        if (out_size != 0) {
            /* write the compressed frame in the media file */
            /* XXX: in case of B frames, the pts is not yet valid */
#ifdef FFMPEG_AVWRITEFRAME_NEWAPI
            pkt.pts = AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->pts;
            if (AVSTREAM_CODEC_PTR(ffmpeg->video_st)->coded_frame->key_frame) {
                pkt.flags |= PKT_FLAG_KEY;
            }
            pkt.data = ffmpeg->video_outbuf;
            pkt.size = out_size;
            ret = av_write_frame(ffmpeg->oc, &pkt);
#else
            ret = av_write_frame(ffmpeg->oc, ffmpeg->video_st->index, 
                                 ffmpeg->video_outbuf, out_size);
#endif /* FFMPEG_AVWRITEFRAME_NEWAPI */
        } else {
            ret = 0;
        }
    }
    
    if (ret != 0) {
        motion_log(LOG_ERR, 1, "Error while writing video frame");
        return;
    }
}
Example #4
File: mux.c Project: tguillem/vlc
static int MuxBlock( sout_mux_t *p_mux, sout_input_t *p_input )
{
    sout_mux_sys_t *p_sys = p_mux->p_sys;
    block_t *p_data = block_FifoGet( p_input->p_fifo );
    int i_stream = *((int *)p_input->p_sys);
    AVStream *p_stream = p_sys->oc->streams[i_stream];
    AVPacket pkt;

    memset( &pkt, 0, sizeof(AVPacket) );

    av_init_packet(&pkt);
    pkt.data = p_data->p_buffer;
    pkt.size = p_data->i_buffer;
    pkt.stream_index = i_stream;

    if( p_data->i_flags & BLOCK_FLAG_TYPE_I )
    {
#ifdef AVFMT_ALLOW_FLUSH
        /* Make sure we don't inadvertently mark buffered data as keyframes. */
        if( p_sys->oc->oformat->flags & AVFMT_ALLOW_FLUSH )
            av_write_frame( p_sys->oc, NULL );
#endif

        p_sys->b_write_keyframe = true;
        pkt.flags |= AV_PKT_FLAG_KEY;
    }

    if( p_data->i_pts > 0 )
        pkt.pts = p_data->i_pts * p_stream->time_base.den /
            CLOCK_FREQ / p_stream->time_base.num;
    if( p_data->i_dts > 0 )
        pkt.dts = p_data->i_dts * p_stream->time_base.den /
            CLOCK_FREQ / p_stream->time_base.num;

    /* this is another hack to prevent libavformat from triggering the "non monotone timestamps" check in avformat/utils.c */
    p_stream->cur_dts = ( p_data->i_dts * p_stream->time_base.den /
            CLOCK_FREQ / p_stream->time_base.num ) - 1;

    if( av_write_frame( p_sys->oc, &pkt ) < 0 )
    {
        msg_Err( p_mux, "could not write frame (pts: %"PRId64", dts: %"PRId64") "
                 "(pkt pts: %"PRId64", dts: %"PRId64")",
                 p_data->i_pts, p_data->i_dts, pkt.pts, pkt.dts );
        block_Release( p_data );
        return VLC_EGENERIC;
    }

    block_Release( p_data );
    return VLC_SUCCESS;
}
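
The manual pts/dts arithmetic above (i_pts * time_base.den / CLOCK_FREQ / time_base.num) is an inline time-base conversion. For comparison, a sketch of the same conversion via av_rescale_q, assuming VLC's CLOCK_FREQ microsecond clock (note that av_rescale_q rounds to nearest, while the integer math above truncates):

    /* VLC timestamps count in 1/CLOCK_FREQ (microsecond) units */
    AVRational vlc_tb = { 1, CLOCK_FREQ }; /* CLOCK_FREQ == 1000000 */
    pkt.pts = av_rescale_q( p_data->i_pts, vlc_tb, p_stream->time_base );
    pkt.dts = av_rescale_q( p_data->i_dts, vlc_tb, p_stream->time_base );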
Example #5
File: encoder.c Project: icewwn/libgroove
static int encode_buffer(struct GrooveEncoder *encoder, struct GrooveBuffer *buffer) {
    struct GrooveEncoderPrivate *e = (struct GrooveEncoderPrivate *) encoder;

    av_init_packet(&e->pkt);

    AVFrame *frame = NULL;
    if (buffer) {
        e->encode_head = buffer->item;
        e->encode_pos = buffer->pos;
        e->encode_format = buffer->format;

        struct GrooveBufferPrivate *b = (struct GrooveBufferPrivate *) buffer;
        frame = b->frame;
        frame->pts = e->next_pts;
        e->encode_pts = e->next_pts;
        e->next_pts += buffer->frame_count + 1;
    }

    int got_packet = 0;
    int errcode = avcodec_encode_audio2(e->stream->codec, &e->pkt, frame, &got_packet);
    if (errcode < 0) {
        av_strerror(errcode, e->strbuf, sizeof(e->strbuf));
        av_log(NULL, AV_LOG_ERROR, "error encoding audio frame: %s\n", e->strbuf);
        return -1;
    }
    if (!got_packet)
        return -1;

    av_write_frame(e->fmt_ctx, &e->pkt);
    av_free_packet(&e->pkt);

    return 0;
}
Example #6
void
ffCanvas::Impl::addFrame()
{
    AVStream* stream = mOutputCtx->streams[0];
    AVPacket pkt;
    int got_output;
    
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    
    int ret = avcodec_encode_video2(stream->codec, &pkt, mFrame, &got_output);
    
    if (ret < 0) {
        mError = "video encoding failed";
        return;
    }
    
    if (got_output) {
        pkt.stream_index = stream->index;
        
        if (stream->codec->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        if (av_write_frame(mOutputCtx, &pkt) < 0) {
            mError = "video frame write error";
            av_free_packet(&pkt);
            return;
        }
        av_free_packet(&pkt); // also free the packet after a successful write; otherwise its data leaks
    }
}
Example #7
int  cv_finance_encoder_video_input_frame(
    struct Encoderinfo* encoder_handle,
    unsigned char* image
    // ,int timestamp
) {
    //read YUV
    encoder_handle->pFrame->data[0] = image;              // Y
    encoder_handle->pFrame->data[1] = image + encoder_handle->y_size;     // U
    encoder_handle->pFrame->data[2] = image + encoder_handle->y_size * 5 / 4; // V

    //PTS
    encoder_handle->pFrame->pts = encoder_handle->frameindex;
    int got_picture = 0;

    //Encode
    int ret = avcodec_encode_video2(encoder_handle->pCodecCtx, &encoder_handle->pkt, encoder_handle->pFrame, &got_picture);
    if (ret < 0) {
        printf("Failed to encode! \n");
        return -1;
    }
    if (got_picture == 1) {
        printf("Succeed to encode frame: %5d\tsize:%5d\n", encoder_handle->framecnt, (encoder_handle->pkt).size);
        encoder_handle->framecnt++;
        encoder_handle->pkt.stream_index = encoder_handle->video_st->index;
        ret = av_write_frame(encoder_handle->pFormatCtx, &encoder_handle->pkt);
        av_packet_unref(&encoder_handle->pkt);
    }
    encoder_handle->frameindex++;

    return 0;
}
Example #8
int flush_encoder(AVFormatContext *fmt_ctx,unsigned int stream_index){
    int ret;
    int got_frame;
    AVPacket enc_pkt;
    if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities &
          CODEC_CAP_DELAY))
        return 0;
    while (1) {
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = avcodec_encode_video2 (fmt_ctx->streams[stream_index]->codec, &enc_pkt,
                                     NULL, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame){
            ret=0;
            break;
        }
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n",enc_pkt.size);
        /* mux encoded frame */
        ret = av_write_frame(fmt_ctx, &enc_pkt);
        if (ret < 0)
            break;
    }
    return ret;
}
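
On FFmpeg 3.1 and later the same drain loop is written against the send/receive API: send a NULL frame once to enter draining mode, then receive packets until AVERROR_EOF. A minimal sketch (function name illustrative, error handling trimmed):

static int flush_encoder_send_receive(AVFormatContext *fmt_ctx, AVCodecContext *enc)
{
    int ret = avcodec_send_frame(enc, NULL); /* enter draining mode */
    if (ret < 0)
        return ret;

    AVPacket *pkt = av_packet_alloc();
    while ((ret = avcodec_receive_packet(enc, pkt)) == 0) {
        printf("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", pkt->size);
        ret = av_write_frame(fmt_ctx, pkt); /* mux the drained packet */
        av_packet_unref(pkt);
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 0 : ret;
}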
Example #9
/**
 * Encode one frame worth of audio to the output file.
 * @param      frame                 Samples to be encoded
 * @param      output_format_context Format context of the output file
 * @param      output_codec_context  Codec context of the output file
 * @param[out] data_present          Indicates whether data has been
 *                                   encoded
 * @return Error code (0 if successful)
 */
int Transcode::encode_audio_frame(AVFrame *frame,
                              AVFormatContext *output_format_context,
                              AVCodecContext *output_codec_context,
                              int *data_present)
{
    /* Packet used for temporary storage. */
    AVPacket output_packet;
    int error;
    init_packet(&output_packet);

    /* Set a timestamp based on the sample rate for the container. */
    if (frame) {
        frame->pts = pts;
        pts += frame->nb_samples;
    }

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 48, 0)    
    /* Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this. */
    if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
                                       frame, data_present)) < 0) {
        fprintf(stderr, "Could not encode frame (error '%s')\n",
                av_cplus_err2str(error));
        av_packet_unref(&output_packet);
        return error;
    }
    
#else    
    *data_present = 0;
    error = avcodec_send_frame(output_codec_context, frame);
    if ( error != AVERROR_EOF && error != AVERROR(EAGAIN) && error != 0){
        fprintf(stderr, "Could not send frame (error '%s')\n",
                    av_cplus_err2str(error));
        return error;
    }   

    if ( (error = avcodec_receive_packet(output_codec_context, &output_packet)) == 0)
        *data_present = 1;
        
    if ( error != AVERROR_EOF && error != AVERROR(EAGAIN) && error != 0){
        fprintf(stderr, "Could not receive packet (error '%s')\n",
                    av_cplus_err2str(error));
        return error;
    }   
#endif
    

    /* Write one audio frame from the temporary packet to the output file. */
    if (*data_present) {
        if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
            fprintf(stderr, "Could not write frame (error '%s')\n",
                    av_cplus_err2str(error));
            av_packet_unref(&output_packet);
            return error;
        }
        av_packet_unref(&output_packet);
    }

    return 0;
}
Example #10
static void signal_init_ts(void)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.size = 0;
    pkt.data = NULL;

    pkt.stream_index = 0;
    pkt.dts = video_dts;
    pkt.pts = 0;
    av_write_frame(ctx, &pkt);

    pkt.stream_index = 1;
    pkt.dts = pkt.pts = audio_dts;
    av_write_frame(ctx, &pkt);
}
Example #11
static int decode_packet(struct dec_audio *da, struct demux_packet *mpkt,
                         struct mp_audio **out)
{
    struct spdifContext *spdif_ctx = da->priv;

    spdif_ctx->out_buffer_len  = 0;

    if (!mpkt)
        return 0;

    double pts = mpkt->pts;

    AVPacket pkt;
    mp_set_av_packet(&pkt, mpkt, NULL);
    mpkt->len = 0; // will be fully consumed
    pkt.pts = pkt.dts = 0;
    if (!spdif_ctx->lavf_ctx) {
        if (init_filter(da, &pkt) < 0)
            return -1;
    }
    int ret = av_write_frame(spdif_ctx->lavf_ctx, &pkt);
    avio_flush(spdif_ctx->lavf_ctx->pb);
    if (ret < 0)
        return -1;

    int samples = spdif_ctx->out_buffer_len / spdif_ctx->fmt.sstride;
    *out = mp_audio_pool_get(spdif_ctx->pool, &spdif_ctx->fmt, samples);
    if (!*out)
        return -1;

    memcpy((*out)->planes[0], spdif_ctx->out_buffer, spdif_ctx->out_buffer_len);
    (*out)->pts = pts;

    return 0;
}
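
A note on what this example is doing: the "decoder" here is libavformat's spdif muxer used as a packetizer. mp_set_av_packet() wraps the demuxed packet, av_write_frame() pushes it through the muxer, and custom I/O callbacks (presumably installed by init_filter()) deposit the resulting IEC 61937 bitstream in out_buffer, which is then copied out as ordinary audio samples. Examples #28 and #30 use the same muxer-as-encoder trick.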
Example #12
File: de.c Project: chinasarft/ffmpegtest
void encode2file(){
    int got = 0, ret;

    memcpy(h264_frame, frame, sizeof(AVFrame));
    memcpy(&(h264_frame->data), &scale_video_dst_data, sizeof(scale_video_dst_data));
    memcpy(&h264_frame->linesize, &scale_video_dst_linesize, sizeof(scale_video_dst_linesize));
    //video_dec_ctx->bit_rate = 100*1024*8;
    ret = avcodec_encode_video2(video_enc_ctx, &h264_pkt, h264_frame, &got);
    //ret = avcodec_encode_video2(video_enc_ctx, &h264_pkt, frame, &got);
    if (ret < 0) {
        printf("encode to h264 fail\n");
        av_init_packet(&h264_pkt);
        h264_pkt.data = NULL;
        h264_pkt.size = 0;
    } else {
        if (got) {
            printf("keyframe:%d\n", h264_frame->key_frame);
            av_write_frame(mp4FmtCtx, &h264_pkt);
            av_init_packet(&h264_pkt);
            h264_pkt.data = NULL;
            h264_pkt.size = 0;
        } else {
            printf("not got packet\n");
        }
    }
}
Example #13
void FFMPEGWriter::close()
{
	int got_packet = true;

	while(got_packet)
	{
		AVPacket packet;

		packet.data = nullptr;
		packet.size = 0;

		av_init_packet(&packet);

		if(avcodec_encode_audio2(m_codecCtx, &packet, nullptr, &got_packet))
			AUD_THROW(FileException, "File end couldn't be written, audio encoding failed with ffmpeg.");

		if(got_packet)
		{
			packet.flags |= AV_PKT_FLAG_KEY;
			packet.stream_index = m_stream->index;
			if(av_write_frame(m_formatCtx, &packet))
			{
				av_free_packet(&packet);
				AUD_THROW(FileException, "Final frames couldn't be written to the file with ffmpeg.");
			}
			av_free_packet(&packet);
		}
	}
}
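
Example #25 below is essentially this same drain loop ported to the avcodec_send_frame()/avcodec_receive_packet() API, with the old variant kept behind an FFMPEG_OLD_CODE guard.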
Example #14
//___________________________________________________________________________
uint8_t lavMuxer::writeAudioPacket(uint32_t len, uint8_t *buf, uint32_t sample)
{
    int ret;
    AVPacket pkt;

    if (!audio_st) return 0;
    if (!len) return 1;

    av_init_packet(&pkt);

    pkt.dts = pkt.pts = (int64_t)sample2time_us(sample);

    pkt.flags |= PKT_FLAG_KEY;
    pkt.data = buf;
    pkt.size = len;
    pkt.stream_index = 1;

    aprintf("A: sample: %d frame_pts: %d fq: %d\n", (int32_t)sample, (int32_t)pkt.dts, audio_st->codec->sample_rate);

    ret = av_write_frame(oc, &pkt);
    _lastAudioDts = pkt.dts;
    if (ret)
    {
        printf("Error writing audio packet\n");
        return 0;
    }
    return 1;
}
Example #15
void ExternalOutput::writeAudioData(char* buf, int len){
    RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
    uint16_t currentAudioSequenceNumber = head->getSeqNumber();
    if (currentAudioSequenceNumber != lastAudioSequenceNumber_ + 1) {
        // Something screwy.  We should always see sequence numbers incrementing monotonically.
        ELOG_DEBUG("Unexpected audio sequence number; current %d, previous %d", currentAudioSequenceNumber, lastAudioSequenceNumber_);
    }

    lastAudioSequenceNumber_ = currentAudioSequenceNumber;
    if (firstAudioTimestamp_ == -1) {
        firstAudioTimestamp_ = head->getTimestamp();
    }

    timeval time;
    gettimeofday(&time, NULL);

    // Figure out our audio codec.
    if(context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
        // We don't need any other payload at this time
        if(head->getPayloadType() == PCMU_8000_PT){
            context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
        } else if (head->getPayloadType() == OPUS_48000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
        }
    }

    initContext();

    if (audio_stream_ == NULL) {
        // not yet.
        return;
    }

    long long currentTimestamp = head->getTimestamp();
    if (currentTimestamp - firstAudioTimestamp_ < 0) {
        // The 32-bit RTP timestamp wrapped; add 2^32 to correct it. We only handle
        // a single wrap-around, since that's at least 13 hours of recording.
        currentTimestamp += 0x100000000LL;
    }

    long long timestampToWrite = (currentTimestamp - firstAudioTimestamp_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
    // Adjust for our start time offset
    timestampToWrite += audioOffsetMsec_ / (1000 / audio_stream_->time_base.den);   // in practice, our timebase den is 1000, so this operation is a no-op.

    /* ELOG_DEBUG("Writing audio frame %d with timestamp %u, normalized timestamp %u, audio offset msec %u, length %d, input timebase: %d/%d, target timebase: %d/%d", */
    /*            head->getSeqNumber(), head->getTimestamp(), timestampToWrite, audioOffsetMsec_, ret, */
    /*            audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den,    // timebase we requested */
    /*            audio_stream_->time_base.num, audio_stream_->time_base.den);                 // actual timebase */

    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = (uint8_t*) buf + head->getHeaderLength();
    avpkt.size = len - head->getHeaderLength();
    avpkt.pts = timestampToWrite;
    avpkt.stream_index = 1;
    av_write_frame(context_, &avpkt);
    av_free_packet(&avpkt);
}
Example #16
static void mux_frames(int n)
{
    int end_frames = frames + n;
    while (1) {
        AVPacket pkt;
        uint8_t pktdata[8] = { 0 };
        av_init_packet(&pkt);

        if (av_compare_ts(audio_dts, audio_st->time_base, video_dts, video_st->time_base) < 0) {
            pkt.dts = pkt.pts = audio_dts;
            pkt.stream_index = 1;
            pkt.duration = audio_duration;
            audio_dts += audio_duration;
        } else {
            if (frames == end_frames)
                break;
            pkt.dts = video_dts;
            pkt.stream_index = 0;
            pkt.duration = duration;
            if ((frames % gop_size) == 0) {
                pkt.flags |= AV_PKT_FLAG_KEY;
                last_picture = AV_PICTURE_TYPE_I;
                pkt.pts = pkt.dts + duration;
                video_dts = pkt.pts;
            } else {
                if (last_picture == AV_PICTURE_TYPE_P) {
                    last_picture = AV_PICTURE_TYPE_B;
                    pkt.pts = pkt.dts;
                    video_dts = next_p_pts;
                } else {
                    last_picture = AV_PICTURE_TYPE_P;
                    if (((frames + 1) % gop_size) == 0) {
                        pkt.pts = pkt.dts + duration;
                        video_dts = pkt.pts;
                    } else {
                        next_p_pts = pkt.pts = pkt.dts + 2 * duration;
                        video_dts += duration;
                    }
                }
            }
            if (!bframes)
                pkt.pts = pkt.dts;
            frames++;
        }

        if (clear_duration)
            pkt.duration = 0;
        AV_WB32(pktdata + 4, pkt.pts);
        pkt.data = pktdata;
        pkt.size = 8;
        if (skip_write)
            continue;
        if (skip_write_audio && pkt.stream_index == 1)
            continue;
        av_write_frame(ctx, &pkt);
    }
}
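
This one is muxer test-harness code rather than a real encoder: it synthesizes interleaved audio/video dts/pts patterns (including B-frame reordering, where a P-frame's pts runs ahead of its dts so the intervening B-frames can fill the gap) and feeds 8-byte dummy payloads to av_write_frame(), with the skip_write/skip_write_audio toggles there to exercise muxer edge cases.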
Example #17
int Pixel_to_JPG(const unsigned char *pixelBuff, int pixelSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *jpgBuff, int *jpgSize) {
	AVFormatContext *formatContext;
	AVOutputFormat *outputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame;
	AVPacket packet;
	int ioRet;
	int codecRet;
	int gotPacket;
	int pixelSizeMin;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	outputFormat = av_guess_format("mjpeg", NULL, NULL);
	ioContext = avio_alloc_context(jpgBuff, *jpgSize, 0, NULL, NULL, NULL, NULL);
	formatContext->oformat = outputFormat;
	formatContext->pb = ioContext;
	stream = av_new_stream(formatContext, 0);
	codecContext = stream->codec;
	codecContext->codec_id = outputFormat->video_codec;
	codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
	codecContext->pix_fmt = (enum AVPixelFormat)PF(pixelFmt);
	codecContext->width = pixelWidth;  
	codecContext->height = pixelHeight;
	codecContext->time_base.num = 1;  
	codecContext->time_base.den = 25;   
	codec = avcodec_find_encoder(codecContext->codec_id);
	avcodec_open2(codecContext, codec, NULL);

	avformat_write_header(formatContext, NULL);
	pixelSizeMin = avpicture_get_size(codecContext->pix_fmt, codecContext->width, codecContext->height);
	if (pixelSizeMin <= pixelSize) {
		av_new_packet(&packet, pixelSizeMin);
		frame = avcodec_alloc_frame();
		avpicture_fill((AVPicture *)frame, pixelBuff, codecContext->pix_fmt, codecContext->width, codecContext->height);
		codecRet = avcodec_encode_video2(codecContext, &packet, frame, &gotPacket);
		if (0 <= codecRet && 1 == gotPacket) {
			av_write_frame(formatContext, &packet);
			if (packet.size <= *jpgSize) {
				*jpgSize = packet.size;
				result = *jpgSize;
			}
		}
		avcodec_free_frame(&frame);
		av_free_packet(&packet);
	}
	av_write_trailer(formatContext);

	av_free(ioContext);
	avcodec_close(codecContext);
	avformat_free_context(formatContext);
	return result;
}
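
Note where the output goes: avio_alloc_context() wraps the caller-supplied jpgBuff as the muxer's I/O context, so av_write_frame() lands the JPEG bitstream directly in memory, and *jpgSize is then shrunk to the packet size actually produced (when it fits).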
Example #18
static int rtsp_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    RTSPState *rt = s->priv_data;
    RTSPStream *rtsp_st;
    fd_set rfds;
    int n, tcp_fd;
    struct timeval tv;
    AVFormatContext *rtpctx;
    AVPacket local_pkt;
    int ret;

    tcp_fd = url_get_file_handle(rt->rtsp_hd);

    while (1) {
        FD_ZERO(&rfds);
        FD_SET(tcp_fd, &rfds);
        tv.tv_sec = 0;
        tv.tv_usec = 0;
        n = select(tcp_fd + 1, &rfds, NULL, NULL, &tv);
        if (n <= 0)
            break;
        if (FD_ISSET(tcp_fd, &rfds)) {
            RTSPMessageHeader reply;

            /* Don't let ff_rtsp_read_reply handle interleaved packets,
             * since it would block and wait for an RTSP reply on the socket
             * (which may not be coming any time soon) if it handles
             * interleaved packets internally. */
            ret = ff_rtsp_read_reply(s, &reply, NULL, 1);
            if (ret < 0)
                return AVERROR(EPIPE);
            if (ret == 1)
                ff_rtsp_skip_packet(s);
            /* XXX: parse message */
            if (rt->state != RTSP_STATE_STREAMING)
                return AVERROR(EPIPE);
        }
    }

    if (pkt->stream_index < 0 || pkt->stream_index >= rt->nb_rtsp_streams)
        return AVERROR_INVALIDDATA;
    rtsp_st = rt->rtsp_streams[pkt->stream_index];
    rtpctx = rtsp_st->transport_priv;

    /* Use a local packet for writing to the chained muxer, otherwise
     * the internal stream_index = 0 becomes visible to the muxer user. */
    local_pkt = *pkt;
    local_pkt.stream_index = 0;
    ret = av_write_frame(rtpctx, &local_pkt);
    /* av_write_frame does all the RTP packetization. If using TCP as
     * transport, rtpctx->pb is only a dyn_packet_buf that queues up the
     * packets, so we need to send them out on the TCP connection separately.
     */
    if (!ret && rt->lower_transport == RTSP_LOWER_TRANSPORT_TCP)
        ret = tcp_write_packet(s, rtsp_st);
    return ret;
}
Example #19
File: D2V.cpp Project: dubhater/D2VWitch
bool D2V::handleAudioPacket(AVPacket *packet) {
    if (codecIDRequiresWave64(f->fctx->streams[packet->stream_index]->codec->codec_id)) {
        AVFormatContext *w64_ctx = (AVFormatContext *)audio_files.at(packet->stream_index);

        AVPacket pkt_in = *packet;

        AVFrame *frame = av_frame_alloc();

        while (pkt_in.size) {
            int got_frame = 0;

            AVCodecContext *codec = f->audio_ctx.at(pkt_in.stream_index);

            // We ignore got_frame because pcm_bluray and pcm_dvd decoders don't have any delay.
            int ret = avcodec_decode_audio4(codec, frame, &got_frame, &pkt_in);
            if (ret < 0) {
                char id[20] = { 0 };
                snprintf(id, 19, "%x", f->fctx->streams[pkt_in.stream_index]->id);
                error = "Failed to decode audio packet from stream id ";
                error += id;
                error += ".";

                return false;
            }

            pkt_in.data += ret;
            pkt_in.size -= ret;

            AVPacket pkt_out;
            av_init_packet(&pkt_out);
            pkt_out.data = frame->data[0];
            pkt_out.size = frame->nb_samples * frame->channels * av_get_bytes_per_sample((AVSampleFormat)frame->format);
            pkt_out.stream_index = 0;
            pkt_out.pts = 0;
            pkt_out.dts = 0;

            av_write_frame(w64_ctx, &pkt_out);
        }

        av_frame_free(&frame);
    } else { // Not PCM, just dump it.
        FILE *file = (FILE *)audio_files.at(packet->stream_index);

        if (fwrite(packet->data, 1, packet->size, file) < (size_t)packet->size) {
            char id[20] = { 0 };
            snprintf(id, 19, "%x", f->fctx->streams[packet->stream_index]->id);
            error = "Failed to write audio packet from stream id ";
            error += id;
            error += ": fwrite() failed.";

            return false;
        }
    }

    return true;
}
Example #20
int VideoStream::SendPacket(AVPacket *packet) {
    
    int ret = av_write_frame( ofc, packet );
    if ( ret != 0 )
    {
        Fatal( "Error %d while writing video frame: %s", ret, av_err2str( ret ) );
    }
    av_free_packet(packet);
    return ret;
}
Example #21
void ExternalOutput::writeAudioData(char* buf, int len){
    RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);

    if (firstAudioTimestamp_ == -1) {
        firstAudioTimestamp_ = head->getTimestamp();
    }

    timeval time;
    gettimeofday(&time, NULL);

    // Figure out our audio codec.
    if(context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
        // We don't need any other payload at this time
        if(head->getPayloadType() == PCMU_8000_PT){
            context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
        } else if (head->getPayloadType() == OPUS_48000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
        }
    }

    initContext();

    if (audio_stream_ == NULL) {
        // not yet.
        return;
    }

    int ret = inputProcessor_->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len, unpackagedAudioBuffer_);
    if (ret <= 0)
        return;

    long long currentTimestamp = head->getTimestamp();
    if (currentTimestamp - firstAudioTimestamp_ < 0) {
        // The 32-bit RTP timestamp wrapped; add 2^32 to correct it. We only handle
        // a single wrap-around, since that's at least 13 hours of recording.
        currentTimestamp += 0x100000000LL;
    }

    long long timestampToWrite = (currentTimestamp - firstAudioTimestamp_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
    // Adjust for our start time offset
    timestampToWrite += audioOffsetMsec_ / (1000 / audio_stream_->time_base.den);   // in practice, our timebase den is 1000, so this operation is a no-op.

    /* ELOG_DEBUG("Writing audio frame %d with timestamp %u, normalized timestamp %u, audio offset msec %u, length %d, input timebase: %d/%d, target timebase: %d/%d", */
    /*            head->getSeqNumber(), head->getTimestamp(), timestampToWrite, audioOffsetMsec_, ret, */
    /*            audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den,    // timebase we requested */
    /*            audio_stream_->time_base.num, audio_stream_->time_base.den);                 // actual timebase */

    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = unpackagedAudioBuffer_;
    avpkt.size = ret;
    avpkt.pts = timestampToWrite;
    avpkt.stream_index = 1;
    av_write_frame(context_, &avpkt);
    av_free_packet(&avpkt);
}
Example #22
void ExternalOutput::writeAudioData(char* buf, int len) {
    RTPHeader* head = reinterpret_cast<RTPHeader*>(buf);

    if (initTimeAudio_ == -1) {
        initTimeAudio_ = head->getTimestamp();
    }

    timeval time;
    gettimeofday(&time, NULL);
    unsigned long long millis = (time.tv_sec * 1000) + (time.tv_usec / 1000);
    if (millis - lastFullIntraFrameRequest_ > FIR_INTERVAL_MS) {
        this->sendFirPacket();
        lastFullIntraFrameRequest_ = millis;
    }

    // Figure out our audio codec.
    if(context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
        // We don't need any other payload at this time
        if(head->getPayloadType() == PCMU_8000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
        } else if (head->getPayloadType() == OPUS_48000_PT) {
            context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
        }
    }

    // check if we can initialize our context
    this->initContext();

    if (audio_stream_ == NULL) {
        // not yet.
        return;
    }

    int ret = inputProcessor_->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len, unpackagedAudioBuffer_);
    if (ret <= 0)
        return;

//    ELOG_DEBUG("Writing audio frame %d with timestamp %u, input timebase: %d/%d, target timebase: %d/%d",head->getSeqNumber(), head->getTimestamp(),
//               audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den,    // timebase we requested
//               audio_stream_->time_base.num, audio_stream_->time_base.den);                 // actual timebase

    long long currentTimestamp = head->getTimestamp();
    if (currentTimestamp - initTimeAudio_ < 0) {
        // The 32-bit RTP timestamp wrapped; add 2^32 to correct it. We only handle
        // a single wrap-around, since that's at least 13 hours of recording.
        currentTimestamp += 0x100000000LL;
    }
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = unpackagedAudioBuffer_;
    avpkt.size = ret;
    avpkt.pts = (currentTimestamp - initTimeAudio_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
    avpkt.stream_index = 1;
    av_write_frame(context_, &avpkt);
    av_free_packet(&avpkt);
}
Example #23
/**
    \fn writePacket
*/
bool muxerFFmpeg::writePacket(AVPacket *pkt)
{
#if 0
        printf("Track :%d size :%d PTS:%"PRId64" DTS:%"PRId64"\n",
                    pkt->stream_index,pkt->size,pkt->pts,pkt->dts);
#endif
    int ret = av_write_frame(oc, pkt);
    if(ret)
        return false;
    return true;
}
Example #24
static int segment_end(AVFormatContext *oc, int write_trailer)
{
    int ret = 0;

    av_write_frame(oc, NULL); /* Flush any buffered data (fragmented mp4) */
    if (write_trailer)
        av_write_trailer(oc);
    ff_format_io_close(oc, &oc->pb);

    return ret;
}
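
Passing NULL to av_write_frame() is documented behavior: it gives the muxer a chance to flush internally buffered data, which fragmented MP4 relies on, as the comment notes. Muxers advertise support for this with the AVFMT_ALLOW_FLUSH flag, which Example #4 checks before issuing the same NULL write.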
Example #25
void FFMPEGWriter::close()
{
#ifdef FFMPEG_OLD_CODE
	int got_packet = true;

	while(got_packet)
	{
		m_packet->data = nullptr;
		m_packet->size = 0;

		av_init_packet(m_packet);

		if(avcodec_encode_audio2(m_codecCtx, m_packet, nullptr, &got_packet))
			AUD_THROW(FileException, "File end couldn't be written, audio encoding failed with ffmpeg.");

		if(got_packet)
		{
			m_packet->flags |= AV_PKT_FLAG_KEY;
			m_packet->stream_index = m_stream->index;
			if(av_write_frame(m_formatCtx, m_packet))
			{
				av_free_packet(m_packet);
				AUD_THROW(FileException, "Final frames couldn't be written to the file with ffmpeg.");
			}
			av_free_packet(m_packet);
		}
	}
#else
	if(avcodec_send_frame(m_codecCtx, nullptr) < 0)
		AUD_THROW(FileException, "File couldn't be written, audio encoding failed with ffmpeg.");

	while(avcodec_receive_packet(m_codecCtx, m_packet) == 0)
	{
		m_packet->stream_index = m_stream->index;

		if(av_write_frame(m_formatCtx, m_packet) < 0)
			AUD_THROW(FileException, "Frame couldn't be written to the file with ffmpeg.");
	}
#endif
}
Example #26
static int decode_audio(sh_audio_t *sh, unsigned char *buf,
                        int minlen, int maxlen)
{
    struct spdifContext *spdif_ctx = sh->context;
    AVFormatContext     *lavf_ctx  = spdif_ctx->lavf_ctx;
    AVPacket            pkt;
    double              pts;
    int                 ret, in_size, consumed, x;
    unsigned char       *start = NULL;

    consumed = spdif_ctx->out_buffer_len  = 0;
    spdif_ctx->out_buffer_size = maxlen;
    spdif_ctx->out_buffer      = buf;
    while (spdif_ctx->out_buffer_len + spdif_ctx->iec61937_packet_size < maxlen
           && spdif_ctx->out_buffer_len < minlen) {
        if (sh->ds->eof)
            break;
        x = ds_get_packet_pts(sh->ds, &start, &pts);
        if (x <= 0) {
            x = 0;
            ds_parse(sh->ds, &start, &x, MP_NOPTS_VALUE, 0);
            if (x == 0)
                continue; // END_NOT_FOUND
            in_size = x;
        } else {
            in_size = x;
            consumed = ds_parse(sh->ds, &start, &x, pts, 0);
            if (x == 0) {
                mp_msg(MSGT_DECAUDIO,MSGL_V,
                       "start[%p] pkt.size[%d] in_size[%d] consumed[%d] x[%d].\n",
                       start, 0, in_size, consumed, x);
                continue; // END_NOT_FOUND
            }
            sh->ds->buffer_pos -= in_size - consumed;
        }
        av_init_packet(&pkt);
        pkt.data = start;
        pkt.size = x;
        mp_msg(MSGT_DECAUDIO,MSGL_V,
               "start[%p] pkt.size[%d] in_size[%d] consumed[%d] x[%d].\n",
               start, pkt.size, in_size, consumed, x);
        if (pts != MP_NOPTS_VALUE) {
            sh->pts       = pts;
            sh->pts_bytes = 0;
        }
        ret = av_write_frame(lavf_ctx, &pkt);
        if (ret < 0)
            break;
    }
    sh->pts_bytes += spdif_ctx->out_buffer_len;
    return spdif_ctx->out_buffer_len;
}
Example #27
File: avin.c Project: xieran1988/simple
void node_read_packet(node_t *n)
{
	int i;
	AVPacket pkt;
	i = av_read_frame(n->ifc, &pkt);
	if (i < 0) 
		return ;
	if (pkt.stream_index == 0) {
		if (!strcmp(n->name, "src1"))
			printf("%s dts=%llu size=%d\n", n->name, pkt.dts, pkt.size);
		av_write_frame(n->ofc, &pkt);
	}
}
Example #28
void SPDIFEncoder::WriteFrame(unsigned char *data, int size)
{
    AVPacket packet;

    av_init_packet(&packet);
    packet.data = data;
    packet.size = size;

    if (av_write_frame(m_oc, &packet) < 0)
    {
        VERBOSE(VB_AUDIO, LOC_ERR + "av_write_frame");
    }
}
Example #29
//___________________________________________________________________________
uint8_t lavMuxer::writeAudioPacket(uint32_t len, uint8_t *buf, uint32_t sample)
{
    int ret;
    AVPacket pkt;
    double f;
    int64_t timeInUs;
    static uint64_t sz = 0;
    //printf("Audio packet : size %u, sample %u\n", len, sample);

    if (!audio_st) return 0;
    if (!len) return 1;

    av_init_packet(&pkt);
    timeInUs = (int64_t)sample2time_us(sample);
    aprintf("Sample: %u, time: %"LLU", size: %"LLU", this round: %u\n", sample, timeInUs, sz, len);
    sz += len;
    /* Rescale to ?? */
    if (_type == MUXER_FLV || _type == MUXER_MATROSKA) /* The FLV muxer expects packets dated in ms, there is something I did not get... WTF */
    {
        f = timeInUs / 1000; // ms
        f = floor(f + 0.4);
    }
    else
    {
        f = timeInUs;
        f /= 1000000.; // in seconds
        f *= _audioFq;  // in samples
        f = floor(f + 0.4);
    }
    pkt.dts = pkt.pts = f;
    aprintf("Adm audio dts: %"LLU"\n", pkt.dts);
    //printf("F:%f Q:%u D=%u\n", f, pkt.pts, timeInUs - _lastAudioDts);

    pkt.flags |= PKT_FLAG_KEY;
    pkt.data = buf;
    pkt.size = len;
    pkt.stream_index = 1;
    //pkt.duration = pkt.dts - _lastAudioDts; // Duration
    aprintf("A: sample: %d, frame_pts: %"LLU", fq: %d\n", sample, pkt.dts, audio_st->codec->sample_rate);

    ret = av_write_frame(oc, &pkt);
    _lastAudioDts = timeInUs;
    if (ret)
    {
        printf("[LavFormat]Error writing audio packet\n");
        printf("[LavFormat]pts %llu dts %llu\n", pkt.pts, pkt.dts);
        return 0;
    }
    return 1;
}
Example #30
/**
 * Encode data through created muxer
 * unsigned char data: pointer to data to encode
 * int           size: size of data to encode
 */
void SPDIFEncoder::WriteFrame(unsigned char *data, int size)
{
    AVPacket packet;
    av_init_packet(&packet);
    static int pts = 1; // to avoid warning "Encoder did not produce proper pts"
    packet.pts  = pts++; 
    packet.data    = data;
    packet.size    = size;

    if (av_write_frame(m_oc, &packet) < 0)
    {
        LOG(VB_AUDIO, LOG_ERR, LOC + "av_write_frame");
    }
}
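
A closing note on the call every example here shares: av_write_frame() writes the packet as-is, leaves interleaving across streams to the caller, and does not take ownership of the packet, whereas av_interleaved_write_frame() buffers and reorders packets by dts and does take ownership of the packet reference. A sketch of the interleaved variant, assuming oc, st, enc and pkt are set up as in the examples above:

    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, enc->time_base, st->time_base); /* encoder -> stream time base */
    if (av_interleaved_write_frame(oc, pkt) < 0)              /* pkt is blank afterwards */
        fprintf(stderr, "write failed\n");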