Example #1
int ff_qsv_decode(AVCodecContext *avctx, QSVContext *q,
                  AVFrame *frame, int *got_frame,
                  AVPacket *avpkt)
{
    AVPacket pkt_ref = { 0 };
    int ret = 0;

    if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
        /* we already have buffered packets, so append the new one to the tail */
        ret = av_packet_ref(&pkt_ref, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
    }
    if (q->reinit_pending) {
        ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);

        if (!*got_frame) {
            /* Flushing complete, no more frames  */
            close_decoder(q);
            //return ff_qsv_decode(avctx, q, frame, got_frame, avpkt);
        }
    }
    if (!q->reinit_pending) {
        if (q->pkt_fifo && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
            /* process buffered packets */
            while (!*got_frame && av_fifo_size(q->pkt_fifo) >= sizeof(AVPacket)) {
                av_fifo_generic_read(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
                ret = do_qsv_decode(avctx, q, frame, got_frame, &pkt_ref);
                if (q->reinit_pending) {
                    /*
                       A rare case: a new reinit became pending while draining
                       the buffered packets. Return pkt_ref to its place at the
                       front of the FIFO.
                    */
                    qsv_packet_push_front(q, &pkt_ref);
                } else {
                    av_packet_unref(&pkt_ref);
                }
            }
        } else {
            /* general decoding */
            ret = do_qsv_decode(avctx, q, frame, got_frame, avpkt);
            if (q->reinit_pending) {
                ret = av_packet_ref(&pkt_ref, avpkt);
                if (ret < 0)
                    return ret;
                av_fifo_generic_write(q->pkt_fifo, &pkt_ref, sizeof(pkt_ref), NULL);
            }
        }
    }

    return ret;
}
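Example #1 above stores whole AVPacket structs by value in an av_fifo; av_packet_ref() is what keeps each payload alive while the struct sits in the FIFO. A minimal sketch of that convention, assuming a FIFO created elsewhere with av_fifo_alloc() and with enough free space:

/* Sketch: push a referenced copy of src into a FIFO of AVPacket structs
 * (assumed created with av_fifo_alloc(n * sizeof(AVPacket)) and grown
 * with av_fifo_realloc2() as needed). */
static int fifo_push_pkt(AVFifoBuffer *fifo, const AVPacket *src)
{
    AVPacket ref = { 0 };
    int ret = av_packet_ref(&ref, src);  /* bumps the refcount, or copies */
    if (ret < 0)
        return ret;
    av_fifo_generic_write(fifo, &ref, sizeof(ref), NULL);
    return 0;
}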
Example #2
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

  AVPacketList *pkt1;

  pkt1 = av_malloc(sizeof(AVPacketList));
  if (!pkt1)
    return -1;
  /* take a new reference into the node so the queue owns the payload.
     The original av_packet_ref(pkt, pkt) was a bug (refs a packet onto
     itself), and av_dup_packet() is the deprecated pre-refcount version. */
  if (av_packet_ref(&pkt1->pkt, pkt) < 0) {
    av_free(pkt1);
    return -1;
  }
  pkt1->next = NULL;
  
  
  SDL_LockMutex(q->mutex);
  
  if (!q->last_pkt)
    q->first_pkt = pkt1;
  else
    q->last_pkt->next = pkt1;
  q->last_pkt = pkt1;
  q->nb_packets++;
  q->size += pkt1->pkt.size;
  SDL_CondSignal(q->cond);
  
  SDL_UnlockMutex(q->mutex);
  return 0;
}
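A queue put built on av_packet_ref() implies a matching get that hands the reference to the caller. A sketch of that consumer, assuming the same PacketQueue fields as above (the caller must av_packet_unref() the returned packet):

static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
  AVPacketList *pkt1;
  int ret = -1;

  SDL_LockMutex(q->mutex);
  for (;;) {
    pkt1 = q->first_pkt;
    if (pkt1) {
      q->first_pkt = pkt1->next;
      if (!q->first_pkt)
        q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= pkt1->pkt.size;
      *pkt = pkt1->pkt;   /* hand the reference to the caller */
      av_free(pkt1);
      ret = 1;
      break;
    } else if (!block) {
      ret = 0;
      break;
    } else {
      SDL_CondWait(q->cond, q->mutex);
    }
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}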
Example #3
bool PacketQueue_put(PacketQueue* pq, AVPacket* packet)
{
	AVPacketList* pl;
	pl = av_malloc(sizeof(AVPacketList));
	if (!pl)
		return false;
	if (av_packet_ref(&pl->pkt, packet) < 0) {
		av_free(pl);	/* don't leak the node on failure */
		return false;
	}
	/* no `pl->pkt = *packet;` here -- that would clobber the reference
	   just taken and leak its buffer */
	pl->next = NULL;

	SDL_LockMutex(pq->mutex);
	if (!pq->last)
		pq->first = pl;
	else
		pq->last->next = pl;
	pq->last = pl;
	++pq->nPackets;
	pq->size += pl->pkt.size;

	SDL_CondSignal(pq->cond);

	SDL_UnlockMutex(pq->mutex);
	return true;
}
Example #4
File: Packet.cpp Project: karvaporo/QtAV
 PacketPrivate(const PacketPrivate& o)
     : QSharedData(o)
     , initialized(o.initialized)
 { //used by QSharedDataPointer.detach()
     av_init_packet(&avpkt);
     av_packet_ref(&avpkt, (AVPacket*)&o.avpkt);
 }
Example #5
	PacketBuffer(const PacketBuffer &pktbuf) {
		av_init_packet(&data);
		if (av_packet_ref(&data, &(pktbuf.data)) != 0) {
			data.size = 0;
			data.data = nullptr;
		}
	}
Example #6
static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;
    int ret;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1) {
        return AVERROR(ENOMEM);
    }

    if ((ret = av_packet_ref(&pkt1->pkt, pkt)) < 0) {
        av_free(pkt1);
        return ret;
    }

    pkt1->next = NULL;

    pthread_mutex_lock(&q->mutex);

    if (!q->last_pkt) {
        q->first_pkt = pkt1;
    } else {
        q->last_pkt->next = pkt1;
    }

    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);

    pthread_cond_signal(&q->cond);

    pthread_mutex_unlock(&q->mutex);
    return 0;
}
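Note that unlike Example #2, this variant also counts the list-node overhead (sizeof(*pkt1)) in q->size, so the field can serve as a memory bound. A hypothetical wrapper enforcing such a bound might look like this (max_queue_bytes is an illustrative parameter, not part of the original struct):

/* Sketch only: refuse new packets once the queue passes an assumed byte
 * budget. AVPacketQueue fields are those used above. */
static int avpacket_queue_put_bounded(AVPacketQueue *q, AVPacket *pkt,
                                      unsigned long long max_queue_bytes)
{
    unsigned long long cur;

    pthread_mutex_lock(&q->mutex);
    cur = q->size;
    pthread_mutex_unlock(&q->mutex);

    if (cur > max_queue_bytes)
        return AVERROR(ENOSPC);   /* caller may unref pkt and drop it */
    return avpacket_queue_put(q, pkt);
}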
Example #7
	PacketBuffer(const AVPacket &pkt) {
		av_init_packet(&data);
		if (av_packet_ref(&data, &pkt) != 0) {
			data.size = 0;
			data.data = nullptr;
		}
	}
Example #8
File: bsf.c Project: HazemSamir/FFmpeg
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
{
    if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
        ctx->internal->eof = 1;
        return 0;
    }

    if (ctx->internal->eof) {
        av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
        return AVERROR(EINVAL);
    }

    if (ctx->internal->buffer_pkt->data ||
        ctx->internal->buffer_pkt->side_data_elems)
        return AVERROR(EAGAIN);

    if (pkt->buf) {
        av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
    } else {
        int ret = av_packet_ref(ctx->internal->buffer_pkt, pkt);

        if (ret < 0)
            return ret;
        av_packet_unref(pkt);
    }

    return 0;
}
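The branch above encodes the ownership rule of the packet API: if the packet already owns a refcounted buffer (pkt->buf set), av_packet_move_ref() transfers it without copying; otherwise av_packet_ref() must copy the payload into a new refcounted buffer. The same take-ownership idiom as a standalone sketch:

/* Sketch: take ownership of src into dst, leaving src empty either way. */
static int pkt_take(AVPacket *dst, AVPacket *src)
{
    int ret;

    if (src->buf) {
        av_packet_move_ref(dst, src);   /* zero-copy: steals buf, resets src */
        return 0;
    }
    ret = av_packet_ref(dst, src);      /* copies data into a refcounted buf */
    if (ret < 0)
        return ret;
    av_packet_unref(src);
    return 0;
}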
Example #9
static int ffat_decode(AVCodecContext *avctx, void *data,
                       int *got_frame_ptr, AVPacket *avpkt)
{
    ATDecodeContext *at = avctx->priv_data;
    AVFrame *frame = data;
    int pkt_size = avpkt->size;
    AVPacket filtered_packet = {0};
    OSStatus ret;
    AudioBufferList out_buffers;

    if (avctx->codec_id == AV_CODEC_ID_AAC && avpkt->size > 2 &&
        (AV_RB16(avpkt->data) & 0xfff0) == 0xfff0) {
        AVPacket filter_pkt = {0};
        if (!at->bsf) {
            const AVBitStreamFilter *bsf = av_bsf_get_by_name("aac_adtstoasc");
            if(!bsf)
                return AVERROR_BSF_NOT_FOUND;
            if ((ret = av_bsf_alloc(bsf, &at->bsf)))
                return ret;
            if (((ret = avcodec_parameters_from_context(at->bsf->par_in, avctx)) < 0) ||
                ((ret = av_bsf_init(at->bsf)) < 0)) {
                av_bsf_free(&at->bsf);
                return ret;
            }
        }

        if ((ret = av_packet_ref(&filter_pkt, avpkt)) < 0)
            return ret;

        if ((ret = av_bsf_send_packet(at->bsf, &filter_pkt)) < 0) {
            av_packet_unref(&filter_pkt);
            return ret;
        }

        if ((ret = av_bsf_receive_packet(at->bsf, &filtered_packet)) < 0)
            return ret;

        at->extradata = at->bsf->par_out->extradata;
        at->extradata_size = at->bsf->par_out->extradata_size;

        avpkt = &filtered_packet;
    }

    if (!at->converter) {
        if ((ret = ffat_create_decoder(avctx, avpkt)) < 0) {
            av_packet_unref(&filtered_packet);
            return ret;
        }
    }

    out_buffers = (AudioBufferList){
        .mNumberBuffers = 1,
        .mBuffers = {
            {
                .mNumberChannels = avctx->channels,
                .mDataByteSize = av_get_bytes_per_sample(avctx->sample_fmt) * avctx->frame_size
                                 * avctx->channels,
            }
        }
    };
Example #10
	PacketBuffer& operator=(const PacketBuffer &rhs) {
		if (data.data != rhs.data.data) {
			av_packet_unref(&data);
			if (av_packet_ref(&data, &(rhs.data)) != 0) {
				data.size = 0;
				data.data = nullptr;
			}
		}
		return *this;
	}
Example #11
int Packet::SetPacket(AVPacket *pkt)
{
  ASSERT(!m_Packet);

  m_Packet = av_packet_alloc();
  if (!m_Packet)
    return -1;

  return av_packet_ref(m_Packet, pkt);
}
Example #12
AVPacket *av_packet_clone(AVPacket *src)
{
    AVPacket *ret = av_packet_alloc();

    if (!ret)
        return ret;

    if (av_packet_ref(ret, src))
        av_packet_free(&ret);

    return ret;
}
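So av_packet_clone() is simply av_packet_alloc() plus av_packet_ref(), returning NULL on either failure. A hedged usage sketch inside a demux loop (fmt_ctx is an opened AVFormatContext; enqueue() is a hypothetical consumer):

AVPacket pkt;

while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    AVPacket *copy = av_packet_clone(&pkt);  /* NULL on alloc or ref failure */
    if (copy)
        enqueue(copy);        /* hypothetical consumer; frees via av_packet_free() */
    av_packet_unref(&pkt);    /* safe: copy holds its own reference */
}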
Example #13
StAVPacket::StAVPacket(const StAVPacket& theCopy)
: myStParams(theCopy.myStParams),
  myDurationSec(theCopy.myDurationSec),
  myType(theCopy.myType),
  myIsOwn(false) {
    avInitPacket();
    if(myType == DATA_PACKET) {
    #if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 106, 102))
        av_packet_ref(&myPacket, &theCopy.myPacket); // copy by reference
    #else
        setAVpkt(theCopy.myPacket);
    #endif
    }
}
Example #14
File: mux.c Project: Justin790126/libav
int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
{
    int ret;
    AVPacketList **next_point, *this_pktl;

    this_pktl      = av_mallocz(sizeof(AVPacketList));
    if (!this_pktl)
        return AVERROR(ENOMEM);

    if ((ret = av_packet_ref(&this_pktl->pkt, pkt)) < 0) {
        av_free(this_pktl);
        return ret;
    }

    if (s->streams[pkt->stream_index]->last_in_packet_buffer) {
        next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
    } else
        next_point = &s->internal->packet_buffer;

    if (*next_point) {
        if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
            while (!compare(s, &(*next_point)->pkt, pkt))
                next_point = &(*next_point)->next;
            goto next_non_null;
        } else {
            next_point = &(s->internal->packet_buffer_end->next);
        }
    }
    assert(!*next_point);

    s->internal->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    s->streams[pkt->stream_index]->last_in_packet_buffer =
        *next_point                                      = this_pktl;

    av_packet_unref(pkt);

    return 0;
}
Example #15
File: tee.c Project: 15806905685/FFmpeg
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;
    AVRational tb, tb2;

    for (i = 0; i < tee->nb_slaves; i++) {
        if (!(avf2 = tee->slaves[i].avf))
            continue;

        s = pkt->stream_index;
        s2 = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;

        memset(&pkt2, 0, sizeof(AVPacket));
        if ((ret = av_packet_ref(&pkt2, pkt)) < 0)
            if (!ret_all) {
                ret_all = ret;
                continue;
            }
        tb  = avf ->streams[s ]->time_base;
        tb2 = avf2->streams[s2]->time_base;
        pkt2.pts      = av_rescale_q(pkt->pts,      tb, tb2);
        pkt2.dts      = av_rescale_q(pkt->dts,      tb, tb2);
        pkt2.duration = av_rescale_q(pkt->duration, tb, tb2);
        pkt2.stream_index = s2;

        if ((ret = av_apply_bitstream_filters(avf2->streams[s2]->codec, &pkt2,
                                              tee->slaves[i].bsfs[s2])) < 0 ||
            (ret = av_interleaved_write_frame(avf2, &pkt2)) < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }
    }
    return ret_all;
}
Example #16
static int mp_media_next_packet(mp_media_t *media)
{
	AVPacket new_pkt;
	AVPacket pkt;
	av_init_packet(&pkt);
	new_pkt = pkt;

	int ret = av_read_frame(media->fmt, &pkt);
	if (ret < 0) {
		if (ret != AVERROR_EOF)
			blog(LOG_WARNING, "MP: av_read_frame failed: %s (%d)",
					av_err2str(ret), ret);
		return ret;
	}

	struct mp_decode *d = get_packet_decoder(media, &pkt);
	if (d && pkt.size) {
		/* check the ref: on failure new_pkt has no payload and must not be pushed */
		if (av_packet_ref(&new_pkt, &pkt) == 0)
			mp_decode_push_packet(d, &new_pkt);
	}

	av_packet_unref(&pkt);
	return ret;
}
Example #17
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int ret = 0;

    if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avctx->internal->draining)
        return AVERROR_EOF;

    ret = bsfs_init(avctx);
    if (ret < 0)
        return ret;

    av_packet_unref(avci->buffer_pkt);
    if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
        ret = av_packet_ref(avci->buffer_pkt, avpkt);
        if (ret < 0)
            return ret;
    }

    ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
    if (ret < 0) {
        av_packet_unref(avci->buffer_pkt);
        return ret;
    }

    if (!avci->buffer_frame->buf[0]) {
        ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
        if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret;
    }

    return 0;
}
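Because avcodec_send_packet() refs the caller's packet into avci->buffer_pkt, the caller keeps ownership of avpkt. A minimal sketch of the matching caller-side loop, assuming an opened decoder dec, a demuxed pkt, and an allocated frame:

int ret = avcodec_send_packet(dec, pkt);   /* pkt remains owned by the caller */
if (ret < 0)
    return ret;
while ((ret = avcodec_receive_frame(dec, frame)) >= 0) {
    /* ... consume frame ... */
    av_frame_unref(frame);
}
if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
    return ret;   /* real decoding error */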
Example #18
int main(int argc, char** argv){
	int quadrant_line, quadrant_column;
	
	char *videoFileName = argv[1];
	char quadFileName[64];

	int i = 0, k, j;

	long unsigned int inc = 0;
	long unsigned int incaudio = 0;

	int videoStreamIndex;
	int audioStreamIndex= -1;
	int frameFinished, gotPacket;

	AVDictionary	*codecOptions = NULL;
	
	UDP_PTSframe_t PTS_frame;

	struct tm *start_time_tm;
	char start_time_str[64];
	long unsigned int start_time;
	time_t start_timer_t;
	
	//Crop env
	int tam_quad;
	int frist = 1, marginLeft = 0, marginTop = 0;
	int width , height;

    if (argc < 5) {   /* argv[2..4] (lines, columns, output name) are required below */
        usage();
        return -1;
    }

    signal (SIGTERM, handlerToFinish);
	signal (SIGINT, handlerToFinish);

    tam_quad = sqrt(amount_of_quadrants);
    quadrant_line = atoi(argv[2]);
    quadrant_column = atoi(argv[3]);
    amount_of_quadrants = (quadrant_line * quadrant_column) + 1;

    strcpy (quadFileName, argv[4]);

    //Allocate output stream contexts
    ff_output = malloc (sizeof(ff_output_t) * amount_of_quadrants);

	av_register_all();
	avformat_network_init();

	//Initialize Input
	if (avformat_open_input (&ff_input.formatCtx, videoFileName, NULL, NULL) != 0) {
		printf ("Cold not open input video file at %s\n", videoFileName);
		return -1;
	}

	if (avformat_find_stream_info(ff_input.formatCtx, NULL) < 0) {
		printf ("Cold not get stream info\n");
		return -1;
	}

	av_dump_format(ff_input.formatCtx, 0, videoFileName, 0);

	videoStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &ff_input.encoder, 0);
	if (videoStreamIndex < 0) {
		printf ("no video streams found\n");
		return -1;
	}

	audioStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_input.audioencoder, 0);
    if (audioStreamIndex < 0) {
        printf ("no audio streams found\n");
        return -1;
    }
    printf ("VIDEO ST %d, AUDIO ST %d\n", videoStreamIndex, audioStreamIndex);

    ff_input.audiocodecCtx = ff_input.formatCtx->streams[audioStreamIndex]->codec;
	ff_input.codecCtx = ff_input.formatCtx->streams[videoStreamIndex]->codec;

	if (avcodec_open2 (ff_input.audiocodecCtx, ff_input.audioencoder, NULL) < 0) {
        printf ("Could not open input codec\n");
        return -1;
    }

	if (avcodec_open2 (ff_input.codecCtx, ff_input.encoder, NULL) < 0) {
		printf ("Could not open input codec\n");
		return -1;
	}

	//Get system time and append as metadata
	getSystemTime (&PTS_frame.frameTimeVal); //Must be the same for all output contexts
	start_time = PTS_frame.frameTimeVal.tv_sec;
	start_timer_t = (time_t) start_time;
	start_time_tm = localtime (&start_timer_t);
	strftime(start_time_str, sizeof start_time_str, "%Y-%m-%d %H:%M:%S", start_time_tm);

	if (avformat_alloc_output_context2(&formatCtx, NULL, AV_OUTPUT_FORMAT, quadFileName) < 0) {
			printf ("could not create output context\n");
			return -1;
	}

	//Initialize Video Output Streams
	for (i = 0; i < amount_of_quadrants - 1; i++) {

		ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
		if (ff_output[i].outStream == NULL) {
			printf ("Could not create output stream\n");
			return -1;
		}

		ff_output[i].outStream->id = formatCtx->nb_streams - 1;

		ff_output[i].codecCtx = ff_output[i].outStream->codec;
		ff_output[i].encoder = avcodec_find_encoder_by_name (AV_OUTPUT_CODEC);
		if (ff_output[i].encoder == NULL) {
			printf ("Codec %s not found..\n", AV_OUTPUT_CODEC);
			return -1;
		}

		//Sliced sizes
		width = ff_input.codecCtx->width/quadrant_column;
		height = ff_input.codecCtx->height/quadrant_line;

		ff_output[i].codecCtx->codec_type 	= AVMEDIA_TYPE_VIDEO;
		ff_output[i].codecCtx->height 		= height;
		ff_output[i].codecCtx->width 		= width;
		ff_output[i].codecCtx->pix_fmt		= ff_input.codecCtx->pix_fmt;

		if (strcmp (AV_OUTPUT_CODEC, "libvpx") == 0) {
			//Maintain input aspect ratio for codec and stream info, and b_frames for codec info
			ff_output[i].codecCtx->sample_aspect_ratio = ff_input.codecCtx->sample_aspect_ratio;
			ff_output[i].codecCtx->max_b_frames = ff_input.codecCtx->max_b_frames;
			ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = AV_FRAMERATE;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 10000;			
		}

		if (strcmp (AV_OUTPUT_CODEC, "libx264") == 0) {
			// ff_output[i].codecCtx->profile = FF_PROFILE_H264_MAIN;
			// av_dict_set(&codecOptions, "profile","main",0);

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			ff_output[i].codecCtx->bit_rate_tolerance = 0;
			ff_output[i].codecCtx->rc_max_rate = 0;
			ff_output[i].codecCtx->rc_buffer_size = 0;
			ff_output[i].codecCtx->gop_size = 40;
			ff_output[i].codecCtx->max_b_frames = 3;
			ff_output[i].codecCtx->b_frame_strategy = 1;
			ff_output[i].codecCtx->coder_type = 1;
			ff_output[i].codecCtx->me_cmp = 1;
			ff_output[i].codecCtx->me_range = 16;
			ff_output[i].codecCtx->qmin = 10;
			ff_output[i].codecCtx->qmax = 51;
			ff_output[i].codecCtx->scenechange_threshold = 40;
			ff_output[i].codecCtx->flags |= CODEC_FLAG_LOOP_FILTER;
			ff_output[i].codecCtx->me_method = ME_HEX;
			ff_output[i].codecCtx->me_subpel_quality = 5;
			ff_output[i].codecCtx->i_quant_factor = 0.71;
			ff_output[i].codecCtx->qcompress = 0.6;
			ff_output[i].codecCtx->max_qdiff = 4;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = 24;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 90000;		
		}

		formatCtx->start_time_realtime = start_time;
		av_dict_set (&formatCtx->metadata, "service_name", start_time_str, 0);
		av_dict_set (&formatCtx->metadata, "creation_time", start_time_str, 0);

		//Open codec
		if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
			printf ("Could not open output codec...\n");
			return -1;
		}
	}

	//Initializing Audio Output
	i = amount_of_quadrants-1; //Last stream
	ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
	if (ff_output[i].outStream == NULL) {
		printf ("Could not create output stream\n");
		return -1;
	}

	ff_output[i].outStream->id = formatCtx->nb_streams - 1;

	ff_output[i].codecCtx = ff_output[i].outStream->codec;
	ff_output[i].encoder = avcodec_find_encoder (ff_input.audiocodecCtx->codec_id);
	if (ff_output[i].encoder == NULL) {
		printf ("Codec %s not found..\n", AUDIO_OUTPUT_CODEC);
		return -1;
	}
  
    ff_output[i].codecCtx = ff_output[amount_of_quadrants-1].outStream->codec;
    ff_output[i].codecCtx->codec_id = ff_input.audiocodecCtx->codec_id;
    ff_output[i].codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    ff_output[i].codecCtx->sample_fmt = ff_input.audiocodecCtx->sample_fmt;
    ff_output[i].codecCtx->sample_rate = ff_input.audiocodecCtx->sample_rate;
    ff_output[i].codecCtx->channel_layout = ff_input.audiocodecCtx->channel_layout;
    ff_output[i].codecCtx->channels = av_get_channel_layout_nb_channels(ff_output[amount_of_quadrants-1].codecCtx->channel_layout);
    ff_output[i].codecCtx->bit_rate = ff_input.audiocodecCtx->bit_rate;  
    ff_output[i].codecCtx->sample_aspect_ratio = ff_input.audiocodecCtx->sample_aspect_ratio;
    ff_output[i].codecCtx->max_b_frames = ff_input.audiocodecCtx->max_b_frames;
    ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

    ff_output[i].outStream->time_base.num = ff_input.formatCtx->streams[audioStreamIndex]->time_base.num;
	ff_output[i].outStream->time_base.den = ff_input.formatCtx->streams[audioStreamIndex]->time_base.den;

	ff_output[i].codecCtx->time_base.num = ff_input.audiocodecCtx->time_base.num;
	ff_output[i].codecCtx->time_base.den = ff_input.audiocodecCtx->time_base.den;

	printf("sample_rate %d\n", ff_input.audiocodecCtx->sample_rate);

	//Open codec
	if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
		printf ("Could not open output codec...\n");
		return -1;
	}

	av_dump_format (formatCtx, 0, quadFileName, 1);

	//Open output context
	if (avio_open (&formatCtx->pb, quadFileName, AVIO_FLAG_WRITE)) {
		printf ("avio_open failed %s\n", quadFileName);
		return -1;
	}
	
	//Write format context header
	if (avformat_write_header (formatCtx, &formatCtx->metadata)) {
		printf ("fail to write outstream header\n");
		return -1;
	}

	printf ("OUTPUT TO %s, at %lu\n", quadFileName, start_time);


	incaudio = 0;
	printf("Generating video streams...\n");
	while(av_read_frame (ff_input.formatCtx, &ff_input.packet) >= 0 && _keepEncoder) {
		if (ff_input.packet.stream_index == audioStreamIndex)
		{
			av_packet_ref  (&ff_output[amount_of_quadrants-1].packet, &ff_input.packet); 
            ff_output[amount_of_quadrants-1].packet.stream_index = amount_of_quadrants-1;
            ff_output[amount_of_quadrants-1].packet.pts = incaudio;

            // printf("%lu\n", ff_output[amount_of_quadrants-1].packet.pts);
            // if(gotPacket){
            	if (av_write_frame(formatCtx, &ff_output[amount_of_quadrants-1].packet) < 0) {
	                printf ("Unable to write to output stream..\n");
	                pthread_exit(NULL);
            	// }
            }            
            incaudio += 2880;
		}

		if (ff_input.packet.stream_index == videoStreamIndex) {

			ff_input.frame = av_frame_alloc();
			avcodec_decode_video2 (ff_input.codecCtx, ff_input.frame, &frameFinished, &ff_input.packet);

			if (frameFinished) {
				//TODO: Slice inputFrame and fill avQuadFrames[quadrant]
				//For now, inputFrame is replicated to all quadrants

				ff_input.frame->pts = av_frame_get_best_effort_timestamp (ff_input.frame);
				
				i = 0;
				for ( k = 0; k < quadrant_line; ++k) {
                    for (j = 0; j < quadrant_column; ++j) {
            			ff_output[i].frame = av_frame_alloc();

            			//make the cut quadrant ff_output[i]!
            			av_picture_crop((AVPicture *)ff_output[i].frame, (AVPicture *)ff_input.frame,       
            							ff_input.formatCtx->streams[videoStreamIndex]->codec->pix_fmt, marginTop, marginLeft);
            			
            			ff_output[i].frame->width = width; // updates the new width
						ff_output[i].frame->height = height; // updates the new height
						ff_output[i].frame->format = ff_input.frame->format;

						ff_output[i].frame->pts = inc;

						ff_output[i].packet.data = NULL;
						ff_output[i].packet.size = 0;
						av_init_packet (&ff_output[i].packet);

						avcodec_encode_video2 (ff_output[i].codecCtx, &ff_output[i].packet, ff_output[i].frame, &gotPacket);

						if (gotPacket) {
							ff_output[i].packet.stream_index = i;
							av_packet_rescale_ts (&ff_output[i].packet,
													ff_output[i].codecCtx->time_base,
													ff_output[i].outStream->time_base);

							if (av_write_frame (formatCtx, &ff_output[i].packet) < 0) {
								printf ("Unable to write to output stream..\n");
								pthread_exit(NULL);
							}

						}

						av_frame_free (&ff_output[i].frame);	

						i++;
						marginLeft += width;	

            		}
            		marginLeft = 0;
            		marginTop += height;
            	}
            	marginTop = 0; 
            	i = 0;
            	inc++;
			}
			av_frame_free (&ff_input.frame);
		}
	}

	return 0;
}
Example #19
static int submit_packet(PerThreadContext *p, AVPacket *avpkt)
{
    FrameThreadContext *fctx = p->parent;
    PerThreadContext *prev_thread = fctx->prev_thread;
    const AVCodec *codec = p->avctx->codec;

    if (!avpkt->size && !(codec->capabilities & CODEC_CAP_DELAY)) return 0;

    pthread_mutex_lock(&p->mutex);

    release_delayed_buffers(p);

    if (prev_thread) {
        int err;
        if (prev_thread->state == STATE_SETTING_UP) {
            pthread_mutex_lock(&prev_thread->progress_mutex);
            while (prev_thread->state == STATE_SETTING_UP)
                pthread_cond_wait(&prev_thread->progress_cond, &prev_thread->progress_mutex);
            pthread_mutex_unlock(&prev_thread->progress_mutex);
        }

        err = update_context_from_thread(p->avctx, prev_thread->avctx, 0);
        if (err) {
            pthread_mutex_unlock(&p->mutex);
            return err;
        }
    }

    av_packet_unref(&p->avpkt);
    av_packet_ref(&p->avpkt, avpkt);

    p->state = STATE_SETTING_UP;
    pthread_cond_signal(&p->input_cond);
    pthread_mutex_unlock(&p->mutex);

    /*
     * If the client doesn't have a thread-safe get_buffer(),
     * then decoding threads call back to the main thread,
     * and it calls back to the client here.
     */

FF_DISABLE_DEPRECATION_WARNINGS
    if (!p->avctx->thread_safe_callbacks && (
         p->avctx->get_format != avcodec_default_get_format ||
#if FF_API_GET_BUFFER
         p->avctx->get_buffer ||
#endif
         p->avctx->get_buffer2 != avcodec_default_get_buffer2)) {
FF_ENABLE_DEPRECATION_WARNINGS
        while (p->state != STATE_SETUP_FINISHED && p->state != STATE_INPUT_READY) {
            int call_done = 1;
            pthread_mutex_lock(&p->progress_mutex);
            while (p->state == STATE_SETTING_UP)
                pthread_cond_wait(&p->progress_cond, &p->progress_mutex);

            switch (p->state) {
            case STATE_GET_BUFFER:
                p->result = ff_get_buffer(p->avctx, p->requested_frame, p->requested_flags);
                break;
            case STATE_GET_FORMAT:
                p->result_format = ff_get_format(p->avctx, p->available_formats);
                break;
            default:
                call_done = 0;
                break;
            }
            if (call_done) {
                p->state  = STATE_SETTING_UP;
                pthread_cond_signal(&p->progress_cond);
            }
            pthread_mutex_unlock(&p->progress_mutex);
        }
    }
Example #20
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVBSFContext *bsfs;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;

    for (i = 0; i < tee->nb_slaves; i++) {
        if (!(avf2 = tee->slaves[i].avf))
            continue;

        /* Flush slave if pkt is NULL*/
        if (!pkt) {
            ret = av_interleaved_write_frame(avf2, NULL);
            if (ret < 0) {
                ret = tee_process_slave_failure(avf, i, ret);
                if (!ret_all && ret < 0)
                    ret_all = ret;
            }
            continue;
        }

        s = pkt->stream_index;
        s2 = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;

        memset(&pkt2, 0, sizeof(AVPacket));
        if ((ret = av_packet_ref(&pkt2, pkt)) < 0)
            if (!ret_all) {
                ret_all = ret;
                continue;
            }
        bsfs = tee->slaves[i].bsfs[s2];
        pkt2.stream_index = s2;

        ret = av_bsf_send_packet(bsfs, &pkt2);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR, "Error while sending packet to bitstream filter: %s\n",
                   av_err2str(ret));
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }

        while(1) {
            ret = av_bsf_receive_packet(bsfs, &pkt2);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                break;
            } else if (ret < 0) {
                break;
            }

            av_packet_rescale_ts(&pkt2, bsfs->time_base_out,
                                 avf2->streams[s2]->time_base);
            ret = av_interleaved_write_frame(avf2, &pkt2);
            if (ret < 0)
                break;
        }

        if (ret < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }
    }
    return ret_all;
}
Example #21
File: cuvid.c Project: kinetiknz/FFmpeg
static int cuvid_decode_packet(AVCodecContext *avctx, const AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    CUVIDSOURCEDATAPACKET cupkt;
    AVPacket filter_packet = { 0 };
    AVPacket filtered_packet = { 0 };
    int ret = 0, eret = 0, is_flush = ctx->decoder_flushing;

    av_log(avctx, AV_LOG_TRACE, "cuvid_decode_packet\n");

    if (is_flush && avpkt && avpkt->size)
        return AVERROR_EOF;

    if (av_fifo_size(ctx->frame_queue) / sizeof(CuvidParsedFrame) > MAX_FRAME_COUNT - 2 && avpkt && avpkt->size)
        return AVERROR(EAGAIN);

    if (ctx->bsf && avpkt && avpkt->size) {
        if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
            return ret;
        }

        if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
            av_packet_unref(&filter_packet);
            return ret;
        }

        if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
            return ret;
        }

        avpkt = &filtered_packet;
    }

    ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
    if (ret < 0) {
        av_packet_unref(&filtered_packet);
        return ret;
    }

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt && avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
        ctx->decoder_flushing = 1;
    }

    ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &cupkt));

    av_packet_unref(&filtered_packet);

    if (ret < 0)
        goto error;

    // cuvidParseVideoData doesn't return an error just because stuff failed...
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        ret = ctx->internal_error;
        goto error;
    }

error:
    eret = CHECK_CU(cuCtxPopCurrent(&dummy));

    if (eret < 0)
        return eret;
    else if (ret < 0)
        return ret;
    else if (is_flush)
        return AVERROR_EOF;
    else
        return 0;
}
Example #22
File: Packet.cpp Project: karvaporo/QtAV
// time_base: av_q2d(format_context->streams[stream_idx]->time_base)
bool Packet::fromAVPacket(Packet* pkt, const AVPacket *avpkt, double time_base)
{
    if (!pkt || !avpkt)
        return false;

    pkt->position = avpkt->pos;
    pkt->hasKeyFrame = !!(avpkt->flags & AV_PKT_FLAG_KEY);
    // what about marking avpkt as invalid and do not use isCorrupt?
    pkt->isCorrupt = !!(avpkt->flags & AV_PKT_FLAG_CORRUPT);
    if (pkt->isCorrupt)
        qDebug("currupt packet. pts: %f", pkt->pts);

    // from av_read_frame: pkt->pts can be AV_NOPTS_VALUE if the video format has B-frames, so it is better to rely on pkt->dts if you do not decompress the payload.
    // old code set pts as dts is valid
    if (avpkt->pts != (qint64)AV_NOPTS_VALUE)
        pkt->pts = avpkt->pts * time_base;
    else if (avpkt->dts != (qint64)AV_NOPTS_VALUE) // is it ok?
        pkt->pts = avpkt->dts * time_base;
    else
        pkt->pts = 0; // TODO: init value
    if (avpkt->dts != (qint64)AV_NOPTS_VALUE) //has B-frames
        pkt->dts = avpkt->dts * time_base;
    else
        pkt->dts = pkt->pts;
    //qDebug("avpacket pts %lld, dts: %lld ", avpkt->pts, avpkt->dts);
    //TODO: pts must >= 0? look at ffplay
    pkt->pts = qMax<qreal>(0, pkt->pts);
    pkt->dts = qMax<qreal>(0, pkt->dts);

    // subtitle always has a key frame? convergence_duration may be 0
    if (avpkt->convergence_duration > 0  // mpv demux_lavf only check this
            && pkt->hasKeyFrame
#if 0
            && codec->codec_type == AVMEDIA_TYPE_SUBTITLE
#endif
            )
        pkt->duration = avpkt->convergence_duration * time_base;
    else if (avpkt->duration > 0)
        pkt->duration = avpkt->duration * time_base;
    else
        pkt->duration = 0;

    //qDebug("AVPacket.pts=%f, duration=%f, dts=%lld", pkt->pts, pkt->duration, packet.dts);
    pkt->data.clear();
    // TODO: pkt->avpkt. data is not necessary now. see mpv new_demux_packet_from_avpacket
    // copy properties and side data. does not touch data, size and ref
    pkt->d = QSharedDataPointer<PacketPrivate>(new PacketPrivate());
    pkt->d->initialized = true;
    AVPacket *p = &pkt->d->avpkt;
#if AVPACKET_REF
    av_packet_ref(p, (AVPacket*)avpkt);  //properties are copied internally
    // add ref without copy; QByteArray does not copy either. QByteArray operations like remove() are safe. omit FF_INPUT_BUFFER_PADDING_SIZE
    pkt->data = QByteArray::fromRawData((const char*)p->data, p->size);
#else
    if (avpkt->data) {
        // copy packet data. packet will be reset after AVDemuxer.readFrame() and in next av_read_frame
#if NO_PADDING_DATA
        pkt->data = QByteArray((const char*)avpkt->data, avpkt->size);
#else
    /*!
      larger than the actual read bytes because some optimized bitstream readers read 32 or 64 bits at once and could read over the end.
      The end of the input buffer avpkt->data should be set to 0 to ensure that no overreading happens for damaged MPEG streams
     */
        pkt->data.reserve(avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
        pkt->data.resize(avpkt->size);
        // code in ffmpeg & mpv copies avpkt->size bytes and sets the padding data to 0
        memcpy(pkt->data.data(), avpkt->data, avpkt->size);
        memset((char*)pkt->data.constData() + avpkt->size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
#endif //NO_PADDING_DATA
    }
    av_packet_copy_props(p, avpkt);
    if (!pkt->data.isEmpty()) {
        p->data = (uint8_t*)pkt->data.constData();
        p->size = pkt->data.size();
    }
#endif //AVPACKET_REF
    // QtAV always use ms (1/1000s) and s. As a result no time_base is required in Packet
    p->pts = pkt->pts * 1000.0;
    p->dts = pkt->dts * 1000.0;
    p->duration = pkt->duration * 1000.0;
    return true;
}
Example #23
static int mediacodec_decode_frame(AVCodecContext *avctx, void *data,
                                   int *got_frame, AVPacket *avpkt)
{
    MediaCodecH264DecContext *s = avctx->priv_data;
    AVFrame *frame    = data;
    int ret;

    /* buffer the input packet */
    if (avpkt->size) {
        AVPacket input_pkt = { 0 };

        if (av_fifo_space(s->fifo) < sizeof(input_pkt)) {
            ret = av_fifo_realloc2(s->fifo,
                                   av_fifo_size(s->fifo) + sizeof(input_pkt));
            if (ret < 0)
                return ret;
        }

        ret = av_packet_ref(&input_pkt, avpkt);
        if (ret < 0)
            return ret;
        av_fifo_generic_write(s->fifo, &input_pkt, sizeof(input_pkt), NULL);
    }

    /* process buffered data */
    while (!*got_frame) {
        /* prepare the input data -- convert to Annex B if needed */
        if (s->filtered_pkt.size <= 0) {
            AVPacket input_pkt = { 0 };

            av_packet_unref(&s->filtered_pkt);

            /* no more data */
            if (av_fifo_size(s->fifo) < sizeof(AVPacket)) {
                return avpkt->size ? avpkt->size :
                    ff_mediacodec_dec_decode(avctx, &s->ctx, frame, got_frame, avpkt);
            }

            av_fifo_generic_read(s->fifo, &input_pkt, sizeof(input_pkt), NULL);

            ret = av_bsf_send_packet(s->bsf, &input_pkt);
            if (ret < 0) {
                return ret;
            }

            ret = av_bsf_receive_packet(s->bsf, &s->filtered_pkt);
            if (ret == AVERROR(EAGAIN)) {
                goto done;
            }

            /* h264_mp4toannexb is used here and does not require flushing */
            av_assert0(ret != AVERROR_EOF);

            if (ret < 0) {
                return ret;
            }
        }

        ret = mediacodec_process_data(avctx, frame, got_frame, &s->filtered_pkt);
        if (ret < 0)
            return ret;

        s->filtered_pkt.size -= ret;
        s->filtered_pkt.data += ret;
    }
done:
    return avpkt->size;
}
Example #24
File: cuvid.c Project: libichong/FFmpeg
static int cuvid_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    CuvidContext *ctx = avctx->priv_data;
    AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)ctx->hwdevice->data;
    AVCUDADeviceContext *device_hwctx = device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    AVFrame *frame = data;
    CUVIDSOURCEDATAPACKET cupkt;
    AVPacket filter_packet = { 0 };
    AVPacket filtered_packet = { 0 };
    CUdeviceptr mapped_frame = 0;
    int ret = 0, eret = 0;

    if (ctx->bsf && avpkt->size) {
        if ((ret = av_packet_ref(&filter_packet, avpkt)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_packet_ref failed\n");
            return ret;
        }

        if ((ret = av_bsf_send_packet(ctx->bsf, &filter_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_send_packet failed\n");
            av_packet_unref(&filter_packet);
            return ret;
        }

        if ((ret = av_bsf_receive_packet(ctx->bsf, &filtered_packet)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_bsf_receive_packet failed\n");
            return ret;
        }

        avpkt = &filtered_packet;
    }

    ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
    if (ret < 0) {
        av_packet_unref(&filtered_packet);
        return ret;
    }

    memset(&cupkt, 0, sizeof(cupkt));

    if (avpkt->size) {
        cupkt.payload_size = avpkt->size;
        cupkt.payload = avpkt->data;

        if (avpkt->pts != AV_NOPTS_VALUE) {
            cupkt.flags = CUVID_PKT_TIMESTAMP;
            if (avctx->pkt_timebase.num && avctx->pkt_timebase.den)
                cupkt.timestamp = av_rescale_q(avpkt->pts, avctx->pkt_timebase, (AVRational){1, 10000000});
            else
                cupkt.timestamp = avpkt->pts;
        }
    } else {
        cupkt.flags = CUVID_PKT_ENDOFSTREAM;
    }

    ret = CHECK_CU(cuvidParseVideoData(ctx->cuparser, &cupkt));

    av_packet_unref(&filtered_packet);

    if (ret < 0) {
        goto error;
    }

    // cuvidParseVideoData doesn't return an error just because stuff failed...
    if (ctx->internal_error) {
        av_log(avctx, AV_LOG_ERROR, "cuvid decode callback error\n");
        ret = ctx->internal_error;
        goto error;
    }

    if (av_fifo_size(ctx->frame_queue)) {
        CUVIDPARSERDISPINFO dispinfo;
        CUVIDPROCPARAMS params;
        unsigned int pitch = 0;
        int offset = 0;
        int i;

        av_fifo_generic_read(ctx->frame_queue, &dispinfo, sizeof(CUVIDPARSERDISPINFO), NULL);

        memset(&params, 0, sizeof(params));
        params.progressive_frame = dispinfo.progressive_frame;
        params.second_field = 0;
        params.top_field_first = dispinfo.top_field_first;

        ret = CHECK_CU(cuvidMapVideoFrame(ctx->cudecoder, dispinfo.picture_index, &mapped_frame, &pitch, &params));
        if (ret < 0)
            goto error;

        if (avctx->pix_fmt == AV_PIX_FMT_CUDA) {
            ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "av_hwframe_get_buffer failed\n");
                goto error;
            }

            ret = ff_decode_frame_props(avctx, frame);
            if (ret < 0) {
                av_log(avctx, AV_LOG_ERROR, "ff_decode_frame_props failed\n");
                goto error;
            }

            for (i = 0; i < 2; i++) {
                CUDA_MEMCPY2D cpy = {
                    .srcMemoryType = CU_MEMORYTYPE_DEVICE,
                    .dstMemoryType = CU_MEMORYTYPE_DEVICE,
                    .srcDevice     = mapped_frame,
                    .dstDevice     = (CUdeviceptr)frame->data[i],
                    .srcPitch      = pitch,
                    .dstPitch      = frame->linesize[i],
                    .srcY          = offset,
                    .WidthInBytes  = FFMIN(pitch, frame->linesize[i]),
                    .Height        = avctx->coded_height >> (i ? 1 : 0),
                };

                ret = CHECK_CU(cuMemcpy2D(&cpy));
                if (ret < 0)
                    goto error;

                offset += avctx->coded_height;
            }
        } else if (avctx->pix_fmt == AV_PIX_FMT_NV12) {