Example #1
static void create_video_stream(struct ffmpeg_mux *ffm)
{
	AVCodecContext *context;
	void *extradata = NULL;

	if (!new_stream(ffm, &ffm->video_stream, ffm->params.vcodec,
				&ffm->output->oformat->video_codec))
		return;

	if (ffm->video_header.size) {
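		/* copy the video encoder's global header; it becomes the codec context's extradata below */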
		extradata = av_memdup(ffm->video_header.data,
				ffm->video_header.size);
	}

	context                 = ffm->video_stream->codec;
	context->bit_rate       = ffm->params.vbitrate * 1000;
	context->width          = ffm->params.width;
	context->height         = ffm->params.height;
	context->coded_width    = ffm->params.width;
	context->coded_height   = ffm->params.height;
	context->extradata      = extradata;
	context->extradata_size = ffm->video_header.size;
	context->time_base =
		(AVRational){ffm->params.fps_den, ffm->params.fps_num};

	ffm->video_stream->time_base = context->time_base;

	if (ffm->output->oformat->flags & AVFMT_GLOBALHEADER)
		context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
Example #2
JNIEXPORT jlong JNICALL Java_bits_jav_util_JavMem_memdup
( JNIEnv *env, jclass clazz, jlong ptr, jlong size )
{
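    /* reinterpret the jlong handle as a native pointer, duplicate the buffer with av_memdup,
     * and return the address of the copy as a jlong */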
    void *p   = *(void**)&ptr;
    void *ret = av_memdup( p, (size_t)size );
    return *(jlong*)&ret;
}
Example #3
static int set_aes_arg(CryptoContext *c, uint8_t **buf, int *buf_len,
                       uint8_t *default_buf, int default_buf_len,
                       const char *desc)
{
    if (!*buf_len) {
        if (!default_buf_len) {
            av_log(c, AV_LOG_ERROR, "%s not set\n", desc);
            return AVERROR(EINVAL);
        } else if (default_buf_len != BLOCKSIZE) {
            av_log(c, AV_LOG_ERROR,
                   "invalid %s size (%d bytes, block size is %d)\n",
                   desc, default_buf_len, BLOCKSIZE);
            return AVERROR(EINVAL);
        }
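        /* nothing was supplied: duplicate the default value so *buf holds its own copy */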
        *buf = av_memdup(default_buf, default_buf_len);
        if (!*buf)
            return AVERROR(ENOMEM);
        *buf_len = default_buf_len;
    } else if (*buf_len != BLOCKSIZE) {
        av_log(c, AV_LOG_ERROR,
               "invalid %s size (%d bytes, block size is %d)\n",
               desc, *buf_len, BLOCKSIZE);
        return AVERROR(EINVAL);
    }
    return 0;
}
Example #4
static void create_audio_stream(struct ffmpeg_mux *ffm, int idx)
{
	AVCodecContext *context;
	AVStream *stream;
	void *extradata = NULL;

	if (!new_stream(ffm, &stream, ffm->params.acodec,
				&ffm->output->oformat->audio_codec))
		return;

	ffm->audio_streams[idx] = stream;

	av_dict_set(&stream->metadata, "title", ffm->audio[idx].name, 0);

	stream->time_base = (AVRational){1, ffm->audio[idx].sample_rate};

	if (ffm->audio_header[idx].size) {
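		/* copy this track's encoder global header; it becomes the codec context's extradata below */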
		extradata = av_memdup(ffm->audio_header[idx].data,
				ffm->audio_header[idx].size);
	}

	context                 = stream->codec;
	context->bit_rate       = ffm->audio[idx].abitrate * 1000;
	context->channels       = ffm->audio[idx].channels;
	context->sample_rate    = ffm->audio[idx].sample_rate;
	context->sample_fmt     = AV_SAMPLE_FMT_S16;
	context->time_base      = stream->time_base;
	context->extradata      = extradata;
	context->extradata_size = ffm->audio_header[idx].size;
	context->channel_layout =
			av_get_default_channel_layout(context->channels);

	if (ffm->output->oformat->flags & AVFMT_GLOBALHEADER)
		context->flags |= CODEC_FLAG_GLOBAL_HEADER;

	ffm->num_audio_streams++;
}
Example #5
void sxpi_demuxing_run(struct demuxing_ctx *ctx)
{
    int ret;
    int in_err, out_err;

    TRACE(ctx, "demuxing packets in queue %p", ctx->pkt_queue);

    for (;;) {
        AVPacket pkt;
        struct message msg;

        ret = av_thread_message_queue_recv(ctx->src_queue, &msg, AV_THREAD_MESSAGE_NONBLOCK);
        if (ret != AVERROR(EAGAIN)) {
            if (ret < 0)
                break;

            if (msg.type == MSG_SEEK) {
                av_assert0(!ctx->is_image);

                /* Make later modules stop working ASAP */
                av_thread_message_flush(ctx->pkt_queue);

                /* perform the actual seek so that the next packet pulled in this
                 * thread is at the (approximate) requested time */
                const int64_t seek_to = *(int64_t *)msg.data;
                LOG(ctx, INFO, "Seek in media at ts=%s", PTS2TIMESTR(seek_to));
                ret = avformat_seek_file(ctx->fmt_ctx, -1, INT64_MIN, seek_to, seek_to, 0);
                if (ret < 0) {
                    sxpi_msg_free_data(&msg);
                    break;
                }
            }

            /* Forward the message */
            ret = av_thread_message_queue_send(ctx->pkt_queue, &msg, 0);
            if (ret < 0) {
                sxpi_msg_free_data(&msg);
                break;
            }
        }

        msg.type = MSG_PACKET;

        ret = pull_packet(ctx, &pkt);
        if (ret < 0)
            break;

        TRACE(ctx, "pulled a packet of size %d, sending to decoder", pkt.size);

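        /* heap-duplicate the AVPacket struct so it can travel through the message queue */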
        msg.data = av_memdup(&pkt, sizeof(pkt));
        if (!msg.data) {
            av_packet_unref(&pkt);
            break;
        }

        ret = av_thread_message_queue_send(ctx->pkt_queue, &msg, 0);
        TRACE(ctx, "sent packet to decoder, ret=%s", av_err2str(ret));

        if (ret < 0) {
            av_packet_unref(&pkt);
            av_freep(&msg.data);
            if (ret != AVERROR_EOF && ret != AVERROR_EXIT)
                LOG(ctx, ERROR, "Unable to send packet to decoder: %s", av_err2str(ret));
            TRACE(ctx, "can't send pkt to decoder: %s", av_err2str(ret));
            av_thread_message_queue_set_err_recv(ctx->pkt_queue, ret);
            break;
        }
    }

    if (ret < 0 && ret != AVERROR_EOF) {
        in_err = out_err = ret;
    } else {
        in_err = AVERROR_EXIT;
        out_err = AVERROR_EOF;
    }
    TRACE(ctx, "notify user with %s and decoder with %s",
          av_err2str(in_err), av_err2str(out_err));
    av_thread_message_queue_set_err_send(ctx->src_queue, in_err);
    av_thread_message_flush(ctx->src_queue);
    av_thread_message_queue_set_err_recv(ctx->pkt_queue, out_err);
}
Example #6
HRESULT decklink_input_callback::VideoInputFrameArrived(
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{
    void *frameBytes;
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;

    // set max thread priority once
    if (ctx->frameCount == 0) {
        // thanks to MLT for code snippet
        int r;
        pthread_t thread;
        pthread_attr_t tattr;
        struct sched_param param;

        pthread_attr_init(&tattr);
        pthread_attr_setschedpolicy(&tattr, SCHED_FIFO);
        param.sched_priority = sched_get_priority_max(SCHED_FIFO) - 1;
        pthread_attr_setschedparam(&tattr, &param);

        thread = pthread_self();
        r = pthread_setschedparam(thread, SCHED_FIFO, &param);
        if (r) {
            av_log(avctx, AV_LOG_WARNING, "pthread_setschedparam returned %i\n", r);
        } else {
            av_log(avctx, AV_LOG_INFO, "param.sched_priority=%i\n", param.sched_priority);
        }
    }

    ctx->frameCount++;

    // Handle Video Frame
    if (videoFrame) {
        AVPacket pkt;
        AVCodecContext *c;
        av_init_packet(&pkt);
        c = ctx->video_st->codec;
        if (ctx->frameCount % 25 == 0) {
            unsigned long long qsize = avpacket_queue_size(&ctx->queue);
            av_log(avctx, AV_LOG_DEBUG,
                    "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
                    ctx->frameCount,
                    videoFrame->GetRowBytes() * videoFrame->GetHeight(),
                    (double)qsize / 1024 / 1024);
        }

        videoFrame->GetBytes(&frameBytes);
        videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                  ctx->video_st->time_base.den);

        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
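            // signal lost: unless nosignal_nofreeze is set, repeat the last good frame
            // or substitute a generated "no signal" picture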
            if (no_video == 0) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected\n", ctx->frameCount);
            }
            no_video++;
            //
            if (!cctx->nosignal_nofreeze) {
                if (no_video < 50 && lastFrameBytes != NULL) {
                    memcpy(frameBytes, lastFrameBytes, videoFrame->GetRowBytes() * videoFrame->GetHeight());
                } else {
                    if (nosignalFrameBytes == NULL) {
                        // init nosignal picture
                        nosignalFrameBytes = av_mallocz(videoFrame->GetRowBytes() * videoFrame->GetHeight());
                        if (videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
                            int width  = videoFrame->GetWidth();
                            int height = videoFrame->GetHeight();
                            unsigned *p = (unsigned *)nosignalFrameBytes;
                            // fill the frame with black UYVY pixels (U/V = 0x80, Y = 0x10)
                            for (int y = 0; y < height; y++) {
                                for (int x = 0; x < width; x += 2)
                                    *p++ = 0x10801080;
                            }
                        }
                    }
                    memcpy(frameBytes, nosignalFrameBytes, videoFrame->GetRowBytes() * videoFrame->GetHeight());
                }
            }
        } else {
            if (no_video > 0) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
                        "- Frames dropped %u\n", ctx->frameCount, no_video);
            }
            no_video = 0;
            //
            if (!cctx->nosignal_nofreeze) {
                if (lastFrameBytes != NULL) {
                    av_free(lastFrameBytes);
                    lastFrameBytes = NULL;
                }
                // keep a copy of this valid frame so it can be repeated if the signal drops
                lastFrameBytes = av_memdup(frameBytes, videoFrame->GetRowBytes() * videoFrame->GetHeight());
            }
        }

        pkt.pts = frameTime / ctx->video_st->time_base.num;

        if (initial_video_pts == AV_NOPTS_VALUE) {
            initial_video_pts = pkt.pts;
        }

        pkt.pts -= initial_video_pts;
        pkt.dts = pkt.pts;

        pkt.duration = frameDuration;
        // TODO: make sure this still applies
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->video_st->index;
        pkt.data         = (uint8_t *)frameBytes;
        pkt.size         = videoFrame->GetRowBytes() *
                           videoFrame->GetHeight();
        //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
        c->frame_number++;
        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }

        // check pts drift
        int64_t pts = av_rescale_q(pkt.pts,ctx->video_st->time_base,ctx->video_st->codec->time_base);
        if (pts != (ctx->frameCount - 1L) + videoPtsDrift) {
            if (no_video == 0) {
                av_log(avctx, AV_LOG_WARNING, "Video pts mismatch - current -> %ld projected -> %ld drift -> %ld\n",
                       pts, (ctx->frameCount - 1L), videoPtsDrift);
            }
            videoPtsDrift = pts - (ctx->frameCount - 1L);
        }
    }

    // Handle Audio Frame
    if (audioFrame) {
        AVCodecContext *c;
        AVPacket pkt;
        BMDTimeValue audio_pts;
        av_init_packet(&pkt);

        c = ctx->audio_st->codec;
        //hack among hacks
        pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codec->channels * (16 / 8);
        audioFrame->GetBytes(&audioFrameBytes);
        audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
        pkt.pts = audio_pts / ctx->audio_st->time_base.num;

        if (initial_audio_pts == AV_NOPTS_VALUE) {
            initial_audio_pts = pkt.pts;
        }

        pkt.pts -= initial_audio_pts;
        pkt.dts = pkt.pts;

        //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->audio_st->index;
        pkt.data         = (uint8_t *)audioFrameBytes;

        c->frame_number++;
        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }

        // check pts drift
        int64_t frameNumSamples = audioFrame->GetSampleFrameCount();
        int64_t pts = av_rescale_q(pkt.pts,ctx->audio_st->time_base,ctx->audio_st->codec->time_base);
        if (pts != (ctx->frameCount - 1L) * frameNumSamples + audioPtsDrift) {
            if (no_video == 0) {
                av_log(avctx, AV_LOG_WARNING, "Audio pts mismatch - current -> %ld projected -> %ld drift -> %ld\n",
                       pts, (ctx->frameCount - 1L) * frameNumSamples, audioPtsDrift);
            }
            audioPtsDrift = pts - (ctx->frameCount - 1L) * frameNumSamples;
        }
    }

    return S_OK;
}