Example #1
static bool initialize_codec(struct aac_encoder *enc)
{
	int ret;

	enc->aframe  = av_frame_alloc();
	if (!enc->aframe) {
		aac_warn("initialize_codec", "Failed to allocate audio frame");
		return false;
	}

	ret = avcodec_open2(enc->context, enc->aac, NULL);
	if (ret < 0) {
		aac_warn("initialize_codec", "Failed to open AAC codec: %s",
				av_err2str(ret));
		return false;
	}

	enc->frame_size = enc->context->frame_size;
	if (!enc->frame_size)
		enc->frame_size = 1024;

	enc->frame_size_bytes = enc->frame_size * (int)enc->audio_size;

	ret = av_samples_alloc(enc->samples, NULL, enc->context->channels,
			enc->frame_size, enc->context->sample_fmt, 0);
	if (ret < 0) {
		aac_warn("initialize_codec", "Failed to create audio buffer: "
		                             "%s", av_err2str(ret));
		return false;
	}

	return true;
}
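The error paths above return without freeing enc->aframe, so cleanup evidently belongs to the caller. A minimal teardown sketch under that assumption; free_codec is an illustrative name, not part of the original encoder:

static void free_codec(struct aac_encoder *enc)
{
	if (enc->samples[0])
		av_freep(&enc->samples[0]); /* buffer from av_samples_alloc() */
	if (enc->context)
		avcodec_close(enc->context); /* pairs with avcodec_open2() above */
	av_frame_free(&enc->aframe); /* safe on NULL */
}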
Example #2
/**
 * Open the video codec and allocate the reusable frame buffers.
 */
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st) {
	int ret;
	AVCodecContext *c = st->codec;
	/* open the codec */
	ret = avcodec_open2(c, codec, NULL);
	if (ret < 0) {
		fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
		exit(1);
	}
	/* allocate and init a re-usable frame */
	frame = av_frame_alloc();
	if (!frame) {
		fprintf(stderr, "Could not allocate video frame\n");
		exit(1);
	}
	/* Allocate the encoded raw picture. */
	ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
	if (ret < 0) {
		fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
		exit(1);
	}
	/* If the output format is not YUV420P, then a temporary YUV420P
	 * picture is needed too. It is then converted to the required
	 * output format. */
	if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
		ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
		if (ret < 0) {
			fprintf(stderr, "Could not allocate temporary picture: %s\n", av_err2str(ret));
			exit(1);
		}
	}
	/* copy data and linesize picture pointers to frame */
	*((AVPicture *) frame) = dst_picture;
}
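avcodec_alloc_frame() and the avpicture_* helpers above are long deprecated. A rough modern equivalent allocates the frame and its data buffers in one place with av_frame_alloc() and av_frame_get_buffer(); this is a sketch, not a drop-in replacement for the globals used above:

#include <libavutil/frame.h>

static AVFrame *alloc_video_frame(enum AVPixelFormat pix_fmt, int width, int height)
{
	AVFrame *f = av_frame_alloc();
	if (!f)
		return NULL;

	f->format = pix_fmt;
	f->width  = width;
	f->height = height;

	/* allocate the data/linesize buffers; 32 is a common alignment */
	if (av_frame_get_buffer(f, 32) < 0) {
		av_frame_free(&f);
		return NULL;
	}
	return f;
}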
Example #3
static BOOL ffmpeg_resample_frame(AVAudioResampleContext* context,
                                  AVFrame* in, AVFrame* out)
{
	int ret;

	if (!avresample_is_open(context))
	{
		if ((ret = avresample_config(context, out, in)) < 0)
		{
			const char* err = av_err2str(ret);
			WLog_ERR(TAG, "Error configuring resampler %s [%d]", err, ret);
			return FALSE;
		}

		if ((ret = avresample_open(context)) < 0)
		{
			const char* err = av_err2str(ret);
			WLog_ERR(TAG, "Error opening resampler %s [%d]", err, ret);
			return FALSE;
		}
	}

	if ((ret = avresample_convert_frame(context, out, in)) < 0)
	{
		const char* err = av_err2str(ret);
		WLog_ERR(TAG, "Error during resampling %s [%d]", err, ret);
		return FALSE;
	}

	return TRUE;
}
Example #4
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;
    AVPacket pkt = { 0 };

    c = ost->enc;

    frame = get_video_frame(ost);

    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
    } else {
        ret = 0;
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
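avcodec_encode_video2() was deprecated in FFmpeg 3.1 in favor of the send/receive API. A hedged sketch of the same encode-and-mux step on the newer API, assuming the same OutputStream layout and write_frame() helper as above (write_video_frame_modern is an illustrative name):

static int write_video_frame_modern(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c = ost->enc;
    AVFrame *frame = get_video_frame(ost);
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        exit(1);

    /* a NULL frame from get_video_frame() puts the encoder in flush mode */
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        fprintf(stderr, "Error sending video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break; /* needs more input, or fully drained */
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        write_frame(oc, &c->time_base, ost->st, pkt);
        av_packet_unref(pkt); /* no-op if the muxer consumed the reference */
    }

    av_packet_free(&pkt);
    return ret == AVERROR_EOF ? 1 : 0;
}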
Example #5
static bool ffmpeg_image_open_decoder_context(struct ffmpeg_image *info)
{
	int ret = av_find_best_stream(info->fmt_ctx, AVMEDIA_TYPE_VIDEO,
			-1, 1, NULL, 0);
	if (ret < 0) {
		blog(LOG_WARNING, "Couldn't find video stream in file '%s': %s",
				info->file, av_err2str(ret));
		return false;
	}

	info->stream_idx  = ret;
	info->stream      = info->fmt_ctx->streams[ret];
	info->decoder_ctx = info->stream->codec;
	info->decoder     = avcodec_find_decoder(info->decoder_ctx->codec_id);

	if (!info->decoder) {
		blog(LOG_WARNING, "Failed to find decoder for file '%s'",
				info->file);
		return false;
	}

	ret = avcodec_open2(info->decoder_ctx, info->decoder, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to open video codec for file '%s': "
		                  "%s", info->file, av_err2str(ret));
		return false;
	}

	return true;
}
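The decoder_ctx = info->stream->codec line relies on the deprecated AVStream.codec field. On current FFmpeg the context is allocated separately and filled from the stream's AVCodecParameters; a sketch under that assumption (open_decoder_for_stream is an illustrative helper, not OBS code):

static AVCodecContext *open_decoder_for_stream(AVStream *st)
{
	const AVCodec *decoder = avcodec_find_decoder(st->codecpar->codec_id);
	AVCodecContext *ctx;

	if (!decoder)
		return NULL;

	ctx = avcodec_alloc_context3(decoder);
	if (!ctx)
		return NULL;

	/* copy width/height/pix_fmt etc. from the stream, then open */
	if (avcodec_parameters_to_context(ctx, st->codecpar) < 0 ||
	    avcodec_open2(ctx, decoder, NULL) < 0) {
		avcodec_free_context(&ctx);
		return NULL;
	}
	return ctx;
}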
Example #6
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame;
    int ret;
    int got_packet;
    int dst_nb_samples;
    
    av_init_packet(&pkt);
    c = ost->st->codec;
    
    frame = get_audio_frame(ost);
    
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);
        
        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;
        
        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }
    
    ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }
    
    if (got_packet) {
        ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error while writing audio frame: %s\n",
                    av_err2str(ret));
            exit(1);
        }
    }
    
    return (frame || got_packet) ? 0 : 1;
}
Example #7
static int encode_write(AVFrame *frame)
{
    int ret = 0;
    AVPacket enc_pkt;

    av_init_packet(&enc_pkt);
    enc_pkt.data = NULL;
    enc_pkt.size = 0;

    if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
        fprintf(stderr, "Error during encoding. Error code: %s\n", av_err2str(ret));
        goto end;
    }
    while (1) {
        ret = avcodec_receive_packet(encoder_ctx, &enc_pkt);
        if (ret)
            break;

        enc_pkt.stream_index = 0;
        av_packet_rescale_ts(&enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
                             ofmt_ctx->streams[0]->time_base);
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        if (ret < 0) {
            fprintf(stderr, "Error during writing data to output file. "
                    "Error code: %s\n", av_err2str(ret));
            return -1;
        }
    }

end:
    if (ret == AVERROR_EOF)
        return 0;
    ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
    return ret;
}
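A hedged usage sketch for encode_write(): it is called once per decoded frame and then once with NULL, which puts the encoder into flush mode so buffered packets drain (the AVERROR_EOF case treated as success above). encode_all and next_decoded_frame are illustrative placeholders:

static int encode_all(void)
{
    AVFrame *frame;
    int ret;

    /* next_decoded_frame() is assumed to yield frames until EOF (NULL) */
    while ((frame = next_decoded_frame()) != NULL) {
        ret = encode_write(frame);
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
    }
    return encode_write(NULL); /* drain the encoder at end of stream */
}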
Example #8
static bool ffmpeg_image_init(struct ffmpeg_image *info, const char *file)
{
	int ret;

	memset(info, 0, sizeof(struct ffmpeg_image));
	info->file       = file;
	info->stream_idx = -1;

	ret = avformat_open_input(&info->fmt_ctx, file, NULL, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Failed to open file '%s': %s",
				info->file, av_err2str(ret));
		return false;
	}

	ret = avformat_find_stream_info(info->fmt_ctx, NULL);
	if (ret < 0) {
		blog(LOG_WARNING, "Could not find stream info for file '%s':"
		                  " %s", info->file, av_err2str(ret));
		goto fail;
	}

	if (!ffmpeg_image_open_decoder_context(info))
		goto fail;

	info->cx     = info->decoder_ctx->width;
	info->cy     = info->decoder_ctx->height;
	info->format = info->decoder_ctx->pix_fmt;
	return true;

fail:
	ffmpeg_image_free(info);
	return false;
}
Example #9
    bool setup() {
        avfilter_graph_free(&filter_graph);
        filter_graph = avfilter_graph_alloc();
        //QString sws_flags_str;
        QString buffersrc_args = QString("video_size=%1x%2:pix_fmt=%3:time_base=%4/%5:sar=1")
                .arg(width).arg(height).arg(pixfmt).arg(1).arg(AV_TIME_BASE);
        qDebug("buffersrc_args=%s", buffersrc_args.toUtf8().constData());
        AVFilter *buffersrc  = avfilter_get_by_name("buffer");
        Q_ASSERT(buffersrc);
        int ret = avfilter_graph_create_filter(&in_filter_ctx,
                                               buffersrc,
                                               "in", buffersrc_args.toUtf8().constData(), NULL,
                                               filter_graph);
        if (ret < 0) {
            qWarning("Can not create buffer source: %s", av_err2str(ret));
            return false;
        }
        /* buffer video sink: to terminate the filter chain. */
        AVFilter *buffersink = avfilter_get_by_name("buffersink");
        Q_ASSERT(buffersink);
        if ((ret = avfilter_graph_create_filter(&out_filter_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph)) < 0) {
            qWarning("Can not create buffer sink: %s", av_err2str(ret));
            return false;
        }

        /* Endpoints for the filter graph. */
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();
        outputs->name       = av_strdup("in");
        outputs->filter_ctx = in_filter_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;

        inputs->name       = av_strdup("out");
        inputs->filter_ctx = out_filter_ctx;
        inputs->pad_idx    = 0;
        inputs->next       = NULL;


        //avfilter_graph_parse, avfilter_graph_parse2?
        if ((ret = avfilter_graph_parse_ptr(filter_graph, options.toUtf8().constData(),
                                        &inputs, &outputs, NULL)) < 0) {
            qWarning("avfilter_graph_parse_ptr fail: %s", av_err2str(ret));
            avfilter_inout_free(&outputs);
            avfilter_inout_free(&inputs);
            return false;
        }
        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
            qWarning("avfilter_graph_config fail: %s", av_err2str(ret));
            avfilter_inout_free(&outputs);
            avfilter_inout_free(&inputs);
            return false;
        }
        avfilter_inout_free(&outputs);
        avfilter_inout_free(&inputs);
        avframe = av_frame_alloc();
        return true;
    }
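Once setup() succeeds, frames are pushed into the buffer source and pulled from the buffer sink. A rough sketch of that round trip, assuming the in_filter_ctx and out_filter_ctx configured above (run_graph_once is an illustrative name):

static int run_graph_once(AVFilterContext *in_filter_ctx,
                          AVFilterContext *out_filter_ctx,
                          AVFrame *in, AVFrame *out)
{
    /* KEEP_REF leaves ownership of 'in' with the caller */
    int ret = av_buffersrc_add_frame_flags(in_filter_ctx, in,
                                           AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    /* may return AVERROR(EAGAIN) if the graph needs more input */
    return av_buffersink_get_frame(out_filter_ctx, out);
}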
Example #10
jint Java_com_panahit_ui_Components_AnimatedFileDrawable_createDecoder(JNIEnv *env, jclass clazz, jstring src, jintArray data) {
    VideoInfo *info = new VideoInfo();
    
    char const *srcString = env->GetStringUTFChars(src, 0);
    if (srcString == nullptr) {
        delete info;
        return 0;
    }
    int len = strlen(srcString);
    info->src = new char[len + 1];
    memcpy(info->src, srcString, len);
    info->src[len] = '\0';
    env->ReleaseStringUTFChars(src, srcString);
    
    int ret;
    if ((ret = avformat_open_input(&info->fmt_ctx, info->src, NULL, NULL)) < 0) {
        LOGE("can't open source file %s, %s", info->src, av_err2str(ret));
        delete info;
        return 0;
    }
    
    if ((ret = avformat_find_stream_info(info->fmt_ctx, NULL)) < 0) {
        LOGE("can't find stream information %s, %s", info->src, av_err2str(ret));
        delete info;
        return 0;
    }
    
    if (open_codec_context(&info->video_stream_idx, info->fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        info->video_stream = info->fmt_ctx->streams[info->video_stream_idx];
        info->video_dec_ctx = info->video_stream->codec;
    }
    
    if (info->video_stream == nullptr) {
        LOGE("can't find video stream in the input, aborting %s", info->src);
        delete info;
        return 0;
    }
    
    info->frame = av_frame_alloc();
    if (info->frame == nullptr) {
        LOGE("can't allocate frame %s", info->src);
        delete info;
        return 0;
    }
    
    av_init_packet(&info->pkt);
    info->pkt.data = NULL;
    info->pkt.size = 0;
    
    jint *dataArr = env->GetIntArrayElements(data, 0);
    if (dataArr != nullptr) {
        dataArr[0] = info->video_dec_ctx->width;
        dataArr[1] = info->video_dec_ctx->height;
        env->ReleaseIntArrayElements(data, dataArr, 0);
    }
    
    //LOGD("successfully opened file %s", info->src);
    
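    // NOTE: returning the pointer as a 32-bit jint truncates on 64-bit
    // platforms; a safe handle would need a jlong on the Java side.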
    return (int) info;
}
Example #11
int main(int argc, char **argv)
{
    AVDictionary *options = NULL;
    AVIOContext *client = NULL, *server = NULL;
    const char *in_uri, *out_uri;
    int ret, pid;
    av_log_set_level(AV_LOG_TRACE);
    if (argc < 3) {
        printf("usage: %s input http://hostname[:port]\n"
               "API example program to serve http to multiple clients.\n"
               "\n", argv[0]);
        return 1;
    }

    in_uri = argv[1];
    out_uri = argv[2];

    avformat_network_init();

    if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
        fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
        return ret;
    }
    if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
        fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
        return ret;
    }
    fprintf(stderr, "Entering main loop.\n");
    for (;;) {
        if ((ret = avio_accept(server, &client)) < 0)
            goto end;
        fprintf(stderr, "Accepted client, forking process.\n");
        // XXX: Since we don't reap our children and don't ignore signals
        //      this produces zombie processes.
        pid = fork();
        if (pid < 0) {
            perror("Fork failed");
            ret = AVERROR(errno);
            goto end;
        }
        if (pid == 0) {
            fprintf(stderr, "In child.\n");
            process_client(client, in_uri);
            avio_close(server);
            exit(0);
        }
        if (pid > 0)
            avio_close(client);
    }
end:
    avio_close(server);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
        return 1;
    }
    return 0;
}
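The XXX note above is accurate: without reaping, every forked child lingers as a zombie. One conventional POSIX fix, not part of the FFmpeg example, is a SIGCHLD handler installed before the accept loop; a minimal sketch:

#include <signal.h>
#include <string.h>
#include <sys/wait.h>

static void reap_children(int sig)
{
    (void)sig;
    /* collect every exited child without blocking */
    while (waitpid(-1, NULL, WNOHANG) > 0)
        ;
}

static int install_sigchld_handler(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa));
    sa.sa_handler = reap_children;
    sa.sa_flags   = SA_RESTART; /* keep avio_accept() from failing with EINTR */
    return sigaction(SIGCHLD, &sa, NULL);
}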
Example #12
static av_cold int write_header(AVFormatContext *s1)
{
    int res = 0, flags = O_RDWR;
    struct v4l2_format fmt = {
        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT
    };
    V4L2Context *s = s1->priv_data;
    AVCodecContext *enc_ctx;
    uint32_t v4l2_pixfmt;

    if (s1->flags & AVFMT_FLAG_NONBLOCK)
        flags |= O_NONBLOCK;

    s->fd = open(s1->filename, flags);
    if (s->fd < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->filename);
        return res;
    }

    if (s1->nb_streams != 1 ||
        s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
        s1->streams[0]->codec->codec_id   != AV_CODEC_ID_RAWVIDEO) {
        av_log(s1, AV_LOG_ERROR,
               "V4L2 output device supports only a single raw video stream\n");
        return AVERROR(EINVAL);
    }

    enc_ctx = s1->streams[0]->codec;

    v4l2_pixfmt = avpriv_fmt_ff2v4l(enc_ctx->pix_fmt, AV_CODEC_ID_RAWVIDEO);
    if (!v4l2_pixfmt) { // XXX: try to force them one by one?
        av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
               av_get_pix_fmt_name(enc_ctx->pix_fmt));
        return AVERROR(EINVAL);
    }

    if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
        return res;
    }

    fmt.fmt.pix.width       = enc_ctx->width;
    fmt.fmt.pix.height      = enc_ctx->height;
    fmt.fmt.pix.pixelformat = v4l2_pixfmt;
    fmt.fmt.pix.sizeimage   = av_image_get_buffer_size(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height, 1);

    if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
        return res;
    }

    return res;
}
Example #13
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;

    c = ost->st->codec;

    frame = get_video_frame(ost);

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* a hack to avoid data copy with some raw video muxers */
        AVPacket pkt;
        av_init_packet(&pkt);

        if (!frame)
            return 1;

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)frame;
        pkt.size          = sizeof(AVPicture);

        pkt.pts = pkt.dts = frame->pts;
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        if (got_packet) {
            ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        } else {
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
Example #14
static inline int open_output_file(struct ffmpeg_mux *ffm)
{
	AVOutputFormat *format = ffm->output->oformat;
	int ret;

	if ((format->flags & AVFMT_NOFILE) == 0) {
		ret = avio_open(&ffm->output->pb, ffm->params.file,
				AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf("Couldn't open '%s', %s",
					ffm->params.file, av_err2str(ret));
			return FFM_ERROR;
		}
	}

	strncpy(ffm->output->filename, ffm->params.file,
			sizeof(ffm->output->filename));
	ffm->output->filename[sizeof(ffm->output->filename) - 1] = 0;

	AVDictionary *dict = NULL;
	if ((ret = av_dict_parse_string(&dict, ffm->params.muxer_settings,
				"=", " ", 0))) {
		printf("Failed to parse muxer settings: %s\n%s",
				av_err2str(ret), ffm->params.muxer_settings);

		av_dict_free(&dict);
	}

	if (av_dict_count(dict) > 0) {
		printf("Using muxer settings:");

		AVDictionaryEntry *entry = NULL;
		while ((entry = av_dict_get(dict, "", entry,
						AV_DICT_IGNORE_SUFFIX)))
			printf("\n\t%s=%s", entry->key, entry->value);

		printf("\n");
	}

	ret = avformat_write_header(ffm->output, &dict);
	if (ret < 0) {
		printf("Error opening '%s': %s",
				ffm->params.file, av_err2str(ret));

		av_dict_free(&dict);

		return ret == AVERROR(EINVAL) ? FFM_UNSUPPORTED : FFM_ERROR;
	}

	av_dict_free(&dict);

	return FFM_SUCCESS;
}
Example #15
static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *decoder = NULL;
    AVStream *video = NULL;

    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        fprintf(stderr, "Cannot open input file '%s', Error code: %s\n",
                filename, av_err2str(ret));
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        fprintf(stderr, "Cannot find input stream information. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }

    ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file. "
                "Error code: %s\n", av_err2str(ret));
        return ret;
    }
    video_stream = ret;

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = ifmt_ctx->streams[video_stream];
    if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
        fprintf(stderr, "avcodec_parameters_to_context error. Error code: %s\n",
                av_err2str(ret));
        return ret;
    }

    decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    if (!decoder_ctx->hw_device_ctx) {
        fprintf(stderr, "A hardware device reference create failed.\n");
        return AVERROR(ENOMEM);
    }
    decoder_ctx->get_format    = get_vaapi_format;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
        fprintf(stderr, "Failed to open codec for decoding. Error code: %s\n",
                av_err2str(ret));

    return ret;
}
Example #16
static bool do_aac_encode(struct aac_encoder *enc,
		struct encoder_packet *packet, bool *received_packet)
{
	AVRational time_base = {1, enc->context->sample_rate};
	AVPacket   avpacket  = {0};
	int        got_packet;
	int        ret;

	enc->aframe->nb_samples = enc->frame_size;
	enc->aframe->pts = av_rescale_q(enc->total_samples,
			(AVRational){1, enc->context->sample_rate},
			enc->context->time_base);

	ret = avcodec_fill_audio_frame(enc->aframe, enc->context->channels,
			enc->context->sample_fmt, enc->samples[0],
			enc->frame_size_bytes * enc->context->channels, 1);
	if (ret < 0) {
		aac_warn("do_aac_encode", "avcodec_fill_audio_frame failed: %s",
				av_err2str(ret));
		return false;
	}

	enc->total_samples += enc->frame_size;

	ret = avcodec_encode_audio2(enc->context, &avpacket, enc->aframe,
			&got_packet);
	if (ret < 0) {
		aac_warn("do_aac_encode", "avcodec_encode_audio2 failed: %s",
				av_err2str(ret));
		return false;
	}

	*received_packet = !!got_packet;
	if (!got_packet)
		return true;

	da_resize(enc->packet_buffer, 0);
	da_push_back_array(enc->packet_buffer, avpacket.data, avpacket.size);

	packet->pts  = rescale_ts(avpacket.pts, enc->context, time_base);
	packet->dts  = rescale_ts(avpacket.dts, enc->context, time_base);
	packet->data = enc->packet_buffer.array;
	packet->size = avpacket.size;
	packet->type = OBS_ENCODER_AUDIO;
	packet->timebase_num = 1;
	packet->timebase_den = (int32_t)enc->context->sample_rate;
	av_free_packet(&avpacket);
	return true;
}
Example #17
int main(void)
{
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(error_entries); i++) {
        const struct error_entry *entry = &error_entries[i];
        printf("%d: %s [%s]\n", entry->num, av_err2str(entry->num), entry->tag);
    }

    for (i = 0; i < 256; i++) {
        printf("%d: %s\n", -i, av_err2str(-i));
    }

    return 0;
}
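For reference, av_err2str() is roughly a convenience macro around av_strerror() with a stack buffer of AV_ERROR_MAX_STRING_SIZE bytes, so the returned string dies with the enclosing expression. An explicit equivalent, useful when the message must outlive the call site:

#include <stdio.h>
#include <libavutil/error.h>

static void print_error_line(int errnum)
{
    char buf[AV_ERROR_MAX_STRING_SIZE];

    if (av_strerror(errnum, buf, sizeof(buf)) < 0)
        snprintf(buf, sizeof(buf), "unknown error %d", errnum);
    printf("%d: %s\n", errnum, buf);
}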
Example #18
static int list_op(const char *input_dir)
{
	AVIODirEntry *entry = NULL;
	AVIODirContext *ctx = NULL;
	int cnt, ret;
	char filemode[4], uid_and_gid[20];

	if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
		av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
		goto fail;
	}

	cnt = 0;
	for (;;) {
		if ((ret = avio_read_dir(ctx, &entry)) < 0) {
			av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
			goto fail;
		}
		if (!entry)
			break;
		if (entry->filemode == -1) {
			snprintf(filemode, 4, "???");
		}
		else {
			snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
		}
		snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
		if (cnt == 0)
			av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
			"TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
			"ACCESSED", "STATUS_CHANGED");
		av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
			type_string(entry->type),
			entry->size,
			entry->name,
			uid_and_gid,
			filemode,
			entry->modification_timestamp,
			entry->access_timestamp,
			entry->status_change_timestamp);
		avio_free_directory_entry(&entry);
		cnt++;
	}

fail:
	avio_close_dir(&ctx);
	return ret;
}
Example #19
static int decode_packet(struct decoder_ctx *ctx, const AVPacket *pkt, int *got_frame)
{
    int ret;
    int decoded = pkt->size;
    AVCodecContext *avctx = ctx->avctx;
    AVFrame *dec_frame = av_frame_alloc();

    *got_frame = 0;

    TRACE(ctx, "decode packet of size %d", pkt->size);

    if (!dec_frame)
        return AVERROR(ENOMEM);

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ret = avcodec_decode_video2(avctx, dec_frame, got_frame, pkt);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ret = avcodec_decode_audio4(avctx, dec_frame, got_frame, pkt);
        break;
    default:
        av_assert0(0);
    }

    if (ret < 0) {
        LOG(ctx, ERROR, "Error decoding %s packet: %s",
            av_get_media_type_string(avctx->codec_type),
            av_err2str(ret));
        av_frame_free(&dec_frame);
        return pkt->size;
    }

    TRACE(ctx, "decoded %d/%d bytes from packet -> got_frame=%d", ret, pkt->size, *got_frame);
    decoded = FFMIN(ret, pkt->size);

    if (*got_frame) {
        ret = sxpi_decoding_queue_frame(ctx->decoding_ctx, dec_frame);
        if (ret < 0) {
            TRACE(ctx, "could not queue frame: %s", av_err2str(ret));
            av_frame_free(&dec_frame);
            return ret;
        }
    } else {
        av_frame_free(&dec_frame);
    }
    return decoded;
}
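avcodec_decode_video2() and avcodec_decode_audio4() were deprecated in FFmpeg 3.1. A hedged sketch of the same step on the unified send/receive API; a complete loop would keep calling avcodec_receive_frame() until AVERROR(EAGAIN), since one audio packet can yield several frames:

static int decode_packet_modern(AVCodecContext *avctx, const AVPacket *pkt,
                                AVFrame *frame, int *got_frame)
{
    int ret = avcodec_send_packet(avctx, pkt);

    *got_frame = 0;
    if (ret < 0)
        return ret;

    ret = avcodec_receive_frame(avctx, frame);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0; /* no frame available yet; not a hard error */
    if (ret < 0)
        return ret;

    *got_frame = 1;
    return 0;
}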
Example #20
int main(int argc, char **argv)
{
    int ret = 0;
    config_filename = av_strdup("/etc/ffserver.conf");
    av_lfg_init(&random_state, av_get_random_seed());
    if ((ret = parse_ffconfig(config_filename)) < 0) {
        fprintf(stderr, "Error reading configuration file '%s': %s\n",
                config_filename, av_err2str(ret));
        exit(1);
    }
    av_freep(&config_filename);
    // set logfile
    logfile = stdout;
    build_file_streams();
    /* signal init */
    signal(SIGPIPE, SIG_IGN);

    if (start_event_loop() < 0) {
       // http_log("Could not start server\n");
        printf("could not start server\n");
        exit(1);
    }
    return 0;
}
Example #21
static int move_op(const char *src, const char *dst)
{
	int ret = avpriv_io_move(src, dst);
	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
	return ret;
}
Example #22
static int del_op(const char *url)
{
	int ret = avpriv_io_delete(url);
	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
	return ret;
}
Example #23
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
Example #24
bool LibAVFilterPrivate::push(Frame *frame, qreal pts)
{
    VideoFrame *vf = static_cast<VideoFrame*>(frame);
    if (width != vf->width() || height != vf->height() || pixfmt != vf->pixelFormatFFmpeg() || options_changed) {
        width = vf->width();
        height = vf->height();
        pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
        options_changed = false;
        if (!setup()) {
            qWarning("setup filter graph error");
            enabled = false; // skip this filter and avoid crash
            return false;
        }
    }
    Q_ASSERT(avframe);
    avframe->pts = pts * 1000000.0; // time_base is 1/1000000
    avframe->width = vf->width();
    avframe->height = vf->height();
    avframe->format = pixfmt = (AVPixelFormat)vf->pixelFormatFFmpeg();
    for (int i = 0; i < vf->planeCount(); ++i) {
        avframe->data[i] = vf->bits(i);
        avframe->linesize[i] = vf->bytesPerLine(i);
    }
    int ret = av_buffersrc_add_frame_flags(in_filter_ctx, avframe, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret != 0) {
        qWarning("av_buffersrc_add_frame error: %s", av_err2str(ret));
        return false;
    }
    return true;
}
Example #25
static int pull_packet(struct demuxing_ctx *ctx, AVPacket *pkt)
{
    int ret;
    AVFormatContext *fmt_ctx = ctx->fmt_ctx;
    const int target_stream_idx = ctx->stream->index;

    for (;;) {
        ret = av_read_frame(fmt_ctx, pkt);
        if (ret < 0)
            break;

        if (pkt->stream_index != target_stream_idx) {
            TRACE(ctx, "pkt->idx=%d vs %d",
                  pkt->stream_index, target_stream_idx);
            av_packet_unref(pkt);
            continue;
        }

        if (ctx->pkt_skip_mod) {
            ctx->pkt_count++;
            if (ctx->pkt_count % ctx->pkt_skip_mod && !(pkt->flags & AV_PKT_FLAG_KEY)) {
                av_packet_unref(pkt);
                continue;
            }
        }

        break;
    }

    TRACE(ctx, "packet ret %s", av_err2str(ret));
    return ret;
}
Example #26
//TODO: seek by byte
void AVDemuxer::seek(qreal q)
{
    if ((!a_codec_context && !v_codec_context) || !format_context) {
        qWarning("can not seek. context not ready: %p %p %p", a_codec_context, v_codec_context, format_context);
        return;
    }
    if (seek_timer.isValid()) {
        if (seek_timer.elapsed() < kSeekInterval)
            return;
        seek_timer.restart();
    } else {
        seek_timer.start();
    }
    QMutexLocker lock(&mutex);
    Q_UNUSED(lock);
    q = qMax<qreal>(0.0, q);
    if (q >= 1.0) {
        qWarning("Invalid seek position %f/1.0", q);
        return;
    }
#if 0
    //t: unit is s
    qreal t = q;// * (double)format_context->duration; //
    int ret = av_seek_frame(format_context, -1, (int64_t)(t*AV_TIME_BASE), t > pkt->pts ? 0 : AVSEEK_FLAG_BACKWARD);
    qDebug("[AVDemuxer] seek to %f %f %lld / %lld", q, pkt->pts, (int64_t)(t*AV_TIME_BASE), duration());
#else
    //t: unit is us (10^-6 s, AV_TIME_BASE)
    int64_t t = int64_t(q*duration());
    //TODO: pkt->pts may be 0, compute manually. Check whether it exceeds the duration
    if (t >= duration()) {
        qWarning("Invalid seek position: %lld/%lld", t, duration());
        return;
    }
    bool backward = t <= (int64_t)(pkt->pts*AV_TIME_BASE);
    qDebug("[AVDemuxer] seek to %f %f %lld / %lld backward=%d", q, pkt->pts, t, duration(), backward);
    //AVSEEK_FLAG_BACKWARD has no effect? because we know the timestamp
    int seek_flag = (backward ? 0 : AVSEEK_FLAG_BACKWARD); //AVSEEK_FLAG_ANY
    int ret = av_seek_frame(format_context, -1, t, seek_flag);
#endif
    if (ret < 0) {
        qWarning("[AVDemuxer] seek error: %s", av_err2str(ret));
        return;
    }
    //replay
    if (q == 0) {
        qDebug("************seek to 0. started = false");
        started_ = false;
        v_codec_context->frame_number = 0; //TODO: why frame_number not changed after seek?
    }
    if (master_clock) {
        master_clock->updateValue(qreal(t)/qreal(AV_TIME_BASE));
        master_clock->updateExternalClock(t/1000LL); //in msec. ignore usec part using t/1000
    }
    //calc pts
    //use AVThread::flush() when reaching end
    //if (videoCodecContext())
    //    avcodec_flush_buffers(videoCodecContext());
    //if (audioCodecContext())
    //    avcodec_flush_buffers(audioCodecContext());
}
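The commented-out avcodec_flush_buffers() hints at the end matter in practice: after a successful av_seek_frame() the decoders still hold pre-seek state. A minimal sketch of that flush step (flush_after_seek is an illustrative helper):

static void flush_after_seek(AVCodecContext *v_codec_context,
                             AVCodecContext *a_codec_context)
{
    // discard buffered reference frames / delayed output from before the seek
    if (v_codec_context)
        avcodec_flush_buffers(v_codec_context);
    if (a_codec_context)
        avcodec_flush_buffers(a_codec_context);
}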
Example #27
static int ffserver_save_avoption(const char *opt, const char *arg, AVDictionary **dict,
                                  int type, FFServerConfig *config, int line_num)
{
    int ret = 0;
    AVDictionaryEntry *e;
    const AVOption *o = av_opt_find(config->dummy_ctx, opt, NULL, type | AV_OPT_FLAG_ENCODING_PARAM, AV_OPT_SEARCH_CHILDREN);
    if (!o) {
        report_config_error(config->filename, line_num, AV_LOG_ERROR,
                &config->errors, "Option not found: %s\n", opt);
    } else if ((ret = av_opt_set(config->dummy_ctx, opt, arg, AV_OPT_SEARCH_CHILDREN)) < 0) {
        report_config_error(config->filename, line_num, AV_LOG_ERROR,
                &config->errors, "Invalid value for option %s (%s): %s\n", opt,
                arg, av_err2str(ret));
    } else if ((e = av_dict_get(*dict, opt, NULL, 0))) {
        if ((o->type == AV_OPT_TYPE_FLAGS) && arg && (arg[0] == '+' || arg[0] == '-'))
            return av_dict_set(dict, opt, arg, AV_DICT_APPEND);
        report_config_error(config->filename, line_num, AV_LOG_ERROR,
                &config->errors,
                "Redeclaring value of the option %s, previous value: %s\n",
                opt, e->value);
    } else if (av_dict_set(dict, opt, arg, 0) < 0) {
        return AVERROR(ENOMEM);
    }
    return 0;
}
Example #28
av_cold int ff_xvid_rate_control_init(MpegEncContext *s)
{
    char *tmp_name;
    int fd, i;
    xvid_plg_create_t xvid_plg_create = { 0 };
    xvid_plugin_2pass2_t xvid_2pass2  = { 0 };

    fd = avpriv_tempfile("xvidrc.", &tmp_name, 0, s->avctx);
    if (fd < 0) {
        av_log(s, AV_LOG_ERROR, "Can't create temporary pass2 file.\n");
        return fd;
    }

    for (i = 0; i < s->rc_context.num_entries; i++) {
        static const char frame_types[] = " ipbs";
        char tmp[256];
        RateControlEntry *rce;

        rce = &s->rc_context.entry[i];

        snprintf(tmp, sizeof(tmp), "%c %d %d %d %d %d %d\n",
                 frame_types[rce->pict_type],
                 (int) lrintf(rce->qscale / FF_QP2LAMBDA),
                 rce->i_count, s->mb_num - rce->i_count - rce->skip_count,
                 rce->skip_count,
                 (rce->i_tex_bits + rce->p_tex_bits + rce->misc_bits + 7) / 8,
                 (rce->header_bits + rce->mv_bits + 7) / 8);

        if (write(fd, tmp, strlen(tmp)) < 0) {
            int ret = AVERROR(errno);
            av_log(s, AV_LOG_ERROR, "Error %s writing 2pass logfile\n", av_err2str(ret));
            av_free(tmp_name);
            close(fd);
            return ret;
        }
    }

    close(fd);

    xvid_2pass2.version     = XVID_MAKE_VERSION(1, 1, 0);
    xvid_2pass2.filename    = tmp_name;
    xvid_2pass2.bitrate     = s->avctx->bit_rate;
    xvid_2pass2.vbv_size    = s->avctx->rc_buffer_size;
    xvid_2pass2.vbv_maxrate = s->avctx->rc_max_rate;
    xvid_2pass2.vbv_initial = s->avctx->rc_initial_buffer_occupancy;

    xvid_plg_create.version = XVID_MAKE_VERSION(1, 1, 0);
    xvid_plg_create.fbase   = s->avctx->time_base.den;
    xvid_plg_create.fincr   = s->avctx->time_base.num;
    xvid_plg_create.param   = &xvid_2pass2;

    if (xvid_plugin_2pass2(NULL, XVID_PLG_CREATE, &xvid_plg_create,
                           &s->rc_context.non_lavc_opaque) < 0) {
        av_log(s, AV_LOG_ERROR, "xvid_plugin_2pass2 failed\n");
        return -1;
    }
    return 0;
}
Example #29
//TODO: use ipp, cuda decode and yuv functions. is sws_scale necessary?
bool VideoDecoder::decode(const QByteArray &encoded)
{
    if (!isAvailable())
        return false;
    DPTR_D(VideoDecoder);
    AVPacket packet;
#if NO_PADDING_DATA
    /*!
     * The packet buffer must be larger than the actual payload: some
     * optimized bitstream readers read 32 or 64 bits at once and can read
     * past the end, so the bytes after the payload in avpkt->data should
     * be zeroed to avoid overreads on damaged MPEG streams.
     */
    // auto released by av_buffer_default_free
    av_new_packet(&packet, encoded.size());
    memcpy(packet.data, encoded.data(), encoded.size());
#else
    av_init_packet(&packet);
    packet.size = encoded.size();
    packet.data = (uint8_t*)encoded.constData();
#endif //NO_PADDING_DATA
//TODO: use AVPacket directly instead of Packet?
    //AVStream *stream = format_context->streams[stream_idx];

    //TODO: some decoders might in addition need other fields like flags&AV_PKT_FLAG_KEY
    int ret = avcodec_decode_video2(d.codec_ctx, d.frame, &d.got_frame_ptr, &packet);
    //qDebug("pic_type=%c", av_get_picture_type_char(d.frame->pict_type));
    d.undecoded_size = qMin(encoded.size() - ret, encoded.size());
    //TODO: decoded format is YUV420P, YUV422P?
    av_free_packet(&packet);
    if (ret < 0) {
        qWarning("[VideoDecoder] %s", av_err2str(ret));
        return false;
    }
    if (!d.got_frame_ptr) {
        qWarning("no frame could be decompressed: %s", av_err2str(ret));
        return true;
    }
    if (!d.codec_ctx->width || !d.codec_ctx->height)
        return false;
    //qDebug("codec %dx%d, frame %dx%d", d.codec_ctx->width, d.codec_ctx->height, d.frame->width, d.frame->height);
    d.width = d.frame->width;
    d.height = d.frame->height;
    //avcodec_align_dimensions2(d.codec_ctx, &d.width_align, &d.height_align, aligns);
    return true;
}
Example #30
static int decode_packet(int *got_frame, int cached)
{
    int ret = 0;
    int decoded = pkt.size;

    *got_frame = 0;

    if (pkt.stream_index == video_stream_idx) {
        /* decode video frame */
        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
            return ret;
        }

        if (*got_frame) {

            if (frame->width != width || frame->height != height ||
                frame->format != pix_fmt) {
                /* To handle this change, one could call av_image_alloc again and
                 * decode the following frames into another rawvideo file. */
                fprintf(stderr, "Error: Width, height and pixel format have to be "
                        "constant in a rawvideo file, but the width, height or "
                        "pixel format of the input video changed:\n"
                        "old: width = %d, height = %d, format = %s\n"
                        "new: width = %d, height = %d, format = %s\n",
                        width, height, av_get_pix_fmt_name(pix_fmt),
                        frame->width, frame->height,
                        av_get_pix_fmt_name(frame->format));
                return -1;
            }

            printf("video_frame%s n:%d coded_n:%d pts:%s\n",
                   cached ? "(cached)" : "",
                   video_frame_count++, frame->coded_picture_number,
                   av_ts2timestr(frame->pts, &video_dec_ctx->time_base));

            /* copy decoded frame to destination buffer:
             * this is required since rawvideo expects non aligned data */
            av_image_copy(video_dst_data, video_dst_linesize,
                          (const uint8_t **)(frame->data), frame->linesize,
                          pix_fmt, width, height);

            /* write to rawvideo file */
            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
        }
    }

    /* If frame reference counting were enabled (the refcount flag in the
     * full demuxing example), we would own the frame's data here and
     * would need to call av_frame_unref(frame) once done with it. */

    return decoded;
}