static int rawvideo_read_header(AVFormatContext *ctx) { RawVideoDemuxerContext *s = ctx->priv_data; enum AVPixelFormat pix_fmt; AVStream *st; st = avformat_new_stream(ctx, NULL); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = ctx->iformat->raw_codec_id; if ((pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "No such pixel format: %s.\n", s->pixel_format); return AVERROR(EINVAL); } if(s->explicit_pts) { avpriv_set_pts_info(st, 64, 1, 1000); st->time_base = (AVRational){1, 1000}; } else { avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num); } st->codec->width = s->width; st->codec->height = s->height; st->codec->pix_fmt = pix_fmt; st->codec->bit_rate = av_rescale_q(avpicture_get_size(st->codec->pix_fmt, s->width, s->height), (AVRational){8,1}, st->time_base); return 0; }
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) { BufferSourceContext *c = ctx->priv; char pix_fmt_str[128]; int n = 0; if (!args || (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str, &c->time_base.num, &c->time_base.den, &c->pixel_aspect.num, &c->pixel_aspect.den)) != 7) { av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but %d found in '%s'\n", n, args); return AVERROR(EINVAL); } if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) { char *tail; c->pix_fmt = strtol(pix_fmt_str, &tail, 10); if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) { av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); return AVERROR(EINVAL); } } av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name); return 0; }
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) { BufferSourceContext *c = ctx->priv; char pix_fmt_str[128]; int n = 0; if (!args || (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d", &c->w, &c->h, pix_fmt_str, &c->time_base.num, &c->time_base.den, &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den)) != 7) { av_log(ctx, AV_LOG_ERROR, "Expected 7 arguments, but only %d found in '%s'\n", n, args); return AVERROR(EINVAL); } if (!c->sample_aspect_ratio.num || !c->sample_aspect_ratio.den) { av_log(ctx, AV_LOG_ERROR, "sample aspect ratio cannot be 0\n"); return -1; } if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) { char *tail; c->pix_fmt = strtol(pix_fmt_str, &tail, 10); if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) { av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); return AVERROR(EINVAL); } } return 0; }
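For context, the 7-field args string parsed by the two init() variants above is normally assembled by the caller. A minimal sketch, assuming the legacy libavfilter "buffer" source and avfilter_graph_create_filter() from the same era; the helper name and error handling are illustrative, not taken from the snippets:

#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/avfiltergraph.h>

/* Hypothetical caller-side helper: formats
 * "w:h:pix_fmt:tb_num:tb_den:sar_num:sar_den" and instantiates the
 * legacy "buffer" source with it. */
static int open_buffer_source(AVFilterGraph *graph, AVFilterContext **src,
                              int w, int h, const char *pix_fmt_name,
                              AVRational time_base, AVRational sar)
{
    char args[256];
    snprintf(args, sizeof(args), "%d:%d:%s:%d:%d:%d:%d",
             w, h, pix_fmt_name,
             time_base.num, time_base.den, sar.num, sar.den);
    return avfilter_graph_create_filter(src, avfilter_get_by_name("buffer"),
                                        "in", args, NULL, graph);
}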
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) { FormatContext *format = ctx->priv; const char *cur, *sep; char pix_fmt_name[PIX_FMT_NAME_MAXSIZE]; int pix_fmt_name_len; enum PixelFormat pix_fmt; /* parse the list of formats */ for (cur = args; cur; cur = sep ? sep+1 : NULL) { if (!(sep = strchr(cur, ':'))) pix_fmt_name_len = strlen(cur); else pix_fmt_name_len = sep - cur; if (pix_fmt_name_len >= PIX_FMT_NAME_MAXSIZE) { av_log(ctx, AV_LOG_ERROR, "Format name too long\n"); return -1; } memcpy(pix_fmt_name, cur, pix_fmt_name_len); pix_fmt_name[pix_fmt_name_len] = 0; pix_fmt = av_get_pix_fmt(pix_fmt_name); if (pix_fmt == PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "Unknown pixel format: %s\n", pix_fmt_name); return -1; } format->listed_pix_fmt_flags[pix_fmt] = 1; } return 0; }
VideoFrameDesc::VideoFrameDesc( const size_t width, const size_t height, const std::string& pixelFormat ) : _width( width ) , _height( height ) , _pixelFormat( av_get_pix_fmt( pixelFormat.c_str() ) ) , _fps( 1.0 ) { }
static av_cold int init_video(AVFilterContext *ctx) { BufferSourceContext *c = ctx->priv; if (!c->pix_fmt_str || !c->w || !c->h || av_q2d(c->time_base) <= 0) { av_log(ctx, AV_LOG_ERROR, "Invalid parameters provided.\n"); return AVERROR(EINVAL); } if ((c->pix_fmt = av_get_pix_fmt(c->pix_fmt_str)) == AV_PIX_FMT_NONE) { char *tail; c->pix_fmt = strtol(c->pix_fmt_str, &tail, 10); if (*tail || c->pix_fmt < 0 || !av_pix_fmt_desc_get(c->pix_fmt)) { av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", c->pix_fmt_str); return AVERROR(EINVAL); } } if (!(c->fifo = av_fifo_alloc(sizeof(AVFrame*)))) return AVERROR(ENOMEM); av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d\n", c->w, c->h, av_get_pix_fmt_name(c->pix_fmt), c->time_base.num, c->time_base.den, c->pixel_aspect.num, c->pixel_aspect.den); return 0; }
static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque) { BufferSourceContext *c = ctx->priv; char pix_fmt_str[128]; int n = 0; *c->sws_param = 0; if (!args || (n = sscanf(args, "%d:%d:%127[^:]:%d:%d:%d:%d:%255c", &c->w, &c->h, pix_fmt_str, &c->time_base.num, &c->time_base.den, &c->sample_aspect_ratio.num, &c->sample_aspect_ratio.den, c->sws_param)) < 7) { av_log(ctx, AV_LOG_ERROR, "Expected at least 7 arguments, but only %d found in '%s'\n", n, args); return AVERROR(EINVAL); } if ((c->pix_fmt = av_get_pix_fmt(pix_fmt_str)) == PIX_FMT_NONE) { char *tail; c->pix_fmt = strtol(pix_fmt_str, &tail, 10); if (*tail || c->pix_fmt < 0 || c->pix_fmt >= PIX_FMT_NB) { av_log(ctx, AV_LOG_ERROR, "Invalid pixel format string '%s'\n", pix_fmt_str); return AVERROR(EINVAL); } } av_log(ctx, AV_LOG_INFO, "w:%d h:%d pixfmt:%s tb:%d/%d sar:%d/%d sws_param:%s\n", c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name, c->time_base.num, c->time_base.den, c->sample_aspect_ratio.num, c->sample_aspect_ratio.den, c->sws_param); return 0; }
int ff_parse_pixel_format(enum AVPixelFormat *ret, const char *arg, void *log_ctx) { char *tail; int pix_fmt = av_get_pix_fmt(arg); if (pix_fmt == AV_PIX_FMT_NONE) { pix_fmt = strtol(arg, &tail, 0); if (*tail || (unsigned)pix_fmt >= AV_PIX_FMT_NB) { av_log(log_ctx, AV_LOG_ERROR, "Invalid pixel format '%s'\n", arg); return AVERROR(EINVAL); } } *ret = pix_fmt; return 0; }
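The same name-or-number fallback, as a standalone sketch using only the public libavutil API that ff_parse_pixel_format() builds on; the helper name is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <libavutil/pixdesc.h>

/* Accept either a pixel format name ("yuv420p") or a numeric value ("0");
 * print the resolved canonical name, or fail if neither form is valid. */
static int resolve_pix_fmt(const char *arg)
{
    char *tail;
    int pix_fmt = av_get_pix_fmt(arg);
    if (pix_fmt == AV_PIX_FMT_NONE) {
        pix_fmt = strtol(arg, &tail, 0);
        if (*tail || !av_pix_fmt_desc_get(pix_fmt))
            return -1;
    }
    printf("%s -> %s\n", arg, av_get_pix_fmt_name(pix_fmt));
    return 0;
}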
static int qsvscale_init(AVFilterContext *ctx) { QSVScaleContext *s = ctx->priv; if (!strcmp(s->format_str, "same")) { s->format = AV_PIX_FMT_NONE; } else { s->format = av_get_pix_fmt(s->format_str); if (s->format == AV_PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str); return AVERROR(EINVAL); } } return 0; }
static int rawvideo_read_header(AVFormatContext *ctx) { RawVideoDemuxerContext *s = ctx->priv_data; int width = 0, height = 0, ret = 0; enum AVPixelFormat pix_fmt; AVRational framerate; AVStream *st; st = avformat_new_stream(ctx, NULL); if (!st) return AVERROR(ENOMEM); st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = ctx->iformat->raw_codec_id; if (s->video_size && (ret = av_parse_video_size(&width, &height, s->video_size)) < 0) { av_log(ctx, AV_LOG_ERROR, "Couldn't parse video size.\n"); return ret; } if ((pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "No such pixel format: %s.\n", s->pixel_format); return AVERROR(EINVAL); } if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) { av_log(ctx, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s->framerate); return ret; } avpriv_set_pts_info(st, 64, framerate.den, framerate.num); st->codec->width = width; st->codec->height = height; st->codec->pix_fmt = pix_fmt; st->codec->bit_rate = av_rescale_q(avpicture_get_size(st->codec->pix_fmt, width, height), (AVRational){8,1}, st->time_base); return 0; }
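The pixel_format, video_size and framerate fields read above are the rawvideo demuxer's private options, so a caller typically supplies them through an options dictionary when opening the input. A minimal sketch; the file name and option values are examples only:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Open a headerless .yuv file by telling the rawvideo demuxer how to
 * interpret it; without these options it cannot derive frame geometry. */
static int open_raw_video(AVFormatContext **fmt_ctx, const char *filename)
{
    AVDictionary *opts = NULL;
    int ret;
    av_dict_set(&opts, "pixel_format", "yuv420p",  0);
    av_dict_set(&opts, "video_size",   "1280x720", 0);
    av_dict_set(&opts, "framerate",    "25",       0);
    ret = avformat_open_input(fmt_ctx, filename,
                              av_find_input_format("rawvideo"), &opts);
    av_dict_free(&opts); /* any options the demuxer did not consume are dropped */
    return ret;
}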
static av_cold int scale_vaapi_init(AVFilterContext *avctx) { ScaleVAAPIContext *ctx = avctx->priv; ctx->va_config = VA_INVALID_ID; ctx->va_context = VA_INVALID_ID; ctx->valid_ids = 1; if (ctx->output_format_string) { ctx->output_format = av_get_pix_fmt(ctx->output_format_string); if (ctx->output_format == AV_PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n"); return AVERROR(EINVAL); } } else { // Use the input format once that is configured. ctx->output_format = AV_PIX_FMT_NONE; } return 0; }
static int img_read_header(AVFormatContext *s1) { VideoDemuxData *s = s1->priv_data; int first_index, last_index, ret = 0; int width = 0, height = 0; AVStream *st; enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE; AVRational framerate; s1->ctx_flags |= AVFMTCTX_NOHEADER; st = avformat_new_stream(s1, NULL); if (!st) { return AVERROR(ENOMEM); } if (s->pixel_format && (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) { av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n", s->pixel_format); return AVERROR(EINVAL); } if (s->video_size && (ret = av_parse_video_size(&width, &height, s->video_size)) < 0) { av_log(s, AV_LOG_ERROR, "Could not parse video size: %s.\n", s->video_size); return ret; } if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) { av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s->framerate); return ret; } av_strlcpy(s->path, s1->filename, sizeof(s->path)); s->img_number = 0; s->img_count = 0; /* find format */ if (s1->iformat->flags & AVFMT_NOFILE) s->is_pipe = 0; else { s->is_pipe = 1; st->need_parsing = AVSTREAM_PARSE_FULL; } avpriv_set_pts_info(st, 60, framerate.den, framerate.num); if (width && height) { st->codec->width = width; st->codec->height = height; } if (!s->is_pipe) { if (find_image_range(&first_index, &last_index, s->path, FFMAX(s->start_number, 5)) < 0) return AVERROR(ENOENT); s->img_first = first_index; s->img_last = last_index; s->img_number = s->start_number != 1 ? s->start_number : first_index; /* compute duration */ st->start_time = 0; st->duration = last_index - first_index + 1; } if (s1->video_codec_id) { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = s1->video_codec_id; } else if (s1->audio_codec_id) { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; st->codec->codec_id = s1->audio_codec_id; } else { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; st->codec->codec_id = ff_guess_image2_codec(s->path); } if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && pix_fmt != AV_PIX_FMT_NONE) st->codec->pix_fmt = pix_fmt; return 0; }
/* raw input */ int ff_raw_read_header(AVFormatContext *s, AVFormatParameters *ap) { AVStream *st; enum CodecID id; st = av_new_stream(s, 0); if (!st) return AVERROR(ENOMEM); id = s->iformat->value; if (id == CODEC_ID_RAWVIDEO) { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; } else { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } st->codec->codec_id = id; switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: { RawAudioDemuxerContext *s1 = s->priv_data; #if FF_API_FORMAT_PARAMETERS if (ap->sample_rate) st->codec->sample_rate = ap->sample_rate; if (ap->channels) st->codec->channels = ap->channels; else st->codec->channels = 1; #endif if (s1->sample_rate) st->codec->sample_rate = s1->sample_rate; if (s1->channels) st->codec->channels = s1->channels; st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); assert(st->codec->bits_per_coded_sample > 0); st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8; av_set_pts_info(st, 64, 1, st->codec->sample_rate); break; } case AVMEDIA_TYPE_VIDEO: { FFRawVideoDemuxerContext *s1 = s->priv_data; int width = 0, height = 0, ret = 0; enum PixelFormat pix_fmt; AVRational framerate; if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) { av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto fail; } if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == PIX_FMT_NONE) { av_log(s, AV_LOG_ERROR, "No such pixel format: %s.\n", s1->pixel_format); ret = AVERROR(EINVAL); goto fail; } if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) { av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate); goto fail; } #if FF_API_FORMAT_PARAMETERS if (ap->width > 0) width = ap->width; if (ap->height > 0) height = ap->height; if (ap->pix_fmt) pix_fmt = ap->pix_fmt; if (ap->time_base.num) framerate = (AVRational){ap->time_base.den, ap->time_base.num}; #endif av_set_pts_info(st, 64, framerate.den, framerate.num); st->codec->width = width; st->codec->height = height; st->codec->pix_fmt = pix_fmt; fail: av_freep(&s1->video_size); av_freep(&s1->pixel_format); av_freep(&s1->framerate); return ret; } default: return -1; } return 0; }
void VideoFrameDesc::setPixelFormat( const std::string& pixelFormat ) { _pixelFormat = av_get_pix_fmt( pixelFormat.c_str() ); }
FFMS_API(int) FFMS_GetPixFmt(const char *Name) { return av_get_pix_fmt(Name); }
static int scale_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame) { AVFilterContext *avctx = inlink->dst; AVFilterLink *outlink = avctx->outputs[0]; ScaleVAAPIContext *ctx = avctx->priv; AVFrame *output_frame = NULL; VASurfaceID input_surface, output_surface; VAProcPipelineParameterBuffer params; VABufferID params_id; VARectangle input_region; VAStatus vas; int err; av_log(ctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n", av_get_pix_fmt_name(input_frame->format), input_frame->width, input_frame->height, input_frame->pts); if (ctx->va_context == VA_INVALID_ID) return AVERROR(EINVAL); input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale input.\n", input_surface); output_frame = av_frame_alloc(); if (!output_frame) { av_log(ctx, AV_LOG_ERROR, "Failed to allocate output frame."); err = AVERROR(ENOMEM); goto fail; } err = av_hwframe_get_buffer(ctx->output_frames_ref, output_frame, 0); if (err < 0) { av_log(ctx, AV_LOG_ERROR, "Failed to get surface for " "output: %d\n.", err); } output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3]; av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for scale output.\n", output_surface); memset(&params, 0, sizeof(params)); // If there were top/left cropping, it could be taken into // account here. input_region = (VARectangle) { .x = 0, .y = 0, .width = input_frame->width, .height = input_frame->height, }; params.surface = input_surface; params.surface_region = &input_region; params.surface_color_standard = vaapi_proc_colour_standard(input_frame->colorspace); params.output_region = 0; params.output_background_color = 0xff000000; params.output_color_standard = params.surface_color_standard; params.pipeline_flags = 0; params.filter_flags = VA_FILTER_SCALING_HQ; vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context, output_surface); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail; } vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context, VAProcPipelineParameterBufferType, sizeof(params), 1, &params, &params_id); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail_after_begin; } av_log(ctx, AV_LOG_DEBUG, "Pipeline parameter buffer is %#x.\n", params_id); vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "Failed to render parameter buffer: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail_after_begin; } vas = vaEndPicture(ctx->hwctx->display, ctx->va_context); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: " "%d (%s).\n", vas, vaErrorStr(vas)); err = AVERROR(EIO); goto fail_after_render; } if (ctx->hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) { vas = vaDestroyBuffer(ctx->hwctx->display, params_id); if (vas != VA_STATUS_SUCCESS) { av_log(ctx, AV_LOG_ERROR, "Failed to free parameter buffer: " "%d (%s).\n", vas, vaErrorStr(vas)); // And ignore. 
} } av_frame_copy_props(output_frame, input_frame); av_frame_free(&input_frame); av_log(ctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n", av_get_pix_fmt_name(output_frame->format), output_frame->width, output_frame->height, output_frame->pts); return ff_filter_frame(outlink, output_frame); // We want to make sure that if vaBeginPicture has been called, we also // call vaRenderPicture and vaEndPicture. These calls may well fail or // do something else nasty, but once we're in this failure case there // isn't much else we can do. fail_after_begin: vaRenderPicture(ctx->hwctx->display, ctx->va_context, &params_id, 1); fail_after_render: vaEndPicture(ctx->hwctx->display, ctx->va_context); fail: av_frame_free(&input_frame); av_frame_free(&output_frame); return err; } static av_cold int scale_vaapi_init(AVFilterContext *avctx) { ScaleVAAPIContext *ctx = avctx->priv; ctx->va_config = VA_INVALID_ID; ctx->va_context = VA_INVALID_ID; ctx->valid_ids = 1; if (ctx->output_format_string) { ctx->output_format = av_get_pix_fmt(ctx->output_format_string); if (ctx->output_format == AV_PIX_FMT_NONE) { av_log(ctx, AV_LOG_ERROR, "Invalid output format.\n"); return AVERROR(EINVAL); } } else { // Use the input format once that is configured. ctx->output_format = AV_PIX_FMT_NONE; } return 0; }
PixelProperties::PixelProperties( const std::string& avPixelFormat ) : _pixelFormat( AV_PIX_FMT_NONE ) ,_pixelDesc( NULL ) { init( av_get_pix_fmt( avPixelFormat.c_str() ) ); }
int main(int argc, char **argv) { int i, list_fourcc_pix_fmt = 0, list_pix_fmt_fourccs = 0; const char *pix_fmt_name = NULL; char c; if (argc == 1) { usage(); return 0; } while ((c = getopt(argc, argv, "hp:lL")) != -1) { switch (c) { case 'h': usage(); return 0; case 'l': list_fourcc_pix_fmt = 1; break; case 'L': list_pix_fmt_fourccs = 1; break; case 'p': pix_fmt_name = optarg; break; case '?': usage(); return 1; } } if (list_fourcc_pix_fmt) { for (i = 0; ff_raw_pix_fmt_tags[i].pix_fmt != AV_PIX_FMT_NONE; i++) { char buf[32]; av_get_codec_tag_string(buf, sizeof(buf), ff_raw_pix_fmt_tags[i].fourcc); printf("%s: %s\n", buf, av_get_pix_fmt_name(ff_raw_pix_fmt_tags[i].pix_fmt)); } } if (list_pix_fmt_fourccs) { for (i = 0; i < AV_PIX_FMT_NB; i++) { const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(i); if (!pix_desc->name || pix_desc->flags & PIX_FMT_HWACCEL) continue; printf("%s: ", pix_desc->name); print_pix_fmt_fourccs(i, ' '); printf("\n"); } } if (pix_fmt_name) { enum AVPixelFormat pix_fmt = av_get_pix_fmt(pix_fmt_name); if (pix_fmt == AV_PIX_FMT_NONE) { fprintf(stderr, "Invalid pixel format selected '%s'\n", pix_fmt_name); return 1; } print_pix_fmt_fourccs(pix_fmt, '\n'); } return 0; }
/* raw input */ int ff_raw_read_header(AVFormatContext *s) { AVStream *st; enum AVCodecID id; st = avformat_new_stream(s, NULL); if (!st) return AVERROR(ENOMEM); id = s->iformat->raw_codec_id; if (id == AV_CODEC_ID_RAWVIDEO) { st->codec->codec_type = AVMEDIA_TYPE_VIDEO; } else { st->codec->codec_type = AVMEDIA_TYPE_AUDIO; } st->codec->codec_id = id; switch(st->codec->codec_type) { case AVMEDIA_TYPE_AUDIO: { RawAudioDemuxerContext *s1 = s->priv_data; st->codec->channels = 1; if (id == AV_CODEC_ID_ADPCM_G722) st->codec->sample_rate = 16000; if (s1 && s1->sample_rate) st->codec->sample_rate = s1->sample_rate; if (s1 && s1->channels) st->codec->channels = s1->channels; st->codec->bits_per_coded_sample = av_get_bits_per_sample(st->codec->codec_id); assert(st->codec->bits_per_coded_sample > 0); st->codec->block_align = st->codec->bits_per_coded_sample*st->codec->channels/8; avpriv_set_pts_info(st, 64, 1, st->codec->sample_rate); break; } case AVMEDIA_TYPE_VIDEO: { FFRawVideoDemuxerContext *s1 = s->priv_data; int width = 0, height = 0, ret = 0; enum AVPixelFormat pix_fmt; AVRational framerate; if (s1->video_size && (ret = av_parse_video_size(&width, &height, s1->video_size)) < 0) { av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n"); goto fail; } if ((pix_fmt = av_get_pix_fmt(s1->pixel_format)) == AV_PIX_FMT_NONE) { av_log(s, AV_LOG_ERROR, "No such pixel format: %s.\n", s1->pixel_format); ret = AVERROR(EINVAL); goto fail; } if ((ret = av_parse_video_rate(&framerate, s1->framerate)) < 0) { av_log(s, AV_LOG_ERROR, "Could not parse framerate: %s.\n", s1->framerate); goto fail; } avpriv_set_pts_info(st, 64, framerate.den, framerate.num); st->codec->width = width; st->codec->height = height; st->codec->pix_fmt = pix_fmt; fail: return ret; } default: return -1; } return 0; }
void Y_av_create(int argc) { static long default_params[]= {YAV_BIT_RATE, YAV_FRAME_RATE, YAV_GOP_SIZE, YAV_MAX_B_FRAMES}; long *params=default_params; // PARSE ARGUMENTS: SEPARATE KEYWORDS FROM POSITIONAL ARGUMENTS static char * knames[] = { "vcodec", "pix_fmt", "b", "r", "g", "bf", 0 }; #define YAC_CREATE_NKW 6 static long kglobs[YAC_CREATE_NKW+1]; int kiargs[YAC_CREATE_NKW]; int piargs[]={-1, -1}; yarg_kw_init(knames, kglobs, kiargs); int iarg=argc-1, parg=0; while (iarg>=0) { iarg = yarg_kw(iarg, kglobs, kiargs); if (iarg>=0) { if (parg<2) piargs[parg++]=iarg--; else y_error("av_create takes at most 2 positional arguments"); } } // INTERPRET POSITIONAL ARGUMENTS // filename (mandatory) if ((iarg=piargs[0])<0) y_error("FILENAME must be specified"); char *filename = ygets_q(iarg); // params vector (optional) if (yav_arg_set(iarg=piargs[1])) { long ntot ; long dims[Y_DIMSIZE]={0,0}; params = ygeta_l(iarg, &ntot, dims); if (dims[0]!=1 || dims[1]!=4) y_error("bad dimensions for PARAMS vector"); if (params[0]<0 || params[1]<0 || params[2]<0) y_error("bad values in PARAMS vector"); } // INTERPRET KEYWORD ARGUMENTS char* vcodec = NULL, *pix_fmt = NULL; int k=0; if (yav_arg_set(iarg=kiargs[k++])) vcodec = ygets_q(iarg); // vcodec if (yav_arg_set(iarg=kiargs[k++])) pix_fmt = ygets_q(iarg); // pix_fmt if (yav_arg_set(iarg=kiargs[k++])) params[0] = ygets_l(iarg); // b if (yav_arg_set(iarg=kiargs[k++])) params[1] = ygets_l(iarg); // r if (yav_arg_set(iarg=kiargs[k++])) params[2] = ygets_l(iarg); // g if (yav_arg_set(iarg=kiargs[k++])) params[3] = ygets_l(iarg); // bf // PUSH RETURN VALUE yav_ctxt * obj = ypush_av(); /* allocate the output media context */ obj->oc = avformat_alloc_context(); if (!obj->oc) { y_error("Memory error"); } /* auto detect the output format from the name. default is mpeg. */ obj->oc->oformat = av_guess_format(NULL, filename, NULL); if (!obj->oc->oformat) { y_warn("Could not deduce output format from file extension: using MPEG."); obj->oc->oformat = av_guess_format("mpeg", NULL, NULL); } if (!obj->oc->oformat) { y_error("Could not find suitable output format."); } snprintf(obj->oc->filename, sizeof(obj->oc->filename), "%s", filename); /* add the audio and video streams using the default format codecs and initialize the codecs */ obj->video_st = NULL; // audio_st = NULL; if (obj->oc->oformat->video_codec != AV_CODEC_ID_NONE) { AVCodecContext *c; obj->video_st = avformat_new_stream(obj->oc, NULL); c = obj->video_st->codec; if (vcodec) { obj->codec = avcodec_find_encoder_by_name(vcodec); if (!obj->codec) y_error("can't find requested codec"); c->codec_id = obj->codec->id; } else { c->codec_id = obj->oc->oformat->video_codec; obj->codec = avcodec_find_encoder(c->codec_id); if (!obj->codec) y_error("default codec not found"); } c->codec_type = AVMEDIA_TYPE_VIDEO; avcodec_get_context_defaults3(c, obj->codec); if (c->codec_id == AV_CODEC_ID_NONE) c->codec_id = obj->codec->id; /* put sample parameters */ c->width = 0; c->height = 0; c->pix_fmt = pix_fmt ? av_get_pix_fmt(pix_fmt) : YAV_PIX_FMT; c->bit_rate = params[0] ? params[0] : YAV_BIT_RATE; c->time_base.den = params[1] ? params[1] : YAV_FRAME_RATE; c->time_base.num = 1; c->gop_size = params[2] ? params[2] : YAV_GOP_SIZE; c->max_b_frames = (params[3]>=0) ? 
params[3] : YAV_MAX_B_FRAMES; if(obj->oc->oformat->flags & AVFMT_GLOBALHEADER) c->flags |= CODEC_FLAG_GLOBAL_HEADER; // codec-specific limitations switch (c->codec_id) { case AV_CODEC_ID_RAWVIDEO: case AV_CODEC_ID_GIF: if (!pix_fmt) c->pix_fmt = AV_PIX_FMT_RGB24; break; case AV_CODEC_ID_MSMPEG4V3: case AV_CODEC_ID_H263: case AV_CODEC_ID_H263P: case AV_CODEC_ID_RV10: case AV_CODEC_ID_RV20: case AV_CODEC_ID_FLV1: case AV_CODEC_ID_ASV1: case AV_CODEC_ID_ASV2: c->max_b_frames = 0; break; default:; } } if (!(obj->oc->oformat->flags & AVFMT_RAWPICTURE)) { obj->video_outbuf_size = 200000; obj->video_outbuf = av_malloc(obj->video_outbuf_size); } }
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p, int line_num, FFServerStream **pstream) { char arg[1024], arg2[1024]; FFServerStream *stream; av_assert0(pstream); stream = *pstream; if (!av_strcasecmp(cmd, "<Stream")) { char *q; FFServerStream *s; stream = av_mallocz(sizeof(FFServerStream)); if (!stream) return AVERROR(ENOMEM); ffserver_get_arg(stream->filename, sizeof(stream->filename), p); q = strrchr(stream->filename, '>'); if (q) *q = '\0'; for (s = config->first_stream; s; s = s->next) { if (!strcmp(stream->filename, s->filename)) ERROR("Stream '%s' already registered\n", s->filename); } stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL); avcodec_get_context_defaults3(&config->video_enc, NULL); avcodec_get_context_defaults3(&config->audio_enc, NULL); config->audio_id = AV_CODEC_ID_NONE; config->video_id = AV_CODEC_ID_NONE; if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } *pstream = stream; return 0; } av_assert0(stream); if (!av_strcasecmp(cmd, "Feed")) { FFServerStream *sfeed; ffserver_get_arg(arg, sizeof(arg), p); sfeed = config->first_feed; while (sfeed) { if (!strcmp(sfeed->filename, arg)) break; sfeed = sfeed->next_feed; } if (!sfeed) ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename); else stream->feed = sfeed; } else if (!av_strcasecmp(cmd, "Format")) { ffserver_get_arg(arg, sizeof(arg), p); if (!strcmp(arg, "status")) { stream->stream_type = STREAM_TYPE_STATUS; stream->fmt = NULL; } else { stream->stream_type = STREAM_TYPE_LIVE; /* JPEG cannot be used here, so use single frame MJPEG */ if (!strcmp(arg, "jpeg")) strcpy(arg, "mjpeg"); stream->fmt = ffserver_guess_format(arg, NULL, NULL); if (!stream->fmt) ERROR("Unknown Format: %s\n", arg); } if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } } else if (!av_strcasecmp(cmd, "InputFormat")) { ffserver_get_arg(arg, sizeof(arg), p); stream->ifmt = av_find_input_format(arg); if (!stream->ifmt) ERROR("Unknown input format: %s\n", arg); } else if (!av_strcasecmp(cmd, "FaviconURL")) { if (stream->stream_type == STREAM_TYPE_STATUS) ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); else ERROR("FaviconURL only permitted for status streams\n"); } else if (!av_strcasecmp(cmd, "Author") || !av_strcasecmp(cmd, "Comment") || !av_strcasecmp(cmd, "Copyright") || !av_strcasecmp(cmd, "Title")) { char key[32]; int i, ret; ffserver_get_arg(arg, sizeof(arg), p); for (i = 0; i < strlen(cmd); i++) key[i] = av_tolower(cmd[i]); key[i] = 0; WARNING("'%s' option in configuration file is deprecated, " "use 'Metadata %s VALUE' instead\n", cmd, key); if ((ret = av_dict_set(&stream->metadata, key, arg, 0)) < 0) ERROR("Could not set metadata '%s' to value '%s': %s\n", key, arg, av_err2str(ret)); } else if (!av_strcasecmp(cmd, "Metadata")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if ((ret = av_dict_set(&stream->metadata, arg, arg2, 0)) < 0) { ERROR("Could not set metadata '%s' to value '%s': %s\n", arg, arg2, av_err2str(ret)); } } else if (!av_strcasecmp(cmd, "Preroll")) { ffserver_get_arg(arg, sizeof(arg), p); stream->prebuffer = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "StartSendOnKey")) { stream->send_on_key = 1; } else if (!av_strcasecmp(cmd, "AudioCodec")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO); if 
(config->audio_id == AV_CODEC_ID_NONE) ERROR("Unknown AudioCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "VideoCodec")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO); if (config->video_id == AV_CODEC_ID_NONE) ERROR("Unknown VideoCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "MaxTime")) { ffserver_get_arg(arg, sizeof(arg), p); stream->max_time = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "AudioBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_enc.bit_rate = lrintf(atof(arg) * 1000); } else if (!av_strcasecmp(cmd, "AudioChannels")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_enc.channels = atoi(arg); } else if (!av_strcasecmp(cmd, "AudioSampleRate")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_enc.sample_rate = atoi(arg); } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) { int minrate, maxrate; ffserver_get_arg(arg, sizeof(arg), p); if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) { config->video_enc.rc_min_rate = minrate * 1000; config->video_enc.rc_max_rate = maxrate * 1000; } else ERROR("Incorrect format for VideoBitRateRange -- should be <min>-<max>: %s\n", arg); } else if (!av_strcasecmp(cmd, "Debug")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.debug = strtol(arg,0,0); } else if (!av_strcasecmp(cmd, "Strict")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.strict_std_compliance = atoi(arg); } else if (!av_strcasecmp(cmd, "VideoBufferSize")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.rc_buffer_size = atoi(arg) * 8*1024; } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.bit_rate_tolerance = atoi(arg) * 1000; } else if (!av_strcasecmp(cmd, "VideoBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.bit_rate = atoi(arg) * 1000; } else if (!av_strcasecmp(cmd, "VideoSize")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ret = av_parse_video_size(&config->video_enc.width, &config->video_enc.height, arg); if (ret < 0) ERROR("Invalid video size '%s'\n", arg); else if ((config->video_enc.width % 16) != 0 || (config->video_enc.height % 16) != 0) ERROR("Image size must be a multiple of 16\n"); } else if (!av_strcasecmp(cmd, "VideoFrameRate")) { AVRational frame_rate; ffserver_get_arg(arg, sizeof(arg), p); if (av_parse_video_rate(&frame_rate, arg) < 0) { ERROR("Incorrect frame rate: %s\n", arg); } else { config->video_enc.time_base.num = frame_rate.den; config->video_enc.time_base.den = frame_rate.num; } } else if (!av_strcasecmp(cmd, "PixelFormat")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.pix_fmt = av_get_pix_fmt(arg); if (config->video_enc.pix_fmt == AV_PIX_FMT_NONE) ERROR("Unknown pixel format: %s\n", arg); } else if (!av_strcasecmp(cmd, "VideoGopSize")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.gop_size = atoi(arg); } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) { config->video_enc.gop_size = 1; } else if (!av_strcasecmp(cmd, "VideoHighQuality")) { config->video_enc.mb_decision = FF_MB_DECISION_BITS; } else if (!av_strcasecmp(cmd, "Video4MotionVector")) { config->video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove config->video_enc.flags |= CODEC_FLAG_4MV; } else if (!av_strcasecmp(cmd, "AVOptionVideo") || !av_strcasecmp(cmd, "AVOptionAudio")) { AVCodecContext *avctx; int type; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (!av_strcasecmp(cmd, "AVOptionVideo")) { avctx = 
&config->video_enc; type = AV_OPT_FLAG_VIDEO_PARAM; } else { avctx = &config->audio_enc; type = AV_OPT_FLAG_AUDIO_PARAM; } if (ffserver_opt_default(arg, arg2, avctx, type|AV_OPT_FLAG_ENCODING_PARAM)) { ERROR("Error setting %s option to %s %s\n", cmd, arg, arg2); } } else if (!av_strcasecmp(cmd, "AVPresetVideo") || !av_strcasecmp(cmd, "AVPresetAudio")) { AVCodecContext *avctx; int type; ffserver_get_arg(arg, sizeof(arg), p); if (!av_strcasecmp(cmd, "AVPresetVideo")) { avctx = &config->video_enc; config->video_enc.codec_id = config->video_id; type = AV_OPT_FLAG_VIDEO_PARAM; } else { avctx = &config->audio_enc; config->audio_enc.codec_id = config->audio_id; type = AV_OPT_FLAG_AUDIO_PARAM; } if (ffserver_opt_preset(arg, avctx, type|AV_OPT_FLAG_ENCODING_PARAM, &config->audio_id, &config->video_id)) { ERROR("AVPreset error: %s\n", arg); } } else if (!av_strcasecmp(cmd, "VideoTag")) { ffserver_get_arg(arg, sizeof(arg), p); if (strlen(arg) == 4) config->video_enc.codec_tag = MKTAG(arg[0], arg[1], arg[2], arg[3]); } else if (!av_strcasecmp(cmd, "BitExact")) { config->video_enc.flags |= CODEC_FLAG_BITEXACT; } else if (!av_strcasecmp(cmd, "DctFastint")) { config->video_enc.dct_algo = FF_DCT_FASTINT; } else if (!av_strcasecmp(cmd, "IdctSimple")) { config->video_enc.idct_algo = FF_IDCT_SIMPLE; } else if (!av_strcasecmp(cmd, "Qscale")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.flags |= CODEC_FLAG_QSCALE; config->video_enc.global_quality = FF_QP2LAMBDA * atoi(arg); } else if (!av_strcasecmp(cmd, "VideoQDiff")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.max_qdiff = atoi(arg); if (config->video_enc.max_qdiff < 1 || config->video_enc.max_qdiff > 31) ERROR("VideoQDiff out of range\n"); } else if (!av_strcasecmp(cmd, "VideoQMax")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.qmax = atoi(arg); if (config->video_enc.qmax < 1 || config->video_enc.qmax > 31) ERROR("VideoQMax out of range\n"); } else if (!av_strcasecmp(cmd, "VideoQMin")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.qmin = atoi(arg); if (config->video_enc.qmin < 1 || config->video_enc.qmin > 31) ERROR("VideoQMin out of range\n"); } else if (!av_strcasecmp(cmd, "LumiMask")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.lumi_masking = atof(arg); } else if (!av_strcasecmp(cmd, "DarkMask")) { ffserver_get_arg(arg, sizeof(arg), p); config->video_enc.dark_masking = atof(arg); } else if (!av_strcasecmp(cmd, "NoVideo")) { config->video_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "NoAudio")) { config->audio_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "ACL")) { ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, line_num); } else if (!av_strcasecmp(cmd, "DynamicACL")) { ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p); } else if (!av_strcasecmp(cmd, "RTSPOption")) { ffserver_get_arg(arg, sizeof(arg), p); av_freep(&stream->rtsp_option); stream->rtsp_option = av_strdup(arg); } else if (!av_strcasecmp(cmd, "MulticastAddress")) { ffserver_get_arg(arg, sizeof(arg), p); if (resolve_host(&stream->multicast_ip, arg) != 0) ERROR("Invalid host/IP address: %s\n", arg); stream->is_multicast = 1; stream->loop = 1; /* default is looping */ } else if (!av_strcasecmp(cmd, "MulticastPort")) { ffserver_get_arg(arg, sizeof(arg), p); stream->multicast_port = atoi(arg); } else if (!av_strcasecmp(cmd, "MulticastTTL")) { ffserver_get_arg(arg, sizeof(arg), p); stream->multicast_ttl = atoi(arg); } else if (!av_strcasecmp(cmd, "NoLoop")) { stream->loop = 0; 
} else if (!av_strcasecmp(cmd, "</Stream>")) { if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) { if (config->audio_id != AV_CODEC_ID_NONE) { config->audio_enc.codec_type = AVMEDIA_TYPE_AUDIO; config->audio_enc.codec_id = config->audio_id; add_codec(stream, &config->audio_enc); } if (config->video_id != AV_CODEC_ID_NONE) { config->video_enc.codec_type = AVMEDIA_TYPE_VIDEO; config->video_enc.codec_id = config->video_id; add_codec(stream, &config->video_enc); } } *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; }
static bool ffmpeg_init_config(struct ff_config_param *params, const char *config) { params->out_pix_fmt = PIX_FMT_NONE; params->scale_factor = 1; params->threads = 1; params->frame_drop_ratio = 1; if (!config) return true; params->conf = config_file_new(config); if (!params->conf) { RARCH_ERR("Failed to load FFmpeg config \"%s\".\n", config); return false; } config_get_array(params->conf, "vcodec", params->vcodec, sizeof(params->vcodec)); config_get_array(params->conf, "acodec", params->acodec, sizeof(params->acodec)); config_get_array(params->conf, "format", params->format, sizeof(params->format)); config_get_uint(params->conf, "threads", &params->threads); if (!config_get_uint(params->conf, "frame_drop_ratio", &params->frame_drop_ratio) || !params->frame_drop_ratio) params->frame_drop_ratio = 1; if (!config_get_bool(params->conf, "audio_enable", &params->audio_enable)) params->audio_enable = true; config_get_uint(params->conf, "sample_rate", &params->sample_rate); config_get_uint(params->conf, "scale_factor", &params->scale_factor); params->audio_qscale = config_get_int(params->conf, "audio_global_quality", &params->audio_global_quality); config_get_int(params->conf, "audio_bit_rate", &params->audio_bit_rate); params->video_qscale = config_get_int(params->conf, "video_global_quality", &params->video_global_quality); config_get_int(params->conf, "video_bit_rate", &params->video_bit_rate); char pix_fmt[64] = {0}; if (config_get_array(params->conf, "pix_fmt", pix_fmt, sizeof(pix_fmt))) { params->out_pix_fmt = av_get_pix_fmt(pix_fmt); if (params->out_pix_fmt == PIX_FMT_NONE) { RARCH_ERR("Cannot find pix_fmt \"%s\".\n", pix_fmt); return false; } } struct config_file_entry entry; if (!config_get_entry_list_head(params->conf, &entry)) return true; do { if (strstr(entry.key, "video_") == entry.key) { const char *key = entry.key + strlen("video_"); av_dict_set(&params->video_opts, key, entry.value, 0); } else if (strstr(entry.key, "audio_") == entry.key) { const char *key = entry.key + strlen("audio_"); av_dict_set(&params->audio_opts, key, entry.value, 0); } } while (config_get_entry_list_next(&entry)); return true; }
void VCapsConvertElement::iStream(const QbPacket &packet) { if (!packet.caps().isValid() || packet.caps().mimeType() != "video/x-raw" || this->state() != ElementStatePlaying) return; if (packet.caps() == this->m_caps) { emit this->oStream(packet); return; } int iWidth = packet.caps().property("width").toInt(); int iHeight = packet.caps().property("height").toInt(); QString format = packet.caps().property("format").toString(); PixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str()); QList<QByteArray> props = this->m_caps.dynamicPropertyNames(); int oWidth = props.contains("width")? this->m_caps.property("width").toInt(): iWidth; int oHeight = props.contains("height")? this->m_caps.property("height").toInt(): iHeight; PixelFormat oFormat; if (props.contains("format")) { QString oFormatString = this->m_caps.property("format").toString(); oFormat = av_get_pix_fmt(oFormatString.toStdString().c_str()); } else oFormat = iFormat; SwsContext *scaleContext = sws_getCachedContext(NULL, iWidth, iHeight, iFormat, oWidth, oHeight, oFormat, SWS_FAST_BILINEAR, NULL, NULL, NULL); if (!scaleContext) return; int oBufferSize = avpicture_get_size(oFormat, oWidth, oHeight); QSharedPointer<uchar> oBuffer(new uchar[oBufferSize]); AVPicture iPicture; avpicture_fill(&iPicture, (uint8_t *) packet.buffer().data(), iFormat, iWidth, iHeight); AVPicture oPicture; avpicture_fill(&oPicture, (uint8_t *) oBuffer.data(), oFormat, oWidth, oHeight); sws_scale(scaleContext, (uint8_t **) iPicture.data, iPicture.linesize, 0, iHeight, oPicture.data, oPicture.linesize); sws_freeContext(scaleContext); QbPacket oPacket(packet.caps().update(this->m_caps), oBuffer, oBufferSize); oPacket.setPts(packet.pts()); oPacket.setDuration(packet.duration()); oPacket.setTimeBase(packet.timeBase()); oPacket.setIndex(packet.index()); emit this->oStream(oPacket); }
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p, int line_num, FFServerStream **pstream) { char arg[1024], arg2[1024]; FFServerStream *stream; int val; av_assert0(pstream); stream = *pstream; if (!av_strcasecmp(cmd, "<Stream")) { char *q; FFServerStream *s; stream = av_mallocz(sizeof(FFServerStream)); if (!stream) return AVERROR(ENOMEM); config->dummy_ctx = avcodec_alloc_context3(NULL); if (!config->dummy_ctx) { av_free(stream); return AVERROR(ENOMEM); } ffserver_get_arg(stream->filename, sizeof(stream->filename), p); q = strrchr(stream->filename, '>'); if (q) *q = '\0'; for (s = config->first_stream; s; s = s->next) { if (!strcmp(stream->filename, s->filename)) ERROR("Stream '%s' already registered\n", s->filename); } stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL); if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } else { config->audio_id = AV_CODEC_ID_NONE; config->video_id = AV_CODEC_ID_NONE; } *pstream = stream; return 0; } av_assert0(stream); if (!av_strcasecmp(cmd, "Feed")) { FFServerStream *sfeed; ffserver_get_arg(arg, sizeof(arg), p); sfeed = config->first_feed; while (sfeed) { if (!strcmp(sfeed->filename, arg)) break; sfeed = sfeed->next_feed; } if (!sfeed) ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename); else stream->feed = sfeed; } else if (!av_strcasecmp(cmd, "Format")) { ffserver_get_arg(arg, sizeof(arg), p); if (!strcmp(arg, "status")) { stream->stream_type = STREAM_TYPE_STATUS; stream->fmt = NULL; } else { stream->stream_type = STREAM_TYPE_LIVE; /* JPEG cannot be used here, so use single frame MJPEG */ if (!strcmp(arg, "jpeg")) strcpy(arg, "mjpeg"); stream->fmt = ffserver_guess_format(arg, NULL, NULL); if (!stream->fmt) ERROR("Unknown Format: %s\n", arg); } if (stream->fmt) { config->audio_id = stream->fmt->audio_codec; config->video_id = stream->fmt->video_codec; } } else if (!av_strcasecmp(cmd, "InputFormat")) { ffserver_get_arg(arg, sizeof(arg), p); stream->ifmt = av_find_input_format(arg); if (!stream->ifmt) ERROR("Unknown input format: %s\n", arg); } else if (!av_strcasecmp(cmd, "FaviconURL")) { if (stream->stream_type == STREAM_TYPE_STATUS) ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); else ERROR("FaviconURL only permitted for status streams\n"); } else if (!av_strcasecmp(cmd, "Author") || !av_strcasecmp(cmd, "Comment") || !av_strcasecmp(cmd, "Copyright") || !av_strcasecmp(cmd, "Title")) { char key[32]; int i; ffserver_get_arg(arg, sizeof(arg), p); for (i = 0; i < strlen(cmd); i++) key[i] = av_tolower(cmd[i]); key[i] = 0; WARNING("'%s' option in configuration file is deprecated, " "use 'Metadata %s VALUE' instead\n", cmd, key); if (av_dict_set(&stream->metadata, key, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Metadata")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (av_dict_set(&stream->metadata, arg, arg2, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Preroll")) { ffserver_get_arg(arg, sizeof(arg), p); stream->prebuffer = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "StartSendOnKey")) { stream->send_on_key = 1; } else if (!av_strcasecmp(cmd, "AudioCodec")) { ffserver_get_arg(arg, sizeof(arg), p); config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO); if (config->audio_id == AV_CODEC_ID_NONE) ERROR("Unknown AudioCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "VideoCodec")) { ffserver_get_arg(arg, sizeof(arg), p); 
config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO); if (config->video_id == AV_CODEC_ID_NONE) ERROR("Unknown VideoCodec: %s\n", arg); } else if (!av_strcasecmp(cmd, "MaxTime")) { ffserver_get_arg(arg, sizeof(arg), p); stream->max_time = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "AudioBitRate")) { float f; ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(&f, arg, 1000, 0, FLT_MAX, config, line_num, "Invalid %s: %s\n", cmd, arg); if (av_dict_set_int(&config->audio_conf, cmd, lrintf(f), 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioChannels")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 8, config, line_num, "Invalid %s: %s, valid range is 1-8.", cmd, arg); if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioSampleRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->audio_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) { int minrate, maxrate; ffserver_get_arg(arg, sizeof(arg), p); if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) { if (av_dict_set_int(&config->video_conf, "VideoBitRateRangeMin", minrate, 0) < 0 || av_dict_set_int(&config->video_conf, "VideoBitRateRangeMax", maxrate, 0) < 0) goto nomem; } else ERROR("Incorrect format for VideoBitRateRange -- should be " "<min>-<max>: %s\n", arg); } else if (!av_strcasecmp(cmd, "Debug")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Strict")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBufferSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 8*1024, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 1000, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 1000, 0, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoSize")) { int ret, w, h; ffserver_get_arg(arg, sizeof(arg), p); ret = av_parse_video_size(&w, &h, arg); if (ret < 0) ERROR("Invalid video size '%s'\n", arg); else if ((w % 16) || (h % 16)) ERROR("Image size must be a multiple of 16\n"); if (av_dict_set_int(&config->video_conf, "VideoSizeWidth", w, 0) < 0 || av_dict_set_int(&config->video_conf, "VideoSizeHeight", h, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoFrameRate")) { AVRational frame_rate; ffserver_get_arg(arg, sizeof(arg), p); if (av_parse_video_rate(&frame_rate, arg) < 0) { ERROR("Incorrect frame rate: %s\n", arg); } else { if (av_dict_set_int(&config->video_conf, "VideoFrameRateNum", frame_rate.num, 0) < 0 || av_dict_set_int(&config->video_conf, 
"VideoFrameRateDen", frame_rate.den, 0) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "PixelFormat")) { enum AVPixelFormat pix_fmt; ffserver_get_arg(arg, sizeof(arg), p); pix_fmt = av_get_pix_fmt(arg); if (pix_fmt == AV_PIX_FMT_NONE) ERROR("Unknown pixel format: %s\n", arg); if (av_dict_set_int(&config->video_conf, cmd, pix_fmt, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoGopSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) { if (av_dict_set(&config->video_conf, cmd, "1", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoHighQuality")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Video4MotionVector")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVOptionVideo") || !av_strcasecmp(cmd, "AVOptionAudio")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (!av_strcasecmp(cmd, "AVOptionVideo")) ret = ffserver_save_avoption(arg, arg2, &config->video_opts, AV_OPT_FLAG_VIDEO_PARAM ,config, line_num); else ret = ffserver_save_avoption(arg, arg2, &config->audio_opts, AV_OPT_FLAG_AUDIO_PARAM ,config, line_num); if (ret < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVPresetVideo") || !av_strcasecmp(cmd, "AVPresetAudio")) { char **preset = NULL; ffserver_get_arg(arg, sizeof(arg), p); if (!av_strcasecmp(cmd, "AVPresetVideo")) { preset = &config->video_preset; ffserver_opt_preset(arg, NULL, 0, NULL, &config->video_id); } else { preset = &config->audio_preset; ffserver_opt_preset(arg, NULL, 0, &config->audio_id, NULL); } *preset = av_strdup(arg); if (!preset) return AVERROR(ENOMEM); } else if (!av_strcasecmp(cmd, "VideoTag")) { ffserver_get_arg(arg, sizeof(arg), p); if (strlen(arg) == 4) { if (av_dict_set(&config->video_conf, "VideoTag", "arg", 0) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "BitExact")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DctFastint")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "IdctSimple")) { if (av_dict_set(&config->video_conf, cmd, "", 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Qscale")) { ffserver_get_arg(arg, sizeof(arg), p); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQDiff")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMax")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMin")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(NULL, arg, 0, 1, 31, config, line_num, "%s out of range\n", cmd); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "LumiMask")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } 
else if (!av_strcasecmp(cmd, "DarkMask")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(NULL, arg, 0, -FLT_MAX, FLT_MAX, config, line_num, "Invalid %s: %s", cmd, arg); if (av_dict_set(&config->video_conf, cmd, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "NoVideo")) { config->video_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "NoAudio")) { config->audio_id = AV_CODEC_ID_NONE; } else if (!av_strcasecmp(cmd, "ACL")) { ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, line_num); } else if (!av_strcasecmp(cmd, "DynamicACL")) { ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p); } else if (!av_strcasecmp(cmd, "RTSPOption")) { ffserver_get_arg(arg, sizeof(arg), p); av_freep(&stream->rtsp_option); stream->rtsp_option = av_strdup(arg); } else if (!av_strcasecmp(cmd, "MulticastAddress")) { ffserver_get_arg(arg, sizeof(arg), p); if (resolve_host(&stream->multicast_ip, arg)) ERROR("Invalid host/IP address: %s\n", arg); stream->is_multicast = 1; stream->loop = 1; /* default is looping */ } else if (!av_strcasecmp(cmd, "MulticastPort")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, 1, 65535, config, line_num, "Invalid MulticastPort: %s\n", arg); stream->multicast_port = val; } else if (!av_strcasecmp(cmd, "MulticastTTL")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, line_num, "Invalid MulticastTTL: %s\n", arg); stream->multicast_ttl = val; } else if (!av_strcasecmp(cmd, "NoLoop")) { stream->loop = 0; } else if (!av_strcasecmp(cmd, "</Stream>")) { if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) { if (config->audio_id != AV_CODEC_ID_NONE) { AVCodecContext *audio_enc = avcodec_alloc_context3(avcodec_find_encoder(config->audio_id)); if (config->audio_preset && ffserver_opt_preset(arg, audio_enc, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_ENCODING_PARAM, NULL, NULL) < 0) ERROR("Could not apply preset '%s'\n", arg); ffserver_apply_stream_config(audio_enc, config->audio_conf, &config->audio_opts); add_codec(stream, audio_enc); } if (config->video_id != AV_CODEC_ID_NONE) { AVCodecContext *video_enc = avcodec_alloc_context3(avcodec_find_encoder(config->video_id)); if (config->video_preset && ffserver_opt_preset(arg, video_enc, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_ENCODING_PARAM, NULL, NULL) < 0) ERROR("Could not apply preset '%s'\n", arg); ffserver_apply_stream_config(video_enc, config->video_conf, &config->video_opts); add_codec(stream, video_enc); } } av_dict_free(&config->video_opts); av_dict_free(&config->video_conf); av_dict_free(&config->audio_opts); av_dict_free(&config->audio_conf); av_freep(&config->video_preset); av_freep(&config->audio_preset); avcodec_free_context(&config->dummy_ctx); *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; nomem: av_log(NULL, AV_LOG_ERROR, "Out of memory. Aborting.\n"); av_dict_free(&config->video_opts); av_dict_free(&config->video_conf); av_dict_free(&config->audio_opts); av_dict_free(&config->audio_conf); av_freep(&config->video_preset); av_freep(&config->audio_preset); avcodec_free_context(&config->dummy_ctx); return AVERROR(ENOMEM); }
AkPacket ConvertVideo::convert(const AkPacket &packet) { AkVideoPacket videoPacket(packet); // Convert input format. QString format = AkVideoCaps::pixelFormatToString(videoPacket.caps().format()); AVPixelFormat iFormat = av_get_pix_fmt(format.toStdString().c_str()); // Initialize rescaling context. this->m_scaleContext = sws_getCachedContext(this->m_scaleContext, videoPacket.caps().width(), videoPacket.caps().height(), iFormat, videoPacket.caps().width(), videoPacket.caps().height(), AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL); if (!this->m_scaleContext) return AkPacket(); // Create iPicture. AVFrame iFrame; memset(&iFrame, 0, sizeof(AVFrame)); if (av_image_fill_arrays((uint8_t **) iFrame.data, iFrame.linesize, (const uint8_t *) videoPacket.buffer().constData(), iFormat, videoPacket.caps().width(), videoPacket.caps().height(), 1) < 0) return AkPacket(); // Create oPicture int frameSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA, videoPacket.caps().width(), videoPacket.caps().height(), 1); QByteArray oBuffer(frameSize, Qt::Uninitialized); AVFrame oFrame; memset(&oFrame, 0, sizeof(AVFrame)); if (av_image_fill_arrays((uint8_t **) oFrame.data, oFrame.linesize, (const uint8_t *) oBuffer.constData(), AV_PIX_FMT_BGRA, videoPacket.caps().width(), videoPacket.caps().height(), 1) < 0) return AkPacket(); // Convert picture format sws_scale(this->m_scaleContext, iFrame.data, iFrame.linesize, 0, videoPacket.caps().height(), oFrame.data, oFrame.linesize); // Create packet AkVideoPacket oPacket(packet); oPacket.caps().format() = AkVideoCaps::Format_bgra; oPacket.buffer() = oBuffer; return oPacket.toPacket(); }
AVPixelFormat getAVPixelFormat( const std::string& pixelFormat ) { return av_get_pix_fmt( pixelFormat.c_str() ); }
inline PixelFormat osg_av_get_pix_fmt(const char *name) { return av_get_pix_fmt(name); }
int RealFFMpegCodecEncoder::InitCodec(const char *codecStr,FFMpegCodecEncoderParam *param) { codec = avcodec_find_encoder_by_name(codecStr); if (!codec) { fprintf(stderr, "codec not found\n"); return -1; } c = avcodec_alloc_context(); c->qmin = param->qmin; c->qmax = param->qmax; c->bit_rate = param->bitrate; /* resolution must be a multiple of two */ c->width = param->encodeWidth; c->height = param->encodeHeight; c->max_b_frames = param->max_bframes; c->gop_size = param->gop_size; c->pix_fmt = codec->pix_fmts[0]; //c->color_range = AVCOL_RANGE_JPEG; c->time_base.den = 24; c->time_base.num = 1; if (codec->id == CODEC_ID_H264) { /* c->max_qdiff = 4; c->me_range = 16; c->qcompress = 0.6; c->keyint_min = 10; c->trellis = 0; c->level = 13; c->me_threshold = 7; c->thread_count = 2; c->qblur = 0.5; c->profile = 66; */ //ultrafast c->coder_type = 0; c->flags |= CODEC_FLAG_LOOP_FILTER; c->flags |= CODEC_FLAG_CLOSED_GOP; c->me_cmp = FF_CMP_CHROMA; c->me_method = ME_EPZS; c->me_subpel_quality = 0; c->me_range = 16; c->gop_size = 250; c->keyint_min = 25; c->scenechange_threshold = 40; c->i_quant_factor = 0.71; c->b_frame_strategy = 1; c->qcompress = 0.6; c->qmin = 0; c->qmax = 69; c->max_qdiff = 4; c->directpred = 1; c->flags2 |= CODEC_FLAG2_FASTPSKIP; c->cqp = 0; c->thread_count = 4; // } /* open it */ if (avcodec_open(c, codec) < 0) { fprintf(stderr, "could not open codec\n"); return -2; } //setup conversion context picConv = new RealFFMpegBitmapConverter( param->inputWidth,param->inputHeight,av_get_pix_fmt(param->inputPixelType), c->width,c->height,c->pix_fmt); //setup input buffers picSrc = (AVPicture*)malloc(sizeof(AVPicture)); frameSrc = avcodec_alloc_frame(); frameSrc->pts = 0; return 0; }
int ff_img_read_header(AVFormatContext *s1) { VideoDemuxData *s = s1->priv_data; int first_index = 1, last_index = 1; AVStream *st; enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE; s1->ctx_flags |= AVFMTCTX_NOHEADER; st = avformat_new_stream(s1, NULL); if (!st) { return AVERROR(ENOMEM); } if (s->pixel_format && (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) { av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n", s->pixel_format); return AVERROR(EINVAL); } av_strlcpy(s->path, s1->url, sizeof(s->path)); s->img_number = 0; s->img_count = 0; /* find format */ if (s1->iformat->flags & AVFMT_NOFILE) s->is_pipe = 0; else { s->is_pipe = 1; st->need_parsing = AVSTREAM_PARSE_FULL; } if (s->ts_from_file == 2) { #if !HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC av_log(s1, AV_LOG_ERROR, "POSIX.1-2008 not supported, nanosecond file timestamps unavailable\n"); return AVERROR(ENOSYS); #endif avpriv_set_pts_info(st, 64, 1, 1000000000); } else if (s->ts_from_file) avpriv_set_pts_info(st, 64, 1, 1); else avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num); if (s->width && s->height) { st->codecpar->width = s->width; st->codecpar->height = s->height; } if (!s->is_pipe) { if (s->pattern_type == PT_DEFAULT) { if (s1->pb) { s->pattern_type = PT_NONE; } else s->pattern_type = PT_GLOB_SEQUENCE; } if (s->pattern_type == PT_GLOB_SEQUENCE) { s->use_glob = is_glob(s->path); if (s->use_glob) { #if HAVE_GLOB char *p = s->path, *q, *dup; int gerr; #endif av_log(s1, AV_LOG_WARNING, "Pattern type 'glob_sequence' is deprecated: " "use pattern_type 'glob' instead\n"); #if HAVE_GLOB dup = q = av_strdup(p); while (*q) { /* Do we have room for the next char and a \ insertion? */ if ((p - s->path) >= (sizeof(s->path) - 2)) break; if (*q == '%' && strspn(q + 1, "%*?[]{}")) ++q; else if (strspn(q, "\\*?[]{}")) *p++ = '\\'; *p++ = *q++; } *p = 0; av_free(dup); gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate); if (gerr != 0) { return AVERROR(ENOENT); } first_index = 0; last_index = s->globstate.gl_pathc - 1; #endif } } if ((s->pattern_type == PT_GLOB_SEQUENCE && !s->use_glob) || s->pattern_type == PT_SEQUENCE) { if (find_image_range(s1->pb, &first_index, &last_index, s->path, s->start_number, s->start_number_range) < 0) { av_log(s1, AV_LOG_ERROR, "Could find no file with path '%s' and index in the range %d-%d\n", s->path, s->start_number, s->start_number + s->start_number_range - 1); return AVERROR(ENOENT); } } else if (s->pattern_type == PT_GLOB) { #if HAVE_GLOB int gerr; gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate); if (gerr != 0) { return AVERROR(ENOENT); } first_index = 0; last_index = s->globstate.gl_pathc - 1; s->use_glob = 1; #else av_log(s1, AV_LOG_ERROR, "Pattern type 'glob' was selected but globbing " "is not supported by this libavformat build\n"); return AVERROR(ENOSYS); #endif } else if (s->pattern_type != PT_GLOB_SEQUENCE && s->pattern_type != PT_NONE) { av_log(s1, AV_LOG_ERROR, "Unknown value '%d' for pattern_type option\n", s->pattern_type); return AVERROR(EINVAL); } s->img_first = first_index; s->img_last = last_index; s->img_number = first_index; /* compute duration */ if (!s->ts_from_file) { st->start_time = 0; st->duration = last_index - first_index + 1; } } if (s1->video_codec_id) { st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = s1->video_codec_id; } else if (s1->audio_codec_id) { st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; st->codecpar->codec_id = s1->audio_codec_id; } else if 
(s1->iformat->raw_codec_id) { st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; st->codecpar->codec_id = s1->iformat->raw_codec_id; } else { const char *str = strrchr(s->path, '.'); s->split_planes = str && !av_strcasecmp(str + 1, "y"); st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO; if (s1->pb) { int probe_buffer_size = 2048; uint8_t *probe_buffer = av_realloc(NULL, probe_buffer_size + AVPROBE_PADDING_SIZE); const AVInputFormat *fmt = NULL; void *fmt_iter = NULL; AVProbeData pd = { 0 }; if (!probe_buffer) return AVERROR(ENOMEM); probe_buffer_size = avio_read(s1->pb, probe_buffer, probe_buffer_size); if (probe_buffer_size < 0) { av_free(probe_buffer); return probe_buffer_size; } memset(probe_buffer + probe_buffer_size, 0, AVPROBE_PADDING_SIZE); pd.buf = probe_buffer; pd.buf_size = probe_buffer_size; pd.filename = s1->url; while ((fmt = av_demuxer_iterate(&fmt_iter))) { if (fmt->read_header != ff_img_read_header || !fmt->read_probe || (fmt->flags & AVFMT_NOFILE) || !fmt->raw_codec_id) continue; if (fmt->read_probe(&pd) > 0) { st->codecpar->codec_id = fmt->raw_codec_id; break; } } if (s1->flags & AVFMT_FLAG_CUSTOM_IO) { avio_seek(s1->pb, 0, SEEK_SET); } else ffio_rewind_with_probe_data(s1->pb, &probe_buffer, probe_buffer_size); } if (st->codecpar->codec_id == AV_CODEC_ID_NONE) st->codecpar->codec_id = ff_guess_image2_codec(s->path); if (st->codecpar->codec_id == AV_CODEC_ID_LJPEG) st->codecpar->codec_id = AV_CODEC_ID_MJPEG; if (st->codecpar->codec_id == AV_CODEC_ID_ALIAS_PIX) // we cannot distinguish this from BRENDER_PIX st->codecpar->codec_id = AV_CODEC_ID_NONE; } if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && pix_fmt != AV_PIX_FMT_NONE) st->codecpar->format = pix_fmt; return 0; }
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p, FFServerStream **pstream) { char arg[1024], arg2[1024]; FFServerStream *stream; int val; av_assert0(pstream); stream = *pstream; if (!av_strcasecmp(cmd, "<Stream")) { char *q; FFServerStream *s; stream = av_mallocz(sizeof(FFServerStream)); if (!stream) return AVERROR(ENOMEM); config->dummy_actx = avcodec_alloc_context3(NULL); config->dummy_vctx = avcodec_alloc_context3(NULL); if (!config->dummy_vctx || !config->dummy_actx) { av_free(stream); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); return AVERROR(ENOMEM); } config->dummy_actx->codec_type = AVMEDIA_TYPE_AUDIO; config->dummy_vctx->codec_type = AVMEDIA_TYPE_VIDEO; ffserver_get_arg(stream->filename, sizeof(stream->filename), p); q = strrchr(stream->filename, '>'); if (q) *q = '\0'; for (s = config->first_stream; s; s = s->next) { if (!strcmp(stream->filename, s->filename)) ERROR("Stream '%s' already registered\n", s->filename); } stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL); if (stream->fmt) { config->guessed_audio_codec_id = stream->fmt->audio_codec; config->guessed_video_codec_id = stream->fmt->video_codec; } else { config->guessed_audio_codec_id = AV_CODEC_ID_NONE; config->guessed_video_codec_id = AV_CODEC_ID_NONE; } config->stream_use_defaults = config->use_defaults; *pstream = stream; return 0; } av_assert0(stream); if (!av_strcasecmp(cmd, "Feed")) { FFServerStream *sfeed; ffserver_get_arg(arg, sizeof(arg), p); sfeed = config->first_feed; while (sfeed) { if (!strcmp(sfeed->filename, arg)) break; sfeed = sfeed->next_feed; } if (!sfeed) ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename); else stream->feed = sfeed; } else if (!av_strcasecmp(cmd, "Format")) { ffserver_get_arg(arg, sizeof(arg), p); if (!strcmp(arg, "status")) { stream->stream_type = STREAM_TYPE_STATUS; stream->fmt = NULL; } else { stream->stream_type = STREAM_TYPE_LIVE; /* JPEG cannot be used here, so use single frame MJPEG */ if (!strcmp(arg, "jpeg")) { strcpy(arg, "singlejpeg"); stream->single_frame=1; } stream->fmt = ffserver_guess_format(arg, NULL, NULL); if (!stream->fmt) ERROR("Unknown Format: '%s'\n", arg); } if (stream->fmt) { config->guessed_audio_codec_id = stream->fmt->audio_codec; config->guessed_video_codec_id = stream->fmt->video_codec; } } else if (!av_strcasecmp(cmd, "InputFormat")) { ffserver_get_arg(arg, sizeof(arg), p); stream->ifmt = av_find_input_format(arg); if (!stream->ifmt) ERROR("Unknown input format: '%s'\n", arg); } else if (!av_strcasecmp(cmd, "FaviconURL")) { if (stream->stream_type == STREAM_TYPE_STATUS) ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); else ERROR("FaviconURL only permitted for status streams\n"); } else if (!av_strcasecmp(cmd, "Author") || !av_strcasecmp(cmd, "Comment") || !av_strcasecmp(cmd, "Copyright") || !av_strcasecmp(cmd, "Title")) { char key[32]; int i; ffserver_get_arg(arg, sizeof(arg), p); for (i = 0; i < strlen(cmd); i++) key[i] = av_tolower(cmd[i]); key[i] = 0; WARNING("Deprecated '%s' option in configuration file. 
Use " "'Metadata %s VALUE' instead.\n", cmd, key); if (av_dict_set(&stream->metadata, key, arg, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Metadata")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (av_dict_set(&stream->metadata, arg, arg2, 0) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Preroll")) { ffserver_get_arg(arg, sizeof(arg), p); stream->prebuffer = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "StartSendOnKey")) { stream->send_on_key = 1; } else if (!av_strcasecmp(cmd, "AudioCodec")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_codec(config->dummy_actx, arg, config); } else if (!av_strcasecmp(cmd, "VideoCodec")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_codec(config->dummy_vctx, arg, config); } else if (!av_strcasecmp(cmd, "MaxTime")) { ffserver_get_arg(arg, sizeof(arg), p); stream->max_time = atof(arg) * 1000; } else if (!av_strcasecmp(cmd, "AudioBitRate")) { float f; ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_float_param(&f, arg, 1000, -FLT_MAX, FLT_MAX, config, "Invalid %s: '%s'\n", cmd, arg); if (ffserver_save_avoption_int("b", (int64_t)lrintf(f), AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioChannels")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("ac", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AudioSampleRate")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("ar", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) { int minrate, maxrate; char *dash; ffserver_get_arg(arg, sizeof(arg), p); dash = strchr(arg, '-'); if (dash) { *dash = '\0'; dash++; if (ffserver_set_int_param(&minrate, arg, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0 && ffserver_set_int_param(&maxrate, dash, 1000, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg) >= 0) { if (ffserver_save_avoption_int("minrate", minrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || ffserver_save_avoption_int("maxrate", maxrate, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } } else ERROR("Incorrect format for VideoBitRateRange. 
It should be " "<min>-<max>: '%s'.\n", arg); } else if (!av_strcasecmp(cmd, "Debug")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("debug", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0 || ffserver_save_avoption("debug", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Strict")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("strict", arg, AV_OPT_FLAG_AUDIO_PARAM, config) < 0 || ffserver_save_avoption("strict", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBufferSize")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 8*1024, 0, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("bufsize", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("bt", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoBitRate")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 1000, INT_MIN, INT_MAX, config, "Invalid %s: '%s'", cmd, arg); if (ffserver_save_avoption_int("b", val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoSize")) { int ret, w, h; ffserver_get_arg(arg, sizeof(arg), p); ret = av_parse_video_size(&w, &h, arg); if (ret < 0) ERROR("Invalid video size '%s'\n", arg); else { if (w % 2 || h % 2) WARNING("Image size is not a multiple of 2\n"); if (ffserver_save_avoption("video_size", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } } else if (!av_strcasecmp(cmd, "VideoFrameRate")) { ffserver_get_arg(&arg[2], sizeof(arg) - 2, p); arg[0] = '1'; arg[1] = '/'; if (ffserver_save_avoption("time_base", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "PixelFormat")) { enum AVPixelFormat pix_fmt; ffserver_get_arg(arg, sizeof(arg), p); pix_fmt = av_get_pix_fmt(arg); if (pix_fmt == AV_PIX_FMT_NONE) ERROR("Unknown pixel format: '%s'\n", arg); else if (ffserver_save_avoption("pixel_format", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoGopSize")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("g", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) { if (ffserver_save_avoption("g", "1", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoHighQuality")) { if (ffserver_save_avoption("mbd", "+bits", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Video4MotionVector")) { if (ffserver_save_avoption("mbd", "+bits", AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || //FIXME remove ffserver_save_avoption("flags", "+mv4", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVOptionVideo") || !av_strcasecmp(cmd, "AVOptionAudio")) { int ret; ffserver_get_arg(arg, sizeof(arg), p); ffserver_get_arg(arg2, sizeof(arg2), p); if (!av_strcasecmp(cmd, "AVOptionVideo")) ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_VIDEO_PARAM, config); else ret = ffserver_save_avoption(arg, arg2, AV_OPT_FLAG_AUDIO_PARAM, config); if (ret < 0) goto nomem; } else if (!av_strcasecmp(cmd, "AVPresetVideo") || !av_strcasecmp(cmd, "AVPresetAudio")) { ffserver_get_arg(arg, sizeof(arg), p); if 
(!av_strcasecmp(cmd, "AVPresetVideo")) ffserver_opt_preset(arg, AV_OPT_FLAG_VIDEO_PARAM, config); else ffserver_opt_preset(arg, AV_OPT_FLAG_AUDIO_PARAM, config); } else if (!av_strcasecmp(cmd, "VideoTag")) { ffserver_get_arg(arg, sizeof(arg), p); if (strlen(arg) == 4 && ffserver_save_avoption_int("codec_tag", MKTAG(arg[0], arg[1], arg[2], arg[3]), AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "BitExact")) { if (ffserver_save_avoption("flags", "+bitexact", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DctFastint")) { if (ffserver_save_avoption("dct", "fastint", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "IdctSimple")) { if (ffserver_save_avoption("idct", "simple", AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "Qscale")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, "Invalid Qscale: '%s'\n", arg); if (ffserver_save_avoption("flags", "+qscale", AV_OPT_FLAG_VIDEO_PARAM, config) < 0 || ffserver_save_avoption_int("global_quality", FF_QP2LAMBDA * val, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQDiff")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qdiff", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMax")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qmax", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "VideoQMin")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("qmin", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "LumiMask")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("lumi_mask", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "DarkMask")) { ffserver_get_arg(arg, sizeof(arg), p); if (ffserver_save_avoption("dark_mask", arg, AV_OPT_FLAG_VIDEO_PARAM, config) < 0) goto nomem; } else if (!av_strcasecmp(cmd, "NoVideo")) { config->no_video = 1; } else if (!av_strcasecmp(cmd, "NoAudio")) { config->no_audio = 1; } else if (!av_strcasecmp(cmd, "ACL")) { ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, config->line_num); } else if (!av_strcasecmp(cmd, "DynamicACL")) { ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p); } else if (!av_strcasecmp(cmd, "RTSPOption")) { ffserver_get_arg(arg, sizeof(arg), p); av_freep(&stream->rtsp_option); stream->rtsp_option = av_strdup(arg); } else if (!av_strcasecmp(cmd, "MulticastAddress")) { ffserver_get_arg(arg, sizeof(arg), p); if (resolve_host(&stream->multicast_ip, arg)) ERROR("Invalid host/IP address: '%s'\n", arg); stream->is_multicast = 1; stream->loop = 1; /* default is looping */ } else if (!av_strcasecmp(cmd, "MulticastPort")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, 1, 65535, config, "Invalid MulticastPort: '%s'\n", arg); stream->multicast_port = val; } else if (!av_strcasecmp(cmd, "MulticastTTL")) { ffserver_get_arg(arg, sizeof(arg), p); ffserver_set_int_param(&val, arg, 0, INT_MIN, INT_MAX, config, "Invalid MulticastTTL: '%s'\n", arg); stream->multicast_ttl = val; } else if (!av_strcasecmp(cmd, "NoLoop")) { stream->loop = 0; } else if (!av_strcasecmp(cmd, "</Stream>")) { config->stream_use_defaults &= 1; if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm")) { if 
(config->dummy_actx->codec_id == AV_CODEC_ID_NONE) config->dummy_actx->codec_id = config->guessed_audio_codec_id; if (!config->no_audio && config->dummy_actx->codec_id != AV_CODEC_ID_NONE) { AVCodecContext *audio_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_actx->codec_id)); add_codec(stream, audio_enc, config); } if (config->dummy_vctx->codec_id == AV_CODEC_ID_NONE) config->dummy_vctx->codec_id = config->guessed_video_codec_id; if (!config->no_video && config->dummy_vctx->codec_id != AV_CODEC_ID_NONE) { AVCodecContext *video_enc = avcodec_alloc_context3(avcodec_find_encoder(config->dummy_vctx->codec_id)); add_codec(stream, video_enc, config); } } av_dict_free(&config->video_opts); av_dict_free(&config->audio_opts); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); *pstream = NULL; } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) { ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p); } else if (!av_strcasecmp(cmd, "UseDefaults")) { if (config->stream_use_defaults > 1) WARNING("Multiple UseDefaults/NoDefaults entries.\n"); config->stream_use_defaults = 3; } else if (!av_strcasecmp(cmd, "NoDefaults")) { if (config->stream_use_defaults > 1) WARNING("Multiple UseDefaults/NoDefaults entries.\n"); config->stream_use_defaults = 2; } else { ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd); } return 0; nomem: av_log(NULL, AV_LOG_ERROR, "Out of memory. Aborting.\n"); av_dict_free(&config->video_opts); av_dict_free(&config->audio_opts); avcodec_free_context(&config->dummy_vctx); avcodec_free_context(&config->dummy_actx); return AVERROR(ENOMEM); }
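Both versions of ffserver_parse_config_stream() above lean heavily on ERROR()/WARNING() macros together with config->filename and config->line_num for diagnostics. The real macros live elsewhere in the ffserver sources; the following is only a hypothetical sketch of that convention, with a stand-in struct and made-up errors/warnings counters (only filename and line_num actually appear in the code above).

/* Hypothetical sketch of the ERROR()/WARNING() convention used by the parser
 * above: log with config-file name and line number, then count the problem.
 * 'StreamConfigDiag' and its errors/warnings fields are stand-ins; in ffserver
 * the analogous state lives on FFServerConfig. */
#include <stdarg.h>
#include <libavutil/log.h>

typedef struct StreamConfigDiag {
    const char *filename;   /* configuration file being parsed */
    int line_num;           /* current line number */
    unsigned errors, warnings;
} StreamConfigDiag;

static void report_problem(StreamConfigDiag *config, int level,
                           unsigned *counter, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    av_log(NULL, level, "%s:%d: ", config->filename, config->line_num);
    av_vlog(NULL, level, fmt, ap);
    va_end(ap);
    (*counter)++;
}

#define ERROR(...)   report_problem(config, AV_LOG_ERROR,   &config->errors,   __VA_ARGS__)
#define WARNING(...) report_problem(config, AV_LOG_WARNING, &config->warnings, __VA_ARGS__)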