static av_cold int init(AVFilterContext *ctx)
{
    SelectContext *select = ctx->priv;
    int i, ret;

    if ((ret = av_expr_parse(&select->expr, select->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
               select->expr_str);
        return ret;
    }
    select->do_scene_detect = !!strstr(select->expr_str, "scene");

    for (i = 0; i < select->nb_outputs; i++) {
        AVFilterPad pad = { 0 };

        pad.name = av_asprintf("output%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        pad.type = ctx->filter->inputs[0].type;
        pad.request_frame = request_frame;
        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}
static int parse_definition(AVFilterContext *ctx, int nb_pads,
                            void *filter_frame, int is_audio)
{
    const int is_input = !!filter_frame;
    const char *padtype = is_input ? "in" : "out";
    int i = 0, ret = 0;

    for (i = 0; i < nb_pads; i++) {
        AVFilterPad pad = { 0 };

        pad.type = is_audio ? AVMEDIA_TYPE_AUDIO : AVMEDIA_TYPE_VIDEO;
        pad.name = av_asprintf("%sput%d", padtype, i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        av_log(ctx, AV_LOG_DEBUG, "Add %s pad %s\n", padtype, pad.name);

        if (is_input) {
            pad.filter_frame = filter_frame;
            ret = ff_insert_inpad(ctx, i, &pad);
        } else {
            pad.config_props  = config_output;
            pad.request_frame = request_frame;
            ret = ff_insert_outpad(ctx, i, &pad);
        }

        if (ret < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}
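/*
 * Illustrative sketch (editor's addition, not taken from the original file):
 * a hypothetical filter init showing the calling convention of
 * parse_definition() above -- a non-NULL filter_frame callback requests
 * input pads, NULL requests output pads, and the last argument selects
 * audio vs. video pad types. "filter_frame" stands for the file's own
 * frame callback.
 */
static av_cold int example_init(AVFilterContext *ctx)
{
    int ret;

    /* two audio input pads, named "input0" and "input1" */
    if ((ret = parse_definition(ctx, 2, filter_frame, 1)) < 0)
        return ret;

    /* one audio output pad, named "output0" */
    return parse_definition(ctx, 1, NULL, 1);
}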
static av_cold int asr_init(AVFilterContext *ctx)
{
    ASRContext *s = ctx->priv;
    const float frate = s->rate;
    char *rate = av_asprintf("%f", frate);
    const char *argv[] = { "-logfn",    s->logfn,
                           "-hmm",      s->hmm,
                           "-lm",       s->lm,
                           "-lmctl",    s->lmctl,
                           "-lmname",   s->lmname,
                           "-dict",     s->dict,
                           "-samprate", rate,
                           NULL };

    if (!rate)
        return AVERROR(ENOMEM);

    s->config = cmd_ln_parse_r(NULL, ps_args(), 14, (char **)argv, 0);
    av_free(rate);
    if (!s->config)
        return AVERROR(ENOMEM);

    ps_default_search_args(s->config);
    s->ps = ps_init(s->config);
    if (!s->ps)
        return AVERROR(ENOMEM);

    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    StackContext *s = ctx->priv;
    int i, ret;

    if (!strcmp(ctx->filter->name, "vstack"))
        s->is_vertical = 1;

    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_VIDEO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        pad.filter_frame = filter_frame;

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}
static int modplug_load_metadata(AVFormatContext *s)
{
    ModPlugContext *modplug = s->priv_data;
    ModPlugFile *f = modplug->f;
    char *extra;
    const char *name = ModPlug_GetName(f);
    const char *msg  = ModPlug_GetMessage(f);

    unsigned n_instruments = ModPlug_NumInstruments(f);
    unsigned n_samples     = ModPlug_NumSamples(f);
    unsigned n_patterns    = ModPlug_NumPatterns(f);
    unsigned n_channels    = ModPlug_NumChannels(f);

    if (name && *name) av_dict_set(&s->metadata, "name",    name, 0);
    if (msg  && *msg)  av_dict_set(&s->metadata, "message", msg,  0);

    extra = av_asprintf("%u pattern%s, %u channel%s",
                        n_patterns, n_patterns > 1 ? "s" : "",
                        n_channels, n_channels > 1 ? "s" : "");
    if (!extra)
        return AVERROR(ENOMEM);
    av_dict_set(&s->metadata, "extra info", extra, AV_DICT_DONT_STRDUP_VAL);

    ADD_META_MULTIPLE_ENTRIES(instrument, ModPlug_InstrumentName);
    ADD_META_MULTIPLE_ENTRIES(sample,     ModPlug_SampleName);

    return 0;
}
STDMETHODIMP CBDDemuxer::OpenMVCExtensionDemuxer(int playItem)
{
    int ret;

    MPLS_PL *pl = bd_get_title_mpls(m_pBD);
    if (!pl)
        return E_FAIL;

    const char *clip_id = pl->ext_sub_path[m_MVCExtensionSubPathIndex].sub_play_item[playItem].clip->clip_id;
    char *fileName = av_asprintf("%sBDMV\\STREAM\\%s.m2ts", m_cBDRootPath, clip_id);

    DbgLog((LOG_TRACE, 10, "CBDDemuxer::OpenMVCExtensionDemuxer(): Opening MVC extension stream at %s", fileName));

    // Try to open the MVC stream
    AVInputFormat *format = av_find_input_format("mpegts");
    ret = avformat_open_input(&m_MVCFormatContext, fileName, format, nullptr);
    av_freep(&fileName); // the path is no longer needed; avoid leaking it
    if (ret < 0) {
        DbgLog((LOG_TRACE, 10, "-> Opening MVC demuxing context failed (%d)", ret));
        goto fail;
    }

    av_opt_set_int(m_MVCFormatContext, "correct_ts_overflow", 0, 0);
    m_MVCFormatContext->flags |= AVFMT_FLAG_KEEP_SIDE_DATA;

    // Find the streams
    ret = avformat_find_stream_info(m_MVCFormatContext, nullptr);
    if (ret < 0) {
        DbgLog((LOG_TRACE, 10, "-> avformat_find_stream_info failed (%d)", ret));
        goto fail;
    }

    // Find and select our MVC stream
    DbgLog((LOG_TRACE, 10, "-> MVC m2ts has %d streams", m_MVCFormatContext->nb_streams));
    for (unsigned i = 0; i < m_MVCFormatContext->nb_streams; i++) {
        if (m_MVCFormatContext->streams[i]->codecpar->codec_id == AV_CODEC_ID_H264_MVC
            && m_MVCFormatContext->streams[i]->codecpar->extradata_size > 0) {
            m_MVCStreamIndex = i;
            break;
        } else {
            m_MVCFormatContext->streams[i]->discard = AVDISCARD_ALL;
        }
    }

    if (m_MVCStreamIndex < 0) {
        DbgLog((LOG_TRACE, 10, "-> MVC Stream not found"));
        goto fail;
    }

    m_MVCExtensionClip = playItem;

    return S_OK;
fail:
    CloseMVCExtensionDemuxer();
    return E_FAIL;
}
static int load_path(AVFilterContext *ctx, void **handle_ptr,
                     const char *prefix, const char *name)
{
    char *path = av_asprintf("%s%s%s", prefix, name, SLIBSUF);
    if (!path)
        return AVERROR(ENOMEM);
    av_log(ctx, AV_LOG_DEBUG, "Looking for frei0r effect in '%s'.\n", path);
    *handle_ptr = dlopen(path, RTLD_NOW|RTLD_LOCAL);
    av_free(path);
    return 0;
}
static void *try_load(const char *dir, const char *soname)
{
    char *path = av_asprintf("%s/%s.so", dir, soname);
    void *ret = NULL;

    if (path) {
        ret = dlopen(path, RTLD_LOCAL|RTLD_NOW);
        av_free(path);
    }
    return ret;
}
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    ASSContext *ass = s->priv_data;

    if (ass->write_ts) {
        long int layer;
        char *p = pkt->data;
        int64_t start = pkt->pts;
        int64_t end   = start + pkt->duration;
        int hh1, mm1, ss1, ms1;
        int hh2, mm2, ss2, ms2;
        DialogueLine *dialogue = av_mallocz(sizeof(*dialogue));

        if (!dialogue)
            return AVERROR(ENOMEM);

        dialogue->readorder = strtol(p, &p, 10);
        if (dialogue->readorder < ass->expected_readorder)
            av_log(s, AV_LOG_WARNING, "Unexpected ReadOrder %d\n",
                   dialogue->readorder);
        if (*p == ',')
            p++;

        if (ass->ssa_mode && !strncmp(p, "Marked=", 7))
            p += 7;

        layer = strtol(p, &p, 10);
        if (*p == ',')
            p++;

        hh1 = (int)(start / 360000);    mm1 = (int)(start / 6000) % 60;
        hh2 = (int)(end   / 360000);    mm2 = (int)(end   / 6000) % 60;
        ss1 = (int)(start / 100) % 60;  ms1 = (int)(start % 100);
        ss2 = (int)(end   / 100) % 60;  ms2 = (int)(end   % 100);
        if (hh1 > 9) hh1 = 9, mm1 = 59, ss1 = 59, ms1 = 99;
        if (hh2 > 9) hh2 = 9, mm2 = 59, ss2 = 59, ms2 = 99;

        dialogue->line = av_asprintf("%s%ld,%d:%02d:%02d.%02d,%d:%02d:%02d.%02d,%s",
                                     ass->ssa_mode ? "Marked=" : "",
                                     layer, hh1, mm1, ss1, ms1,
                                     hh2, mm2, ss2, ms2, p);
        if (!dialogue->line) {
            av_free(dialogue);
            return AVERROR(ENOMEM);
        }
        insert_dialogue(ass, dialogue);
        purge_dialogues(s, 0);
    } else {
        avio_write(s->pb, pkt->data, pkt->size);
    }

    return 0;
}
int ff_ass_subtitle_header(AVCodecContext *avctx,
                           const char *font, int font_size,
                           int color, int back_color,
                           int bold, int italic, int underline,
                           int alignment)
{
    avctx->subtitle_header = av_asprintf(
             "[Script Info]\r\n"
             "; Script generated by FFmpeg/Lavc%s\r\n"
             "ScriptType: v4.00+\r\n"
             "PlayResX: %d\r\n"
             "PlayResY: %d\r\n"
             "\r\n"
             "[V4+ Styles]\r\n"

             /* ASSv4 header */
             "Format: Name, "
             "Fontname, Fontsize, "
             "PrimaryColour, SecondaryColour, OutlineColour, BackColour, "
             "Bold, Italic, Underline, StrikeOut, "
             "ScaleX, ScaleY, "
             "Spacing, Angle, "
             "BorderStyle, Outline, Shadow, "
             "Alignment, MarginL, MarginR, MarginV, "
             "Encoding\r\n"

             "Style: "
             "Default,"             /* Name */
             "%s,%d,"               /* Font{name,size} */
             "&H%x,&H%x,&H%x,&H%x," /* {Primary,Secondary,Outline,Back}Colour */
             "%d,%d,%d,0,"          /* Bold, Italic, Underline, StrikeOut */
             "100,100,"             /* Scale{X,Y} */
             "0,0,"                 /* Spacing, Angle */
             "1,1,0,"               /* BorderStyle, Outline, Shadow */
             "%d,10,10,10,"         /* Alignment, Margin[LRV] */
             "0\r\n"                /* Encoding */

             "\r\n"
             "[Events]\r\n"
             "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\r\n",
             !(avctx->flags & AV_CODEC_FLAG_BITEXACT) ?
                 AV_STRINGIFY(LIBAVCODEC_VERSION) : "",
             ASS_DEFAULT_PLAYRESX, ASS_DEFAULT_PLAYRESY,
             font, font_size, color, color, back_color, back_color,
             -bold, -italic, -underline, alignment);

    if (!avctx->subtitle_header)
        return AVERROR(ENOMEM);
    avctx->subtitle_header_size = strlen(avctx->subtitle_header);
    return 0;
}
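/*
 * Illustrative sketch (editor's addition, not taken from the original file):
 * a hypothetical decoder init calling the header generator above. The font
 * name, size, colors and alignment here are placeholder values; note that
 * bold, italic and underline are passed as 0 or 1 and negated by the helper
 * into the 0/-1 convention used in the generated ASS style line.
 */
static av_cold int example_subtitle_init(AVCodecContext *avctx)
{
    return ff_ass_subtitle_header(avctx, "Arial", 16,
                                  0xffffff /* color */, 0 /* back_color */,
                                  0 /* bold */, 0 /* italic */,
                                  0 /* underline */, 2 /* alignment */);
}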
static av_cold int program_opencl_init(AVFilterContext *avctx)
{
    ProgramOpenCLContext *ctx = avctx->priv;
    int err;

    ff_opencl_filter_init(avctx);

    ctx->ocf.output_width  = ctx->width;
    ctx->ocf.output_height = ctx->height;

    if (!strcmp(avctx->filter->name, "openclsrc")) {
        if (!ctx->ocf.output_width || !ctx->ocf.output_height) {
            av_log(avctx, AV_LOG_ERROR, "OpenCL source requires output "
                   "dimensions to be specified.\n");
            return AVERROR(EINVAL);
        }

        ctx->nb_inputs = 0;
        ctx->ocf.output_format = ctx->source_format;
    } else {
        int i;

        ctx->frames = av_mallocz_array(ctx->nb_inputs, sizeof(*ctx->frames));
        if (!ctx->frames)
            return AVERROR(ENOMEM);

        for (i = 0; i < ctx->nb_inputs; i++) {
            AVFilterPad input;
            memset(&input, 0, sizeof(input));

            input.type = AVMEDIA_TYPE_VIDEO;
            input.name = av_asprintf("input%d", i);
            if (!input.name)
                return AVERROR(ENOMEM);

            input.config_props = &ff_opencl_filter_config_input;

            err = ff_insert_inpad(avctx, i, &input);
            if (err < 0) {
                av_freep(&input.name);
                return err;
            }
        }
    }

    return 0;
}
/**
 * Create an instance of a filter, initialize and insert it in the
 * filtergraph in *ctx.
 *
 * @param filt_ctx  put here a filter context in case of successful creation and configuration, NULL otherwise.
 * @param ctx       the filtergraph context
 * @param index     an index which is supposed to be unique for each filter instance added to the filtergraph
 * @param filt_name the name of the filter to create
 * @param args      the arguments provided to the filter during its initialization
 * @param log_ctx   the log context to use
 * @return >= 0 in case of success, a negative AVERROR code otherwise
 */
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
                         const char *filt_name, const char *args, void *log_ctx)
{
    AVFilter *filt;
    char inst_name[30];
    char *tmp_args = NULL;
    int ret;

    snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);

    filt = avfilter_get_by_name(filt_name);

    if (!filt) {
        av_log(log_ctx, AV_LOG_ERROR,
               "No such filter: '%s'\n", filt_name);
        return AVERROR(EINVAL);
    }

    *filt_ctx = avfilter_graph_alloc_filter(ctx, filt, inst_name);
    if (!*filt_ctx) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error creating filter '%s'\n", filt_name);
        return AVERROR(ENOMEM);
    }

    if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
        ctx->scale_sws_opts) {
        tmp_args = av_asprintf("%s:%s", args, ctx->scale_sws_opts);
        if (!tmp_args)
            return AVERROR(ENOMEM);
        args = tmp_args;
    }

    ret = avfilter_init_str(*filt_ctx, args);
    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error initializing filter '%s'", filt_name);
        if (args)
            av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
        av_log(log_ctx, AV_LOG_ERROR, "\n");
        avfilter_free(*filt_ctx);
        *filt_ctx = NULL;
    }

    av_free(tmp_args);
    return ret;
}
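/*
 * Illustrative sketch (editor's addition, not taken from the original file):
 * a hypothetical caller relying on the contract documented above --
 * *filt_ctx is set on success and left NULL on failure, so there is nothing
 * to free when create_filter() returns an error. The filter name and
 * arguments are placeholders.
 */
static int add_scale_example(AVFilterGraph *graph, int index, void *log_ctx)
{
    AVFilterContext *filt = NULL;
    int ret = create_filter(&filt, graph, index, "scale", "640:480", log_ctx);

    if (ret < 0)
        return ret;
    /* ... link the new filter into the graph ... */
    return 0;
}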
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    float last_weight = 1.f;
    int i, ret;

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    s->weights = av_mallocz_array(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    p = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        sscanf(arg, "%f", &last_weight);
        s->weights[i]  = last_weight;
        s->weight_sum += last_weight;
    }

    /* inputs without an explicit weight reuse the last parsed one */
    for (; i < s->nb_inputs; i++) {
        s->weights[i]  = last_weight;
        s->weight_sum += last_weight;
    }

    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i, ret;

    s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames));
    if (!s->frames)
        return AVERROR(ENOMEM);

    s->weights = av_calloc(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_VIDEO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    p = s->weights_str;
    for (i = 0; i < s->nb_inputs; i++) {
        if (!(arg = av_strtok(p, " ", &saveptr)))
            break;

        p = NULL;
        sscanf(arg, "%f", &s->weights[i]);
        s->wfactor += s->weights[i];
    }

    s->wfactor = 1 / s->wfactor;

    return 0;
}
int ff_ass_subtitle_header(AVCodecContext *avctx,
                           const char *font, int font_size,
                           int color, int back_color,
                           int bold, int italic, int underline,
                           int alignment)
{
    avctx->subtitle_header = av_asprintf(
             "[Script Info]\r\n"
             "ScriptType: v4.00+\r\n"
             "\r\n"
             "[V4+ Styles]\r\n"
             "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, AlphaLevel, Encoding\r\n"
             "Style: Default,%s,%d,&H%x,&H%x,&H%x,&H%x,%d,%d,%d,1,1,0,%d,10,10,10,0,0\r\n"
             "\r\n"
             "[Events]\r\n"
             "Format: Layer, Start, End, Style, Text\r\n",
             font, font_size, color, color, back_color, back_color,
             -bold, -italic, -underline, alignment);

    if (!avctx->subtitle_header)
        return AVERROR(ENOMEM);
    avctx->subtitle_header_size = strlen(avctx->subtitle_header);
    return 0;
}
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for (n = 0; n < s->nb_streams; n++) {
        s->streams[n]->priv_data = av_mallocz(sizeof(AVIStream));
        if (!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for (n = 0; n < s->nb_streams; n++) {
        stream   = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if (video_enc) {
        avio_wl32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num /
                                 video_enc->time_base.den));
    } else {
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if (video_enc) {
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for (i = 0; i < n; i++) {
        AVIStream *avist = s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch (stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR,
                       "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO:
            ffio_wfourcc(pb, "vids");
            break;
        case AVMEDIA_TYPE_AUDIO:
            ffio_wfourcc(pb, "auds");
            break;
        // case AVMEDIA_TYPE_TEXT: ffio_wfourcc(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA:
            ffio_wfourcc(pb, "dats");
            break;
        }
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO ||
            stream->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, stream->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        if (   stream->codec_type == AVMEDIA_TYPE_VIDEO
            && stream->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
        if (stream->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        avist->frames_hdr_strm = avio_tell(pb); /* remember this offset to fill later */
        if (!pb->seekable)
            avio_wl32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if (stream->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, stream->width);
        avio_wl16(pb, stream->height);
        ff_end_tag(pb, strh);

        if (stream->codec_type != AVMEDIA_TYPE_DATA) {
            int ret;

            strf = ff_start_tag(pb, "strf");
            switch (stream->codec_type) {
            case AVMEDIA_TYPE_SUBTITLE:
                // XSUB subtitles behave like video tracks, other subtitles
                // are not (yet) supported.
                if (stream->codec_id != AV_CODEC_ID_XSUB)
                    break;
            case AVMEDIA_TYPE_VIDEO:
                ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0, 0);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if ((ret = ff_put_wav_header(pb, stream)) < 0) {
                    return ret;
                }
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Invalid or not supported codec type '%s' found in the input\n",
                       (char *)av_x_if_null(av_get_media_type_string(stream->codec_type), "?"));
                return AVERROR(EINVAL);
            }
            ff_end_tag(pb, strf);
            if ((t = av_dict_get(s->streams[i]->metadata, "title", NULL, 0))) {
                ff_riff_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
            if (stream->codec_id == AV_CODEC_ID_XSUB
                && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
                const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
                t = NULL;
                if (langstr) {
                    char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                    if (!str)
                        return AVERROR(ENOMEM);
                    ff_riff_write_info_tag(s->pb, "strn", str);
                    av_free(str);
                }
            }
        }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons. */
            avist->indexes.entry      = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4);   /* wLongsPerEntry */
            avio_w8(pb, 0);     /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0);     /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0);   /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, stream->codec_type));
                                /* dwChunkId */
            avio_wl64(pb, 0);   /* dwReserved[3]  avio_wl32(pb, 0);  Must be 0. */
            for (j = 0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if (   stream->codec_type == AVMEDIA_TYPE_VIDEO
            && s->streams[i]->sample_aspect_ratio.num > 0
            && s->streams[i]->sample_aspect_ratio.den > 0) {
            int vprp = ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){ stream->width, stream->height });
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); // video format   = unknown
            avio_wl32(pb, 0); // video standard = unknown
            avio_wl32(pb, lrintf(1.0 / av_q2d(stream->time_base)));
            avio_wl32(pb, stream->width);
            avio_wl32(pb, stream->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, stream->width);
            avio_wl32(pb, stream->height);
            avio_wl32(pb, 1); // progressive FIXME

            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width);
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width);
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i += 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        avio_wl32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
static int mpjpeg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int size;
    int ret;

    MPJPEGDemuxContext *mpjpeg = s->priv_data;
    if (mpjpeg->boundary == NULL) {
        uint8_t* boundary = NULL;
        if (mpjpeg->strict_mime_boundary) {
            boundary = mpjpeg_get_boundary(s->pb);
        }
        if (boundary != NULL) {
            mpjpeg->boundary = boundary;
            mpjpeg->searchstr = av_asprintf("\r\n%s\r\n", boundary);
        } else {
            mpjpeg->boundary = av_strdup("--");
            mpjpeg->searchstr = av_strdup("\r\n--");
        }
        if (!mpjpeg->boundary || !mpjpeg->searchstr) {
            av_freep(&mpjpeg->boundary);
            av_freep(&mpjpeg->searchstr);
            return AVERROR(ENOMEM);
        }
        mpjpeg->searchstr_len = strlen(mpjpeg->searchstr);
    }

    ret = parse_multipart_header(s->pb, &size, mpjpeg->boundary, s);

    if (ret < 0)
        return ret;

    if (size > 0) {
        /* size has been provided to us in MIME header */
        ret = av_get_packet(s->pb, pkt, size);
    } else {
        /* no size was given -- we read until the next boundary or end-of-file */
        int remaining = 0, len;

        const int read_chunk = 2048;
        av_init_packet(pkt);
        pkt->data = NULL;
        pkt->size = 0;
        pkt->pos  = avio_tell(s->pb);

        /* we may need to return as much as all we've read back to the buffer */
        ffio_ensure_seekback(s->pb, read_chunk);
        while ((ret = av_append_packet(s->pb, pkt, read_chunk - remaining)) >= 0) {
            /* scan the new data */
            char *start;

            len = ret + remaining;
            start = pkt->data + pkt->size - len;
            do {
                if (!memcmp(start, mpjpeg->searchstr, mpjpeg->searchstr_len)) {
                    // got the boundary! rewind the stream
                    avio_seek(s->pb, -(len - 2), SEEK_CUR);
                    pkt->size -= (len - 2);
                    return pkt->size;
                }
                len--;
                start++;
            } while (len >= mpjpeg->searchstr_len);
            remaining = len;
        }

        /* error or EOF occurred */
        if (ret == AVERROR_EOF) {
            ret = pkt->size > 0 ? pkt->size : AVERROR_EOF;
        } else {
            av_packet_unref(pkt);
        }
    }

    return ret;
}
static av_cold int frei0r_init(AVFilterContext *ctx,
                               const char *dl_name, int type)
{
    Frei0rContext *s = ctx->priv;
    f0r_init_f            f0r_init;
    f0r_get_plugin_info_f f0r_get_plugin_info;
    f0r_plugin_info_t *pi;
    char *path;
    int ret = 0;
    int i;
    static const char* const frei0r_pathlist[] = {
        "/usr/local/lib/frei0r-1/",
        "/usr/lib/frei0r-1/",
        "/usr/local/lib64/frei0r-1/",
        "/usr/lib64/frei0r-1/"
    };

    if (!dl_name) {
        av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
        return AVERROR(EINVAL);
    }

    /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
    if ((path = av_strdup(getenv("FREI0R_PATH")))) {
#ifdef _WIN32
        const char *separator = ";";
#else
        const char *separator = ":";
#endif
        char *p, *ptr = NULL;
        for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) {
            /* add additional trailing slash in case it is missing */
            char *p1 = av_asprintf("%s/", p);
            if (!p1) {
                ret = AVERROR(ENOMEM);
                goto check_path_end;
            }
            ret = load_path(ctx, &s->dl_handle, p1, dl_name);
            av_free(p1);
            if (ret < 0)
                goto check_path_end;
            if (s->dl_handle)
                break;
        }

    check_path_end:
        av_free(path);
        if (ret < 0)
            return ret;
    }

    if (!s->dl_handle && (path = getenv("HOME"))) {
        char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
        if (!prefix)
            return AVERROR(ENOMEM);
        ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
        av_free(prefix);
        if (ret < 0)
            return ret;
    }

    for (i = 0; !s->dl_handle && i < FF_ARRAY_ELEMS(frei0r_pathlist); i++) {
        ret = load_path(ctx, &s->dl_handle, frei0r_pathlist[i], dl_name);
        if (ret < 0)
            return ret;
    }

    if (!s->dl_handle) {
        av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'.\n", dl_name);
        return AVERROR(EINVAL);
    }

    if (!(f0r_init            = load_sym(ctx, "f0r_init"           )) ||
        !(f0r_get_plugin_info = load_sym(ctx, "f0r_get_plugin_info")) ||
        !(s->get_param_info   = load_sym(ctx, "f0r_get_param_info" )) ||
        !(s->get_param_value  = load_sym(ctx, "f0r_get_param_value")) ||
        !(s->set_param_value  = load_sym(ctx, "f0r_set_param_value")) ||
        !(s->update           = load_sym(ctx, "f0r_update"         )) ||
        !(s->construct        = load_sym(ctx, "f0r_construct"      )) ||
        !(s->destruct         = load_sym(ctx, "f0r_destruct"       )) ||
        !(s->deinit           = load_sym(ctx, "f0r_deinit"         )))
        return AVERROR(EINVAL);

    if (f0r_init() < 0) {
        av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module.\n");
        return AVERROR(EINVAL);
    }

    f0r_get_plugin_info(&s->plugin_info);
    pi = &s->plugin_info;
    if (pi->plugin_type != type) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid type '%s' for this plugin\n",
               pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? "filter" :
               pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? "source" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? "mixer2" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER3 ? "mixer3" : "unknown");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE,
           "name:%s author:'%s' explanation:'%s' color_model:%s "
           "frei0r_version:%d version:%d.%d num_params:%d\n",
           pi->name, pi->author, pi->explanation,
           pi->color_model == F0R_COLOR_MODEL_BGRA8888 ? "bgra8888" :
           pi->color_model == F0R_COLOR_MODEL_RGBA8888 ? "rgba8888" :
           pi->color_model == F0R_COLOR_MODEL_PACKED32 ? "packed32" : "unknown",
           pi->frei0r_version, pi->major_version, pi->minor_version, pi->num_params);

    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    LADSPAContext *s = ctx->priv;
    LADSPA_Descriptor_Function descriptor_fn;
    const LADSPA_Descriptor *desc;
    LADSPA_PortDescriptor pd;
    AVFilterPad pad = { NULL };
    char *p, *arg, *saveptr = NULL;
    unsigned long nb_ports;
    int i;

    if (!s->dl_name) {
        av_log(ctx, AV_LOG_ERROR, "No plugin name provided\n");
        return AVERROR(EINVAL);
    }

    if (s->dl_name[0] == '/' || s->dl_name[0] == '.') {
        // argument is a path
        s->dl_handle = dlopen(s->dl_name, RTLD_LOCAL|RTLD_NOW);
    } else {
        // argument is a shared object name
        char *paths = av_strdup(getenv("LADSPA_PATH"));
        const char *separator = ":";

        if (paths) {
            p = paths;
            while ((arg = av_strtok(p, separator, &saveptr)) && !s->dl_handle) {
                s->dl_handle = try_load(arg, s->dl_name);
                p = NULL;
            }
        }

        av_free(paths);
        if (!s->dl_handle && (paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
            s->dl_handle = try_load(paths, s->dl_name);
            av_free(paths);
        }

        if (!s->dl_handle)
            s->dl_handle = try_load("/usr/local/lib/ladspa", s->dl_name);

        if (!s->dl_handle)
            s->dl_handle = try_load("/usr/lib/ladspa", s->dl_name);
    }
    if (!s->dl_handle) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load '%s'\n", s->dl_name);
        return AVERROR(EINVAL);
    }

    descriptor_fn = dlsym(s->dl_handle, "ladspa_descriptor");
    if (!descriptor_fn) {
        av_log(ctx, AV_LOG_ERROR, "Could not find ladspa_descriptor: %s\n", dlerror());
        return AVERROR(EINVAL);
    }

    // Find the requested plugin, or list plugins
    if (!s->plugin) {
        av_log(ctx, AV_LOG_INFO, "The '%s' library contains the following plugins:\n", s->dl_name);
        av_log(ctx, AV_LOG_INFO, "I = Input Channels\n");
        av_log(ctx, AV_LOG_INFO, "O = Output Channels\n");
        av_log(ctx, AV_LOG_INFO, "I:O %-25s %s\n", "Plugin", "Description");
        av_log(ctx, AV_LOG_INFO, "\n");
        for (i = 0; desc = descriptor_fn(i); i++) {
            unsigned long inputs = 0, outputs = 0;

            count_ports(desc, &inputs, &outputs);
            av_log(ctx, AV_LOG_INFO, "%lu:%lu %-25s %s\n",
                   inputs, outputs, desc->Label,
                   av_x_if_null(desc->Name, "?"));
            av_log(ctx, AV_LOG_VERBOSE, "Maker: %s\n",
                   av_x_if_null(desc->Maker, "?"));
            av_log(ctx, AV_LOG_VERBOSE, "Copyright: %s\n",
                   av_x_if_null(desc->Copyright, "?"));
        }
        return AVERROR_EXIT;
    } else {
        for (i = 0;; i++) {
            desc = descriptor_fn(i);
            if (!desc) {
                av_log(ctx, AV_LOG_ERROR, "Could not find plugin: %s\n", s->plugin);
                return AVERROR(EINVAL);
            }

            if (desc->Label && !strcmp(desc->Label, s->plugin))
                break;
        }
    }

    s->desc  = desc;
    nb_ports = desc->PortCount;

    s->ipmap = av_calloc(nb_ports, sizeof(*s->ipmap));
    s->opmap = av_calloc(nb_ports, sizeof(*s->opmap));
    s->icmap = av_calloc(nb_ports, sizeof(*s->icmap));
    s->ocmap = av_calloc(nb_ports, sizeof(*s->ocmap));
    s->ictlv = av_calloc(nb_ports, sizeof(*s->ictlv));
    s->octlv = av_calloc(nb_ports, sizeof(*s->octlv));
    s->ctl_needs_value = av_calloc(nb_ports, sizeof(*s->ctl_needs_value));
    if (!s->ipmap || !s->opmap || !s->icmap ||
        !s->ocmap || !s->ictlv || !s->octlv || !s->ctl_needs_value)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_ports; i++) {
        pd = desc->PortDescriptors[i];

        if (LADSPA_IS_PORT_AUDIO(pd)) {
            if (LADSPA_IS_PORT_INPUT(pd)) {
                s->ipmap[s->nb_inputs] = i;
                s->nb_inputs++;
            } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
                s->opmap[s->nb_outputs] = i;
                s->nb_outputs++;
            }
        } else if (LADSPA_IS_PORT_CONTROL(pd)) {
            if (LADSPA_IS_PORT_INPUT(pd)) {
                s->icmap[s->nb_inputcontrols] = i;

                if (LADSPA_IS_HINT_HAS_DEFAULT(desc->PortRangeHints[i].HintDescriptor))
                    set_default_ctl_value(s, s->nb_inputcontrols, s->icmap, s->ictlv);
                else
                    s->ctl_needs_value[s->nb_inputcontrols] = 1;

                s->nb_inputcontrols++;
            } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
                s->ocmap[s->nb_outputcontrols] = i;
                s->nb_outputcontrols++;
            }
        }
    }

    // List Control Ports if "help" is specified
    if (s->options && !strcmp(s->options, "help")) {
        if (!s->nb_inputcontrols) {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin does not have any input controls.\n",
                   desc->Label);
        } else {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin has the following input controls:\n",
                   desc->Label);
            for (i = 0; i < s->nb_inputcontrols; i++)
                print_ctl_info(ctx, AV_LOG_INFO, s, i, s->icmap, s->ictlv, 0);
        }
        return AVERROR_EXIT;
    }

    // Parse control parameters
    p = s->options;
    while (s->options) {
        LADSPA_Data val;
        int ret;

        if (!(arg = av_strtok(p, "|", &saveptr)))
            break;
        p = NULL;

        if (sscanf(arg, "c%d=%f", &i, &val) != 2) {
            av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
            return AVERROR(EINVAL);
        }

        if ((ret = set_control(ctx, i, val)) < 0)
            return ret;
        s->ctl_needs_value[i] = 0;
    }

    // Check if any controls are not set
    for (i = 0; i < s->nb_inputcontrols; i++) {
        if (s->ctl_needs_value[i]) {
            av_log(ctx, AV_LOG_ERROR, "Control c%d must be set.\n", i);
            print_ctl_info(ctx, AV_LOG_ERROR, s, i, s->icmap, s->ictlv, 0);
            return AVERROR(EINVAL);
        }
    }

    pad.type = AVMEDIA_TYPE_AUDIO;

    if (s->nb_inputs) {
        pad.name = av_asprintf("in0:%s%lu", desc->Label, s->nb_inputs);
        if (!pad.name)
            return AVERROR(ENOMEM);

        pad.filter_frame = filter_frame;
        pad.config_props = config_input;
        if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
            av_freep(&pad.name);
            return AVERROR(ENOMEM);
        }
    }

    av_log(ctx, AV_LOG_DEBUG, "ports: %lu\n", nb_ports);
    av_log(ctx, AV_LOG_DEBUG, "inputs: %lu outputs: %lu\n",
           s->nb_inputs, s->nb_outputs);
    av_log(ctx, AV_LOG_DEBUG, "input controls: %lu output controls: %lu\n",
           s->nb_inputcontrols, s->nb_outputcontrols);

    return 0;
}
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *video_enc;
    AVStream *video_st = NULL;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;
    int padding;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for (n = 0; n < s->nb_streams; n++) {
        s->streams[n]->priv_data = av_mallocz(sizeof(AVIStream));
        if (!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for (n = 0; n < s->nb_streams; n++) {
        AVCodecContext *codec = s->streams[n]->codec;
        bitrate += codec->bit_rate;
        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_enc = codec;
            video_st  = s->streams[n];
        }
    }

    nb_frames = 0;

    // TODO: should be avg_frame_rate
    if (video_st)
        avio_wl32(pb, (uint32_t) (INT64_C(1000000) * video_st->time_base.num /
                                  video_st->time_base.den));
    else
        avio_wl32(pb, 0);
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if (video_enc) {
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for (i = 0; i < n; i++) {
        AVStream *st = s->streams[i];
        AVCodecContext *enc = st->codec;
        AVIStream *avist = st->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (enc->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR,
                       "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO:
            ffio_wfourcc(pb, "vids");
            break;
        case AVMEDIA_TYPE_AUDIO:
            ffio_wfourcc(pb, "auds");
            break;
        // case AVMEDIA_TYPE_TEXT:
        //     ffio_wfourcc(pb, "txts");
        //     break;
        case AVMEDIA_TYPE_DATA:
            ffio_wfourcc(pb, "dats");
            break;
        }
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO ||
            enc->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, enc->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(st, &au_byterate, &au_ssize, &au_scale);

        if (   enc->codec_type == AVMEDIA_TYPE_VIDEO
            && enc->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(st, 64, au_scale, au_byterate);
        if (enc->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        /* remember this offset to fill later */
        avist->frames_hdr_strm = avio_tell(pb);
        if (!pb->seekable)
            /* FIXME: this may be broken, but who cares */
            avio_wl32(pb, AVI_MAX_RIFF_SIZE);
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size, is set to largest chunk size in avi_write_trailer */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, enc->width);
        avio_wl16(pb, enc->height);
        ff_end_tag(pb, strh);

        if (enc->codec_type != AVMEDIA_TYPE_DATA) {
            int ret;
            enum AVPixelFormat pix_fmt;

            strf = ff_start_tag(pb, "strf");
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_SUBTITLE:
                /* XSUB subtitles behave like video tracks, other subtitles
                 * are not (yet) supported. */
                if (enc->codec_id != AV_CODEC_ID_XSUB)
                    break;
            case AVMEDIA_TYPE_VIDEO:
                /* WMP expects RGB 5:5:5 rawvideo in avi to have bpp set to 16. */
                if (  !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt == AV_PIX_FMT_RGB555LE
                    && enc->bits_per_coded_sample == 15)
                    enc->bits_per_coded_sample = 16;
                ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 0, 0);
                pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                              enc->bits_per_coded_sample);
                if (   !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt != pix_fmt
                    && enc->pix_fmt != AV_PIX_FMT_NONE)
                    av_log(s, AV_LOG_ERROR,
                           "%s rawvideo cannot be written to avi, output file will be unreadable\n",
                           av_get_pix_fmt_name(enc->pix_fmt));
                break;
            case AVMEDIA_TYPE_AUDIO:
                if ((ret = ff_put_wav_header(pb, enc, 0)) < 0)
                    return ret;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                       "Invalid or not supported codec type '%s' found in the input\n",
                       (char *)av_x_if_null(av_get_media_type_string(enc->codec_type), "?"));
                return AVERROR(EINVAL);
            }
            ff_end_tag(pb, strf);
            if ((t = av_dict_get(st->metadata, "title", NULL, 0))) {
                ff_riff_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
            if (enc->codec_id == AV_CODEC_ID_XSUB
                && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
                const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
                t = NULL;
                if (langstr) {
                    char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                    if (!str)
                        return AVERROR(ENOMEM);
                    ff_riff_write_info_tag(s->pb, "strn", str);
                    av_free(str);
                }
            }
        }

        if (pb->seekable) {
            write_odml_master(s, i);
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO &&
            st->sample_aspect_ratio.num > 0 &&
            st->sample_aspect_ratio.den > 0) {
            int vprp = ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(st->sample_aspect_ratio,
                                      (AVRational) { enc->width,
                                                     enc->height });
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); // video format   = unknown
            avio_wl32(pb, 0); // video standard = unknown
            // TODO: should be avg_frame_rate
            avio_wl32(pb, (2LL*st->time_base.den + st->time_base.num - 1) /
                          (2LL * st->time_base.num));
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, 1); // progressive FIXME

            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i += 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);

    padding = s->metadata_header_padding;
    if (padding < 0)
        padding = 1016;

    /* some padding for easier tag editing */
    if (padding) {
        list2 = ff_start_tag(pb, "JUNK");
        for (i = padding; i > 0; i -= 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, list2);
    }

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd,
                                      const char **p, FFServerStream **pfeed)
{
    FFServerStream *feed;
    char arg[1024];

    av_assert0(pfeed);
    feed = *pfeed;
    if (!av_strcasecmp(cmd, "<Feed")) {
        char *q;
        FFServerStream *s;

        feed = av_mallocz(sizeof(FFServerStream));
        if (!feed)
            return AVERROR(ENOMEM);
        ffserver_get_arg(feed->filename, sizeof(feed->filename), p);
        q = strrchr(feed->filename, '>');
        if (q)
            *q = '\0';

        for (s = config->first_feed; s; s = s->next) {
            if (!strcmp(feed->filename, s->filename))
                ERROR("Feed '%s' already registered\n", s->filename);
        }

        feed->fmt = av_guess_format("ffm", NULL, NULL);
        /* default feed file */
        snprintf(feed->feed_filename, sizeof(feed->feed_filename),
                 "/tmp/%s.ffm", feed->filename);
        feed->feed_max_size = 5 * 1024 * 1024;
        feed->is_feed = 1;
        feed->feed = feed; /* self feeding :-) */
        *pfeed = feed;
        return 0;
    }
    av_assert0(feed);
    if (!av_strcasecmp(cmd, "Launch")) {
        int i;

        feed->child_argv = av_mallocz_array(MAX_CHILD_ARGS, sizeof(char *));
        if (!feed->child_argv)
            return AVERROR(ENOMEM);
        for (i = 0; i < MAX_CHILD_ARGS - 2; i++) {
            ffserver_get_arg(arg, sizeof(arg), p);
            if (!arg[0])
                break;

            feed->child_argv[i] = av_strdup(arg);
            if (!feed->child_argv[i])
                return AVERROR(ENOMEM);
        }

        feed->child_argv[i] =
            av_asprintf("http://%s:%d/%s",
                        (config->http_addr.sin_addr.s_addr == INADDR_ANY) ?
                        "127.0.0.1" : inet_ntoa(config->http_addr.sin_addr),
                        ntohs(config->http_addr.sin_port), feed->filename);
        if (!feed->child_argv[i])
            return AVERROR(ENOMEM);
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(NULL, feed, NULL, *p, config->filename,
                               config->line_num);
    } else if (!av_strcasecmp(cmd, "File") ||
               !av_strcasecmp(cmd, "ReadOnlyFile")) {
        ffserver_get_arg(feed->feed_filename, sizeof(feed->feed_filename), p);
        feed->readonly = !av_strcasecmp(cmd, "ReadOnlyFile");
    } else if (!av_strcasecmp(cmd, "Truncate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        /* assume Truncate is true in case no argument is specified */
        if (!arg[0]) {
            feed->truncate = 1;
        } else {
            WARNING("Truncate N syntax in configuration file is deprecated. "
                    "Use Truncate alone with no arguments.\n");
            feed->truncate = strtod(arg, NULL);
        }
    } else if (!av_strcasecmp(cmd, "FileMaxSize")) {
        char *p1;
        double fsize;

        ffserver_get_arg(arg, sizeof(arg), p);
        p1 = arg;
        fsize = strtod(p1, &p1);
        switch (av_toupper(*p1)) {
        case 'K':
            fsize *= 1024;
            break;
        case 'M':
            fsize *= 1024 * 1024;
            break;
        case 'G':
            fsize *= 1024 * 1024 * 1024;
            break;
        default:
            ERROR("Invalid file size: '%s'\n", arg);
            break;
        }
        feed->feed_max_size = (int64_t)fsize;
        if (feed->feed_max_size < FFM_PACKET_SIZE*4) {
            ERROR("Feed max file size is too small. Must be at least %d.\n",
                  FFM_PACKET_SIZE*4);
        }
    } else if (!av_strcasecmp(cmd, "</Feed>")) {
        *pfeed = NULL;
    } else {
        ERROR("Invalid entry '%s' inside <Feed></Feed>\n", cmd);
    }
    return 0;
}
static void *sender_thread(void *arg)
{
    int i, ret = 0;
    struct sender_data *wd = arg;

    av_log(NULL, AV_LOG_INFO, "sender #%d: workload=%d\n", wd->id, wd->workload);
    for (i = 0; i < wd->workload; i++) {
        if (rand() % wd->workload < wd->workload / 10) {
            av_log(NULL, AV_LOG_INFO, "sender #%d: flushing the queue\n", wd->id);
            av_thread_message_flush(wd->queue);
        } else {
            char *val;
            AVDictionary *meta = NULL;
            struct message msg = {
                .magic = MAGIC,
                .frame = av_frame_alloc(),
            };

            if (!msg.frame) {
                ret = AVERROR(ENOMEM);
                break;
            }

            /* we add some metadata to identify the frames */
            val = av_asprintf("frame %d/%d from sender %d",
                              i + 1, wd->workload, wd->id);
            if (!val) {
                av_frame_free(&msg.frame);
                ret = AVERROR(ENOMEM);
                break;
            }
            ret = av_dict_set(&meta, "sig", val, AV_DICT_DONT_STRDUP_VAL);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }
            msg.frame->metadata = meta;

            /* allocate a real frame in order to simulate "real" work */
            msg.frame->format = AV_PIX_FMT_RGBA;
            msg.frame->width  = 320;
            msg.frame->height = 240;
            ret = av_frame_get_buffer(msg.frame, 32);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }

            /* push the frame in the common queue */
            av_log(NULL, AV_LOG_INFO, "sender #%d: sending my work (%d/%d frame:%p)\n",
                   wd->id, i + 1, wd->workload, msg.frame);
            ret = av_thread_message_queue_send(wd->queue, &msg, 0);
            if (ret < 0) {
                av_frame_free(&msg.frame);
                break;
            }
        }
    }
    av_log(NULL, AV_LOG_INFO, "sender #%d: my work is done here (%s)\n",
           wd->id, av_err2str(ret));
    av_thread_message_queue_set_err_recv(wd->queue, ret < 0 ? ret : AVERROR_EOF);
    return NULL;
}