Example #1
static int ffserver_parse_config_feed(FFServerConfig *config, const char *cmd, const char **p,
                                      int line_num, FFServerStream **pfeed)
{
    FFServerStream *feed;
    char arg[1024];
    av_assert0(pfeed);
    feed = *pfeed;
    if (!av_strcasecmp(cmd, "<Feed")) {
        char *q;
        FFServerStream *s;
        feed = av_mallocz(sizeof(FFServerStream));
        if (!feed)
            return AVERROR(ENOMEM);
        ffserver_get_arg(feed->filename, sizeof(feed->filename), p);
        q = strrchr(feed->filename, '>');
        if (q)
            *q = '\0';

        for (s = config->first_feed; s; s = s->next) {
            if (!strcmp(feed->filename, s->filename))
                ERROR("Feed '%s' already registered\n", s->filename);
        }

        feed->fmt = av_guess_format("ffm", NULL, NULL);
        /* default feed file */
        snprintf(feed->feed_filename, sizeof(feed->feed_filename),
                 "/tmp/%s.ffm", feed->filename);
        feed->feed_max_size = 5 * 1024 * 1024;
        feed->is_feed = 1;
        feed->feed = feed; /* self feeding :-) */
        *pfeed = feed;
        return 0;
    }
    av_assert0(feed);
    if (!av_strcasecmp(cmd, "Launch")) {
        int i;

        feed->child_argv = av_mallocz(64 * sizeof(char *));
        if (!feed->child_argv)
            return AVERROR(ENOMEM);
        for (i = 0; i < 62; i++) {
            ffserver_get_arg(arg, sizeof(arg), p);
            if (!arg[0])
                break;

            feed->child_argv[i] = av_strdup(arg);
            if (!feed->child_argv[i])
                return AVERROR(ENOMEM);
        }

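        /* append the feed URL, built from the configured HTTP address, as the final Launch argument */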
        feed->child_argv[i] =
            av_asprintf("http://%s:%d/%s",
                        (config->http_addr.sin_addr.s_addr == INADDR_ANY) ? "127.0.0.1" :
                        inet_ntoa(config->http_addr.sin_addr), ntohs(config->http_addr.sin_port),
                        feed->filename);
        if (!feed->child_argv[i])
            return AVERROR(ENOMEM);
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(NULL, feed, NULL, *p, config->filename, line_num);
    } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
        ffserver_get_arg(feed->feed_filename, sizeof(feed->feed_filename), p);
        feed->readonly = !av_strcasecmp(cmd, "ReadOnlyFile");
    } else if (!av_strcasecmp(cmd, "Truncate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        /* assume Truncate is true in case no argument is specified */
        if (!arg[0]) {
            feed->truncate = 1;
        } else {
            WARNING("Truncate N syntax in configuration file is deprecated, "
                    "use Truncate alone with no arguments\n");
            feed->truncate = strtod(arg, NULL);
        }
    } else if (!av_strcasecmp(cmd, "FileMaxSize")) {
        char *p1;
        double fsize;

        ffserver_get_arg(arg, sizeof(arg), p);
        p1 = arg;
        fsize = strtod(p1, &p1);
        switch(av_toupper(*p1)) {
        case 'K':
            fsize *= 1024;
            break;
        case 'M':
            fsize *= 1024 * 1024;
            break;
        case 'G':
            fsize *= 1024 * 1024 * 1024;
            break;
        }
        feed->feed_max_size = (int64_t)fsize;
        if (feed->feed_max_size < FFM_PACKET_SIZE*4)
            ERROR("Feed max file size is too small, must be at least %d\n", FFM_PACKET_SIZE*4);
    } else if (!av_strcasecmp(cmd, "</Feed>")) {
        *pfeed = NULL;
    } else {
        ERROR("Invalid entry '%s' inside <Feed></Feed>\n", cmd);
    }
    return 0;
}
Example #2
static int icecast_open(URLContext *h, const char *uri, int flags)
{
    IcecastContext *s = h->priv_data;

    // Dict to set options that we pass to the HTTP protocol
    AVDictionary *opt_dict = NULL;

    // URI part variables
    char h_url[1024], host[1024], auth[1024], path[1024];
    char *headers = NULL, *user = NULL;
    int port, ret;
    AVBPrint bp;

    if (flags & AVIO_FLAG_READ)
        return AVERROR(ENOSYS);

    av_bprint_init(&bp, 0, 1);

    // Build header strings
    cat_header(&bp, "Ice-Name", s->name);
    cat_header(&bp, "Ice-Description", s->description);
    cat_header(&bp, "Ice-URL", s->url);
    cat_header(&bp, "Ice-Genre", s->genre);
    cat_header(&bp, "Ice-Public", s->public ? "1" : "0");
    if (!av_bprint_is_complete(&bp)) {
        ret = AVERROR(ENOMEM);
        goto cleanup;
    }
    av_bprint_finalize(&bp, &headers);

    // Set options
    av_dict_set(&opt_dict, "method", s->legacy_icecast ? "SOURCE" : "PUT", 0);
    av_dict_set(&opt_dict, "auth_type", "basic", 0);
    av_dict_set(&opt_dict, "headers", headers, 0);
    av_dict_set(&opt_dict, "chunked_post", "0", 0);
    av_dict_set(&opt_dict, "send_expect_100", s->legacy_icecast ? "0" : "1", 0);
    if (NOT_EMPTY(s->content_type))
        av_dict_set(&opt_dict, "content_type", s->content_type, 0);
    else
        av_dict_set(&opt_dict, "content_type", "audio/mpeg", 0);
    if (NOT_EMPTY(s->user_agent))
        av_dict_set(&opt_dict, "user_agent", s->user_agent, 0);

    // Parse URI
    av_url_split(NULL, 0, auth, sizeof(auth), host, sizeof(host),
                 &port, path, sizeof(path), uri);

    // Check for auth data in URI
    if (auth[0]) {
        char *sep = strchr(auth, ':');
        if (sep) {
            *sep = 0;
            sep++;
            if (s->pass) {
                av_free(s->pass);
                av_log(h, AV_LOG_WARNING, "Overwriting -password <pass> with URI password!\n");
            }
            if (!(s->pass = av_strdup(sep))) {
                ret = AVERROR(ENOMEM);
                goto cleanup;
            }
        }
        if (!(user = av_strdup(auth))) {
            ret = AVERROR(ENOMEM);
            goto cleanup;
        }
    }

    // Build new authstring
    snprintf(auth, sizeof(auth),
             "%s:%s",
             user ? user : DEFAULT_ICE_USER,
             s->pass ? s->pass : "");

    // Check for mountpoint (path)
    if (!path[0] || strcmp(path, "/") == 0) {
        av_log(h, AV_LOG_ERROR, "No mountpoint (path) specified!\n");
        ret = AVERROR(EIO);
        goto cleanup;
    }

    // Build new URI for passing to http protocol
    ff_url_join(h_url, sizeof(h_url), "http", auth, host, port, "%s", path);
    // Finally open http proto handler
    ret = ffurl_open(&s->hd, h_url, AVIO_FLAG_READ_WRITE, NULL, &opt_dict);

cleanup:
    av_freep(&user);
    av_freep(&headers);
    av_dict_free(&opt_dict);

    return ret;
}
Example #3
static int ffm2_read_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec;
    int ret;
    int f_main = 0, f_cprv = -1, f_stvi = -1, f_stau = -1;
    AVCodec *enc;
    char *buffer;

    ffm->packet_size = avio_rb32(pb);
    if (ffm->packet_size != FFM_PACKET_SIZE) {
        av_log(s, AV_LOG_ERROR, "Invalid packet size %d, expected size was %d\n",
               ffm->packet_size, FFM_PACKET_SIZE);
        ret = AVERROR_INVALIDDATA;
        goto fail;
    }

    ffm->write_index = avio_rb64(pb);
    /* get also filesize */
    if (pb->seekable) {
        ffm->file_size = avio_size(pb);
        if (ffm->write_index && 0)
            adjust_write_index(s);
    } else {
        ffm->file_size = (UINT64_C(1) << 63) - 1;
    }

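    /* parse header chunks: each chunk is a 32-bit tag followed by a 32-bit payload size; a zero tag ends the list */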
    while(!avio_feof(pb)) {
        unsigned id = avio_rb32(pb);
        unsigned size = avio_rb32(pb);
        int64_t next = avio_tell(pb) + size;
        char rc_eq_buf[128];

        if(!id)
            break;

        switch(id) {
        case MKBETAG('M', 'A', 'I', 'N'):
            if (f_main++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            avio_rb32(pb); /* nb_streams */
            avio_rb32(pb); /* total bitrate */
            break;
        case MKBETAG('C', 'O', 'M', 'M'):
            f_cprv = f_stvi = f_stau = 0;
            st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            avpriv_set_pts_info(st, 64, 1, 1000000);

            codec = st->codec;
            /* generic info */
            codec->codec_id = avio_rb32(pb);
            codec->codec_type = avio_r8(pb);
            codec->bit_rate = avio_rb32(pb);
            codec->flags = avio_rb32(pb);
            codec->flags2 = avio_rb32(pb);
            codec->debug = avio_rb32(pb);
            if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
                if (ff_get_extradata(codec, pb, avio_rb32(pb)) < 0) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
            break;
        case MKBETAG('S', 'T', 'V', 'I'):
            if (f_stvi++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            codec->time_base.num = avio_rb32(pb);
            codec->time_base.den = avio_rb32(pb);
            if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
                av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
                       codec->time_base.num, codec->time_base.den);
                ret = AVERROR_INVALIDDATA;
                goto fail;
            }
            codec->width = avio_rb16(pb);
            codec->height = avio_rb16(pb);
            codec->gop_size = avio_rb16(pb);
            codec->pix_fmt = avio_rb32(pb);
            codec->qmin = avio_r8(pb);
            codec->qmax = avio_r8(pb);
            codec->max_qdiff = avio_r8(pb);
            codec->qcompress = avio_rb16(pb) / 10000.0;
            codec->qblur = avio_rb16(pb) / 10000.0;
            codec->bit_rate_tolerance = avio_rb32(pb);
            avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
            codec->rc_eq = av_strdup(rc_eq_buf);
            codec->rc_max_rate = avio_rb32(pb);
            codec->rc_min_rate = avio_rb32(pb);
            codec->rc_buffer_size = avio_rb32(pb);
            codec->i_quant_factor = av_int2double(avio_rb64(pb));
            codec->b_quant_factor = av_int2double(avio_rb64(pb));
            codec->i_quant_offset = av_int2double(avio_rb64(pb));
            codec->b_quant_offset = av_int2double(avio_rb64(pb));
            codec->dct_algo = avio_rb32(pb);
            codec->strict_std_compliance = avio_rb32(pb);
            codec->max_b_frames = avio_rb32(pb);
            codec->mpeg_quant = avio_rb32(pb);
            codec->intra_dc_precision = avio_rb32(pb);
            codec->me_method = avio_rb32(pb);
            codec->mb_decision = avio_rb32(pb);
            codec->nsse_weight = avio_rb32(pb);
            codec->frame_skip_cmp = avio_rb32(pb);
            codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
            codec->codec_tag = avio_rb32(pb);
            codec->thread_count = avio_r8(pb);
            codec->coder_type = avio_rb32(pb);
            codec->me_cmp = avio_rb32(pb);
            codec->me_subpel_quality = avio_rb32(pb);
            codec->me_range = avio_rb32(pb);
            codec->keyint_min = avio_rb32(pb);
            codec->scenechange_threshold = avio_rb32(pb);
            codec->b_frame_strategy = avio_rb32(pb);
            codec->qcompress = av_int2double(avio_rb64(pb));
            codec->qblur = av_int2double(avio_rb64(pb));
            codec->max_qdiff = avio_rb32(pb);
            codec->refs = avio_rb32(pb);
            break;
        case MKBETAG('S', 'T', 'A', 'U'):
            if (f_stau++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            codec->sample_rate = avio_rb32(pb);
            codec->channels = avio_rl16(pb);
            codec->frame_size = avio_rl16(pb);
            break;
        case MKBETAG('C', 'P', 'R', 'V'):
            if (f_cprv++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            enc = avcodec_find_encoder(codec->codec_id);
            if (enc && enc->priv_data_size && enc->priv_class) {
                buffer = av_malloc(size + 1);
                if (!buffer) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
                avio_get_str(pb, size, buffer, size + 1);
                if ((ret = ffm_append_recommended_configuration(st, &buffer)) < 0)
                    goto fail;
            }
            break;
        case MKBETAG('S', '2', 'V', 'I'):
            if (f_stvi++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            buffer = av_malloc(size);
            if (!buffer) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avio_get_str(pb, INT_MAX, buffer, size);
            av_set_options_string(codec, buffer, "=", ",");
            if ((ret = ffm_append_recommended_configuration(st, &buffer)) < 0)
                goto fail;
            break;
        case MKBETAG('S', '2', 'A', 'U'):
            if (f_stau++) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            buffer = av_malloc(size);
            if (!buffer) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avio_get_str(pb, INT_MAX, buffer, size);
            av_set_options_string(codec, buffer, "=", ",");
            ffm_append_recommended_configuration(st, &buffer);
            break;
        }
        avio_seek(pb, next, SEEK_SET);
    }

    /* skip data until the end of the current block is reached */
    while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
        avio_r8(pb);

    /* init packet demux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet;
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->read_state = READ_HEADER;
    ffm->first_packet = 1;
    return 0;
 fail:
    ffm_close(s);
    return ret;
}
Example #4
int parse_option(void *optctx, const char *opt, const char *arg, const OptionDef *options)
{
    const OptionDef *po;
    int bool_val = 1;
    int *dstcount;
    void *dst;

    po = find_option(options, opt);
    if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
        /* handle 'no' bool option */
        po = find_option(options, opt + 2);
        if (!(po->name && (po->flags & OPT_BOOL)))
            goto unknown_opt;
        bool_val = 0;
    }
    if (!po->name)
        po = find_option(options, "default");
    if (!po->name) {
unknown_opt:
        av_log(NULL, AV_LOG_ERROR, "Unrecognized option '%s'\n", opt);
        return AVERROR(EINVAL);
    }
    if (po->flags & HAS_ARG && !arg) {
        av_log(NULL, AV_LOG_ERROR, "Missing argument for option '%s'\n", opt);
        return AVERROR(EINVAL);
    }

    /* new-style options contain an offset into optctx, old-style address of
     * a global var*/
    dst = po->flags & (OPT_OFFSET|OPT_SPEC) ? (uint8_t*)optctx + po->u.off : po->u.dst_ptr;

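    /* per-stream (specifier) options: append a new SpecifierOpt entry and point dst at its value union */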
    if (po->flags & OPT_SPEC) {
        SpecifierOpt **so = dst;
        char *p = strchr(opt, ':');

        dstcount = (int*)(so + 1);
        *so = grow_array(*so, sizeof(**so), dstcount, *dstcount + 1);
        (*so)[*dstcount - 1].specifier = av_strdup(p ? p + 1 : "");
        dst = &(*so)[*dstcount - 1].u;
    }

    if (po->flags & OPT_STRING) {
        char *str;
        str = av_strdup(arg);
        if (!str)
            return AVERROR(ENOMEM);
        *(char**)dst = str;
    } else if (po->flags & OPT_BOOL) {
        *(int*)dst = bool_val;
    } else if (po->flags & OPT_INT) {
        *(int*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    } else if (po->flags & OPT_INT64) {
        *(int64_t*)dst = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
    } else if (po->flags & OPT_TIME) {
        *(int64_t*)dst = parse_time_or_die(opt, arg, 1);
    } else if (po->flags & OPT_FLOAT) {
        *(float*)dst = parse_number_or_die(opt, arg, OPT_FLOAT, -INFINITY, INFINITY);
    } else if (po->flags & OPT_DOUBLE) {
        *(double*)dst = parse_number_or_die(opt, arg, OPT_DOUBLE, -INFINITY, INFINITY);
    } else if (po->u.func_arg) {
        int ret = po->flags & OPT_FUNC2 ? po->u.func2_arg(optctx, opt, arg) :
                                          po->u.func_arg(opt, arg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", arg, opt);
            return ret;
        }
    }
    if (po->flags & OPT_EXIT)
        exit_program(0);
    return !!(po->flags & HAS_ARG);
}
Example #5
static char *dup_cfstring_to_utf8(CFStringRef w)
{
    char s[256];
    CFStringGetCString(w, s, 255, kCFStringEncodingUTF8);
    return av_strdup(s);
}
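/* Hypothetical usage sketch for the helper above (not part of the original
 * source): dup_cfstring_to_utf8() returns memory allocated by av_strdup(),
 * so a caller is expected to release it with av_free(). */
static void log_cfstring(CFStringRef name)
{
    char *utf8 = dup_cfstring_to_utf8(name);
    if (utf8) {
        av_log(NULL, AV_LOG_INFO, "name: %s\n", utf8);
        av_free(utf8);
    }
}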
Example #6
static int realtext_read_header(AVFormatContext *s)
{
    RealTextContext *rt = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    AVBPrint buf;
    char c = 0;
    int res = 0, duration = read_ts("60"); // default duration is 60 seconds
    FFTextReader tr;
    ff_text_init_avio(&tr, s->pb);

    if (!st)
        return AVERROR(ENOMEM);
    avpriv_set_pts_info(st, 64, 1, 100);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_REALTEXT;

    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

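    /* read the SMIL input chunk by chunk: <window> chunks go to extradata, other chunks become queued subtitle events */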
    while (!ff_text_eof(&tr)) {
        AVPacket *sub;
        const int64_t pos = ff_text_pos(&tr) - (c != 0);
        int n = ff_smil_extract_next_text_chunk(&tr, &buf, &c);

        if (n == 0)
            break;

        if (!av_strncasecmp(buf.str, "<window", 7)) {
            /* save header to extradata */
            const char *p = ff_smil_get_attr_ptr(buf.str, "duration");

            if (p)
                duration = read_ts(p);
            st->codec->extradata = av_strdup(buf.str);
            if (!st->codec->extradata) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            st->codec->extradata_size = buf.len + 1;
        } else {
            /* if we just read a <time> tag, introduce a new event, otherwise merge
             * with the previous one */
            int merge = !av_strncasecmp(buf.str, "<time", 5) ? 0 : 1;
            sub = ff_subtitles_queue_insert(&rt->q, buf.str, buf.len, merge);
            if (!sub) {
                res = AVERROR(ENOMEM);
                goto end;
            }
            if (!merge) {
                const char *begin = ff_smil_get_attr_ptr(buf.str, "begin");
                const char *end   = ff_smil_get_attr_ptr(buf.str, "end");

                sub->pos      = pos;
                sub->pts      = begin ? read_ts(begin) : 0;
                sub->duration = end ? (read_ts(end) - sub->pts) : duration;
            }
        }
        av_bprint_clear(&buf);
    }
    ff_subtitles_queue_finalize(&rt->q);

end:
    av_bprint_finalize(&buf, NULL);
    return res;
}
Example #7
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

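        /* fall back to the default channel layout for the channel count if the decoder did not set one */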
        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
Example #8
/**
 * Initialize the x11 grab device demuxer (public device demuxer API).
 *
 * @param s1 Context from avformat core
 * @return <ul>
 *          <li>AVERROR(ENOMEM) no memory left</li>
 *          <li>AVERROR(EIO) other failure case</li>
 *          <li>0 success</li>
 *         </ul>
 */
static int
x11grab_read_header(AVFormatContext *s1)
{
    struct x11grab *x11grab = s1->priv_data;
    Display *dpy;
    AVStream *st = NULL;
    enum AVPixelFormat input_pixfmt;
    XImage *image;
    int x_off = 0;
    int y_off = 0;
    int screen;
    int use_shm;
    char *param, *offset;
    int ret = 0;
    AVRational framerate;

    param = av_strdup(s1->filename);
    if (!param) {
        ret = AVERROR(ENOMEM);
        goto out;
    }

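    /* parse the optional "+x,y[nomouse]" suffix of the display name */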
    offset = strchr(param, '+');
    if (offset) {
        sscanf(offset, "%d,%d", &x_off, &y_off);
        x11grab->draw_mouse = !strstr(offset, "nomouse");
        *offset= 0;
    }

    if ((ret = av_parse_video_size(&x11grab->width, &x11grab->height, x11grab->video_size)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Couldn't parse video size.\n");
        goto out;
    }
    if ((ret = av_parse_video_rate(&framerate, x11grab->framerate)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Could not parse framerate: %s.\n", x11grab->framerate);
        goto out;
    }
    av_log(s1, AV_LOG_INFO, "device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
           s1->filename, param, x_off, y_off, x11grab->width, x11grab->height);

    dpy = XOpenDisplay(param);
    if(!dpy) {
        av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
        ret = AVERROR(EIO);
        goto out;
    }

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    screen = DefaultScreen(dpy);

    if (x11grab->follow_mouse) {
        int screen_w, screen_h;
        Window w;

        screen_w = DisplayWidth(dpy, screen);
        screen_h = DisplayHeight(dpy, screen);
        XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off, &ret, &ret, &ret);
        x_off -= x11grab->width / 2;
        y_off -= x11grab->height / 2;
        x_off = FFMIN(FFMAX(x_off, 0), screen_w - x11grab->width);
        y_off = FFMIN(FFMAX(y_off, 0), screen_h - x11grab->height);
        av_log(s1, AV_LOG_INFO, "followmouse is enabled, resetting grabbing region to x: %d y: %d\n", x_off, y_off);
    }

    use_shm = XShmQueryExtension(dpy);
    av_log(s1, AV_LOG_INFO, "shared memory extension %s found\n", use_shm ? "" : "not");

    if(use_shm) {
        int scr = XDefaultScreen(dpy);
        image = XShmCreateImage(dpy,
                                DefaultVisual(dpy, scr),
                                DefaultDepth(dpy, scr),
                                ZPixmap,
                                NULL,
                                &x11grab->shminfo,
                                x11grab->width, x11grab->height);
        x11grab->shminfo.shmid = shmget(IPC_PRIVATE,
                                        image->bytes_per_line * image->height,
                                        IPC_CREAT|0777);
        if (x11grab->shminfo.shmid == -1) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Can't get shared memory!\n");
            ret = AVERROR(ENOMEM);
            goto out;
        }
        x11grab->shminfo.shmaddr = image->data = shmat(x11grab->shminfo.shmid, 0, 0);
        x11grab->shminfo.readOnly = False;

        if (!XShmAttach(dpy, &x11grab->shminfo)) {
            av_log(s1, AV_LOG_ERROR, "Fatal: Failed to attach shared memory!\n");
            /* needs some better error subroutine :) */
            ret = AVERROR(EIO);
            goto out;
        }
    } else {
        image = XGetImage(dpy, RootWindow(dpy, screen),
                          x_off,y_off,
                          x11grab->width, x11grab->height,
                          AllPlanes, ZPixmap);
    }

    switch (image->bits_per_pixel) {
    case 8:
        av_log (s1, AV_LOG_DEBUG, "8 bit palette\n");
        input_pixfmt = AV_PIX_FMT_PAL8;
        break;
    case 16:
        if (       image->red_mask   == 0xf800 &&
                   image->green_mask == 0x07e0 &&
                   image->blue_mask  == 0x001f ) {
            av_log (s1, AV_LOG_DEBUG, "16 bit RGB565\n");
            input_pixfmt = AV_PIX_FMT_RGB565;
        } else if (image->red_mask   == 0x7c00 &&
                   image->green_mask == 0x03e0 &&
                   image->blue_mask  == 0x001f ) {
            av_log(s1, AV_LOG_DEBUG, "16 bit RGB555\n");
            input_pixfmt = AV_PIX_FMT_RGB555;
        } else {
            av_log(s1, AV_LOG_ERROR, "RGB ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 24:
        if (        image->red_mask   == 0xff0000 &&
                    image->green_mask == 0x00ff00 &&
                    image->blue_mask  == 0x0000ff ) {
            input_pixfmt = AV_PIX_FMT_BGR24;
        } else if ( image->red_mask   == 0x0000ff &&
                    image->green_mask == 0x00ff00 &&
                    image->blue_mask  == 0xff0000 ) {
            input_pixfmt = AV_PIX_FMT_RGB24;
        } else {
            av_log(s1, AV_LOG_ERROR,"rgb ordering at image depth %i not supported ... aborting\n", image->bits_per_pixel);
            av_log(s1, AV_LOG_ERROR, "color masks: r 0x%.6lx g 0x%.6lx b 0x%.6lx\n", image->red_mask, image->green_mask, image->blue_mask);
            ret = AVERROR(EIO);
            goto out;
        }
        break;
    case 32:
        input_pixfmt = AV_PIX_FMT_RGB32;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "image depth %i not supported ... aborting\n", image->bits_per_pixel);
        ret = AVERROR(EINVAL);
        goto out;
    }

    x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel/8;
    x11grab->dpy = dpy;
    x11grab->time_base  = (AVRational){framerate.den, framerate.num};
    x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
    x11grab->x_off = x_off;
    x11grab->y_off = y_off;
    x11grab->image = image;
    x11grab->use_shm = use_shm;

    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
    st->codec->width  = x11grab->width;
    st->codec->height = x11grab->height;
    st->codec->pix_fmt = input_pixfmt;
    st->codec->time_base = x11grab->time_base;
    st->codec->bit_rate = x11grab->frame_size * 1/av_q2d(x11grab->time_base) * 8;

out:
    av_free(param);
    return ret;
}
Example #9
static char *get_geokey_val(int key, int val)
{
    char *ap;

    if (val == TIFF_GEO_KEY_UNDEFINED)
        return av_strdup("undefined");
    if (val == TIFF_GEO_KEY_USER_DEFINED)
        return av_strdup("User-Defined");

#define RET_GEOKEY_VAL(TYPE, array)\
    if (val >= TIFF_##TYPE##_OFFSET &&\
        val - TIFF_##TYPE##_OFFSET < FF_ARRAY_ELEMS(ff_tiff_##array##_codes))\
        return av_strdup(ff_tiff_##array##_codes[val - TIFF_##TYPE##_OFFSET]);

    switch (key) {
    case TIFF_GT_MODEL_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_MODEL_TYPE, gt_model_type);
        break;
    case TIFF_GT_RASTER_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GT_RASTER_TYPE, gt_raster_type);
        break;
    case TIFF_GEOG_LINEAR_UNITS_GEOKEY:
    case TIFF_PROJ_LINEAR_UNITS_GEOKEY:
    case TIFF_VERTICAL_UNITS_GEOKEY:
        RET_GEOKEY_VAL(LINEAR_UNIT, linear_unit);
        break;
    case TIFF_GEOG_ANGULAR_UNITS_GEOKEY:
    case TIFF_GEOG_AZIMUTH_UNITS_GEOKEY:
        RET_GEOKEY_VAL(ANGULAR_UNIT, angular_unit);
        break;
    case TIFF_GEOGRAPHIC_TYPE_GEOKEY:
        RET_GEOKEY_VAL(GCS_TYPE, gcs_type);
        RET_GEOKEY_VAL(GCSE_TYPE, gcse_type);
        break;
    case TIFF_GEOG_GEODETIC_DATUM_GEOKEY:
        RET_GEOKEY_VAL(GEODETIC_DATUM, geodetic_datum);
        RET_GEOKEY_VAL(GEODETIC_DATUM_E, geodetic_datum_e);
        break;
    case TIFF_GEOG_ELLIPSOID_GEOKEY:
        RET_GEOKEY_VAL(ELLIPSOID, ellipsoid);
        break;
    case TIFF_GEOG_PRIME_MERIDIAN_GEOKEY:
        RET_GEOKEY_VAL(PRIME_MERIDIAN, prime_meridian);
        break;
    case TIFF_PROJECTED_CS_TYPE_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_proj_cs_type_codes, FF_ARRAY_ELEMS(ff_tiff_proj_cs_type_codes), val));
        if(ap) return ap;
        break;
    case TIFF_PROJECTION_GEOKEY:
        ap = av_strdup(search_keyval(ff_tiff_projection_codes, FF_ARRAY_ELEMS(ff_tiff_projection_codes), val));
        if(ap) return ap;
        break;
    case TIFF_PROJ_COORD_TRANS_GEOKEY:
        RET_GEOKEY_VAL(COORD_TRANS, coord_trans);
        break;
    case TIFF_VERTICAL_CS_TYPE_GEOKEY:
        RET_GEOKEY_VAL(VERT_CS, vert_cs);
        RET_GEOKEY_VAL(ORTHO_VERT_CS, ortho_vert_cs);
        break;

    }

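    /* no table matched: return a heap-allocated "Unknown-<val>" string */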
    ap = av_malloc(14);
    if (ap)
        snprintf(ap, 14, "Unknown-%d", val);
    return ap;
}
Example #10
AVFilterContext *ff_filter_alloc(const AVFilter *filter, const char *inst_name)
{
    AVFilterContext *ret;

    if (!filter)
        return NULL;

    ret = av_mallocz(sizeof(AVFilterContext));
    if (!ret)
        return NULL;

    ret->av_class = &avfilter_class;
    ret->filter   = filter;
    ret->name     = inst_name ? av_strdup(inst_name) : NULL;
    if (filter->priv_size) {
        ret->priv     = av_mallocz(filter->priv_size);
        if (!ret->priv)
            goto err;
    }

    if (filter->priv_class) {
        *(const AVClass**)ret->priv = filter->priv_class;
        av_opt_set_defaults(ret->priv);
    }

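    /* duplicate the filter's static input pad descriptions and allocate one link pointer per input */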
    ret->nb_inputs = avfilter_pad_count(filter->inputs);
    if (ret->nb_inputs ) {
        ret->input_pads   = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
        if (!ret->input_pads)
            goto err;
        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
        ret->inputs       = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
        if (!ret->inputs)
            goto err;
    }

    ret->nb_outputs = avfilter_pad_count(filter->outputs);
    if (ret->nb_outputs) {
        ret->output_pads  = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
        if (!ret->output_pads)
            goto err;
        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
        ret->outputs      = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
        if (!ret->outputs)
            goto err;
    }
#if FF_API_FOO_COUNT
    ret->output_count = ret->nb_outputs;
    ret->input_count  = ret->nb_inputs;
#endif

    return ret;

err:
    av_freep(&ret->inputs);
    av_freep(&ret->input_pads);
    ret->nb_inputs = 0;
    av_freep(&ret->outputs);
    av_freep(&ret->output_pads);
    ret->nb_outputs = 0;
    av_freep(&ret->priv);
    av_free(ret);
    return NULL;
}
Example #11
int avfilter_init_str(AVFilterContext *filter, const char *args)
{
    AVDictionary *options = NULL;
    AVDictionaryEntry *e;
    int ret = 0;

    if (args && *args) {
        if (!filter->filter->priv_class) {
            av_log(filter, AV_LOG_ERROR, "This filter does not take any "
                   "options, but options were provided: %s.\n", args);
            return AVERROR(EINVAL);
        }

#if FF_API_OLD_FILTER_OPTS
            if (   !strcmp(filter->filter->name, "format")     ||
                   !strcmp(filter->filter->name, "noformat")   ||
                   !strcmp(filter->filter->name, "frei0r")     ||
                   !strcmp(filter->filter->name, "frei0r_src") ||
                   !strcmp(filter->filter->name, "ocv")        ||
                   !strcmp(filter->filter->name, "pan")        ||
                   !strcmp(filter->filter->name, "pp")         ||
                   !strcmp(filter->filter->name, "aevalsrc")) {
            /* a hack for compatibility with the old syntax
             * replace colons with |s */
            char *copy = av_strdup(args);
            char *p    = copy;
            int nb_leading = 0; // number of leading colons to skip
            int deprecated = 0;

            if (!copy) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            if (!strcmp(filter->filter->name, "frei0r") ||
                !strcmp(filter->filter->name, "ocv"))
                nb_leading = 1;
            else if (!strcmp(filter->filter->name, "frei0r_src"))
                nb_leading = 3;

            while (nb_leading--) {
                p = strchr(p, ':');
                if (!p) {
                    p = copy + strlen(copy);
                    break;
                }
                p++;
            }

            deprecated = strchr(p, ':') != NULL;

            if (!strcmp(filter->filter->name, "aevalsrc")) {
                deprecated = 0;
                while ((p = strchr(p, ':')) && p[1] != ':') {
                    const char *epos = strchr(p + 1, '=');
                    const char *spos = strchr(p + 1, ':');
                    const int next_token_is_opt = epos && (!spos || epos < spos);
                    if (next_token_is_opt) {
                        p++;
                        break;
                    }
                    /* next token does not contain a '=', assume a channel expression */
                    deprecated = 1;
                    *p++ = '|';
                }
                if (p && *p == ':') { // double sep '::' found
                    deprecated = 1;
                    memmove(p, p + 1, strlen(p));
                }
            } else
            while ((p = strchr(p, ':')))
                *p++ = '|';

            if (deprecated)
                av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
                       "'|' to separate the list items.\n");

            av_log(filter, AV_LOG_DEBUG, "compat: called with args=[%s]\n", copy);
            ret = process_options(filter, &options, copy);
            av_freep(&copy);

            if (ret < 0)
                goto fail;
#endif
        } else {
#if CONFIG_MP_FILTER
            if (!strcmp(filter->filter->name, "mp")) {
                char *escaped;

                if (!strncmp(args, "filter=", 7))
                    args += 7;
                ret = av_escape(&escaped, args, ":=", AV_ESCAPE_MODE_BACKSLASH, 0);
                if (ret < 0) {
                    av_log(filter, AV_LOG_ERROR, "Unable to escape MPlayer filters arg '%s'\n", args);
                    goto fail;
                }
                ret = process_options(filter, &options, escaped);
                av_free(escaped);
            } else
#endif
            ret = process_options(filter, &options, args);
            if (ret < 0)
                goto fail;
        }
    }

    ret = avfilter_init_dict(filter, &options);
    if (ret < 0)
        goto fail;

    if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }

fail:
    av_dict_free(&options);

    return ret;
}
Example #12
static av_cold int frei0r_init(AVFilterContext *ctx,
                               const char *dl_name, int type)
{
    Frei0rContext *frei0r = ctx->priv;
    f0r_init_f            f0r_init;
    f0r_get_plugin_info_f f0r_get_plugin_info;
    f0r_plugin_info_t *pi;
    char *path;

    /* see: http://piksel.org/frei0r/1.2/spec/1.2/spec/group__pluglocations.html */
    if ((path = av_strdup(getenv("FREI0R_PATH")))) {
        char *p, *ptr = NULL;
        for (p = path; p = strtok_r(p, ":", &ptr); p = NULL)
            if (frei0r->dl_handle = load_path(ctx, p, dl_name))
                break;
        av_free(path);
    }
    if (!frei0r->dl_handle && (path = getenv("HOME"))) {
        char prefix[1024];
        snprintf(prefix, sizeof(prefix), "%s/.frei0r-1/lib/", path);
        frei0r->dl_handle = load_path(ctx, prefix, dl_name);
    }
    if (!frei0r->dl_handle)
        frei0r->dl_handle = load_path(ctx, "/usr/local/lib/frei0r-1/", dl_name);
    if (!frei0r->dl_handle)
        frei0r->dl_handle = load_path(ctx, "/usr/lib/frei0r-1/", dl_name);
    if (!frei0r->dl_handle) {
        av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'\n", dl_name);
        return AVERROR(EINVAL);
    }

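    /* resolve every mandatory frei0r entry point; fail if any symbol is missing */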
    if (!(f0r_init                = load_sym(ctx, "f0r_init"           )) ||
        !(f0r_get_plugin_info     = load_sym(ctx, "f0r_get_plugin_info")) ||
        !(frei0r->get_param_info  = load_sym(ctx, "f0r_get_param_info" )) ||
        !(frei0r->get_param_value = load_sym(ctx, "f0r_get_param_value")) ||
        !(frei0r->set_param_value = load_sym(ctx, "f0r_set_param_value")) ||
        !(frei0r->update          = load_sym(ctx, "f0r_update"         )) ||
        !(frei0r->construct       = load_sym(ctx, "f0r_construct"      )) ||
        !(frei0r->destruct        = load_sym(ctx, "f0r_destruct"       )) ||
        !(frei0r->deinit          = load_sym(ctx, "f0r_deinit"         )))
        return AVERROR(EINVAL);

    if (f0r_init() < 0) {
        av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module");
        return AVERROR(EINVAL);
    }

    f0r_get_plugin_info(&frei0r->plugin_info);
    pi = &frei0r->plugin_info;
    if (pi->plugin_type != type) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid type '%s' for the plugin\n",
               pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? "filter" :
               pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? "source" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? "mixer2" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER3 ? "mixer3" : "unknown");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_INFO,
           "name:%s author:'%s' explanation:'%s' color_model:%s "
           "frei0r_version:%d version:%d.%d num_params:%d\n",
           pi->name, pi->author, pi->explanation,
           pi->color_model == F0R_COLOR_MODEL_BGRA8888 ? "bgra8888" :
           pi->color_model == F0R_COLOR_MODEL_RGBA8888 ? "rgba8888" :
           pi->color_model == F0R_COLOR_MODEL_PACKED32 ? "packed32" : "unknown",
           pi->frei0r_version, pi->major_version, pi->minor_version, pi->num_params);

    return 0;
}
Example #13
static int plot_freqs(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ShowFreqsContext *s = ctx->priv;
    const int win_size = s->win_size;
    char *colors, *color, *saveptr = NULL;
    AVFrame *out;
    int ch, n;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    for (n = 0; n < outlink->h; n++)
        memset(out->data[0] + out->linesize[0] * n, 0, outlink->w * 4);

    /* fill FFT input with the number of samples available */
    for (ch = 0; ch < s->nb_channels; ch++) {
        const float *p = (float *)in->extended_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            s->fft_data[ch][n].re = p[n] * s->window_func_lut[n];
            s->fft_data[ch][n].im = 0;
        }
        for (; n < win_size; n++) {
            s->fft_data[ch][n].re = 0;
            s->fft_data[ch][n].im = 0;
        }
    }

    /* run FFT on each samples set */
    for (ch = 0; ch < s->nb_channels; ch++) {
        av_fft_permute(s->fft, s->fft_data[ch]);
        av_fft_calc(s->fft, s->fft_data[ch]);
    }

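/* helpers: real/imaginary parts of an FFT bin and the magnitude of a complex value */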
#define RE(x, ch) s->fft_data[ch][x].re
#define IM(x, ch) s->fft_data[ch][x].im
#define M(a, b) (sqrt((a) * (a) + (b) * (b)))

    colors = av_strdup(s->colors);
    if (!colors) {
        av_frame_free(&out);
        return AVERROR(ENOMEM);
    }

    for (ch = 0; ch < s->nb_channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_y = -1, f;
        double a;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        a = av_clipd(M(RE(0, ch), 0) / s->scale, 0, 1);
        plot_freq(s, ch, a, 0, fg, &prev_y, out, outlink);

        for (f = 1; f < s->nb_freq; f++) {
            a = av_clipd(M(RE(f, ch), IM(f, ch)) / s->scale, 0, 1);

            plot_freq(s, ch, a, f, fg, &prev_y, out, outlink);
        }
    }

    av_free(colors);
    out->pts = in->pts;
    return ff_filter_frame(outlink, out);
}
Example #14
static int ffserver_parse_config_stream(FFServerConfig *config, const char *cmd, const char **p,
                                        int line_num, FFServerStream **pstream)
{
    char arg[1024], arg2[1024];
    FFServerStream *stream;

    av_assert0(pstream);
    stream = *pstream;

    if (!av_strcasecmp(cmd, "<Stream")) {
        char *q;
        FFServerStream *s;
        stream = av_mallocz(sizeof(FFServerStream));
        if (!stream)
            return AVERROR(ENOMEM);
        ffserver_get_arg(stream->filename, sizeof(stream->filename), p);
        q = strrchr(stream->filename, '>');
        if (q)
            *q = '\0';

        for (s = config->first_stream; s; s = s->next) {
            if (!strcmp(stream->filename, s->filename))
                ERROR("Stream '%s' already registered\n", s->filename);
        }

        stream->fmt = ffserver_guess_format(NULL, stream->filename, NULL);
        avcodec_get_context_defaults3(&config->video_enc, NULL);
        avcodec_get_context_defaults3(&config->audio_enc, NULL);

        config->audio_id = AV_CODEC_ID_NONE;
        config->video_id = AV_CODEC_ID_NONE;
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        }
        *pstream = stream;
        return 0;
    }
    av_assert0(stream);
    if (!av_strcasecmp(cmd, "Feed")) {
        FFServerStream *sfeed;
        ffserver_get_arg(arg, sizeof(arg), p);
        sfeed = config->first_feed;
        while (sfeed) {
            if (!strcmp(sfeed->filename, arg))
                break;
            sfeed = sfeed->next_feed;
        }
        if (!sfeed)
            ERROR("Feed with name '%s' for stream '%s' is not defined\n", arg, stream->filename);
        else
            stream->feed = sfeed;
    } else if (!av_strcasecmp(cmd, "Format")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!strcmp(arg, "status")) {
            stream->stream_type = STREAM_TYPE_STATUS;
            stream->fmt = NULL;
        } else {
            stream->stream_type = STREAM_TYPE_LIVE;
            /* JPEG cannot be used here, so use single frame MJPEG */
            if (!strcmp(arg, "jpeg"))
                strcpy(arg, "mjpeg");
            stream->fmt = ffserver_guess_format(arg, NULL, NULL);
            if (!stream->fmt)
                ERROR("Unknown Format: %s\n", arg);
        }
        if (stream->fmt) {
            config->audio_id = stream->fmt->audio_codec;
            config->video_id = stream->fmt->video_codec;
        }
    } else if (!av_strcasecmp(cmd, "InputFormat")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->ifmt = av_find_input_format(arg);
        if (!stream->ifmt)
            ERROR("Unknown input format: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "FaviconURL")) {
        if (stream->stream_type == STREAM_TYPE_STATUS)
            ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p);
        else
            ERROR("FaviconURL only permitted for status streams\n");
    } else if (!av_strcasecmp(cmd, "Author")    ||
               !av_strcasecmp(cmd, "Comment")   ||
               !av_strcasecmp(cmd, "Copyright") ||
               !av_strcasecmp(cmd, "Title")) {
        char key[32];
        int i, ret;
        ffserver_get_arg(arg, sizeof(arg), p);
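        /* lowercase the deprecated option name so it can be used as the metadata key */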
        for (i = 0; i < strlen(cmd); i++)
            key[i] = av_tolower(cmd[i]);
        key[i] = 0;
        WARNING("'%s' option in configuration file is deprecated, "
                "use 'Metadata %s VALUE' instead\n", cmd, key);
        if ((ret = av_dict_set(&stream->metadata, key, arg, 0)) < 0)
            ERROR("Could not set metadata '%s' to value '%s': %s\n", key, arg, av_err2str(ret));
    } else if (!av_strcasecmp(cmd, "Metadata")) {
        int ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if ((ret = av_dict_set(&stream->metadata, arg, arg2, 0)) < 0) {
            ERROR("Could not set metadata '%s' to value '%s': %s\n",
                  arg, arg2, av_err2str(ret));
        }
    } else if (!av_strcasecmp(cmd, "Preroll")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->prebuffer = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "StartSendOnKey")) {
        stream->send_on_key = 1;
    } else if (!av_strcasecmp(cmd, "AudioCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_id = opt_codec(arg, AVMEDIA_TYPE_AUDIO);
        if (config->audio_id == AV_CODEC_ID_NONE)
            ERROR("Unknown AudioCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "VideoCodec")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_id = opt_codec(arg, AVMEDIA_TYPE_VIDEO);
        if (config->video_id == AV_CODEC_ID_NONE)
            ERROR("Unknown VideoCodec: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "MaxTime")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->max_time = atof(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "AudioBitRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.bit_rate = lrintf(atof(arg) * 1000);
    } else if (!av_strcasecmp(cmd, "AudioChannels")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.channels = atoi(arg);
    } else if (!av_strcasecmp(cmd, "AudioSampleRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->audio_enc.sample_rate = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoBitRateRange")) {
        int minrate, maxrate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (sscanf(arg, "%d-%d", &minrate, &maxrate) == 2) {
            config->video_enc.rc_min_rate = minrate * 1000;
            config->video_enc.rc_max_rate = maxrate * 1000;
        } else
            ERROR("Incorrect format for VideoBitRateRange -- should be <min>-<max>: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "Debug")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.debug = strtol(arg,0,0);
    } else if (!av_strcasecmp(cmd, "Strict")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.strict_std_compliance = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoBufferSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.rc_buffer_size = atoi(arg) * 8*1024;
    } else if (!av_strcasecmp(cmd, "VideoBitRateTolerance")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.bit_rate_tolerance = atoi(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "VideoBitRate")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.bit_rate = atoi(arg) * 1000;
    } else if (!av_strcasecmp(cmd, "VideoSize")) {
        int ret;
        ffserver_get_arg(arg, sizeof(arg), p);
        ret = av_parse_video_size(&config->video_enc.width, &config->video_enc.height, arg);
        if (ret < 0)
            ERROR("Invalid video size '%s'\n", arg);
        else if ((config->video_enc.width % 16) != 0 || (config->video_enc.height % 16) != 0)
            ERROR("Image size must be a multiple of 16\n");
    } else if (!av_strcasecmp(cmd, "VideoFrameRate")) {
        AVRational frame_rate;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (av_parse_video_rate(&frame_rate, arg) < 0) {
            ERROR("Incorrect frame rate: %s\n", arg);
        } else {
            config->video_enc.time_base.num = frame_rate.den;
            config->video_enc.time_base.den = frame_rate.num;
        }
    } else if (!av_strcasecmp(cmd, "PixelFormat")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.pix_fmt = av_get_pix_fmt(arg);
        if (config->video_enc.pix_fmt == AV_PIX_FMT_NONE)
            ERROR("Unknown pixel format: %s\n", arg);
    } else if (!av_strcasecmp(cmd, "VideoGopSize")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.gop_size = atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoIntraOnly")) {
        config->video_enc.gop_size = 1;
    } else if (!av_strcasecmp(cmd, "VideoHighQuality")) {
        config->video_enc.mb_decision = FF_MB_DECISION_BITS;
    } else if (!av_strcasecmp(cmd, "Video4MotionVector")) {
        config->video_enc.mb_decision = FF_MB_DECISION_BITS; //FIXME remove
        config->video_enc.flags |= CODEC_FLAG_4MV;
    } else if (!av_strcasecmp(cmd, "AVOptionVideo") ||
               !av_strcasecmp(cmd, "AVOptionAudio")) {
        AVCodecContext *avctx;
        int type;
        ffserver_get_arg(arg, sizeof(arg), p);
        ffserver_get_arg(arg2, sizeof(arg2), p);
        if (!av_strcasecmp(cmd, "AVOptionVideo")) {
            avctx = &config->video_enc;
            type = AV_OPT_FLAG_VIDEO_PARAM;
        } else {
            avctx = &config->audio_enc;
            type = AV_OPT_FLAG_AUDIO_PARAM;
        }
        if (ffserver_opt_default(arg, arg2, avctx, type|AV_OPT_FLAG_ENCODING_PARAM)) {
            ERROR("Error setting %s option to %s %s\n", cmd, arg, arg2);
        }
    } else if (!av_strcasecmp(cmd, "AVPresetVideo") ||
               !av_strcasecmp(cmd, "AVPresetAudio")) {
        AVCodecContext *avctx;
        int type;
        ffserver_get_arg(arg, sizeof(arg), p);
        if (!av_strcasecmp(cmd, "AVPresetVideo")) {
            avctx = &config->video_enc;
            config->video_enc.codec_id = config->video_id;
            type = AV_OPT_FLAG_VIDEO_PARAM;
        } else {
            avctx = &config->audio_enc;
            config->audio_enc.codec_id = config->audio_id;
            type = AV_OPT_FLAG_AUDIO_PARAM;
        }
        if (ffserver_opt_preset(arg, avctx, type|AV_OPT_FLAG_ENCODING_PARAM, &config->audio_id, &config->video_id)) {
            ERROR("AVPreset error: %s\n", arg);
        }
    } else if (!av_strcasecmp(cmd, "VideoTag")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (strlen(arg) == 4)
            config->video_enc.codec_tag = MKTAG(arg[0], arg[1], arg[2], arg[3]);
    } else if (!av_strcasecmp(cmd, "BitExact")) {
        config->video_enc.flags |= CODEC_FLAG_BITEXACT;
    } else if (!av_strcasecmp(cmd, "DctFastint")) {
        config->video_enc.dct_algo  = FF_DCT_FASTINT;
    } else if (!av_strcasecmp(cmd, "IdctSimple")) {
        config->video_enc.idct_algo = FF_IDCT_SIMPLE;
    } else if (!av_strcasecmp(cmd, "Qscale")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.flags |= CODEC_FLAG_QSCALE;
        config->video_enc.global_quality = FF_QP2LAMBDA * atoi(arg);
    } else if (!av_strcasecmp(cmd, "VideoQDiff")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.max_qdiff = atoi(arg);
        if (config->video_enc.max_qdiff < 1 || config->video_enc.max_qdiff > 31)
            ERROR("VideoQDiff out of range\n");
    } else if (!av_strcasecmp(cmd, "VideoQMax")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.qmax = atoi(arg);
        if (config->video_enc.qmax < 1 || config->video_enc.qmax > 31)
            ERROR("VideoQMax out of range\n");
    } else if (!av_strcasecmp(cmd, "VideoQMin")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.qmin = atoi(arg);
        if (config->video_enc.qmin < 1 || config->video_enc.qmin > 31)
            ERROR("VideoQMin out of range\n");
    } else if (!av_strcasecmp(cmd, "LumiMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.lumi_masking = atof(arg);
    } else if (!av_strcasecmp(cmd, "DarkMask")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        config->video_enc.dark_masking = atof(arg);
    } else if (!av_strcasecmp(cmd, "NoVideo")) {
        config->video_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "NoAudio")) {
        config->audio_id = AV_CODEC_ID_NONE;
    } else if (!av_strcasecmp(cmd, "ACL")) {
        ffserver_parse_acl_row(stream, NULL, NULL, *p, config->filename, line_num);
    } else if (!av_strcasecmp(cmd, "DynamicACL")) {
        ffserver_get_arg(stream->dynamic_acl, sizeof(stream->dynamic_acl), p);
    } else if (!av_strcasecmp(cmd, "RTSPOption")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        av_freep(&stream->rtsp_option);
        stream->rtsp_option = av_strdup(arg);
    } else if (!av_strcasecmp(cmd, "MulticastAddress")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        if (resolve_host(&stream->multicast_ip, arg) != 0)
            ERROR("Invalid host/IP address: %s\n", arg);
        stream->is_multicast = 1;
        stream->loop = 1; /* default is looping */
    } else if (!av_strcasecmp(cmd, "MulticastPort")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->multicast_port = atoi(arg);
    } else if (!av_strcasecmp(cmd, "MulticastTTL")) {
        ffserver_get_arg(arg, sizeof(arg), p);
        stream->multicast_ttl = atoi(arg);
    } else if (!av_strcasecmp(cmd, "NoLoop")) {
        stream->loop = 0;
    } else if (!av_strcasecmp(cmd, "</Stream>")) {
        if (stream->feed && stream->fmt && strcmp(stream->fmt->name, "ffm") != 0) {
            if (config->audio_id != AV_CODEC_ID_NONE) {
                config->audio_enc.codec_type = AVMEDIA_TYPE_AUDIO;
                config->audio_enc.codec_id = config->audio_id;
                add_codec(stream, &config->audio_enc);
            }
            if (config->video_id != AV_CODEC_ID_NONE) {
                config->video_enc.codec_type = AVMEDIA_TYPE_VIDEO;
                config->video_enc.codec_id = config->video_id;
                add_codec(stream, &config->video_enc);
            }
        }
        *pstream = NULL;
    } else if (!av_strcasecmp(cmd, "File") || !av_strcasecmp(cmd, "ReadOnlyFile")) {
        ffserver_get_arg(stream->feed_filename, sizeof(stream->feed_filename), p);
    } else {
        ERROR("Invalid entry '%s' inside <Stream></Stream>\n", cmd);
    }
    return 0;
}
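The VideoTag handler above packs a four-character codec tag with MKTAG. A minimal standalone sketch of that macro (the fourcc value is illustrative; nothing here is ffserver-specific):

#include <stdio.h>
#include <libavutil/common.h>

int main(void)
{
    /* pack a fourcc the same way the VideoTag handler does with arg[0..3] */
    unsigned tag = MKTAG('H', '2', '6', '4');

    printf("0x%08x -> %c%c%c%c\n", tag,
           (char)(tag & 0xff), (char)((tag >> 8) & 0xff),
           (char)((tag >> 16) & 0xff), (char)((tag >> 24) & 0xff));
    return 0;
}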
Exemple #15
0
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL, *on_fail = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;
    int fullret;
    char *subselect = NULL, *next_subselect = NULL, *first_subselect = NULL, *tmp_select = NULL;

    if ((ret = ff_tee_parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);
    STEAL_OPTION("onfail", on_fail);

    ret = parse_slave_failure_policy_option(on_fail, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR,
               "Invalid onfail option value, valid options are 'abort' and 'ignore'\n");
        goto end;
    }

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    tee_slave->avf = avf2;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);
    avf2->opaque   = avf->opaque;
    avf2->io_open  = avf->io_open;
    avf2->io_close = avf->io_close;
    avf2->interrupt_callback = avf->interrupt_callback;
    avf2->flags = avf->flags;

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            tmp_select = av_strdup(select);  // av_strtok is destructive so we regenerate it in each loop
            if (!tmp_select) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            fullret = 0;
            first_subselect = tmp_select;
            next_subselect = NULL;
            while (subselect = av_strtok(first_subselect, slave_select_sep, &next_subselect)) {
                first_subselect = NULL;

                ret = avformat_match_stream_specifier(avf, avf->streams[i], subselect);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Invalid stream specifier '%s' for output '%s'\n",
                           subselect, slave);
                    goto end;
                }
                if (ret != 0) {
                    fullret = 1; // match
                    break;
                }
            }
            av_freep(&tmp_select);

            if (fullret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ret = ff_stream_encode_params_copy(st2, st);
        if (ret < 0)
            goto end;
    }

    ret = ff_format_output_open(avf2, filename, NULL);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n", slave,
               av_err2str(ret));
        goto end;
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }
    tee_slave->header_written = 1;

    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = av_bsf_list_parse_str(entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    for (i = 0; i < avf->nb_streams; i++){
        int target_stream = tee_slave->stream_map[i];
        if (target_stream < 0)
            continue;

        if (!tee_slave->bsfs[target_stream]) {
            /* Add pass-through bitstream filter */
            ret = av_bsf_get_null_filter(&tee_slave->bsfs[target_stream]);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Failed to create pass-through bitstream filter: %s\n",
                       av_err2str(ret));
                goto end;
            }
        }

        tee_slave->bsfs[target_stream]->time_base_in = avf->streams[i]->time_base;
        ret = avcodec_parameters_copy(tee_slave->bsfs[target_stream]->par_in,
                                      avf->streams[i]->codecpar);
        if (ret < 0)
            goto end;

        ret = av_bsf_init(tee_slave->bsfs[target_stream]);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR,
                   "Failed to initialize bitstream filter(s): %s\n",
                   av_err2str(ret));
            goto end;
        }
    }

    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_free(on_fail);
    av_dict_free(&options);
    av_freep(&tmp_select);
    return ret;
}
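The STEAL_OPTION macro above detaches a value from the options dictionary so it survives av_dict_free(). A minimal sketch of the same pattern using only libavutil's dictionary API (the key names are illustrative, not the tee muxer's full option set):

#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

int main(void)
{
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *format = NULL;

    av_dict_set(&options, "f", "mpegts", 0);

    /* "steal" the value: detach it from the entry before deleting the key,
     * so deleting the entry (and later av_dict_free()) cannot free it */
    if ((entry = av_dict_get(options, "f", NULL, 0))) {
        format = entry->value;
        entry->value = NULL;
        av_dict_set(&options, "f", NULL, 0);
    }

    printf("stolen format: %s\n", format ? format : "(none)");

    av_free(format);
    av_dict_free(&options);
    return 0;
}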
Exemple #16
0
static int sami_paragraph_to_ass(AVCodecContext *avctx, const char *src)
{
    SAMIContext *sami = avctx->priv_data;
    int ret = 0;
    char *tag = NULL;
    char *dupsrc = av_strdup(src);
    char *p = dupsrc;

    av_bprint_clear(&sami->content);
    for (;;) {
        char *saveptr = NULL;
        int prev_chr_is_space = 0;
        AVBPrint *dst = &sami->content;

        /* parse & extract paragraph tag */
        p = av_stristr(p, "<P");
        if (!p)
            break;
        if (p[2] != '>' && !av_isspace(p[2])) { // avoid confusion with tags such as <PRE>
            p++;
            continue;
        }
        if (dst->len) // add a separator with the previous paragraph if there was one
            av_bprintf(dst, "\\N");
        tag = av_strtok(p, ">", &saveptr);
        if (!tag || !saveptr)
            break;
        p = saveptr;

        /* check if the current paragraph is the "source" (speaker name) */
        if (av_stristr(tag, "ID=Source") || av_stristr(tag, "ID=\"Source\"")) {
            dst = &sami->source;
            av_bprint_clear(dst);
        }

        /* if empty event -> skip subtitle */
        while (av_isspace(*p))
            p++;
        if (!strncmp(p, "&nbsp;", 6)) {
            ret = -1;
            goto end;
        }

        /* extract the text, stripping most of the tags */
        while (*p) {
            if (*p == '<') {
                if (!av_strncasecmp(p, "<P", 2) && (p[2] == '>' || av_isspace(p[2])))
                    break;
                if (!av_strncasecmp(p, "<BR", 3))
                    av_bprintf(dst, "\\N");
                p++;
                while (*p && *p != '>')
                    p++;
                if (!*p)
                    break;
                if (*p == '>')
                    p++;
            }
            if (!av_isspace(*p))
                av_bprint_chars(dst, *p, 1);
            else if (!prev_chr_is_space)
                av_bprint_chars(dst, ' ', 1);
            prev_chr_is_space = av_isspace(*p);
            p++;
        }
    }

    av_bprint_clear(&sami->full);
    if (sami->source.len)
        av_bprintf(&sami->full, "{\\i1}%s{\\i0}\\N", sami->source.str);
    av_bprintf(&sami->full, "%s", sami->content.str);

end:
    av_free(dupsrc);
    return ret;
}
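sami_paragraph_to_ass() accumulates its output with the AVBPrint string buffer API. A small self-contained sketch of that buffer's lifecycle, independent of the SAMI decoder itself:

#include <stdio.h>
#include <libavutil/bprint.h>

int main(void)
{
    AVBPrint buf;

    /* unlimited growth, like the decoder's sami->content buffer */
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);

    av_bprintf(&buf, "line one");
    av_bprintf(&buf, "\\N");        /* ASS hard line break, as in the example */
    av_bprint_chars(&buf, '!', 3);  /* append a character several times */

    if (av_bprint_is_complete(&buf))
        printf("%s\n", buf.str);

    av_bprint_finalize(&buf, NULL); /* free the internal allocation */
    return 0;
}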
Exemple #17
0
static av_cold int frei0r_init(AVFilterContext *ctx,
                               const char *dl_name, int type)
{
    Frei0rContext *s = ctx->priv;
    f0r_init_f            f0r_init;
    f0r_get_plugin_info_f f0r_get_plugin_info;
    f0r_plugin_info_t *pi;
    char *path;
    int ret = 0;
    int i;
    static const char* const frei0r_pathlist[] = {
        "/usr/local/lib/frei0r-1/",
        "/usr/lib/frei0r-1/",
        "/usr/local/lib64/frei0r-1/",
        "/usr/lib64/frei0r-1/"
    };

    if (!dl_name) {
        av_log(ctx, AV_LOG_ERROR, "No filter name provided.\n");
        return AVERROR(EINVAL);
    }

    /* see: http://frei0r.dyne.org/codedoc/html/group__pluglocations.html */
    if ((path = av_strdup(getenv("FREI0R_PATH")))) {
#ifdef _WIN32
        const char *separator = ";";
#else
        const char *separator = ":";
#endif
        char *p, *ptr = NULL;
        for (p = path; p = av_strtok(p, separator, &ptr); p = NULL) {
            /* add additional trailing slash in case it is missing */
            char *p1 = av_asprintf("%s/", p);
            if (!p1) {
                ret = AVERROR(ENOMEM);
                goto check_path_end;
            }
            ret = load_path(ctx, &s->dl_handle, p1, dl_name);
            av_free(p1);
            if (ret < 0)
                goto check_path_end;
            if (s->dl_handle)
                break;
        }

    check_path_end:
        av_free(path);
        if (ret < 0)
            return ret;
    }
    if (!s->dl_handle && (path = getenv("HOME"))) {
        char *prefix = av_asprintf("%s/.frei0r-1/lib/", path);
        if (!prefix)
            return AVERROR(ENOMEM);
        ret = load_path(ctx, &s->dl_handle, prefix, dl_name);
        av_free(prefix);
        if (ret < 0)
            return ret;
    }
    for (i = 0; !s->dl_handle && i < FF_ARRAY_ELEMS(frei0r_pathlist); i++) {
        ret = load_path(ctx, &s->dl_handle, frei0r_pathlist[i], dl_name);
        if (ret < 0)
            return ret;
    }
    if (!s->dl_handle) {
        av_log(ctx, AV_LOG_ERROR, "Could not find module '%s'.\n", dl_name);
        return AVERROR(EINVAL);
    }

    if (!(f0r_init                = load_sym(ctx, "f0r_init"           )) ||
        !(f0r_get_plugin_info     = load_sym(ctx, "f0r_get_plugin_info")) ||
        !(s->get_param_info  = load_sym(ctx, "f0r_get_param_info" )) ||
        !(s->get_param_value = load_sym(ctx, "f0r_get_param_value")) ||
        !(s->set_param_value = load_sym(ctx, "f0r_set_param_value")) ||
        !(s->update          = load_sym(ctx, "f0r_update"         )) ||
        !(s->construct       = load_sym(ctx, "f0r_construct"      )) ||
        !(s->destruct        = load_sym(ctx, "f0r_destruct"       )) ||
        !(s->deinit          = load_sym(ctx, "f0r_deinit"         )))
        return AVERROR(EINVAL);

    if (f0r_init() < 0) {
        av_log(ctx, AV_LOG_ERROR, "Could not init the frei0r module.\n");
        return AVERROR(EINVAL);
    }

    f0r_get_plugin_info(&s->plugin_info);
    pi = &s->plugin_info;
    if (pi->plugin_type != type) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid type '%s' for this plugin\n",
               pi->plugin_type == F0R_PLUGIN_TYPE_FILTER ? "filter" :
               pi->plugin_type == F0R_PLUGIN_TYPE_SOURCE ? "source" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER2 ? "mixer2" :
               pi->plugin_type == F0R_PLUGIN_TYPE_MIXER3 ? "mixer3" : "unknown");
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_VERBOSE,
           "name:%s author:'%s' explanation:'%s' color_model:%s "
           "frei0r_version:%d version:%d.%d num_params:%d\n",
           pi->name, pi->author, pi->explanation,
           pi->color_model == F0R_COLOR_MODEL_BGRA8888 ? "bgra8888" :
           pi->color_model == F0R_COLOR_MODEL_RGBA8888 ? "rgba8888" :
           pi->color_model == F0R_COLOR_MODEL_PACKED32 ? "packed32" : "unknown",
           pi->frei0r_version, pi->major_version, pi->minor_version, pi->num_params);

    return 0;
}
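frei0r_init() walks FREI0R_PATH by tokenizing a private copy with av_strtok(), which modifies the string it scans. A minimal sketch of that loop pattern on a hard-coded path list:

#include <stdio.h>
#include <libavutil/avstring.h>
#include <libavutil/mem.h>

int main(void)
{
    /* av_strtok() is destructive, so work on a copy, as frei0r_init() does */
    char *path = av_strdup("/usr/local/lib/frei0r-1:/usr/lib/frei0r-1");
    char *p, *ptr = NULL;

    if (!path)
        return 1;

    for (p = path; (p = av_strtok(p, ":", &ptr)); p = NULL)
        printf("search dir: %s/\n", p);

    av_free(path);
    return 0;
}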
Exemple #18
0
static bool recreate_graph(struct af_instance *af, struct mp_audio *config)
{
    void *tmp = talloc_new(NULL);
    struct priv *p = af->priv;
    AVFilterContext *in = NULL, *out = NULL, *f_format = NULL;

    if (bstr0(p->cfg_graph).len == 0) {
        MP_FATAL(af, "lavfi: no filter graph set\n");
        return false;
    }

    destroy_graph(af);
    MP_VERBOSE(af, "lavfi: create graph: '%s'\n", p->cfg_graph);

    AVFilterGraph *graph = avfilter_graph_alloc();
    if (!graph)
        goto error;

    if (mp_set_avopts(af->log, graph, p->cfg_avopts) < 0)
        goto error;

    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    if (!outputs || !inputs)
        goto error;

    // Build list of acceptable output sample formats. libavfilter will insert
    // conversion filters if needed.
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    char *fmtstr = talloc_strdup(tmp, "");
    for (int n = 0; sample_fmts[n] != AV_SAMPLE_FMT_NONE; n++) {
        const char *name = av_get_sample_fmt_name(sample_fmts[n]);
        if (name) {
            const char *s = fmtstr[0] ? "|" : "";
            fmtstr = talloc_asprintf_append_buffer(fmtstr, "%s%s", s, name);
        }
    }

    char *src_args = talloc_asprintf(tmp,
        "sample_rate=%d:sample_fmt=%s:time_base=%d/%d:"
        "channel_layout=0x%"PRIx64,  config->rate,
        av_get_sample_fmt_name(af_to_avformat(config->format)),
        1, config->rate, mp_chmap_to_lavc(&config->channels));

    if (avfilter_graph_create_filter(&in, avfilter_get_by_name("abuffer"),
                                     "src", src_args, NULL, graph) < 0)
        goto error;

    if (avfilter_graph_create_filter(&out, avfilter_get_by_name("abuffersink"),
                                     "out", NULL, NULL, graph) < 0)
        goto error;

    if (avfilter_graph_create_filter(&f_format, avfilter_get_by_name("aformat"),
                                     "format", fmtstr, NULL, graph) < 0)
        goto error;

    if (avfilter_link(f_format, 0, out, 0) < 0)
        goto error;

    outputs->name = av_strdup("in");
    outputs->filter_ctx = in;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = f_format;

    if (graph_parse(graph, p->cfg_graph, inputs, outputs, NULL) < 0)
        goto error;

    if (avfilter_graph_config(graph, NULL) < 0)
        goto error;

    p->in = in;
    p->out = out;
    p->graph = graph;

    assert(out->nb_inputs == 1);
    assert(in->nb_outputs == 1);

    talloc_free(tmp);
    return true;

error:
    MP_FATAL(af, "Can't configure libavfilter graph.\n");
    avfilter_graph_free(&graph);
    talloc_free(tmp);
    return false;
}
Exemple #19
0
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret;
    AVFilter *abuffersrc  = avfilter_get_by_name("abuffer");
    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
    AVABufferSinkParams *abuffersink_params;
    const AVFilterLink *outlink;
    AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;

    filter_graph = avfilter_graph_alloc();

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */
    if (!dec_ctx->channel_layout)
        dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
    snprintf(args, sizeof(args),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
             time_base.num, time_base.den, dec_ctx->sample_rate,
             av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
        return ret;
    }

    /* buffer audio sink: to terminate the filter chain. */
    abuffersink_params = av_abuffersink_params_alloc();
    abuffersink_params->sample_fmts     = sample_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
                                       NULL, abuffersink_params, filter_graph);
    av_free(abuffersink_params);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0)
        return ret;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        return ret;

    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    outlink = buffersink_ctx->inputs[0];
    av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
    av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
           (int)outlink->sample_rate,
           (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
           args);

    return 0;
}
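init_filters() falls back to av_get_default_channel_layout() when the decoder reports no layout, and prints the layout with av_get_channel_layout_string(). A small sketch of those two helpers, assuming an FFmpeg version that still provides the legacy uint64_t channel-layout API used throughout this example:

#include <stdio.h>
#include <inttypes.h>
#include <libavutil/channel_layout.h>

int main(void)
{
    char name[64];
    /* guess a layout from a channel count, then print its canonical name */
    uint64_t layout = (uint64_t)av_get_default_channel_layout(2);

    av_get_channel_layout_string(name, sizeof(name), -1, layout);
    printf("layout 0x%" PRIx64 " -> %s (%d channels)\n",
           layout, name, av_get_channel_layout_nb_channels(layout));
    return 0;
}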
Exemple #20
0
static int mtv_read_header(AVFormatContext *s)
{
    MTVDemuxContext *mtv = s->priv_data;
    AVIOContext   *pb  = s->pb;
    AVStream        *st;
    unsigned int    audio_subsegments;

    avio_skip(pb, 3);
    mtv->file_size         = avio_rl32(pb);
    mtv->segments          = avio_rl32(pb);
    avio_skip(pb, 32);
    mtv->audio_identifier  = avio_rl24(pb);
    mtv->audio_br          = avio_rl16(pb);
    mtv->img_colorfmt      = avio_rl24(pb);
    mtv->img_bpp           = avio_r8(pb)>>3;
    mtv->img_width         = avio_rl16(pb);
    mtv->img_height        = avio_rl16(pb);
    mtv->img_segment_size  = avio_rl16(pb);

    /* Assume 16bpp even if claimed otherwise.
     * We know it's going to be RGB565/555 anyway.
     */
    if (mtv->img_bpp != MTV_IMAGE_DEFAULT_BPP) {
        av_log (s, AV_LOG_WARNING, "Header claims %dbpp (!= 16). Ignoring\n",
                mtv->img_bpp);
        mtv->img_bpp = MTV_IMAGE_DEFAULT_BPP;
    }

    /* Calculate width and height if missing from header */

    if(!mtv->img_width && mtv->img_height)
        mtv->img_width=mtv->img_segment_size / (mtv->img_bpp)
                        / mtv->img_height;

    if(!mtv->img_height && mtv->img_width)
        mtv->img_height=mtv->img_segment_size / (mtv->img_bpp)
                        / mtv->img_width;

    if(!mtv->img_height || !mtv->img_width || !mtv->img_segment_size){
        av_log(s, AV_LOG_ERROR, "width or height or segment_size is invalid and I cannot calculate them from other information\n");
        return AVERROR(EINVAL);
    }

    avio_skip(pb, 4);
    audio_subsegments = avio_rl16(pb);

    if (audio_subsegments == 0) {
        avpriv_request_sample(s, "MTV files without audio");
        return AVERROR_PATCHWELCOME;
    }

    mtv->full_segment_size =
        audio_subsegments * (MTV_AUDIO_PADDING_SIZE + MTV_ASUBCHUNK_DATA_SIZE) +
        mtv->img_segment_size;
    mtv->video_fps         = (mtv->audio_br / 4) / audio_subsegments;

    // FIXME Add sanity check here

    // all systems go! init decoders

    // video - raw rgb565

    st = avformat_new_stream(s, NULL);
    if(!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, mtv->video_fps);
    st->codec->codec_type      = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id        = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt         = AV_PIX_FMT_RGB565BE;
    st->codec->width           = mtv->img_width;
    st->codec->height          = mtv->img_height;
    st->codec->extradata       = av_strdup("BottomUp");
    st->codec->extradata_size  = 9;

    // audio - mp3

    st = avformat_new_stream(s, NULL);
    if(!st)
        return AVERROR(ENOMEM);

    avpriv_set_pts_info(st, 64, 1, MTV_AUDIO_SAMPLING_RATE);
    st->codec->codec_type      = AVMEDIA_TYPE_AUDIO;
    st->codec->codec_id        = AV_CODEC_ID_MP3;
    st->codec->bit_rate        = mtv->audio_br;
    st->need_parsing           = AVSTREAM_PARSE_FULL;

    // Jump over header

    if(avio_seek(pb, MTV_HEADER_SIZE, SEEK_SET) != MTV_HEADER_SIZE)
        return AVERROR(EIO);

    return 0;

}
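mtv_read_header() reads its fixed-layout header with the little-endian avio_rl*()/avio_r8() helpers. A small hedged sketch of the same read pattern on an arbitrary file (the offsets mirror the MTV header only loosely; the labels are illustrative):

#include <stdio.h>
#include <libavformat/avio.h>

int main(int argc, char **argv)
{
    AVIOContext *pb = NULL;

    if (argc < 2 || avio_open(&pb, argv[1], AVIO_FLAG_READ) < 0)
        return 1;

    avio_skip(pb, 3);                               /* skip a 3-byte magic */
    printf("32-bit LE field: %u\n", avio_rl32(pb));
    printf("next byte:       %d\n", avio_r8(pb));

    avio_closep(&pb);
    return 0;
}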
Exemple #21
0
void parse_options(int argc, char **argv, const OptionDef *options,
                   void (* parse_arg_function)(const char*))
{
    const char *opt, *arg;
    int optindex, handleoptions=1;
    const OptionDef *po;

    /* parse options */
    optindex = 1;
    while (optindex < argc) {
        opt = argv[optindex++];

        if (handleoptions && opt[0] == '-' && opt[1] != '\0') {
            int bool_val = 1;
            if (opt[1] == '-' && opt[2] == '\0') {
                handleoptions = 0;
                continue;
            }
            opt++;
            po= find_option(options, opt);
            if (!po->name && opt[0] == 'n' && opt[1] == 'o') {
                /* handle 'no' bool option */
                po = find_option(options, opt + 2);
                if (!(po->name && (po->flags & OPT_BOOL)))
                    goto unknown_opt;
                bool_val = 0;
            }
            if (!po->name)
                po= find_option(options, "default");
            if (!po->name) {
unknown_opt:
                fprintf(stderr, "%s: unrecognized option '%s'\n", argv[0], opt);
                exit(1);
            }
            arg = NULL;
            if (po->flags & HAS_ARG) {
                arg = argv[optindex++];
                if (!arg) {
                    fprintf(stderr, "%s: missing argument for option '%s'\n", argv[0], opt);
                    exit(1);
                }
            }
            if (po->flags & OPT_STRING) {
                char *str;
                str = av_strdup(arg);
                *po->u.str_arg = str;
            } else if (po->flags & OPT_BOOL) {
                *po->u.int_arg = bool_val;
            } else if (po->flags & OPT_INT) {
                *po->u.int_arg = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
            } else if (po->flags & OPT_INT64) {
                *po->u.int64_arg = parse_number_or_die(opt, arg, OPT_INT64, INT64_MIN, INT64_MAX);
            } else if (po->flags & OPT_FLOAT) {
                *po->u.float_arg = parse_number_or_die(opt, arg, OPT_FLOAT, -1.0/0.0, 1.0/0.0);
            } else if (po->flags & OPT_FUNC2) {
                if (po->u.func2_arg(opt, arg) < 0) {
                    fprintf(stderr, "%s: failed to set value '%s' for option '%s'\n", argv[0], arg, opt);
                    exit(1);
                }
            } else {
                po->u.func_arg(arg);
            }
            if(po->flags & OPT_EXIT)
                exit(0);
        } else {
            if (parse_arg_function)
                parse_arg_function(opt);
        }
    }
}
Exemple #22
0
static vod_status_t
audio_filter_init_source(
	request_context_t* request_context,
	AVFilterGraph *filter_graph,
	const u_char* source_name,
	media_info_t* media_info,
	audio_filter_source_t* source, 
	AVFilterInOut** outputs)
{
	char filter_args[sizeof(BUFFERSRC_ARGS_FORMAT) + 4 * VOD_INT64_LEN + MAX_SAMPLE_FORMAT_NAME_LEN];
	AVCodecContext* decoder;
	AVFilterInOut* output_link;
	uint8_t channel_config;
	int avrc;

	// init the decoder	
	decoder = avcodec_alloc_context3(decoder_codec);
	if (decoder == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: avcodec_alloc_context3 failed");
		return VOD_ALLOC_FAILED;
	}

	source->decoder = decoder;

	decoder->codec_tag = media_info->format;
	decoder->bit_rate = media_info->bitrate;
	decoder->time_base.num = 1;
	decoder->time_base.den = media_info->frames_timescale;
	decoder->pkt_timebase = decoder->time_base;
	decoder->extradata = (u_char*)media_info->extra_data;
	decoder->extradata_size = media_info->extra_data_size;
	decoder->channels = media_info->u.audio.channels;
	decoder->bits_per_coded_sample = media_info->u.audio.bits_per_sample;
	decoder->sample_rate = media_info->u.audio.sample_rate;
	channel_config = media_info->u.audio.codec_config.channel_config;
	if (channel_config < vod_array_entries(aac_channel_layout))
	{
		decoder->channel_layout = aac_channel_layout[channel_config];
	}
	else
	{
		decoder->channel_layout = 0;
	}

	avrc = avcodec_open2(decoder, decoder_codec, NULL);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: avcodec_open2 failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	// create the buffer source
	vod_sprintf((u_char*)filter_args, BUFFERSRC_ARGS_FORMAT,
		decoder->time_base.num,
		decoder->time_base.den,
		decoder->sample_rate,
		av_get_sample_fmt_name(decoder->sample_fmt),
		decoder->channel_layout);

	avrc = avfilter_graph_create_filter(
		&source->buffer_src,
		buffersrc_filter,
		(char*)source_name,
		filter_args,
		NULL,
		filter_graph);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: avfilter_graph_create_filter failed %d", avrc);
		return VOD_ALLOC_FAILED;
	}

	// add to the outputs list
	output_link = avfilter_inout_alloc();
	if (output_link == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: avfilter_inout_alloc failed");
		return VOD_ALLOC_FAILED;
	}
	output_link->filter_ctx = source->buffer_src;
	output_link->pad_idx = 0;
	output_link->next = *outputs;
	*outputs = output_link;

	output_link->name = av_strdup((char*)source_name);
	if (output_link->name == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: av_strdup failed");
		return VOD_ALLOC_FAILED;
	}

	return VOD_OK;
}
Exemple #23
0
int av_metadata_set_custom(AVMetadata **pm, AVMetadataTag **ret, enum AVMetadataType type,
                           const char *key, const char *value, unsigned len,
                           int flags)
{
    AVMetadata *m= *pm;
    AVMetadataTag *tag= av_metadata_get(m, key, NULL, flags);

    if(!key || !pm || !len || !value)
        return -1;

    if (ret)
        *ret = NULL;

    if(!m)
        m=*pm= av_mallocz(sizeof(*m));
    if(!m)
        return AVERROR(ENOMEM);

    if(tag){
        if (flags & AV_METADATA_DONT_OVERWRITE)
            return 0;
        av_metadata_free_tag(tag);
    }else{
        AVMetadataTag *tmp;
        if(m->count >= UINT_MAX / sizeof(*m->elems))
            return -1;
        tmp= av_realloc(m->elems, (m->count+1) * sizeof(*m->elems));
        if(tmp){
            m->elems= tmp;
        }else
            return AVERROR(ENOMEM);
        tag= &m->elems[m->count++];
    }

    if (flags & AV_METADATA_DONT_STRDUP_KEY)
        tag->key = key;
    else
        tag->key = av_strdup(key);

    if (flags & AV_METADATA_DONT_STRDUP_VAL) {
        tag->value = value;
    } else if (type == METADATA_BYTEARRAY) {
        tag->value = av_malloc(len);
        if (!tag->value)
            return AVERROR(ENOMEM);
        memcpy(tag->value, value, len);
    } else {
        tag->value = av_malloc(len+1);
        if (!tag->value)
            return AVERROR(ENOMEM);
        memcpy(tag->value, value, len);
        tag->value[len] = 0;
    }

    tag->len = len;
    tag->type = type;
    tag->attributes = NULL;

    if (ret)
        *ret = tag;

    return 0;
}
Exemple #24
0
static vod_status_t
audio_filter_init_sink(
	request_context_t* request_context,
	AVFilterGraph *filter_graph,
	media_track_t* reference_track,
	const u_char* sink_name,
	audio_filter_sink_t* sink, 
	AVFilterInOut** inputs)
{
	AVCodecContext* encoder;
	AVFilterInOut* input_link;
	enum AVSampleFormat out_sample_fmts[2];
	int64_t out_channel_layouts[2];
	uint64_t channel_layout;
	uint8_t channel_config;
	int out_sample_rates[2];
	int avrc;

	// Note: the output is matched to some reference track; this may need to change in the future
	//		if filters such as 'join' are added

	channel_config = reference_track->media_info.u.audio.codec_config.channel_config;
	if (channel_config < vod_array_entries(aac_channel_layout))
	{
		channel_layout = aac_channel_layout[channel_config];
	}
	else
	{
		channel_layout = 0;
	}

	// create the buffer sink
	avrc = avfilter_graph_create_filter(
		&sink->buffer_sink,
		buffersink_filter,
		(char*)sink_name,
		NULL,
		NULL,
		filter_graph);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: avfilter_graph_create_filter failed %d", avrc);
		return VOD_ALLOC_FAILED;
	}

	// configure the buffer sink
	out_sample_fmts[0] = ENCODER_INPUT_SAMPLE_FORMAT;
	out_sample_fmts[1] = -1;
	avrc = av_opt_set_int_list(
		sink->buffer_sink,
		BUFFERSINK_PARAM_SAMPLE_FORMATS,
		out_sample_fmts,
		-1,
		AV_OPT_SEARCH_CHILDREN);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: av_opt_set_int_list(sample formats) failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	out_channel_layouts[0] = channel_layout;
	out_channel_layouts[1] = -1;
	avrc = av_opt_set_int_list(
		sink->buffer_sink,
		BUFFERSINK_PARAM_CHANNEL_LAYOUTS,
		out_channel_layouts,
		-1,
		AV_OPT_SEARCH_CHILDREN);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: av_opt_set_int_list(channel layouts) failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	out_sample_rates[0] = reference_track->media_info.u.audio.sample_rate;
	out_sample_rates[1] = -1;
	avrc = av_opt_set_int_list(
		sink->buffer_sink,
		BUFFERSINK_PARAM_SAMPLE_RATES,
		out_sample_rates,
		-1,
		AV_OPT_SEARCH_CHILDREN);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: av_opt_set_int_list(sample rates) failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	// init the encoder
	encoder = avcodec_alloc_context3(encoder_codec);
	if (encoder == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: avcodec_alloc_context3 failed");
		return VOD_ALLOC_FAILED;
	}

	sink->encoder = encoder;

	encoder->sample_fmt = ENCODER_INPUT_SAMPLE_FORMAT;
	encoder->sample_rate = reference_track->media_info.u.audio.sample_rate;
	encoder->channel_layout = channel_layout;
	encoder->channels = reference_track->media_info.u.audio.channels;
	encoder->bit_rate = reference_track->media_info.bitrate;
	encoder->flags |= CODEC_FLAG_GLOBAL_HEADER;		// make the codec generate the extra data

	avrc = avcodec_open2(encoder, encoder_codec, NULL);
	if (avrc < 0)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: avcodec_open2 failed %d", avrc);
		return VOD_UNEXPECTED;
	}

	// add to the inputs list
	input_link = avfilter_inout_alloc();
	if (input_link == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_sink: avfilter_inout_alloc failed");
		return VOD_ALLOC_FAILED;
	}

	input_link->filter_ctx = sink->buffer_sink;
	input_link->pad_idx = 0;
	input_link->next = *inputs;
	*inputs = input_link;

	input_link->name = av_strdup((const char*)sink_name);
	if (input_link->name == NULL)
	{
		vod_log_error(VOD_LOG_ERR, request_context->log, 0,
			"audio_filter_init_source: av_strdup failed");
		return VOD_ALLOC_FAILED;
	}

	return VOD_OK;
}
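audio_filter_init_sink() constrains the buffer sink with av_opt_set_int_list(); the BUFFERSINK_PARAM_* names are this project's own macros for the standard abuffersink options. A hedged sketch of the same calls using the option names from FFmpeg's filtering examples ("sample_fmts", "sample_rates"), assuming FFmpeg 4.0+ where filters are auto-registered:

#include <libavfilter/avfilter.h>
#include <libavutil/opt.h>
#include <libavutil/samplefmt.h>

int main(void)
{
    const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    const int rates[] = { 48000, -1 };
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *sink = NULL;
    int ret;

    if (!graph)
        return 1;

    ret = avfilter_graph_create_filter(&sink, avfilter_get_by_name("abuffersink"),
                                       "out", NULL, NULL, graph);
    /* -1 terminates both lists, matching the sentinel stored in each array */
    if (ret >= 0)
        ret = av_opt_set_int_list(sink, "sample_fmts", fmts, -1, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        ret = av_opt_set_int_list(sink, "sample_rates", rates, -1, AV_OPT_SEARCH_CHILDREN);

    avfilter_graph_free(&graph);
    return ret < 0;
}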
Exemple #25
0
static int sap_read_header(AVFormatContext *s,
                           AVFormatParameters *ap)
{
    struct SAPState *sap = s->priv_data;
    char host[1024], path[1024], url[1024];
    uint8_t recvbuf[1500];
    int port;
    int ret, i;
    AVInputFormat* infmt;

    if (!ff_network_init())
        return AVERROR(EIO);

    av_url_split(NULL, 0, NULL, 0, host, sizeof(host), &port,
                 path, sizeof(path), s->filename);
    if (port < 0)
        port = 9875;

    if (!host[0]) {
        /* Listen for announcements on sap.mcast.net if no host was specified */
        av_strlcpy(host, "224.2.127.254", sizeof(host));
    }

    ff_url_join(url, sizeof(url), "udp", NULL, host, port, "?localport=%d",
                port);
    ret = ffurl_open(&sap->ann_fd, url, AVIO_RDONLY);
    if (ret)
        goto fail;

    while (1) {
        int addr_type, auth_len;
        int pos;

        ret = ffurl_read(sap->ann_fd, recvbuf, sizeof(recvbuf) - 1);
        if (ret == AVERROR(EAGAIN))
            continue;
        if (ret < 0)
            goto fail;
        recvbuf[ret] = '\0'; /* Null terminate for easier parsing */
        if (ret < 8) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }

        if ((recvbuf[0] & 0xe0) != 0x20) {
            av_log(s, AV_LOG_WARNING, "Unsupported SAP version packet "
                                      "received\n");
            continue;
        }

        if (recvbuf[0] & 0x04) {
            av_log(s, AV_LOG_WARNING, "Received stream deletion "
                                      "announcement\n");
            continue;
        }
        addr_type = recvbuf[0] & 0x10;
        auth_len  = recvbuf[1];
        sap->hash = AV_RB16(&recvbuf[2]);
        pos = 4;
        if (addr_type)
            pos += 16; /* IPv6 */
        else
            pos += 4; /* IPv4 */
        pos += auth_len * 4;
        if (pos + 4 >= ret) {
            av_log(s, AV_LOG_WARNING, "Received too short packet\n");
            continue;
        }
#define MIME "application/sdp"
        if (strcmp(&recvbuf[pos], MIME) == 0) {
            pos += strlen(MIME) + 1;
        } else if (strncmp(&recvbuf[pos], "v=0\r\n", 5) == 0) {
            // Direct SDP without a mime type
        } else {
            av_log(s, AV_LOG_WARNING, "Unsupported mime type %s\n",
                                      &recvbuf[pos]);
            continue;
        }

        sap->sdp = av_strdup(&recvbuf[pos]);
        break;
    }

    av_log(s, AV_LOG_VERBOSE, "SDP:\n%s\n", sap->sdp);
    ffio_init_context(&sap->sdp_pb, sap->sdp, strlen(sap->sdp), 0, NULL, NULL,
                  NULL, NULL);

    infmt = av_find_input_format("sdp");
    if (!infmt)
        goto fail;
    sap->sdp_ctx = avformat_alloc_context();
    if (!sap->sdp_ctx) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    sap->sdp_ctx->max_delay = s->max_delay;
    sap->sdp_ctx->pb        = &sap->sdp_pb;
    ret = avformat_open_input(&sap->sdp_ctx, "temp.sdp", infmt, NULL);
    if (ret < 0)
        goto fail;
    if (sap->sdp_ctx->ctx_flags & AVFMTCTX_NOHEADER)
        s->ctx_flags |= AVFMTCTX_NOHEADER;
    for (i = 0; i < sap->sdp_ctx->nb_streams; i++) {
        AVStream *st = av_new_stream(s, i);
        if (!st) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        avcodec_copy_context(st->codec, sap->sdp_ctx->streams[i]->codec);
        st->time_base = sap->sdp_ctx->streams[i]->time_base;
    }

    return 0;

fail:
    sap_read_close(s);
    return ret;
}
Exemple #26
0
static int microdvd_read_header(AVFormatContext *s)
{
    AVRational pts_info = (AVRational){ 2997, 125 };  /* default: 23.976 fps */
    MicroDVDContext *microdvd = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    int i = 0;
    char line[MAX_LINESIZE];

    if (!st)
        return AVERROR(ENOMEM);

    while (!url_feof(s->pb)) {
        char *p = line;
        AVPacket *sub;
        int64_t pos = avio_tell(s->pb);
        int len = ff_get_line(s->pb, line, sizeof(line));

        if (!len)
            break;
        line[strcspn(line, "\r\n")] = 0;
        if (i++ < 3) {
            int frame;
            double fps;
            char c;

            if ((sscanf(line, "{%d}{}%6lf",    &frame, &fps) == 2 ||
                 sscanf(line, "{%d}{%*d}%6lf", &frame, &fps) == 2)
                && frame <= 1 && fps > 3 && fps < 100)
                pts_info = av_d2q(fps, 100000);
            if (!st->codec->extradata && sscanf(line, "{DEFAULT}{}%c", &c) == 1) {
                st->codec->extradata = av_strdup(line + 11);
                if (!st->codec->extradata)
                    return AVERROR(ENOMEM);
                st->codec->extradata_size = strlen(st->codec->extradata) + 1;
                continue;
            }
        }
#define SKIP_FRAME_ID                                       \
    p = strchr(p, '}');                                     \
    if (!p) {                                               \
        av_log(s, AV_LOG_WARNING, "Invalid event \"%s\""    \
               " at line %d\n", line, i);                   \
        continue;                                           \
    }                                                       \
    p++
        SKIP_FRAME_ID;
        SKIP_FRAME_ID;
        if (!*p)
            continue;
        sub = ff_subtitles_queue_insert(&microdvd->q, p, strlen(p), 0);
        if (!sub)
            return AVERROR(ENOMEM);
        sub->pos = pos;
        sub->pts = get_pts(line);
        sub->duration = get_duration(line);
    }
    ff_subtitles_queue_finalize(&microdvd->q);
    avpriv_set_pts_info(st, 64, pts_info.den, pts_info.num);
    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
    st->codec->codec_id   = AV_CODEC_ID_MICRODVD;
    return 0;
}
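microdvd_read_header() converts the frame rate parsed from the first lines into an AVRational with av_d2q(). A one-call sketch of that conversion:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* turn a floating-point frame rate into a rational, as the demuxer
     * does before handing it to avpriv_set_pts_info() */
    AVRational r = av_d2q(23.976, 100000);

    printf("%d/%d (~%f fps)\n", r.num, r.den, av_q2d(r));
    return 0;
}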
Exemple #27
0
int ff_img_read_header(AVFormatContext *s1)
{
    VideoDemuxData *s = s1->priv_data;
    int first_index, last_index;
    AVStream *st;
    enum AVPixelFormat pix_fmt = AV_PIX_FMT_NONE;

    s1->ctx_flags |= AVFMTCTX_NOHEADER;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    if (s->pixel_format &&
        (pix_fmt = av_get_pix_fmt(s->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(s1, AV_LOG_ERROR, "No such pixel format: %s.\n",
               s->pixel_format);
        return AVERROR(EINVAL);
    }

    av_strlcpy(s->path, s1->filename, sizeof(s->path));
    s->img_number = 0;
    s->img_count  = 0;

    /* find format */
    if (s1->iformat->flags & AVFMT_NOFILE)
        s->is_pipe = 0;
    else {
        s->is_pipe       = 1;
        st->need_parsing = AVSTREAM_PARSE_FULL;
    }

    if (s->ts_from_file == 2) {
#if !HAVE_STRUCT_STAT_ST_MTIM_TV_NSEC
        av_log(s1, AV_LOG_ERROR, "POSIX.1-2008 not supported, nanosecond file timestamps unavailable\n");
        return AVERROR(ENOSYS);
#endif
        avpriv_set_pts_info(st, 64, 1, 1000000000);
    } else if (s->ts_from_file)
        avpriv_set_pts_info(st, 64, 1, 1);
    else
        avpriv_set_pts_info(st, 64, s->framerate.den, s->framerate.num);

    if (s->width && s->height) {
        st->codec->width  = s->width;
        st->codec->height = s->height;
    }

    if (!s->is_pipe) {
        if (s->pattern_type == PT_GLOB_SEQUENCE) {
        s->use_glob = is_glob(s->path);
        if (s->use_glob) {
            char *p = s->path, *q, *dup;
            int gerr;

            av_log(s1, AV_LOG_WARNING, "Pattern type 'glob_sequence' is deprecated: "
                   "use pattern_type 'glob' instead\n");
#if HAVE_GLOB
            dup = q = av_strdup(p);
            while (*q) {
                /* Do we have room for the next char and a \ insertion? */
                if ((p - s->path) >= (sizeof(s->path) - 2))
                  break;
                if (*q == '%' && strspn(q + 1, "%*?[]{}"))
                    ++q;
                else if (strspn(q, "\\*?[]{}"))
                    *p++ = '\\';
                *p++ = *q++;
            }
            *p = 0;
            av_free(dup);

            gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
            if (gerr != 0) {
                return AVERROR(ENOENT);
            }
            first_index = 0;
            last_index = s->globstate.gl_pathc - 1;
#endif
        }
        }
        if ((s->pattern_type == PT_GLOB_SEQUENCE && !s->use_glob) || s->pattern_type == PT_SEQUENCE) {
            if (find_image_range(&first_index, &last_index, s->path,
                                 s->start_number, s->start_number_range) < 0) {
                av_log(s1, AV_LOG_ERROR,
                       "Could find no file with path '%s' and index in the range %d-%d\n",
                       s->path, s->start_number, s->start_number + s->start_number_range - 1);
                return AVERROR(ENOENT);
            }
        } else if (s->pattern_type == PT_GLOB) {
#if HAVE_GLOB
            int gerr;
            gerr = glob(s->path, GLOB_NOCHECK|GLOB_BRACE|GLOB_NOMAGIC, NULL, &s->globstate);
            if (gerr != 0) {
                return AVERROR(ENOENT);
            }
            first_index = 0;
            last_index = s->globstate.gl_pathc - 1;
            s->use_glob = 1;
#else
            av_log(s1, AV_LOG_ERROR,
                   "Pattern type 'glob' was selected but globbing "
                   "is not supported by this libavformat build\n");
            return AVERROR(ENOSYS);
#endif
        } else if (s->pattern_type != PT_GLOB_SEQUENCE) {
            av_log(s1, AV_LOG_ERROR,
                   "Unknown value '%d' for pattern_type option\n", s->pattern_type);
            return AVERROR(EINVAL);
        }
        s->img_first  = first_index;
        s->img_last   = last_index;
        s->img_number = first_index;
        /* compute duration */
        if (!s->ts_from_file) {
            st->start_time = 0;
            st->duration   = last_index - first_index + 1;
        }
    }

    if (s1->video_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = s1->video_codec_id;
    } else if (s1->audio_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id   = s1->audio_codec_id;
    } else if (s1->iformat->raw_codec_id) {
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = s1->iformat->raw_codec_id;
    } else {
        const char *str = strrchr(s->path, '.');
        s->split_planes       = str && !av_strcasecmp(str + 1, "y");
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        if (s1->pb) {
            int probe_buffer_size = 2048;
            uint8_t *probe_buffer = av_realloc(NULL, probe_buffer_size + AVPROBE_PADDING_SIZE);
            AVInputFormat *fmt = NULL;
            AVProbeData pd = { 0 };

            if (!probe_buffer)
                return AVERROR(ENOMEM);

            probe_buffer_size = avio_read(s1->pb, probe_buffer, probe_buffer_size);
            if (probe_buffer_size < 0) {
                av_free(probe_buffer);
                return probe_buffer_size;
            }
            memset(probe_buffer + probe_buffer_size, 0, AVPROBE_PADDING_SIZE);

            pd.buf = probe_buffer;
            pd.buf_size = probe_buffer_size;
            pd.filename = s1->filename;

            while ((fmt = av_iformat_next(fmt))) {
                if (fmt->read_header != ff_img_read_header ||
                    !fmt->read_probe ||
                    (fmt->flags & AVFMT_NOFILE) ||
                    !fmt->raw_codec_id)
                    continue;
                if (fmt->read_probe(&pd) > 0) {
                    st->codec->codec_id = fmt->raw_codec_id;
                    break;
                }
            }
            if (s1->flags & AVFMT_FLAG_CUSTOM_IO) {
                avio_seek(s1->pb, 0, SEEK_SET);
            } else
                ffio_rewind_with_probe_data(s1->pb, &probe_buffer, probe_buffer_size);
        }
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            st->codec->codec_id = ff_guess_image2_codec(s->path);
        if (st->codec->codec_id == AV_CODEC_ID_LJPEG)
            st->codec->codec_id = AV_CODEC_ID_MJPEG;
        if (st->codec->codec_id == AV_CODEC_ID_ALIAS_PIX) // we cannot distinguish this from BRENDER_PIX
            st->codec->codec_id = AV_CODEC_ID_NONE;
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
        pix_fmt != AV_PIX_FMT_NONE)
        st->codec->pix_fmt = pix_fmt;

    return 0;
}
Exemple #28
0
int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name)
{
    AVFilterContext *ret;
    *filter_ctx = NULL;

    if (!filter)
        return AVERROR(EINVAL);

    ret = av_mallocz(sizeof(AVFilterContext));
    if (!ret)
        return AVERROR(ENOMEM);

    ret->av_class = &avfilter_class;
    ret->filter   = filter;
    ret->name     = inst_name ? av_strdup(inst_name) : NULL;
    if (filter->priv_size) {
        ret->priv     = av_mallocz(filter->priv_size);
        if (!ret->priv)
            goto err;
    }

    ret->nb_inputs = pad_count(filter->inputs);
    if (ret->nb_inputs ) {
        ret->input_pads   = av_malloc(sizeof(AVFilterPad) * ret->nb_inputs);
        if (!ret->input_pads)
            goto err;
        memcpy(ret->input_pads, filter->inputs, sizeof(AVFilterPad) * ret->nb_inputs);
        ret->inputs       = av_mallocz(sizeof(AVFilterLink*) * ret->nb_inputs);
        if (!ret->inputs)
            goto err;
    }

    ret->nb_outputs = pad_count(filter->outputs);
    if (ret->nb_outputs) {
        ret->output_pads  = av_malloc(sizeof(AVFilterPad) * ret->nb_outputs);
        if (!ret->output_pads)
            goto err;
        memcpy(ret->output_pads, filter->outputs, sizeof(AVFilterPad) * ret->nb_outputs);
        ret->outputs      = av_mallocz(sizeof(AVFilterLink*) * ret->nb_outputs);
        if (!ret->outputs)
            goto err;
    }
#if FF_API_FOO_COUNT
    ret->output_count = ret->nb_outputs;
    ret->input_count  = ret->nb_inputs;
#endif

    *filter_ctx = ret;
    return 0;

err:
    av_freep(&ret->inputs);
    av_freep(&ret->input_pads);
    ret->nb_inputs = 0;
    av_freep(&ret->outputs);
    av_freep(&ret->output_pads);
    ret->nb_outputs = 0;
    av_freep(&ret->priv);
    av_free(ret);
    return AVERROR(ENOMEM);
}
Exemple #29
0
static int ffm_read_header(AVFormatContext *s)
{
    FFMContext *ffm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    AVCodecContext *codec;
    int i, nb_streams;
    uint32_t tag;

    /* header */
    tag = avio_rl32(pb);
    if (tag == MKTAG('F', 'F', 'M', '2'))
        return ffm2_read_header(s);
    if (tag != MKTAG('F', 'F', 'M', '1'))
        goto fail;
    ffm->packet_size = avio_rb32(pb);
    if (ffm->packet_size != FFM_PACKET_SIZE)
        goto fail;
    ffm->write_index = avio_rb64(pb);
    /* get also filesize */
    if (pb->seekable) {
        ffm->file_size = avio_size(pb);
        if (ffm->write_index && 0)
            adjust_write_index(s);
    } else {
        ffm->file_size = (UINT64_C(1) << 63) - 1;
    }

    nb_streams = avio_rb32(pb);
    avio_rb32(pb); /* total bitrate */
    /* read each stream */
    for(i=0;i<nb_streams;i++) {
        char rc_eq_buf[128];

        st = avformat_new_stream(s, NULL);
        if (!st)
            goto fail;

        avpriv_set_pts_info(st, 64, 1, 1000000);

        codec = st->codec;
        /* generic info */
        codec->codec_id = avio_rb32(pb);
        codec->codec_type = avio_r8(pb); /* codec_type */
        codec->bit_rate = avio_rb32(pb);
        codec->flags = avio_rb32(pb);
        codec->flags2 = avio_rb32(pb);
        codec->debug = avio_rb32(pb);
        /* specific info */
        switch(codec->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            codec->time_base.num = avio_rb32(pb);
            codec->time_base.den = avio_rb32(pb);
            if (codec->time_base.num <= 0 || codec->time_base.den <= 0) {
                av_log(s, AV_LOG_ERROR, "Invalid time base %d/%d\n",
                       codec->time_base.num, codec->time_base.den);
                goto fail;
            }
            codec->width = avio_rb16(pb);
            codec->height = avio_rb16(pb);
            codec->gop_size = avio_rb16(pb);
            codec->pix_fmt = avio_rb32(pb);
            codec->qmin = avio_r8(pb);
            codec->qmax = avio_r8(pb);
            codec->max_qdiff = avio_r8(pb);
            codec->qcompress = avio_rb16(pb) / 10000.0;
            codec->qblur = avio_rb16(pb) / 10000.0;
            codec->bit_rate_tolerance = avio_rb32(pb);
            avio_get_str(pb, INT_MAX, rc_eq_buf, sizeof(rc_eq_buf));
            codec->rc_eq = av_strdup(rc_eq_buf);
            codec->rc_max_rate = avio_rb32(pb);
            codec->rc_min_rate = avio_rb32(pb);
            codec->rc_buffer_size = avio_rb32(pb);
            codec->i_quant_factor = av_int2double(avio_rb64(pb));
            codec->b_quant_factor = av_int2double(avio_rb64(pb));
            codec->i_quant_offset = av_int2double(avio_rb64(pb));
            codec->b_quant_offset = av_int2double(avio_rb64(pb));
            codec->dct_algo = avio_rb32(pb);
            codec->strict_std_compliance = avio_rb32(pb);
            codec->max_b_frames = avio_rb32(pb);
            codec->mpeg_quant = avio_rb32(pb);
            codec->intra_dc_precision = avio_rb32(pb);
            codec->me_method = avio_rb32(pb);
            codec->mb_decision = avio_rb32(pb);
            codec->nsse_weight = avio_rb32(pb);
            codec->frame_skip_cmp = avio_rb32(pb);
            codec->rc_buffer_aggressivity = av_int2double(avio_rb64(pb));
            codec->codec_tag = avio_rb32(pb);
            codec->thread_count = avio_r8(pb);
            codec->coder_type = avio_rb32(pb);
            codec->me_cmp = avio_rb32(pb);
            codec->me_subpel_quality = avio_rb32(pb);
            codec->me_range = avio_rb32(pb);
            codec->keyint_min = avio_rb32(pb);
            codec->scenechange_threshold = avio_rb32(pb);
            codec->b_frame_strategy = avio_rb32(pb);
            codec->qcompress = av_int2double(avio_rb64(pb));
            codec->qblur = av_int2double(avio_rb64(pb));
            codec->max_qdiff = avio_rb32(pb);
            codec->refs = avio_rb32(pb);
            break;
        case AVMEDIA_TYPE_AUDIO:
            codec->sample_rate = avio_rb32(pb);
            codec->channels = avio_rl16(pb);
            codec->frame_size = avio_rl16(pb);
            break;
        default:
            goto fail;
        }
        if (codec->flags & CODEC_FLAG_GLOBAL_HEADER) {
            if (ff_get_extradata(codec, pb, avio_rb32(pb)) < 0)
                return AVERROR(ENOMEM);
        }
    }

    /* get until end of block reached */
    while ((avio_tell(pb) % ffm->packet_size) != 0 && !pb->eof_reached)
        avio_r8(pb);

    /* init packet demux */
    ffm->packet_ptr = ffm->packet;
    ffm->packet_end = ffm->packet;
    ffm->frame_offset = 0;
    ffm->dts = 0;
    ffm->read_state = READ_HEADER;
    ffm->first_packet = 1;
    return 0;
 fail:
    ffm_close(s);
    return -1;
}
Exemple #30
0
/* add a codec and set the default parameters */
static void add_codec(FFServerStream *stream, AVCodecContext *av)
{
    AVStream *st;

    if(stream->nb_streams >= FF_ARRAY_ELEMS(stream->streams))
        return;

    /* compute default parameters */
    switch(av->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (av->bit_rate == 0)
            av->bit_rate = 64000;
        if (av->sample_rate == 0)
            av->sample_rate = 22050;
        if (av->channels == 0)
            av->channels = 1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (av->bit_rate == 0)
            av->bit_rate = 64000;
        if (av->time_base.num == 0){
            av->time_base.den = 5;
            av->time_base.num = 1;
        }
        if (av->width == 0 || av->height == 0) {
            av->width = 160;
            av->height = 128;
        }
        /* Bitrate tolerance is less for streaming */
        if (av->bit_rate_tolerance == 0)
            av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
                      (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
        if (av->qmin == 0)
            av->qmin = 3;
        if (av->qmax == 0)
            av->qmax = 31;
        if (av->max_qdiff == 0)
            av->max_qdiff = 3;
        av->qcompress = 0.5;
        av->qblur = 0.5;

        if (!av->nsse_weight)
            av->nsse_weight = 8;

        av->frame_skip_cmp = FF_CMP_DCTMAX;
        if (!av->me_method)
            av->me_method = ME_EPZS;
        av->rc_buffer_aggressivity = 1.0;

        if (!av->rc_eq)
            av->rc_eq = av_strdup("tex^qComp");
        if (!av->i_quant_factor)
            av->i_quant_factor = -0.8;
        if (!av->b_quant_factor)
            av->b_quant_factor = 1.25;
        if (!av->b_quant_offset)
            av->b_quant_offset = 1.25;
        if (!av->rc_max_rate)
            av->rc_max_rate = av->bit_rate * 2;

        if (av->rc_max_rate && !av->rc_buffer_size) {
            av->rc_buffer_size = av->rc_max_rate;
        }


        break;
    default:
        abort();
    }

    st = av_mallocz(sizeof(AVStream));
    if (!st)
        return;
    st->codec = avcodec_alloc_context3(NULL);
    stream->streams[stream->nb_streams++] = st;
    memcpy(st->codec, av, sizeof(AVCodecContext));
}