static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD ?
        inlink->h*2 : inlink->h;

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 16, 128, 128, 16 };
        int i, ret;
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            black[0] = black[3] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 1);
        if (ret < 0)
            return ret;

        /* fill black picture with black */
        for (i = 0; i < 4 && tinterlace->black_data[i]; i++) {
            int h = i == 1 || i == 2 ? FF_CEIL_RSHIFT(outlink->h, desc->log2_chroma_h) : outlink->h;
            memset(tinterlace->black_data[i], black[i],
                   tinterlace->black_linesize[i] * h);
        }
    }
    if ((tinterlace->flags & TINTERLACE_FLAG_VLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flag ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~TINTERLACE_FLAG_VLPF;
    }
    if (tinterlace->mode == MODE_INTERLACEX2) {
        outlink->time_base.num = inlink->time_base.num;
        outlink->time_base.den = inlink->time_base.den * 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
    }

    if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n",
           tinterlace->mode, (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "on" : "off",
           inlink->h, outlink->h);

    return 0;
}
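A note on the frame-rate math above: av_mul_q() multiplies two rationals and returns the reduced result, so the doubling and halving stay exact for non-integer rates. A minimal stand-alone sketch (the values are illustrative, not from the filter):

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational fps  = { 30000, 1001 };                   /* NTSC 29.97 */
    AVRational x2   = av_mul_q(fps, (AVRational){2, 1}); /* 60000/1001 */
    AVRational half = av_mul_q(fps, (AVRational){1, 2}); /* 15000/1001, reduced from 30000/2002 */

    printf("%d/%d %d/%d\n", x2.num, x2.den, half.num, half.den);
    return 0;
}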
Example #2
AVRational av_div_q(AVRational b, AVRational c){
#if defined(_MSC_VER)
    AVRational r;
    r.num = c.den;
    r.den = c.num;
    return av_mul_q(b, r);
#else
    return av_mul_q(b, (AVRational){c.den, c.num});
#endif
}
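Since av_div_q() is just multiplication by the reciprocal, a quick sanity check is that dividing a product by one factor reduces back to the other (values are arbitrary):

#include <assert.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational a = { 16, 9 }, b = { 4, 3 };
    AVRational r = av_div_q(av_mul_q(a, b), b); /* (16/9 * 4/3) / (4/3) */
    assert(av_cmp_q(r, a) == 0);                /* back to 16/9 */
    return 0;
}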
Example #3
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    TileContext *tile   = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    if (inlink->w > INT_MAX / tile->w) {
        av_log(ctx, AV_LOG_ERROR, "Total width %ux%u is too much.\n",
               tile->w, inlink->w);
        return AVERROR(EINVAL);
    }
    if (inlink->h > INT_MAX / tile->h) {
        av_log(ctx, AV_LOG_ERROR, "Total height %ux%u is too much.\n",
               tile->h, inlink->h);
        return AVERROR(EINVAL);
    }
    outlink->w = tile->w * inlink->w;
    outlink->h = tile->h * inlink->h;
    outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    outlink->frame_rate = av_mul_q(inlink->frame_rate,
                                   (AVRational){ 1, tile->w * tile->h });
    ff_draw_init(&tile->draw, inlink->format, 0);
    /* TODO make the color an option, or find an unified way of choosing it */
    ff_draw_color(&tile->draw, &tile->blank, (uint8_t[]){ 0, 0, 0, -1 });

    return 0;
}
Example #4
static int find_frame_rate_index(MpegEncContext *s){
    int i;
    AVRational bestq= (AVRational){0, 0};
    AVRational ext;
    AVRational target = av_inv_q(s->avctx->time_base);

    for(i=1;i<14;i++) {
        if(s->avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL && i>=9) break;

        for (ext.num=1; ext.num <= 4; ext.num++) {
            for (ext.den=1; ext.den <= 32; ext.den++) {
                AVRational q = av_mul_q(ext, ff_mpeg12_frame_rate_tab[i]);

                if(s->codec_id != AV_CODEC_ID_MPEG2VIDEO && (ext.den!=1 || ext.num!=1))
                    continue;
                if(av_gcd(ext.den, ext.num) != 1)
                    continue;

                if(    bestq.num==0
                    || av_nearer_q(target, bestq, q) < 0
                    || ext.num==1 && ext.den==1 && av_nearer_q(target, bestq, q) == 0){
                    bestq = q;
                    s->frame_rate_index= i;
                    s->mpeg2_frame_rate_ext.num = ext.num;
                    s->mpeg2_frame_rate_ext.den = ext.den;
                }
            }
        }
    }
    if(av_cmp_q(target, bestq))
        return -1;
    else
        return 0;
}
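The search above leans on av_nearer_q(), which reports whether q1 or q2 is closer to q. A hedged helper (name hypothetical) showing the same comparison in isolation:

#include <libavutil/rational.h>

/* av_nearer_q(q, q1, q2) returns 1 if q1 is nearer to q, -1 if q2 is
 * nearer, and 0 when they are equidistant. */
static AVRational nearer_rate(AVRational target, AVRational q1, AVRational q2)
{
    return av_nearer_q(target, q1, q2) >= 0 ? q1 : q2;
}
/* e.g. target 24000/1001 picks 24/1 over 25/1. */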
Example #5
// Create the 'pasp' atom for video tracks. No guesswork required.
// Reference: http://www.uwasa.fi/~f76998/video/conversion/
void set_track_clean_aperture_ext(ImageDescriptionHandle imgDesc, Fixed displayW, Fixed displayH, Fixed pixelW, Fixed pixelH)
{
    if (displayW == pixelW && displayH == pixelH)
        return;

    AVRational dar, invPixelSize, sar;

    dar          = (AVRational) { displayW, displayH };
    invPixelSize = (AVRational) { pixelH, pixelW };
    sar          = av_mul_q(dar, invPixelSize);

    av_reduce(&sar.num, &sar.den, sar.num, sar.den, fixed1);

    if (sar.num == sar.den)
        return;

    PixelAspectRatioImageDescriptionExtension **pasp = (PixelAspectRatioImageDescriptionExtension**)NewHandle(sizeof(PixelAspectRatioImageDescriptionExtension));

    **pasp = (PixelAspectRatioImageDescriptionExtension) {
        EndianU32_NtoB(sar.num), EndianU32_NtoB(sar.den)
    };

    AddImageDescriptionExtension(imgDesc, (Handle)pasp, kPixelAspectRatioImageDescriptionExtension);

    DisposeHandle((Handle)pasp);
}
Example #6
sf::Time Stream::computeEncodedPosition()
{
    if (!m_packetList.size())
    {
        m_dataSource.requestMoreData(*this);
    }

    if (!m_packetList.size())
    {
        return sf::Time::Zero;
    }
    else
    {
        sf::Lock l(m_readerMutex);
        AVPacket* packet = m_packetList.front();
        CHECK(packet, "internal inconsistency");

        int64_t timestamp = -424242;

        if (packet->dts != AV_NOPTS_VALUE)
        {
            timestamp = packet->dts;
        }
        else if (packet->pts != AV_NOPTS_VALUE)
        {
            int64_t startTime = m_stream->start_time != AV_NOPTS_VALUE ? m_stream->start_time : 0;
            timestamp = packet->pts - startTime;
        }

        AVRational seconds = av_mul_q(av_make_q(timestamp, 1), m_stream->time_base);
        return sf::milliseconds(1000 * av_q2d(seconds));
    }
}
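The conversion above is the generic pattern seconds = timestamp * time_base. A minimal C sketch of the same arithmetic (the 1/90000 time base is an assumption, typical for MPEG-TS):

#include <libavutil/rational.h>

/* Convert a stream timestamp to seconds: ts * time_base.
 * Note av_make_q() takes int, so timestamps beyond INT_MAX would need
 * av_q2d(time_base) * ts instead. */
static double ts_to_seconds(int64_t ts, AVRational time_base)
{
    return av_q2d(av_mul_q(av_make_q(ts, 1), time_base));
}
/* e.g. ts_to_seconds(90000, (AVRational){1, 90000}) == 1.0 */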
Example #7
AVRational av_sub_q(AVRational b, AVRational c){
#if defined(_MSC_VER)
    AVRational r;
    r.num = -c.num;
    r.den = c.den;
    return av_mul_q(b, r);
#else
    return av_add_q(b, (AVRational){-c.num, c.den});
#endif
}
Example #8
sf::Time Stream::packetDuration(const AVPacket* packet) const
{
    CHECK(packet, "inconsistency error: null packet");
    CHECK(packet->stream_index == m_streamID, "Asking for duration of a packet for a different stream!");

    if (packet->duration != 0)
    {
        AVRational seconds = av_mul_q(av_make_q(packet->duration, 1), m_stream->time_base);
        return sf::seconds(av_q2d(seconds));
    }
    else
    {
        return sf::seconds(1. / av_q2d(av_guess_frame_rate(m_formatCtx, m_stream, nullptr)));
    }
}
Example #9
static void put_videoinfoheader2(AVIOContext *pb, AVStream *st)
{
    AVRational dar = av_mul_q(st->sample_aspect_ratio, (AVRational){st->codec->width, st->codec->height});
    unsigned int num, den;
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFFFFFF);

    /* VIDEOINFOHEADER2 */
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, st->codec->width);
    avio_wl32(pb, st->codec->height);

    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);

    avio_wl32(pb, st->codec->bit_rate);
    avio_wl32(pb, 0);
    avio_wl64(pb, st->avg_frame_rate.num && st->avg_frame_rate.den ? INT64_C(10000000) / av_q2d(st->avg_frame_rate) : 0);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);

    avio_wl32(pb, num);
    avio_wl32(pb, den);
    avio_wl32(pb, 0);
    avio_wl32(pb, 0);

    ff_put_bmp_header(pb, st->codec, ff_codec_bmp_tags, 0, 1);

    if (st->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        int padding = (st->codec->extradata_size & 3) ? 4 - (st->codec->extradata_size & 3) : 0;
        /* MPEG2VIDEOINFO */
        avio_wl32(pb, 0);
        avio_wl32(pb, st->codec->extradata_size + padding);
        avio_wl32(pb, -1);
        avio_wl32(pb, -1);
        avio_wl32(pb, 0);
        avio_write(pb, st->codec->extradata, st->codec->extradata_size);
        ffio_fill(pb, 0, padding);
    }
}
Example #10
static int config(struct vf_instance *vf, int w, int h, int dw, int dh,
                        unsigned flags, unsigned fmt)
{
    int ret;
    AVFilterLink *out;
    AVRational iar, dar;

    av_reduce(&iar.num, &iar.den, w, h, INT_MAX);
    av_reduce(&dar.num, &dar.den, dw, dh, INT_MAX);
    vf->priv->in_pixfmt = imgfmt2pixfmt(fmt);
    vf->priv->in_imgfmt = fmt;
    vf->priv->in_w = w;
    vf->priv->in_h = h;
    vf->priv->in_sar = av_div_q(dar, iar);
    ret = avfilter_graph_config(vf->priv->graph, NULL);
    if (ret < 0)
        return 0;
    out = vf->priv->out->inputs[0];
    vf->priv->out_w = out->w;
    vf->priv->out_h = out->h;
    vf->priv->out_pixfmt = out->format;
    vf->priv->out_imgfmt = pixfmt2imgfmt(out->format);
    vf->priv->out_sar = out->sample_aspect_ratio;
    if (vf->priv->out_sar.num != vf->priv->in_sar.num ||
        vf->priv->out_sar.den != vf->priv->in_sar.den ||
        out->w != w || out->h != h) {
        av_reduce(&iar.num, &iar.den, out->w, out->h, INT_MAX);
        dar = av_mul_q(iar, out->sample_aspect_ratio);
        if (av_cmp_q(dar, iar) >= 0) {
            dh = out->h;
            dw = av_rescale(dh, dar.num, dar.den);
        } else {
            dw = out->w;
            dh = av_rescale(dw, dar.den, dar.num);
        }
    }
    return vf_next_config(vf, out->w, out->h, dw, dh, flags, fmt);
}
Example #11
static int config_props_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SeparateFieldsContext *sf = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];

    sf->nb_planes = av_pix_fmt_count_planes(inlink->format);

    if (inlink->h & 1) {
        av_log(ctx, AV_LOG_ERROR, "height must be even\n");
        return AVERROR_INVALIDDATA;
    }

    outlink->time_base.num = inlink->time_base.num;
    outlink->time_base.den = inlink->time_base.den * 2;
    outlink->frame_rate.num = inlink->frame_rate.num * 2;
    outlink->frame_rate.den = inlink->frame_rate.den;
    outlink->w = inlink->w;
    outlink->h = inlink->h / 2;
    sf->ts_unit = av_q2d(av_inv_q(av_mul_q(outlink->frame_rate, outlink->time_base)));

    return 0;
}
Example #12
static int config_input(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    CropContext *crop = ctx->priv;
    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[link->format];
    int ret;
    const char *expr;
    double res;

    crop->var_values[VAR_IN_W]  = crop->var_values[VAR_IW] = ctx->inputs[0]->w;
    crop->var_values[VAR_IN_H]  = crop->var_values[VAR_IH] = ctx->inputs[0]->h;
    crop->var_values[VAR_A]     = (float) link->w / link->h;
    crop->var_values[VAR_SAR]   = link->sample_aspect_ratio.num ? av_q2d(link->sample_aspect_ratio) : 1;
    crop->var_values[VAR_DAR]   = crop->var_values[VAR_A] * crop->var_values[VAR_SAR];
    crop->var_values[VAR_HSUB]  = 1<<pix_desc->log2_chroma_w;
    crop->var_values[VAR_VSUB]  = 1<<pix_desc->log2_chroma_h;
    crop->var_values[VAR_X]     = NAN;
    crop->var_values[VAR_Y]     = NAN;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = NAN;
    crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
    crop->var_values[VAR_N]     = 0;
    crop->var_values[VAR_T]     = NAN;
    crop->var_values[VAR_POS]   = NAN;

    av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
    crop->hsub = av_pix_fmt_descriptors[link->format].log2_chroma_w;
    crop->vsub = av_pix_fmt_descriptors[link->format].log2_chroma_h;

    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->oh_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = res;
    /* evaluate again ow as it may depend on oh */
    if ((ret = av_expr_parse_and_eval(&res, (expr = crop->ow_expr),
                                      var_names, crop->var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0) goto fail_expr;
    crop->var_values[VAR_OUT_W] = crop->var_values[VAR_OW] = res;
    if (normalize_double(&crop->w, crop->var_values[VAR_OUT_W]) < 0 ||
        normalize_double(&crop->h, crop->var_values[VAR_OUT_H]) < 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Too big value or invalid expression for out_w/ow or out_h/oh. "
               "Maybe the expression for out_w:'%s' or for out_h:'%s' is self-referencing.\n",
               crop->ow_expr, crop->oh_expr);
        return AVERROR(EINVAL);
    }
    crop->w &= ~((1 << crop->hsub) - 1);
    crop->h &= ~((1 << crop->vsub) - 1);

    if ((ret = av_expr_parse(&crop->x_pexpr, crop->x_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0 ||
        (ret = av_expr_parse(&crop->y_pexpr, crop->y_expr, var_names,
                             NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        return AVERROR(EINVAL);

    if (crop->keep_aspect) {
        AVRational dar = av_mul_q(link->sample_aspect_ratio,
                                  (AVRational){ link->w, link->h });
        av_reduce(&crop->out_sar.num, &crop->out_sar.den,
                  dar.num * crop->h, dar.den * crop->w, INT_MAX);
    } else
        crop->out_sar = link->sample_aspect_ratio;

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d sar:%d/%d -> w:%d h:%d sar:%d/%d\n",
           link->w, link->h, link->sample_aspect_ratio.num, link->sample_aspect_ratio.den,
           crop->w, crop->h, crop->out_sar.num, crop->out_sar.den);

    if (crop->w <= 0 || crop->h <= 0 ||
        crop->w > link->w || crop->h > link->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid too big or non positive size for width '%d' or height '%d'\n",
               crop->w, crop->h);
        return AVERROR(EINVAL);
    }

    /* set default, required in the case the first computed value for x/y is NAN */
    crop->x = (link->w - crop->w) / 2;
    crop->y = (link->h - crop->h) / 2;
    crop->x &= ~((1 << crop->hsub) - 1);
    crop->y &= ~((1 << crop->vsub) - 1);
    return 0;

fail_expr:
    av_log(NULL, AV_LOG_ERROR, "Error when evaluating the expression '%s'\n", expr);
    return ret;
}
Example #13
static av_cold int cudascale_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    CUDAScaleContext *s  = ctx->priv;
    AVHWFramesContext     *frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
    AVCUDADeviceContext *device_hwctx = frames_ctx->device_ctx->hwctx;
    CUcontext dummy, cuda_ctx = device_hwctx->cuda_ctx;
    int w, h;
    int ret;

    extern char vf_scale_cuda_ptx[];

    ret = CHECK_CU(cuCtxPushCurrent(cuda_ctx));
    if (ret < 0)
        goto fail;

    ret = CHECK_CU(cuModuleLoadData(&s->cu_module, vf_scale_cuda_ptx));
    if (ret < 0)
        goto fail;

    CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "Subsample_Bilinear_uchar"));
    CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "Subsample_Bilinear_uchar2"));
    CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar4, s->cu_module, "Subsample_Bilinear_uchar4"));
    CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "Subsample_Bilinear_ushort"));
    CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "Subsample_Bilinear_ushort2"));
    CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort4, s->cu_module, "Subsample_Bilinear_ushort4"));

    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_uchar, s->cu_module, "uchar_tex"));
    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_uchar2, s->cu_module, "uchar2_tex"));
    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_uchar4, s->cu_module, "uchar4_tex"));
    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_ushort, s->cu_module, "ushort_tex"));
    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_ushort2, s->cu_module, "ushort2_tex"));
    CHECK_CU(cuModuleGetTexRef(&s->cu_tex_ushort4, s->cu_module, "ushort4_tex"));

    CHECK_CU(cuTexRefSetFlags(s->cu_tex_uchar, CU_TRSF_READ_AS_INTEGER));
    CHECK_CU(cuTexRefSetFlags(s->cu_tex_uchar2, CU_TRSF_READ_AS_INTEGER));
    CHECK_CU(cuTexRefSetFlags(s->cu_tex_uchar4, CU_TRSF_READ_AS_INTEGER));
    CHECK_CU(cuTexRefSetFlags(s->cu_tex_ushort, CU_TRSF_READ_AS_INTEGER));
    CHECK_CU(cuTexRefSetFlags(s->cu_tex_ushort2, CU_TRSF_READ_AS_INTEGER));
    CHECK_CU(cuTexRefSetFlags(s->cu_tex_ushort4, CU_TRSF_READ_AS_INTEGER));

    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_uchar, CU_TR_FILTER_MODE_LINEAR));
    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_uchar2, CU_TR_FILTER_MODE_LINEAR));
    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_uchar4, CU_TR_FILTER_MODE_LINEAR));
    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_ushort, CU_TR_FILTER_MODE_LINEAR));
    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_ushort2, CU_TR_FILTER_MODE_LINEAR));
    CHECK_CU(cuTexRefSetFilterMode(s->cu_tex_ushort4, CU_TR_FILTER_MODE_LINEAR));

    CHECK_CU(cuCtxPopCurrent(&dummy));

    if ((ret = ff_scale_eval_dimensions(s,
                                        s->w_expr, s->h_expr,
                                        inlink, outlink,
                                        &w, &h)) < 0)
        goto fail;

    if (((int64_t)h * inlink->w) > INT_MAX  ||
        ((int64_t)w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    ret = init_processing_chain(ctx, inlink->w, inlink->h, w, h);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
           inlink->w, inlink->h, outlink->w, outlink->h);

    if (inlink->sample_aspect_ratio.num) {
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
                                                             outlink->w*inlink->h},
                                                inlink->sample_aspect_ratio);
    } else {
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
    }

    return 0;

fail:
    return ret;
}
Example #14
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVRational temp, dar;
    int num, den;
    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == CODEC_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    if (url_is_streamed(pb))
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        list2 = start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = start_tag(pb, "strh");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
        case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
//        case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
        case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, stream->codec_tag);
        else
            put_le32(pb, 1);
        put_le32(pb, 0); /* flags */
        put_le16(pb, 0); /* priority */
        put_le16(pb, 0); /* language */
        put_le32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        put_le32(pb, au_scale); /* scale */
        put_le32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        put_le32(pb, 0); /* start */
        avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
        if (url_is_streamed(pb))
            put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            put_le32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, 1024 * 1024);
        else if(stream->codec_type == CODEC_TYPE_AUDIO)
            put_le32(pb, 12 * 1024);
        else
            put_le32(pb, 0);
        put_le32(pb, -1); /* quality */
        put_le32(pb, au_ssize); /* sample size */
        put_le32(pb, 0);
        put_le16(pb, stream->width);
        put_le16(pb, stream->height);
        end_tag(pb, strh);

      if(stream->codec_type != CODEC_TYPE_DATA){
        strf = start_tag(pb, "strf");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO:
            put_bmp_header(pb, stream, codec_bmp_tags, 0);
            break;
        case CODEC_TYPE_AUDIO:
            if (put_wav_header(pb, stream) < 0) {
                return -1;
            }
            break;
        default:
            return -1;
        }
        end_tag(pb, strf);
      }

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
            avi->indexes[i].indx_start = start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
                                    /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3] (must be 0) */
            put_le32(pb, 0);
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 put_le64(pb, 0);
            end_tag(pb, avi->indexes[i].indx_start);
        }

        if(   stream->codec_type == CODEC_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= start_tag(pb, "vprp");
            temp.num = stream->width;
            temp.den = stream->height;
            dar = av_mul_q(s->streams[i]->sample_aspect_ratio, temp);

            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            put_le32(pb, 0); //video format  = unknown
            put_le32(pb, 0); //video standard= unknown
            put_le32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le16(pb, den);
            put_le16(pb, num);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, 1); //progressive FIXME

            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, 0);
            put_le32(pb, 0);

            put_le32(pb, 0);
            put_le32(pb, 0);
            end_tag(pb, vprp);
        }

        end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             put_le32(pb, 0);
        end_tag(pb, avi->odml_list);
    }

    end_tag(pb, list1);

    list2 = start_tag(pb, "LIST");
    put_tag(pb, "INFO");
    avi_write_info_tag(pb, "INAM", s->title);
    avi_write_info_tag(pb, "IART", s->author);
    avi_write_info_tag(pb, "ICOP", s->copyright);
    avi_write_info_tag(pb, "ICMT", s->comment);
    avi_write_info_tag(pb, "IPRD", s->album);
    avi_write_info_tag(pb, "IGNR", s->genre);
    if (s->track) {
        char str_track[4];
        snprintf(str_track, 4, "%d", s->track);
        avi_write_info_tag(pb, "IPRT", str_track);
    }
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        avi_write_info_tag(pb, "ISFT", LIBAVFORMAT_IDENT);
    end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        put_le32(pb, 0);
    end_tag(pb, list2);

    avi->movi_list = start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}
Example #15
static void vc1_extract_header(AVCodecParserContext *s, AVCodecContext *avctx,
                               const uint8_t *buf, int buf_size)
{
    /* Parse the header we just finished unescaping */
    VC1ParseContext *vpc = s->priv_data;
    GetBitContext gb;
    int ret;
    vpc->v.s.avctx = avctx;
    vpc->v.parse_only = 1;
    init_get_bits(&gb, buf, buf_size * 8);
    switch (vpc->prev_start_code) {
    case VC1_CODE_SEQHDR & 0xFF:
        ff_vc1_decode_sequence_header(avctx, &vpc->v, &gb);
        break;
    case VC1_CODE_ENTRYPOINT & 0xFF:
        ff_vc1_decode_entry_point(avctx, &vpc->v, &gb);
        break;
    case VC1_CODE_FRAME & 0xFF:
        if(vpc->v.profile < PROFILE_ADVANCED)
            ret = ff_vc1_parse_frame_header    (&vpc->v, &gb);
        else
            ret = ff_vc1_parse_frame_header_adv(&vpc->v, &gb);

        if (ret < 0)
            break;

        /* keep AV_PICTURE_TYPE_BI internal to VC1 */
        if (vpc->v.s.pict_type == AV_PICTURE_TYPE_BI)
            s->pict_type = AV_PICTURE_TYPE_B;
        else
            s->pict_type = vpc->v.s.pict_type;

        if (avctx->ticks_per_frame > 1){
            // process pulldown flags
            s->repeat_pict = 1;
            // Pulldown flags are only valid when 'broadcast' has been set.
            // So ticks_per_frame will be 2
            if (vpc->v.rff){
                // repeat field
                s->repeat_pict = 2;
            }else if (vpc->v.rptfrm){
                // repeat frames
                s->repeat_pict = vpc->v.rptfrm * 2 + 1;
            }
        }else{
            s->repeat_pict = 0;
        }

        if (vpc->v.broadcast && vpc->v.interlace && !vpc->v.psf)
            s->field_order = vpc->v.tff ? AV_FIELD_TT : AV_FIELD_BB;
        else
            s->field_order = AV_FIELD_PROGRESSIVE;

        break;
    }
    if (avctx->framerate.num)
        avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
    s->format = vpc->v.chromaformat == 1 ? AV_PIX_FMT_YUV420P
                                         : AV_PIX_FMT_NONE;
    if (avctx->width && avctx->height) {
        s->width        = avctx->width;
        s->height       = avctx->height;
        s->coded_width  = FFALIGN(avctx->coded_width,  16);
        s->coded_height = FFALIGN(avctx->coded_height, 16);
    }
}
Example #16
static int ndi_find_sources(AVFormatContext *avctx, const char *name, NDIlib_source_t *source_to_connect_to)
{
    int j = AVERROR(ENODEV);
    unsigned int n, i;
    struct NDIContext *ctx = avctx->priv_data;
    const NDIlib_source_t *ndi_srcs = NULL;
    const NDIlib_find_create_t find_create_desc = { .show_local_sources = true,
        .p_groups = NULL, .p_extra_ips = NULL };

    if (!ctx->ndi_find)
        ctx->ndi_find = NDIlib_find_create2(&find_create_desc);
    if (!ctx->ndi_find) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_find_create failed.\n");
        return AVERROR(EIO);
    }

    while (1)
    {
        int f, t = ctx->wait_sources / 1000;
        av_log(avctx, AV_LOG_DEBUG, "Waiting for sources %d miliseconds\n", t);
        f = NDIlib_find_wait_for_sources(ctx->ndi_find, t);
        av_log(avctx, AV_LOG_DEBUG, "NDIlib_find_wait_for_sources returns %d\n", f);
        if (!f)
            break;
    }

    ndi_srcs = NDIlib_find_get_current_sources(ctx->ndi_find, &n);

    if (ctx->find_sources)
        av_log(avctx, AV_LOG_INFO, "Found %d NDI sources:\n", n);

    for (i = 0; i < n; i++) {
        if (ctx->find_sources)
            av_log(avctx, AV_LOG_INFO, "\t'%s'\t'%s'\n", ndi_srcs[i].p_ndi_name, ndi_srcs[i].p_ip_address);

        if (!strcmp(name, ndi_srcs[i].p_ndi_name)) {
            *source_to_connect_to = ndi_srcs[i];
            j = i;
        }
    }

    return j;
}

static int ndi_read_header(AVFormatContext *avctx)
{
    int ret;
    NDIlib_recv_create_t recv_create_desc;
    const NDIlib_tally_t tally_state = { .on_program = true, .on_preview = false };
    struct NDIContext *ctx = avctx->priv_data;

    if (!NDIlib_initialize()) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_initialize failed.\n");
        return AVERROR_EXTERNAL;
    }

    /* Find available sources. */
    ret = ndi_find_sources(avctx, avctx->url, &recv_create_desc.source_to_connect_to);
    if (ctx->find_sources) {
        return AVERROR_EXIT;
    }
    if (ret < 0)
        return ret;

    /* Create receiver description */
    recv_create_desc.color_format = NDIlib_recv_color_format_e_UYVY_RGBA;
    recv_create_desc.bandwidth = NDIlib_recv_bandwidth_highest;
    recv_create_desc.allow_video_fields = ctx->allow_video_fields;

    /* Create the receiver */
    ctx->recv = NDIlib_recv_create(&recv_create_desc);
    if (!ctx->recv) {
        av_log(avctx, AV_LOG_ERROR, "NDIlib_recv_create2 failed.\n");
        return AVERROR(EIO);
    }

    /* Set tally */
    NDIlib_recv_set_tally(ctx->recv, &tally_state);

    avctx->ctx_flags |= AVFMTCTX_NOHEADER;

    return 0;
}

static int ndi_create_video_stream(AVFormatContext *avctx, NDIlib_video_frame_t *v)
{
    AVStream *st;
    AVRational tmp;
    struct NDIContext *ctx = avctx->priv_data;

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add video stream\n");
        return AVERROR(ENOMEM);
    }

    st->time_base                   = NDI_TIME_BASE_Q;
    st->r_frame_rate                = av_make_q(v->frame_rate_N, v->frame_rate_D);

    tmp = av_mul_q(av_d2q(v->picture_aspect_ratio, INT_MAX), (AVRational){v->yres, v->xres});
    av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, tmp.num, tmp.den, 1000);
    st->codecpar->sample_aspect_ratio = st->sample_aspect_ratio;

    st->codecpar->codec_type        = AVMEDIA_TYPE_VIDEO;
    st->codecpar->width             = v->xres;
    st->codecpar->height            = v->yres;
    st->codecpar->codec_id          = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->bit_rate          = av_rescale(v->xres * v->yres * 16, v->frame_rate_N, v->frame_rate_D);
    st->codecpar->field_order       = v->frame_format_type == NDIlib_frame_format_type_progressive
        ? AV_FIELD_PROGRESSIVE : AV_FIELD_TT;

    if (NDIlib_FourCC_type_UYVY == v->FourCC || NDIlib_FourCC_type_UYVA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_UYVY422;
        st->codecpar->codec_tag     = MKTAG('U', 'Y', 'V', 'Y');
        if (NDIlib_FourCC_type_UYVA == v->FourCC)
            av_log(avctx, AV_LOG_WARNING, "Alpha channel ignored\n");
    } else if (NDIlib_FourCC_type_BGRA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_BGRA;
        st->codecpar->codec_tag     = MKTAG('B', 'G', 'R', 'A');
    } else if (NDIlib_FourCC_type_BGRX == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_BGR0;
        st->codecpar->codec_tag     = MKTAG('B', 'G', 'R', '0');
    } else if (NDIlib_FourCC_type_RGBA == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_RGBA;
        st->codecpar->codec_tag     = MKTAG('R', 'G', 'B', 'A');
    } else if (NDIlib_FourCC_type_RGBX == v->FourCC) {
        st->codecpar->format        = AV_PIX_FMT_RGB0;
        st->codecpar->codec_tag     = MKTAG('R', 'G', 'B', '0');
    } else {
        av_log(avctx, AV_LOG_ERROR, "Unsupported video stream format, v->FourCC=%d\n", v->FourCC);
        return AVERROR(EINVAL);
    }

    avpriv_set_pts_info(st, 64, 1, NDI_TIME_BASE);

    ctx->video_st = st;

    return 0;
}
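The sample-aspect-ratio derivation above is the usual SAR = DAR * height / width identity, with av_reduce() capping the terms. The same step in isolation (helper name hypothetical):

#include <libavutil/rational.h>

/* Derive sample aspect ratio from display aspect ratio:
 * SAR = DAR * h / w. */
static AVRational sar_from_dar(AVRational dar, int w, int h)
{
    return av_mul_q(dar, (AVRational){ h, w });
}
/* e.g. a 16:9 picture stored as 1440x1080 -> SAR 4/3 (anamorphic HDV). */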
Example #17
static int qsvscale_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    QSVScaleContext  *s = ctx->priv;
    int64_t w, h;
    double var_values[VARS_NB], res;
    char *expr;
    int ret;

    var_values[VAR_PI]    = M_PI;
    var_values[VAR_PHI]   = M_PHI;
    var_values[VAR_E]     = M_E;
    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
    var_values[VAR_A]     = (double) inlink->w / inlink->h;
    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];

    /* evaluate width and height */
    av_expr_parse_and_eval(&res, (expr = s->w_expr),
                           var_names, var_values,
                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
    /* evaluate again the width, as it may depend on the output height */
    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
                                      var_names, var_values,
                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
        goto fail;
    s->w = res;

    w = s->w;
    h = s->h;

    /* sanity check params */
    if (w <  -1 || h <  -1) {
        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
        return AVERROR(EINVAL);
    }
    if (w == -1 && h == -1)
        s->w = s->h = 0;

    if (!(w = s->w))
        w = inlink->w;
    if (!(h = s->h))
        h = inlink->h;
    if (w == -1)
        w = av_rescale(h, inlink->w, inlink->h);
    if (h == -1)
        h = av_rescale(w, inlink->h, inlink->w);

    if (w > INT_MAX || h > INT_MAX ||
        (h * inlink->w) > INT_MAX  ||
        (w * inlink->h) > INT_MAX)
        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");

    outlink->w = w;
    outlink->h = h;

    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
    if (ret < 0)
        return ret;

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
           inlink->w, inlink->h, outlink->w, outlink->h);

    if (inlink->sample_aspect_ratio.num)
        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
                                                             outlink->w*inlink->h},
                                                inlink->sample_aspect_ratio);
    else
        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;

    return 0;

fail:
    av_log(NULL, AV_LOG_ERROR,
           "Error when evaluating the expression '%s'\n", expr);
    return ret;
}
Example #18
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TInterlaceContext *tinterlace = ctx->priv;
    AVFrame *cur, *next, *out;
    int field, tff, ret;

    av_frame_free(&tinterlace->cur);
    tinterlace->cur  = tinterlace->next;
    tinterlace->next = picref;

    cur = tinterlace->cur;
    next = tinterlace->next;
    /* we need at least two frames */
    if (!tinterlace->cur)
        return 0;

    switch (tinterlace->mode) {
    case MODE_MERGE: /* move the odd frame into the upper field of the new image, even into
             * the lower field, generating a double-height video at half framerate */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->interlaced_frame = 1;
        out->top_field_first = 1;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        /* write odd frame lines into the upper field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_UPPER, tinterlace->flags);
        /* write even frame lines into the lower field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, FIELD_LOWER, tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_DROP_ODD:  /* only output even frames, odd  frames are dropped; height unchanged, half framerate */
    case MODE_DROP_EVEN: /* only output odd  frames, even frames are dropped; height unchanged, half framerate */
        out = av_frame_clone(tinterlace->mode == MODE_DROP_EVEN ? cur : next);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_free(&tinterlace->next);
        break;

    case MODE_PAD: /* expand each frame to double height, but pad alternate
                    * lines with black; framerate unchanged */
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->height = outlink->h;
        out->sample_aspect_ratio = av_mul_q(cur->sample_aspect_ratio, av_make_q(2, 1));

        field = (1 + tinterlace->frame) & 1 ? FIELD_UPPER : FIELD_LOWER;
        /* copy upper and lower fields */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, field, tinterlace->flags);
        /* pad with black the other field */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)tinterlace->black_data, tinterlace->black_linesize,
                           inlink->format, inlink->w, inlink->h,
                           FIELD_UPPER_AND_LOWER, 1, !field, tinterlace->flags);
        break;

        /* interleave upper/lower lines from odd frames with lower/upper lines from even frames,
         * halving the frame rate and preserving image height */
    case MODE_INTERLEAVE_TOP:    /* top    field first */
    case MODE_INTERLEAVE_BOTTOM: /* bottom field first */
        tff = tinterlace->mode == MODE_INTERLEAVE_TOP;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, cur);
        out->interlaced_frame = 1;
        out->top_field_first = tff;

        /* copy upper/lower field from cur */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        /* copy lower/upper field from next */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        av_frame_free(&tinterlace->next);
        break;
    case MODE_INTERLACEX2: /* re-interlace preserving image height, double frame rate */
        /* output current frame first */
        out = av_frame_clone(cur);
        if (!out)
            return AVERROR(ENOMEM);
        out->interlaced_frame = 1;
        if (cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts*2;

        out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
        if ((ret = ff_filter_frame(outlink, out)) < 0)
            return ret;

        /* output mix of current and next frame */
        tff = next->top_field_first;
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out, next);
        out->interlaced_frame = 1;
        out->top_field_first = !tff;

        if (next->pts != AV_NOPTS_VALUE && cur->pts != AV_NOPTS_VALUE)
            out->pts = cur->pts + next->pts;
        else
            out->pts = AV_NOPTS_VALUE;
        /* write current frame second field lines into the second field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)cur->data, cur->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_LOWER : FIELD_UPPER, 1, tff ? FIELD_LOWER : FIELD_UPPER,
                           tinterlace->flags);
        /* write next frame first field lines into the first field of the new frame */
        copy_picture_field(tinterlace, out->data, out->linesize,
                           (const uint8_t **)next->data, next->linesize,
                           inlink->format, inlink->w, inlink->h,
                           tff ? FIELD_UPPER : FIELD_LOWER, 1, tff ? FIELD_UPPER : FIELD_LOWER,
                           tinterlace->flags);
        break;
    default:
        av_assert0(0);
    }

    out->pts = av_rescale_q(out->pts, tinterlace->preout_time_base, outlink->time_base);
    ret = ff_filter_frame(outlink, out);
    tinterlace->frame++;

    return ret;
}
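The MODE_INTERLACEX2 pts handling depends on config_out_props() above having doubled time_base.den: cur->pts*2 lands on the same instant in the doubled base, and cur->pts + next->pts lands halfway between the two source frames. A worked example under those assumptions:

/* With an input time base of 1/25 and input pts 4 and 5 (0.16s and
 * 0.20s), the pre-output time base is 1/50, so:
 *   first field:  4 * 2 = 8 -> 8/50 = 0.16s (same instant as cur)
 *   second field: 4 + 5 = 9 -> 9/50 = 0.18s (midway to next)
 */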
Example #19
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECREATEINFO cuinfo;

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    ctx->internal_error = 0;

    avctx->width = format->display_area.right;
    avctx->height = format->display_area.bottom;

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    if (!format->progressive_sequence && ctx->deint_mode == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != AV_PIX_FMT_NV12)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    avctx->coded_width = format->coded_width;
    avctx->coded_height = format->coded_height;

    ctx->chroma_format = format->chroma_format;

    memset(&cuinfo, 0, sizeof(cuinfo));

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;
    cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;

    cuinfo.ulWidth = avctx->coded_width;
    cuinfo.ulHeight = avctx->coded_height;
    cuinfo.ulTargetWidth = cuinfo.ulWidth;
    cuinfo.ulTargetHeight = cuinfo.ulHeight;

    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulWidth;
    cuinfo.target_rect.bottom = cuinfo.ulHeight;

    cuinfo.ulNumDecodeSurfaces = MAX_FRAME_COUNT;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;

    if (format->progressive_sequence) {
        ctx->deint_mode = cuinfo.DeinterlaceMode = cudaVideoDeinterlaceMode_Weave;
    } else {
        cuinfo.DeinterlaceMode = ctx->deint_mode;
    }

    if (ctx->deint_mode != cudaVideoDeinterlaceMode_Weave)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = AV_PIX_FMT_NV12;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
Example #20
AVRational av_div_q(AVRational b, AVRational c){
    AVRational temp = { c.den, c.num };
    return av_mul_q(b, temp);
}
Example #21
AVRational av_div_q(AVRational b, AVRational c)
{
    return av_mul_q(b, (AVRational) { c.den, c.num });
}
Example #22
bool D2V::printSettings() {
    int stream_type = getStreamType(f->fctx->iformat->name);

    int video_id = video_stream->id;
    int audio_id = 0;
    int64_t ts_packetsize = 0;
    if (stream_type == TRANSPORT_STREAM) {
        const AVStream *audio_stream = nullptr;
        for (unsigned i = 0; i < f->fctx->nb_streams; i++) {
            if (f->fctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audio_stream = f->fctx->streams[i];
                break;
            }
        }

        if (audio_stream)
            audio_id = audio_stream->id;

        if (av_opt_get_int(f->fctx, "ts_packetsize", AV_OPT_SEARCH_CHILDREN, &ts_packetsize) < 0)
            ts_packetsize = 0;
    }

    int mpeg_type = 0;
    if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
        mpeg_type = 1;
    else if (video_stream->codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
        mpeg_type = 2;

    int yuvrgb_scale = input_range == ColourRangeLimited ? 1 : 0;

    int width, height;
    if (av_opt_get_image_size(video_stream->codec, "video_size", 0, &width, &height) < 0)
        width = height = -1;

    AVRational sar;
    if (av_opt_get_q(video_stream->codec, "aspect", 0, &sar) < 0)
        sar = { 1, 1 };
    AVRational dar = av_mul_q(av_make_q(width, height), sar);
    av_reduce(&dar.num, &dar.den, dar.num, dar.den, 1024);

    // No AVOption for framerate?
    AVRational frame_rate = video_stream->codec->framerate;

    std::string settings;

    settings += "Stream_Type=" + std::to_string(stream_type) + "\n";
    if (stream_type == TRANSPORT_STREAM) {
        char pids[100] = { 0 };
        snprintf(pids, 100, "%x,%x,%x", video_id, audio_id, 0);
        settings += "MPEG2_Transport_PID=";
        settings += pids;
        settings += "\n";

        settings += "Transport_Packet_Size=" + std::to_string(ts_packetsize) + "\n";
    }
    settings += "MPEG_Type=" + std::to_string(mpeg_type) + "\n";
    settings += "iDCT_Algorithm=6\n"; // "32-bit SSEMMX (Skal)". No one cares anyway.
    settings += "YUVRGB_Scale=" + std::to_string(yuvrgb_scale) + "\n";
    settings += "Luminance_Filter=0,0\n"; // We don't care.
    settings += "Clipping=0,0,0,0\n"; // We don't crop here.
    settings += "Aspect_Ratio=" + std::to_string(dar.num) + ":" + std::to_string(dar.den) + "\n";
    settings += "Picture_Size=" + std::to_string(width) + "x" + std::to_string(height) + "\n";
    settings += "Field_Operation=0\n"; // Always tell them honor the pulldown flags.
    settings += "Frame_Rate=" + std::to_string((int)((float)frame_rate.num * 1000 / frame_rate.den)) + " (" + std::to_string(frame_rate.num) + "/" + std::to_string(frame_rate.den) + ")\n";
    settings += "Location=0,0,0,0\n"; // Whatever.

    if (fprintf(d2v_file, "%s", settings.c_str()) < 0) {
        error = "Failed to print d2v settings section: fprintf() failed.";
        return false;
    }

    return true;
}
Example #23
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *video_enc;
    AVStream *video_st = NULL;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;
    int padding;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for (n = 0; n < s->nb_streams; n++) {
        s->streams[n]->priv_data = av_mallocz(sizeof(AVIStream));
        if (!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for (n = 0; n < s->nb_streams; n++) {
        AVCodecContext *codec = s->streams[n]->codec;
        bitrate += codec->bit_rate;
        if (codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_enc = codec;
            video_st = s->streams[n];
        }
    }

    nb_frames = 0;

    // TODO: should be avg_frame_rate
    if (video_st)
        avio_wl32(pb, (uint32_t) (INT64_C(1000000) * video_st->time_base.num /
                                  video_st->time_base.den));
    else
        avio_wl32(pb, 0);
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED);  /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED);  /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if (video_enc) {
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for (i = 0; i < n; i++) {
        AVStream *st = s->streams[i];
        AVCodecContext *enc = st->codec;
        AVIStream *avist = st->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch (enc->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (enc->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR,
                       "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO:
            ffio_wfourcc(pb, "vids");
            break;
        case AVMEDIA_TYPE_AUDIO:
            ffio_wfourcc(pb, "auds");
            break;
//      case AVMEDIA_TYPE_TEXT:
//          ffio_wfourcc(pb, "txts");
//          break;
        case AVMEDIA_TYPE_DATA:
            ffio_wfourcc(pb, "dats");
            break;
        }
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO ||
            enc->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, enc->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(st, &au_byterate, &au_ssize, &au_scale);

        if (   enc->codec_type == AVMEDIA_TYPE_VIDEO
            && enc->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(st, 64, au_scale, au_byterate);
        if (enc->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        /* remember this offset to fill later */
        avist->frames_hdr_strm = avio_tell(pb);
        if (!pb->seekable)
            /* FIXME: this may be broken, but who cares */
            avio_wl32(pb, AVI_MAX_RIFF_SIZE);
        else
            avio_wl32(pb, 0);  /* length, XXX: filled later */

        /* suggested buffer size, is set to largest chunk size in avi_write_trailer */
        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if (enc->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, enc->width);
        avio_wl16(pb, enc->height);
        ff_end_tag(pb, strh);

        if (enc->codec_type != AVMEDIA_TYPE_DATA) {
            int ret;
            enum AVPixelFormat pix_fmt;

            strf = ff_start_tag(pb, "strf");
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_SUBTITLE:
                /* XSUB subtitles behave like video tracks, other subtitles
                 * are not (yet) supported. */
                if (enc->codec_id != AV_CODEC_ID_XSUB)
                    break;
            case AVMEDIA_TYPE_VIDEO:
                /* WMP expects RGB 5:5:5 rawvideo in avi to have bpp set to 16. */
                if (  !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt == AV_PIX_FMT_RGB555LE
                    && enc->bits_per_coded_sample == 15)
                    enc->bits_per_coded_sample = 16;
                ff_put_bmp_header(pb, enc, ff_codec_bmp_tags, 0, 0);
                pix_fmt = avpriv_find_pix_fmt(avpriv_pix_fmt_bps_avi,
                                              enc->bits_per_coded_sample);
                if (   !enc->codec_tag
                    && enc->codec_id == AV_CODEC_ID_RAWVIDEO
                    && enc->pix_fmt != pix_fmt
                    && enc->pix_fmt != AV_PIX_FMT_NONE)
                    av_log(s, AV_LOG_ERROR, "%s rawvideo cannot be written to avi, output file will be unreadable\n",
                          av_get_pix_fmt_name(enc->pix_fmt));
                break;
            case AVMEDIA_TYPE_AUDIO:
                if ((ret = ff_put_wav_header(pb, enc, 0)) < 0)
                    return ret;
                break;
            default:
                av_log(s, AV_LOG_ERROR,
                    "Invalid or not supported codec type '%s' found in the input\n",
                    (char *)av_x_if_null(av_get_media_type_string(enc->codec_type), "?"));
                return AVERROR(EINVAL);
            }
            ff_end_tag(pb, strf);
            if ((t = av_dict_get(st->metadata, "title", NULL, 0))) {
                ff_riff_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
            if (enc->codec_id == AV_CODEC_ID_XSUB
            && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
                const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
                t = NULL;
                if (langstr) {
                    char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                    if (!str)
                        return AVERROR(ENOMEM);
                    ff_riff_write_info_tag(s->pb, "strn", str);
                    av_free(str);
                }
            }
        }

        if (pb->seekable) {
            write_odml_master(s, i);
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO   &&
            st->sample_aspect_ratio.num > 0 &&
            st->sample_aspect_ratio.den > 0) {
            int vprp       = ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(st->sample_aspect_ratio,
                                      (AVRational) { enc->width,
                                                     enc->height });
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); // video format   = unknown
            avio_wl32(pb, 0); // video standard = unknown
            // TODO: should be avg_frame_rate
            avio_wl32(pb, (2LL*st->time_base.den + st->time_base.num - 1) / (2LL * st->time_base.num));
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, 1); // progressive FIXME

            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, enc->height);
            avio_wl32(pb, enc->width);
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i += 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);


    padding = s->metadata_header_padding;
    if (padding < 0)
        padding = 1016;

    /* some padding for easier tag editing */
    if (padding) {
        list2 = ff_start_tag(pb, "JUNK");
        for (i = padding; i > 0; i -= 4)
            avio_wl32(pb, 0);
        ff_end_tag(pb, list2);
    }

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
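The vprp block above computes the display aspect ratio by multiplying the stream's sample aspect ratio with the frame dimensions via av_mul_q(), then clamps it to 16 bits with av_reduce(), since the vprp aspect fields are only 16 bits wide. A minimal standalone sketch of the same arithmetic, assuming a hypothetical 720x576 PAL frame with a 16:15 SAR:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical input: a 720x576 PAL frame with a 16:15 sample aspect ratio */
    AVRational sar = { 16, 15 };
    AVRational dar = av_mul_q(sar, (AVRational) { 720, 576 });
    int num, den;

    /* clamp numerator and denominator to 16 bits, as the vprp
     * frame aspect ratio fields can hold no more */
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);
    printf("DAR = %d:%d\n", num, den); /* prints "DAR = 4:3" */
    return 0;
}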
Example #24
/**********************************************************************
 * AVIInit
 **********************************************************************
 * Allocates things, create file, initialize and write headers
 *********************************************************************/
static int AVIInit( hb_mux_object_t * m )
{
    hb_job_t   * job   = m->job;
    hb_title_t * title = job->title;

    hb_audio_t    * audio;
    hb_mux_data_t * mux_data;

    int audio_count = hb_list_count( title->list_audio );
    int is_passthru = 0;
    int is_ac3      = 0;
    int hdrl_bytes;
    int i;

    /* Allocate index */
    m->index       = hb_buffer_init( 1024 * 1024 );
    m->index->size = 0;

    /* Open destination file */
    hb_log( "muxavi: opening %s", job->file );
    m->file = fopen( job->file, "wb" );
    if( !m->file )
    {
        hb_log( "muxavi: failed to open %s", job->file );
        return -1;
    }

#define m m->main_header
    /* AVI main header */
    m.FourCC           = FOURCC( "avih" );
    m.BytesCount       = sizeof( hb_avi_main_header_t ) - 8;
    m.MicroSecPerFrame = (uint64_t) 1000000 * job->vrate_base / job->vrate;
    m.Streams          = 1 + audio_count;
    m.Width            = job->width;
    m.Height           = job->height;
#undef m

    /* Video track */
    mux_data = calloc( sizeof( hb_mux_data_t ), 1 );
    job->mux_data = mux_data;

#define h mux_data->header
    /* Video stream header */
    h.FourCC     = FOURCC( "strh" );
    h.BytesCount = sizeof( hb_avi_stream_header_t ) - 8;
    h.Type       = FOURCC( "vids" );

    if( job->vcodec == HB_VCODEC_FFMPEG )
        h.Handler = FOURCC( "divx" );
    else if( job->vcodec == HB_VCODEC_X264 )
        h.Handler = FOURCC( "h264" );

    h.Scale      = job->vrate_base;
    h.Rate       = job->vrate;
#undef h

#define f mux_data->format.v
    /* Video stream format */
    f.FourCC      = FOURCC( "strf" );
    f.BytesCount  = sizeof( hb_bitmap_info_t ) - 8;
    f.Size        = f.BytesCount;
    f.Width       = job->width;
    f.Height      = job->height;
    f.Planes      = 1;
    f.BitCount    = 24;
    if( job->vcodec == HB_VCODEC_FFMPEG )
        f.Compression = FOURCC( "DX50" );
    else if( job->vcodec == HB_VCODEC_X264 )
        f.Compression = FOURCC( "H264" );
#undef f

#define g mux_data->vprp_header
    /* Vprp video stream header */
    AVRational sample_aspect_ratio = ( AVRational ){ job->anamorphic.par_width, job->anamorphic.par_height };
    AVRational dar = av_mul_q( sample_aspect_ratio, ( AVRational ){ job->width, job->height } );
    int num, den;
    av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

    g.FourCC                = FOURCC( "vprp" );
    g.BytesCount            = sizeof( hb_avi_vprp_info_t ) - 8;
    g.VideoFormatToken      = 0;
    g.VideoStandard         = 0;
    g.dwVerticalRefreshRate = job->vrate / job->vrate_base;
    g.dwHTotalInT           = job->width;
    g.dwVTotalInLines       = job->height;
    g.dwFrameAspectRatioDen = den;
    g.dwFrameAspectRatioNum = num;
    g.dwFrameWidthInPixels  = job->width;
    g.dwFrameHeightInLines  = job->height;
    g.nbFieldPerFrame       = 1;
    g.CompressedBMHeight    = job->height;
    g.CompressedBMWidth     = job->width;
    g.ValidBMHeight         = job->height;
    g.ValidBMWidth          = job->width;
    g.ValidBMXOffset        = 0;
    g.ValidBMYOffset        = 0;
    g.VideoXOffsetInT       = 0;
    g.VideoYValidStartLine  = 0;
#undef g

    /* Audio tracks */
    for( i = 0; i < hb_list_count( title->list_audio ); i++ )
    {
        audio = hb_list_item( title->list_audio, i );

        is_ac3 = (audio->config.out.codec == HB_ACODEC_AC3);
        is_passthru = (audio->config.out.codec == HB_ACODEC_AC3) ||
                      (audio->config.out.codec == HB_ACODEC_DCA);

        mux_data = calloc( sizeof( hb_mux_data_t ), 1 );
        audio->priv.mux_data = mux_data;

#define h mux_data->header
#define f mux_data->format.a.f
#define m mux_data->format.a.m
        /* Audio stream header */
        h.FourCC        = FOURCC( "strh" );
        h.BytesCount    = sizeof( hb_avi_stream_header_t ) - 8;
        h.Type          = FOURCC( "auds" );
        h.InitialFrames = 1;
        h.Scale         = 1;
        h.Rate          = is_passthru ? ( audio->config.in.bitrate / 8 ) :
                                   ( audio->config.out.bitrate * 1000 / 8 );
        h.Quality       = 0xFFFFFFFF;
        h.SampleSize    = 1;

        /* Audio stream format */
        f.FourCC         = FOURCC( "strf" );
        if( is_passthru )
        {
            f.BytesCount     = sizeof( hb_wave_formatex_t ) - 8;
            f.FormatTag      = is_ac3 ? 0x2000 : 0x2001;
            f.Channels       = HB_INPUT_CH_LAYOUT_GET_DISCRETE_COUNT(audio->config.in.channel_layout);
            f.SamplesPerSec  = audio->config.in.samplerate;
        }
        else
        {
            f.BytesCount     = sizeof( hb_wave_formatex_t ) +
                               sizeof( hb_wave_mp3_t ) - 8;
            f.FormatTag      = 0x55;
            f.Channels       = HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT(audio->config.out.mixdown);
            f.SamplesPerSec  = audio->config.out.samplerate;
        }
        f.AvgBytesPerSec = h.Rate;
        f.BlockAlign     = 1;
        if( is_passthru )
        {
            f.Size       = 0;
        }
        else
        {
            f.Size           = sizeof( hb_wave_mp3_t );
            m.Id             = 1;
            m.Flags          = 2;
            m.BlockSize      = 1152 * f.AvgBytesPerSec / audio->config.out.samplerate;
            m.FramesPerBlock = 1;
            m.CodecDelay     = 1393;
        }
#undef h
#undef f
#undef m
    }

    hdrl_bytes =
        /* Main header */
        4 + sizeof( hb_avi_main_header_t ) +
        /* strh for video + audios */
        ( 1 + audio_count ) * ( 12 + sizeof( hb_avi_stream_header_t ) ) +
        /* video strf */
        sizeof( hb_bitmap_info_t ) +
        /* video vprp */
        ( job->anamorphic.mode ? sizeof( hb_avi_vprp_info_t ) : 0 ) +
        /* audios strf */
        audio_count * ( sizeof( hb_wave_formatex_t ) +
                        ( is_passthru ? 0 : sizeof( hb_wave_mp3_t ) ) );

    /* Here we really start to write into the file */

    /* Main headers */
    WriteInt32( m->file, FOURCC( "RIFF" ) );
    WriteInt32( m->file, 2040 );
    WriteInt32( m->file, FOURCC( "AVI " ) );
    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, hdrl_bytes );
    WriteInt32( m->file, FOURCC( "hdrl" ) );
    WriteMainHeader( m->file, &m->main_header );

    /* Video track */
    mux_data          = job->mux_data;
    mux_data->fourcc = FOURCC( "00dc" );

    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, 4 + sizeof( hb_avi_stream_header_t ) +
                sizeof( hb_bitmap_info_t )  +
                ( job->anamorphic.mode ? sizeof( hb_avi_vprp_info_t ) : 0 ) );
    WriteInt32( m->file, FOURCC( "strl" ) );
    WriteStreamHeader( m->file, &mux_data->header );
    WriteBitmapInfo( m->file, &mux_data->format.v );
    if( job->anamorphic.mode )
    {
        WriteVprpInfo( m->file, &mux_data->vprp_header );
    }

    /* Audio tracks */
    for( i = 0; i < audio_count; i++ )
    {
        char fourcc[4] = "00wb";

        audio    = hb_list_item( title->list_audio, i );
        mux_data = audio->priv.mux_data;

        fourcc[1] = '1' + i; /* This is fine as we don't allow more
                                than 8 tracks */
        mux_data->fourcc = FOURCC( fourcc );

        WriteInt32( m->file, FOURCC( "LIST" ) );
        WriteInt32( m->file, 4 + sizeof( hb_avi_stream_header_t ) +
                             sizeof( hb_wave_formatex_t ) +
                             ( is_passthru ? 0 : sizeof( hb_wave_mp3_t ) ) );
        WriteInt32( m->file, FOURCC( "strl" ) );
        WriteStreamHeader( m->file, &mux_data->header );
        WriteWaveFormatEx( m->file, &mux_data->format.a.f );
        if( !is_passthru )
        {
            WriteWaveMp3( m->file, &mux_data->format.a.m );
        }
    }

    WriteInt32( m->file, FOURCC( "JUNK" ) );
    WriteInt32( m->file, 2020 - hdrl_bytes );
    for( i = 0; i < 2020 - hdrl_bytes; i++ )
    {
        WriteInt8( m->file, 0 );
    }
    WriteInt32( m->file, FOURCC( "LIST" ) );
    WriteInt32( m->file, 4 );
    WriteInt32( m->file, FOURCC( "movi" ) );

    return 0;
}
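The avih MicroSecPerFrame field written above is the per-frame duration in microseconds, derived from HandBrake's vrate/vrate_base pair. A quick sketch of that integer arithmetic, assuming a hypothetical 27 MHz based 23.976 fps pair (27000000 / 1126125):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* hypothetical 23.976 fps clock pair: vrate / vrate_base */
    uint64_t vrate = 27000000, vrate_base = 1126125;

    /* widen to 64 bits before multiplying, exactly as the muxer does */
    uint32_t usec_per_frame = (uint64_t)1000000 * vrate_base / vrate;
    printf("MicroSecPerFrame = %u\n", usec_per_frame); /* prints 41708 */
    return 0;
}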
Example #25
static int rv20_decode_picture_header(RVDecContext *rv)
{
    MpegEncContext *s = &rv->m;
    int seq, mb_pos, i;
    int rpr_bits;

#if 0
    GetBitContext gb= s->gb;
    for(i=0; i<64; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&gb));
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif
#if 0
    av_log(s->avctx, AV_LOG_DEBUG, "%3dx%03d/%02Xx%02X ", s->width, s->height, s->width/4, s->height/4);
    for(i=0; i<s->avctx->extradata_size; i++){
        av_log(s->avctx, AV_LOG_DEBUG, "%02X ", ((uint8_t*)s->avctx->extradata)[i]);
        if(i%4==3) av_log(s->avctx, AV_LOG_DEBUG, " ");
    }
    av_log(s->avctx, AV_LOG_DEBUG, "\n");
#endif

    i= get_bits(&s->gb, 2);
    switch(i){
    case 0: s->pict_type= AV_PICTURE_TYPE_I; break;
    case 1: s->pict_type= AV_PICTURE_TYPE_I; break; //hmm ...
    case 2: s->pict_type= AV_PICTURE_TYPE_P; break;
    case 3: s->pict_type= AV_PICTURE_TYPE_B; break;
    default:
        av_log(s->avctx, AV_LOG_ERROR, "unknown frame type\n");
        return -1;
    }

    if(s->last_picture_ptr==NULL && s->pict_type==AV_PICTURE_TYPE_B){
        av_log(s->avctx, AV_LOG_ERROR, "early B pix\n");
        return -1;
    }

    if (get_bits1(&s->gb)){
        av_log(s->avctx, AV_LOG_ERROR, "reserved bit set\n");
        return -1;
    }

    s->qscale = get_bits(&s->gb, 5);
    if(s->qscale==0){
        av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
        return -1;
    }

    if(RV_GET_MINOR_VER(rv->sub_id) >= 2)
        s->loop_filter = get_bits1(&s->gb);

    if(RV_GET_MINOR_VER(rv->sub_id) <= 1)
        seq = get_bits(&s->gb, 8) << 7;
    else
        seq = get_bits(&s->gb, 13) << 2;

    rpr_bits = s->avctx->extradata[1] & 7;
    if(rpr_bits){
        int f, new_w, new_h;
        rpr_bits = FFMIN((rpr_bits >> 1) + 1, 3);

        f = get_bits(&s->gb, rpr_bits);

        if(f){
            new_w= 4*((uint8_t*)s->avctx->extradata)[6+2*f];
            new_h= 4*((uint8_t*)s->avctx->extradata)[7+2*f];
        }else{
            new_w= s->orig_width ;
            new_h= s->orig_height;
        }
        if(new_w != s->width || new_h != s->height){
            AVRational old_aspect = s->avctx->sample_aspect_ratio;
            av_log(s->avctx, AV_LOG_DEBUG, "attempting to change resolution to %dx%d\n", new_w, new_h);
            if (av_image_check_size(new_w, new_h, 0, s->avctx) < 0)
                return -1;
            ff_MPV_common_end(s);

            // attempt to keep aspect during typical resolution switches
            if (!old_aspect.num)
                old_aspect = (AVRational){1, 1};
            if (2 * new_w * s->height == new_h * s->width)
                s->avctx->sample_aspect_ratio = av_mul_q(old_aspect, (AVRational){2, 1});
            if (new_w * s->height == 2 * new_h * s->width)
                s->avctx->sample_aspect_ratio = av_mul_q(old_aspect, (AVRational){1, 2});
            avcodec_set_dimensions(s->avctx, new_w, new_h);
            s->width  = new_w;
            s->height = new_h;
            if (ff_MPV_common_init(s) < 0)
                return -1;
        }

        if(s->avctx->debug & FF_DEBUG_PICT_INFO){
            av_log(s->avctx, AV_LOG_DEBUG, "F %d/%d\n", f, rpr_bits);
        }
    } else if (av_image_check_size(s->width, s->height, 0, s->avctx) < 0)
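When the RPR code above switches resolution, it rescales the stored sample aspect ratio with av_mul_q() so the display aspect survives the typical 2:1 width or height changes. A minimal sketch of that correction, with hypothetical dimensions:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical switch from 640x480 to 320x480; since
     * 2*new_w*h == new_h*w, the width halved, so the SAR doubles */
    AVRational sar = { 1, 1 };
    int w = 640, h = 480, new_w = 320, new_h = 480;

    if (2 * new_w * h == new_h * w)
        sar = av_mul_q(sar, (AVRational) { 2, 1 });
    printf("new SAR = %d:%d\n", sar.num, sar.den); /* prints "new SAR = 2:1" */
    return 0;
}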
Example #26
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    AVIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVDictionaryEntry *t = NULL;

    if (s->nb_streams > AVI_MAX_STREAM_COUNT) {
        av_log(s, AV_LOG_ERROR, "AVI does not support >%d streams\n",
               AVI_MAX_STREAM_COUNT);
        return AVERROR(EINVAL);
    }

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    ffio_wfourcc(pb, "avih");
    avio_wl32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == AVMEDIA_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        avio_wl32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, bitrate / 8); /* XXX: not quite exact */
    avio_wl32(pb, 0); /* padding */
    if (!pb->seekable)
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        avio_wl32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = avio_tell(pb); /* remember this offset to fill later */
    avio_wl32(pb, nb_frames); /* nb frames, filled later */
    avio_wl32(pb, 0); /* initial frame */
    avio_wl32(pb, s->nb_streams); /* nb streams */
    avio_wl32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        avio_wl32(pb, video_enc->width);
        avio_wl32(pb, video_enc->height);
    } else {
        avio_wl32(pb, 0);
        avio_wl32(pb, 0);
    }
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */
    avio_wl32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        ffio_wfourcc(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) {
                av_log(s, AV_LOG_ERROR, "Subtitle streams other than DivX XSUB are not supported by the AVI muxer.\n");
                return AVERROR_PATCHWELCOME;
            }
        case AVMEDIA_TYPE_VIDEO: ffio_wfourcc(pb, "vids"); break;
        case AVMEDIA_TYPE_AUDIO: ffio_wfourcc(pb, "auds"); break;
//      case AVMEDIA_TYPE_TEXT : ffio_wfourcc(pb, "txts"); break;
        case AVMEDIA_TYPE_DATA : ffio_wfourcc(pb, "dats"); break;
        }
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO ||
           stream->codec_id == AV_CODEC_ID_XSUB)
            avio_wl32(pb, stream->codec_tag);
        else
            avio_wl32(pb, 1);
        avio_wl32(pb, 0); /* flags */
        avio_wl16(pb, 0); /* priority */
        avio_wl16(pb, 0); /* language */
        avio_wl32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        if (   stream->codec_type == AVMEDIA_TYPE_VIDEO
            && stream->codec_id != AV_CODEC_ID_XSUB
            && au_byterate > 1000LL*au_scale) {
            au_byterate = 600;
            au_scale    = 1;
        }
        avpriv_set_pts_info(s->streams[i], 64, au_scale, au_byterate);
        if(stream->codec_id == AV_CODEC_ID_XSUB)
            au_scale = au_byterate = 0;

        avio_wl32(pb, au_scale); /* scale */
        avio_wl32(pb, au_byterate); /* rate */

        avio_wl32(pb, 0); /* start */
        avist->frames_hdr_strm = avio_tell(pb); /* remember this offset to fill later */
        if (!pb->seekable)
            avio_wl32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            avio_wl32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == AVMEDIA_TYPE_VIDEO)
            avio_wl32(pb, 1024 * 1024);
        else if(stream->codec_type == AVMEDIA_TYPE_AUDIO)
            avio_wl32(pb, 12 * 1024);
        else
            avio_wl32(pb, 0);
        avio_wl32(pb, -1); /* quality */
        avio_wl32(pb, au_ssize); /* sample size */
        avio_wl32(pb, 0);
        avio_wl16(pb, stream->width);
        avio_wl16(pb, stream->height);
        ff_end_tag(pb, strh);

      if(stream->codec_type != AVMEDIA_TYPE_DATA){
          int ret;

        strf = ff_start_tag(pb, "strf");
        switch(stream->codec_type) {
        case AVMEDIA_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != AV_CODEC_ID_XSUB) break;
        case AVMEDIA_TYPE_VIDEO:
            ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0, 0);
            break;
        case AVMEDIA_TYPE_AUDIO:
            if ((ret = ff_put_wav_header(pb, stream)) < 0) {
                return ret;
            }
            break;
        default:
            av_log(s, AV_LOG_ERROR,
                   "Invalid or not supported codec type '%s' found in the input\n",
                   (char *)av_x_if_null(av_get_media_type_string(stream->codec_type), "?"));
            return AVERROR(EINVAL);
        }
        ff_end_tag(pb, strf);
        if ((t = av_dict_get(s->streams[i]->metadata, "title", NULL, 0))) {
            ff_riff_write_info_tag(s->pb, "strn", t->value);
            t = NULL;
        }
        if(stream->codec_id == AV_CODEC_ID_XSUB
           && (t = av_dict_get(s->streams[i]->metadata, "language", NULL, 0))) {
            const char* langstr = av_convert_lang_to(t->value, AV_LANG_ISO639_1);
            t = NULL;
            if (langstr) {
                char* str = av_asprintf("Subtitle - %s-xx;02", langstr);
                if (!str)
                    return AVERROR(ENOMEM);
                ff_riff_write_info_tag(s->pb, "strn", str);
                av_free(str);
            }
        }
      }

        if (pb->seekable) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            avio_wl16(pb, 4);        /* wLongsPerEntry */
            avio_w8(pb, 0);          /* bIndexSubType (0 == frame index) */
            avio_w8(pb, 0);          /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            avio_wl32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            ffio_wfourcc(pb, avi_stream2fourcc(tag, i, stream->codec_type));
                                    /* dwChunkId */
            avio_wl64(pb, 0);        /* dwReserved[3] */
            avio_wl32(pb, 0);        /* Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 avio_wl64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if(   stream->codec_type == AVMEDIA_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){stream->width, stream->height});
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            avio_wl32(pb, 0); //video format  = unknown
            avio_wl32(pb, 0); //video standard= unknown
            avio_wl32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl16(pb, den);
            avio_wl16(pb, num);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, 1); //progressive FIXME

            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, stream->height);
            avio_wl32(pb, stream->width );
            avio_wl32(pb, 0);
            avio_wl32(pb, 0);

            avio_wl32(pb, 0);
            avio_wl32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (pb->seekable) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        ffio_wfourcc(pb, "odml");
        ffio_wfourcc(pb, "dmlh");
        avio_wl32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             avio_wl32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    ff_riff_write_info(s);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        avio_wl32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    ffio_wfourcc(pb, "movi");

    avio_flush(pb);

    return 0;
}
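Both revisions of avi_write_header store a vprp refresh rate of roughly 1/time_base: this older one rounds through floating point with lrintf(1.0/av_q2d(tb)), while the newer one above uses the pure integer expression (2*den + num - 1) / (2*num). A sketch showing the two roundings agree on a hypothetical NTSC time base:

#include <math.h>
#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical NTSC time base 1001/30000, i.e. 29.97 fps */
    AVRational tb = { 1001, 30000 };
    long ri = (2LL * tb.den + tb.num - 1) / (2LL * tb.num); /* integer rounding */
    long rf = lrintf(1.0 / av_q2d(tb));                     /* float rounding   */

    printf("%ld %ld\n", ri, rf); /* prints "30 30" */
    return 0;
}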
Example #27
static int config_out_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = outlink->src->inputs[0];
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(outlink->format);
    TInterlaceContext *tinterlace = ctx->priv;
    int i;

    tinterlace->vsub = desc->log2_chroma_h;
    outlink->w = inlink->w;
    outlink->h = tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2?
        inlink->h*2 : inlink->h;
    if (tinterlace->mode == MODE_MERGE || tinterlace->mode == MODE_PAD || tinterlace->mode == MODE_MERGEX2)
        outlink->sample_aspect_ratio = av_mul_q(inlink->sample_aspect_ratio,
                                                av_make_q(2, 1));

    if (tinterlace->mode == MODE_PAD) {
        uint8_t black[4] = { 0, 0, 0, 16 };
        int ret;
        ff_draw_init(&tinterlace->draw, outlink->format, 0);
        ff_draw_color(&tinterlace->draw, &tinterlace->color, black);
        if (ff_fmt_is_in(outlink->format, full_scale_yuvj_pix_fmts))
            tinterlace->color.comp[0].u8[0] = 0;
        ret = av_image_alloc(tinterlace->black_data, tinterlace->black_linesize,
                             outlink->w, outlink->h, outlink->format, 16);
        if (ret < 0)
            return ret;

        ff_fill_rectangle(&tinterlace->draw, &tinterlace->color, tinterlace->black_data,
                          tinterlace->black_linesize, 0, 0, outlink->w, outlink->h);
    }
    if (tinterlace->flags & (TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF)
            && !(tinterlace->mode == MODE_INTERLEAVE_TOP
              || tinterlace->mode == MODE_INTERLEAVE_BOTTOM)) {
        av_log(ctx, AV_LOG_WARNING, "low_pass_filter flags ignored with mode %d\n",
                tinterlace->mode);
        tinterlace->flags &= ~(TINTERLACE_FLAG_VLPF | TINTERLACE_FLAG_CVLPF);
    }
    tinterlace->preout_time_base = inlink->time_base;
    if (tinterlace->mode == MODE_INTERLACEX2) {
        tinterlace->preout_time_base.den *= 2;
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){2,1});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){1,2});
    } else if (tinterlace->mode == MODE_MERGEX2) {
        outlink->frame_rate = inlink->frame_rate;
        outlink->time_base  = inlink->time_base;
    } else if (tinterlace->mode != MODE_PAD) {
        outlink->frame_rate = av_mul_q(inlink->frame_rate, (AVRational){1,2});
        outlink->time_base  = av_mul_q(inlink->time_base , (AVRational){2,1});
    }

    for (i = 0; i<FF_ARRAY_ELEMS(standard_tbs); i++){
        if (!av_cmp_q(standard_tbs[i], outlink->time_base))
            break;
    }
    if (i == FF_ARRAY_ELEMS(standard_tbs) ||
        (tinterlace->flags & TINTERLACE_FLAG_EXACT_TB))
        outlink->time_base = tinterlace->preout_time_base;

    tinterlace->csp = av_pix_fmt_desc_get(outlink->format);
    if (tinterlace->flags & TINTERLACE_FLAG_CVLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_complex_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_complex_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    } else if (tinterlace->flags & TINTERLACE_FLAG_VLPF) {
        if (tinterlace->csp->comp[0].depth > 8)
            tinterlace->lowpass_line = lowpass_line_c_16;
        else
            tinterlace->lowpass_line = lowpass_line_c;
        if (ARCH_X86)
            ff_tinterlace_init_x86(tinterlace);
    }

    av_log(ctx, AV_LOG_VERBOSE, "mode:%d filter:%s h:%d -> h:%d\n", tinterlace->mode,
           (tinterlace->flags & TINTERLACE_FLAG_CVLPF) ? "complex" :
           (tinterlace->flags & TINTERLACE_FLAG_VLPF) ? "linear" : "off",
           inlink->h, outlink->h);

    return 0;
}
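config_out_props above keeps frame rate and time base reciprocal as the mode changes cadence: the plain interleave modes halve the rate and double the time base, while interlacex2 does the reverse. A minimal sketch of that av_mul_q() bookkeeping for a hypothetical 25 fps input:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    AVRational fr = { 25, 1 }, tb = { 1, 25 }; /* hypothetical 25 fps input */

    /* interleave_top/interleave_bottom: half the rate, double the time base */
    AVRational half_fr = av_mul_q(fr, (AVRational) { 1, 2 });
    AVRational dbl_tb  = av_mul_q(tb, (AVRational) { 2, 1 });

    printf("fr %d/%d, tb %d/%d\n",
           half_fr.num, half_fr.den, dbl_tb.num, dbl_tb.den); /* 25/2, 2/25 */
    return 0;
}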
Example #28
/**
 * Add a stream to the muxer
 */
static int
lav_muxer_add_stream(lav_muxer_t *lm, 
		     const streaming_start_component_t *ssc)
{
  AVStream *st;
  AVCodecContext *c;

  st = avformat_new_stream(lm->lm_oc, NULL);
  if (!st)
    return -1;

  st->id = ssc->ssc_index;
  c = st->codec;
  c->codec_id = streaming_component_type2codec_id(ssc->ssc_type);

  switch(lm->m_config.m_type) {
  case MC_MATROSKA:
  case MC_AVMATROSKA:
  case MC_AVMP4:
    st->time_base.num = 1000000;
    st->time_base.den = 1;
    break;

  case MC_MPEGPS:
    c->rc_buffer_size = 224*1024*8;
    //Fall-through
  case MC_MPEGTS:
    st->time_base.num = 90000;
    st->time_base.den = 1;
    break;

  default:
    st->time_base = AV_TIME_BASE_Q;
    break;
  }

  if(ssc->ssc_gh) {
    if (ssc->ssc_type == SCT_H264 || ssc->ssc_type == SCT_HEVC) {
      sbuf_t hdr;
      sbuf_init(&hdr);
      if (ssc->ssc_type == SCT_H264) {
          isom_write_avcc(&hdr, pktbuf_ptr(ssc->ssc_gh),
                          pktbuf_len(ssc->ssc_gh));
      } else {
          isom_write_hvcc(&hdr, pktbuf_ptr(ssc->ssc_gh),
                          pktbuf_len(ssc->ssc_gh));
      }
      c->extradata_size = hdr.sb_ptr;
      c->extradata = av_malloc(hdr.sb_ptr);
      if (!c->extradata) {
        sbuf_free(&hdr);
        return -1;
      }
      memcpy(c->extradata, hdr.sb_data, hdr.sb_ptr);
      sbuf_free(&hdr);
    } else {
      c->extradata_size = pktbuf_len(ssc->ssc_gh);
      c->extradata = av_malloc(c->extradata_size);
      if (!c->extradata)
        return -1;
      memcpy(c->extradata, pktbuf_ptr(ssc->ssc_gh),
             pktbuf_len(ssc->ssc_gh));
    }
  }

  if(SCT_ISAUDIO(ssc->ssc_type)) {
    c->codec_type    = AVMEDIA_TYPE_AUDIO;
    c->sample_fmt    = AV_SAMPLE_FMT_S16;

    c->sample_rate   = sri_to_rate(ssc->ssc_sri);
    c->channels      = ssc->ssc_channels;

#if 0
    c->time_base.num = 1;
    c->time_base.den = c->sample_rate;
#else
    c->time_base     = st->time_base;
#endif

    av_dict_set(&st->metadata, "language", ssc->ssc_lang, 0);

  } else if(SCT_ISVIDEO(ssc->ssc_type)) {
    c->codec_type = AVMEDIA_TYPE_VIDEO;
    c->width      = ssc->ssc_width;
    c->height     = ssc->ssc_height;

    c->time_base.num = 1;
    c->time_base.den = 25;

    c->sample_aspect_ratio.num = ssc->ssc_aspect_num;
    c->sample_aspect_ratio.den = ssc->ssc_aspect_den;

    if (lm->m_config.m_type == MC_AVMP4) {
      /* this is a whole hell */
      AVRational ratio = { c->height, c->width };
      c->sample_aspect_ratio = av_mul_q(c->sample_aspect_ratio, ratio);
    }

    st->sample_aspect_ratio.num = c->sample_aspect_ratio.num;
    st->sample_aspect_ratio.den = c->sample_aspect_ratio.den;

  } else if(SCT_ISSUBTITLE(ssc->ssc_type)) {
    c->codec_type = AVMEDIA_TYPE_SUBTITLE;
    av_dict_set(&st->metadata, "language", ssc->ssc_lang, 0);
  }

  if(lm->lm_oc->oformat->flags & AVFMT_GLOBALHEADER)
    c->flags |= CODEC_FLAG_GLOBAL_HEADER;

  return 0;
}
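The MC_AVMP4 branch above multiplies the stored aspect ratio by { height, width }; whatever one makes of that workaround, av_mul_q() reduces the product to lowest terms internally, so no separate av_reduce() call is needed. A sketch with hypothetical 720x576, 64:45 values:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical 720x576 stream with a 64:45 sample aspect ratio */
    AVRational sar   = { 64, 45 };
    AVRational ratio = { 576, 720 }; /* { height, width }, as in the MC_AVMP4 branch */
    AVRational r     = av_mul_q(sar, ratio);

    printf("%d:%d\n", r.num, r.den); /* 36864:32400 reduced to 256:225 */
    return 0;
}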
Example #29
void
video_deliver_frame_avctx(video_decoder_t *vd,
			  media_pipe_t *mp, media_queue_t *mq,
			  AVCodecContext *ctx, AVFrame *frame,
			  const media_buf_t *mb, int decode_time)
{
  frame_info_t fi;

  if(mb->mb_time != AV_NOPTS_VALUE)
    mp_set_current_time(mp, mb->mb_time);

  /* Compute aspect ratio */
  switch(mb->mb_aspect_override) {
  case 0:

    if(frame->pan_scan != NULL && frame->pan_scan->width != 0) {
      fi.dar.num = frame->pan_scan->width;
      fi.dar.den = frame->pan_scan->height;
    } else {
      fi.dar.num = ctx->width;
      fi.dar.den = ctx->height;
    }

    if(ctx->sample_aspect_ratio.num)
      fi.dar = av_mul_q(fi.dar, ctx->sample_aspect_ratio);
    break;
  case 1:
    fi.dar = (AVRational){4,3};
    break;
  case 2:
    fi.dar = (AVRational){16,9};
    break;
  }

  int64_t pts = mb->mb_pts;

  /* Compute duration and PTS of frame */
  if(pts == AV_NOPTS_VALUE && mb->mb_dts != AV_NOPTS_VALUE &&
     (ctx->has_b_frames == 0 || frame->pict_type == FF_B_TYPE)) {
    pts = mb->mb_dts;
  }

  int duration = mb->mb_duration;

  if(!vd_valid_duration(duration)) {
    /* duration is zero or very invalid, use duration from last output */
    duration = vd->vd_estimated_duration;
  }

  if(pts == AV_NOPTS_VALUE && vd->vd_nextpts != AV_NOPTS_VALUE)
    pts = vd->vd_nextpts; /* no pts set, use estimated pts */

  if(pts != AV_NOPTS_VALUE && vd->vd_prevpts != AV_NOPTS_VALUE) {
    /* we know PTS of a prior frame */
    int64_t t = (pts - vd->vd_prevpts) / vd->vd_prevpts_cnt;

    if(vd_valid_duration(t)) {
      /* inter frame duration seems valid, store it */
      vd->vd_estimated_duration = t;
      if(duration == 0)
	duration = t;

    } else if(t < 0 || t > 10000000LL) {
      /* PTS discontinuity, use estimated PTS from last output instead */
      pts = vd->vd_nextpts;
    }
  }
  
  duration += frame->repeat_pict * duration / 2;
 
  if(pts != AV_NOPTS_VALUE) {
    vd->vd_prevpts = pts;
    vd->vd_prevpts_cnt = 0;
  }
  vd->vd_prevpts_cnt++;

  if(duration == 0) {
    TRACE(TRACE_DEBUG, "Video", "Dropping frame with duration = 0");
    return;
  }

  prop_set_int(mq->mq_prop_too_slow, decode_time > duration);

  if(pts != AV_NOPTS_VALUE) {
    vd->vd_nextpts = pts + duration;
  } else {
    vd->vd_nextpts = AV_NOPTS_VALUE;
  }

  vd->vd_interlaced |=
    frame->interlaced_frame && !mb->mb_disable_deinterlacer;

  fi.width = ctx->width;
  fi.height = ctx->height;
  fi.pix_fmt = ctx->pix_fmt;
  fi.pts = pts;
  fi.epoch = mb->mb_epoch;
  fi.duration = duration;

  fi.interlaced = !!vd->vd_interlaced;
  fi.tff = !!frame->top_field_first;
  fi.prescaled = 0;

  fi.color_space = ctx->colorspace;
  fi.color_range = ctx->color_range;

  video_deliver_frame(vd, FRAME_BUFFER_TYPE_LIBAV_FRAME, frame, &fi,
		      mb->mb_send_pts);
}
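The aspect computation above seeds fi.dar with the coded (or pan-scan) dimensions and multiplies in the codec's sample aspect ratio only when one is signalled. A minimal sketch, assuming a hypothetical 1440x1080 anamorphic frame with a 4:3 SAR:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical anamorphic HDV frame: 1440x1080 coded, 4:3 SAR */
    AVRational dar = { 1440, 1080 };
    AVRational sar = { 4, 3 };

    if (sar.num) /* only apply a SAR that is actually set */
        dar = av_mul_q(dar, sar);
    printf("DAR = %d:%d\n", dar.num, dar.den); /* prints "DAR = 16:9" */
    return 0;
}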
Example #30
static int CUDAAPI cuvid_handle_video_sequence(void *opaque, CUVIDEOFORMAT* format)
{
    AVCodecContext *avctx = opaque;
    CuvidContext *ctx = avctx->priv_data;
    AVHWFramesContext *hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    CUVIDDECODECAPS *caps = NULL;
    CUVIDDECODECREATEINFO cuinfo;
    int surface_fmt;

    int old_width = avctx->width;
    int old_height = avctx->height;

    enum AVPixelFormat pix_fmts[3] = { AV_PIX_FMT_CUDA,
                                       AV_PIX_FMT_NONE,  // Will be updated below
                                       AV_PIX_FMT_NONE };

    av_log(avctx, AV_LOG_TRACE, "pfnSequenceCallback, progressive_sequence=%d\n", format->progressive_sequence);

    memset(&cuinfo, 0, sizeof(cuinfo));

    ctx->internal_error = 0;

    avctx->coded_width = cuinfo.ulWidth = format->coded_width;
    avctx->coded_height = cuinfo.ulHeight = format->coded_height;

    // apply cropping
    cuinfo.display_area.left = format->display_area.left + ctx->crop.left;
    cuinfo.display_area.top = format->display_area.top + ctx->crop.top;
    cuinfo.display_area.right = format->display_area.right - ctx->crop.right;
    cuinfo.display_area.bottom = format->display_area.bottom - ctx->crop.bottom;

    // width and height need to be set before calling ff_get_format
    if (ctx->resize_expr) {
        avctx->width = ctx->resize.width;
        avctx->height = ctx->resize.height;
    } else {
        avctx->width = cuinfo.display_area.right - cuinfo.display_area.left;
        avctx->height = cuinfo.display_area.bottom - cuinfo.display_area.top;
    }

    // target width/height need to be multiples of two
    cuinfo.ulTargetWidth = avctx->width = (avctx->width + 1) & ~1;
    cuinfo.ulTargetHeight = avctx->height = (avctx->height + 1) & ~1;

    // aspect ratio conversion, 1:1, depends on scaled resolution
    cuinfo.target_rect.left = 0;
    cuinfo.target_rect.top = 0;
    cuinfo.target_rect.right = cuinfo.ulTargetWidth;
    cuinfo.target_rect.bottom = cuinfo.ulTargetHeight;

    switch (format->bit_depth_luma_minus8) {
    case 0: // 8-bit
        pix_fmts[1] = AV_PIX_FMT_NV12;
        caps = &ctx->caps8;
        break;
    case 2: // 10-bit
        pix_fmts[1] = AV_PIX_FMT_P010;
        caps = &ctx->caps10;
        break;
    case 4: // 12-bit
        pix_fmts[1] = AV_PIX_FMT_P016;
        caps = &ctx->caps12;
        break;
    default:
        break;
    }

    if (!caps || !caps->bIsSupported) {
        av_log(avctx, AV_LOG_ERROR, "unsupported bit depth: %d\n",
               format->bit_depth_luma_minus8 + 8);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    surface_fmt = ff_get_format(avctx, pix_fmts);
    if (surface_fmt < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_get_format failed: %d\n", surface_fmt);
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Formats: Original: %s | HW: %s | SW: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt),
           av_get_pix_fmt_name(surface_fmt),
           av_get_pix_fmt_name(avctx->sw_pix_fmt));

    avctx->pix_fmt = surface_fmt;

    // Update our hwframe ctx, as the get_format callback might have refreshed it!
    if (avctx->hw_frames_ctx) {
        av_buffer_unref(&ctx->hwframe);

        ctx->hwframe = av_buffer_ref(avctx->hw_frames_ctx);
        if (!ctx->hwframe) {
            ctx->internal_error = AVERROR(ENOMEM);
            return 0;
        }

        hwframe_ctx = (AVHWFramesContext*)ctx->hwframe->data;
    }

    ff_set_sar(avctx, av_div_q(
        (AVRational){ format->display_aspect_ratio.x, format->display_aspect_ratio.y },
        (AVRational){ avctx->width, avctx->height }));

    ctx->deint_mode_current = format->progressive_sequence
                              ? cudaVideoDeinterlaceMode_Weave
                              : ctx->deint_mode;

    if (!format->progressive_sequence && ctx->deint_mode_current == cudaVideoDeinterlaceMode_Weave)
        avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
    else
        avctx->flags &= ~AV_CODEC_FLAG_INTERLACED_DCT;

    if (format->video_signal_description.video_full_range_flag)
        avctx->color_range = AVCOL_RANGE_JPEG;
    else
        avctx->color_range = AVCOL_RANGE_MPEG;

    avctx->color_primaries = format->video_signal_description.color_primaries;
    avctx->color_trc = format->video_signal_description.transfer_characteristics;
    avctx->colorspace = format->video_signal_description.matrix_coefficients;

    if (format->bitrate)
        avctx->bit_rate = format->bitrate;

    if (format->frame_rate.numerator && format->frame_rate.denominator) {
        avctx->framerate.num = format->frame_rate.numerator;
        avctx->framerate.den = format->frame_rate.denominator;
    }

    if (ctx->cudecoder
            && avctx->coded_width == format->coded_width
            && avctx->coded_height == format->coded_height
            && avctx->width == old_width
            && avctx->height == old_height
            && ctx->chroma_format == format->chroma_format
            && ctx->codec_type == format->codec)
        return 1;

    if (ctx->cudecoder) {
        av_log(avctx, AV_LOG_TRACE, "Re-initializing decoder\n");
        ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidDestroyDecoder(ctx->cudecoder));
        if (ctx->internal_error < 0)
            return 0;
        ctx->cudecoder = NULL;
    }

    if (hwframe_ctx->pool && (
            hwframe_ctx->width < avctx->width ||
            hwframe_ctx->height < avctx->height ||
            hwframe_ctx->format != AV_PIX_FMT_CUDA ||
            hwframe_ctx->sw_format != avctx->sw_pix_fmt)) {
        av_log(avctx, AV_LOG_ERROR, "AVHWFramesContext is already initialized with incompatible parameters\n");
        av_log(avctx, AV_LOG_DEBUG, "width: %d <-> %d\n", hwframe_ctx->width, avctx->width);
        av_log(avctx, AV_LOG_DEBUG, "height: %d <-> %d\n", hwframe_ctx->height, avctx->height);
        av_log(avctx, AV_LOG_DEBUG, "format: %s <-> cuda\n", av_get_pix_fmt_name(hwframe_ctx->format));
        av_log(avctx, AV_LOG_DEBUG, "sw_format: %s <-> %s\n",
               av_get_pix_fmt_name(hwframe_ctx->sw_format), av_get_pix_fmt_name(avctx->sw_pix_fmt));
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    if (format->chroma_format != cudaVideoChromaFormat_420) {
        av_log(avctx, AV_LOG_ERROR, "Chroma formats other than 420 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    ctx->chroma_format = format->chroma_format;

    cuinfo.CodecType = ctx->codec_type = format->codec;
    cuinfo.ChromaFormat = format->chroma_format;

    switch (avctx->sw_pix_fmt) {
    case AV_PIX_FMT_NV12:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_NV12;
        break;
    case AV_PIX_FMT_P010:
    case AV_PIX_FMT_P016:
        cuinfo.OutputFormat = cudaVideoSurfaceFormat_P016;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Output formats other than NV12, P010 or P016 are not supported\n");
        ctx->internal_error = AVERROR(EINVAL);
        return 0;
    }

    cuinfo.ulNumDecodeSurfaces = ctx->nb_surfaces;
    cuinfo.ulNumOutputSurfaces = 1;
    cuinfo.ulCreationFlags = cudaVideoCreate_PreferCUVID;
    cuinfo.bitDepthMinus8 = format->bit_depth_luma_minus8;
    cuinfo.DeinterlaceMode = ctx->deint_mode_current;

    if (ctx->deint_mode_current != cudaVideoDeinterlaceMode_Weave && !ctx->drop_second_field)
        avctx->framerate = av_mul_q(avctx->framerate, (AVRational){2, 1});

    ctx->internal_error = CHECK_CU(ctx->cvdl->cuvidCreateDecoder(&ctx->cudecoder, &cuinfo));
    if (ctx->internal_error < 0)
        return 0;

    if (!hwframe_ctx->pool) {
        hwframe_ctx->format = AV_PIX_FMT_CUDA;
        hwframe_ctx->sw_format = avctx->sw_pix_fmt;
        hwframe_ctx->width = avctx->width;
        hwframe_ctx->height = avctx->height;

        if ((ctx->internal_error = av_hwframe_ctx_init(ctx->hwframe)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_init failed\n");
            return 0;
        }
    }

    return 1;
}
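The ff_set_sar() call above recovers the sample aspect ratio by dividing the display aspect ratio signalled by cuvid by the storage aspect { width, height }, using av_div_q(). A sketch of that inversion for a hypothetical 1440x1080 picture flagged as 16:9:

#include <stdio.h>
#include <libavutil/rational.h>

int main(void)
{
    /* hypothetical: 16:9 display aspect signalled on a 1440x1080 picture */
    AVRational dar = { 16, 9 };
    AVRational sar = av_div_q(dar, (AVRational) { 1440, 1080 });

    printf("SAR = %d:%d\n", sar.num, sar.den); /* prints "SAR = 4:3" */
    return 0;
}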