Example #1
static av_cold int yop_decode_init(AVCodecContext *avctx)
{
    YopDecContext *s = avctx->priv_data;
    s->avctx = avctx;

    if (avctx->width & 1 || avctx->height & 1 ||
        av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "YOP has invalid dimensions\n");
        return AVERROR(EINVAL);
    }

    avctx->pix_fmt = PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&s->frame);

    if (avctx->extradata_size < 3) {
        av_log(avctx, AV_LOG_ERROR, "Missing or incomplete extradata.\n");
        return AVERROR_INVALIDDATA;
    }

    s->num_pal_colors = avctx->extradata[0];
    s->first_color[0] = avctx->extradata[1];
    s->first_color[1] = avctx->extradata[2];

    if (s->num_pal_colors + s->first_color[0] > 256 ||
        s->num_pal_colors + s->first_color[1] > 256) {
        av_log(avctx, AV_LOG_ERROR,
               "YOP: palette parameters invalid, header probably corrupt\n");
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
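A note on the pattern these examples share: av_image_check_size() returns 0 when the given width and height describe a valid, representable image and a negative AVERROR code otherwise; the third argument is a log-level offset (0 in every example here) and the fourth is the logging context, which may be NULL. A minimal sketch of that contract, using a hypothetical validate_dimensions() helper:

#include <libavutil/imgutils.h>
#include <libavutil/log.h>

/* Hypothetical helper (not part of any example above): wraps the call
 * pattern the examples share. */
static int validate_dimensions(void *log_ctx, int w, int h)
{
    /* third argument is a log-level offset, fourth the log context */
    int ret = av_image_check_size(w, h, 0, log_ctx);
    if (ret < 0)
        av_log(log_ctx, AV_LOG_ERROR, "invalid dimensions %dx%d\n", w, h);
    return ret; /* 0 on success, negative AVERROR code on failure */
}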
Example #2
static av_cold int init(AVCodecContext *avctx)
{
    AVRnContext *a = avctx->priv_data;
    int ret;

    // Support "Resolution 1:1" for Avid AVI Codec
    a->is_mjpeg = avctx->extradata_size < 31 || memcmp(&avctx->extradata[28], "1:1", 3);

    if(!a->is_mjpeg && avctx->lowres) {
        av_log(avctx, AV_LOG_ERROR, "lowres is not possible with rawvideo\n");
        return AVERROR(EINVAL);
    }

    if(a->is_mjpeg)
        return ff_mjpeg_decode_init(avctx);

    if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return ret;

    avctx->pix_fmt = AV_PIX_FMT_UYVY422;

    if(avctx->extradata_size >= 9 && avctx->extradata[4]+28 < avctx->extradata_size) {
        int ndx = avctx->extradata[4] + 4;
        a->interlace = !memcmp(avctx->extradata + ndx, "1:1(", 4);
        if(a->interlace) {
            a->tff = avctx->extradata[ndx + 24] == 1;
        }
    }

    return 0;
}
Example #3
static av_cold int screenpresso_init(AVCodecContext *avctx)
{
    ScreenpressoContext *ctx = avctx->priv_data;

    /* These need to be set to estimate the uncompressed buffer size */
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Allocate current frame */
    ctx->current = av_frame_alloc();
    if (!ctx->current)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_BGR24;

    /* Allocate maximum size possible, a full frame */
    ctx->inflated_size = avctx->width * avctx->height * 3;
    ctx->inflated_buf  = av_malloc(ctx->inflated_size);
    if (!ctx->inflated_buf)
        return AVERROR(ENOMEM);

    return 0;
}
Example #4
static int source_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    Frei0rContext *s = ctx->priv;

    if (av_image_check_size(s->w, s->h, 0, ctx) < 0)
        return AVERROR(EINVAL);
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->time_base = s->time_base;
    outlink->frame_rate = av_inv_q(s->time_base);

    if (s->destruct && s->instance)
        s->destruct(s->instance);
    if (!(s->instance = s->construct(outlink->w, outlink->h))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load frei0r instance.\n");
        return AVERROR(EINVAL);
    }
    if (!s->params) {
        av_log(ctx, AV_LOG_ERROR, "frei0r filter parameters not set.\n");
        return AVERROR(EINVAL);
    }

    return set_params(ctx, s->params);
}
Example #5
static void libschroedinger_handle_first_access_unit(AVCodecContext *avctx)
{
    SchroDecoderParams *p_schro_params = avctx->priv_data;
    SchroDecoder *decoder = p_schro_params->decoder;

    p_schro_params->format = schro_decoder_get_video_format(decoder);

    /* Tell FFmpeg about sequence details. */
    if (av_image_check_size(p_schro_params->format->width,
                            p_schro_params->format->height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "invalid dimensions (%dx%d)\n",
               p_schro_params->format->width, p_schro_params->format->height);
        avctx->height = avctx->width = 0;
        return;
    }
    avctx->height  = p_schro_params->format->height;
    avctx->width   = p_schro_params->format->width;
    avctx->pix_fmt = get_chroma_format(p_schro_params->format->chroma_format);

    if (ff_get_schro_frame_format(p_schro_params->format->chroma_format,
                                  &p_schro_params->frame_format) == -1) {
        av_log(avctx, AV_LOG_ERROR,
               "This codec currently only supports planar YUV 4:2:0, 4:2:2 "
               "and 4:4:4 formats.\n");
        return;
    }

    avctx->framerate.num = p_schro_params->format->frame_rate_numerator;
    avctx->framerate.den = p_schro_params->format->frame_rate_denominator;
}
Example #6
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GifState *s = avctx->priv_data;
    AVFrame *picture = data;
    int ret;

    s->bytestream = buf;
    s->bytestream_end = buf + buf_size;
    if (gif_read_header1(s) < 0)
        return -1;

    avctx->pix_fmt = PIX_FMT_PAL8;
    if (av_image_check_size(s->screen_width, s->screen_height, 0, avctx))
        return -1;
    avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);
    if (avctx->get_buffer(avctx, &s->picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    s->image_palette = (uint32_t *)s->picture.data[1];
    ret = gif_parse_next_image(s);
    if (ret < 0)
        return ret;

    *picture = s->picture;
    *data_size = sizeof(AVPicture);
    return s->bytestream - buf;
}
Example #7
static int color_config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->src;
    ColorContext *color = ctx->priv;
    uint8_t rgba_color[4];
    int is_packed_rgba;
    const AVPixFmtDescriptor *pix_desc = &av_pix_fmt_descriptors[inlink->format];

    color->hsub = pix_desc->log2_chroma_w;
    color->vsub = pix_desc->log2_chroma_h;

    color->w &= ~((1 << color->hsub) - 1);
    color->h &= ~((1 << color->vsub) - 1);
    if (av_image_check_size(color->w, color->h, 0, ctx) < 0)
        return AVERROR(EINVAL);

    memcpy(rgba_color, color->color, sizeof(rgba_color));
    ff_fill_line_with_color(color->line, color->line_step, color->w, color->color,
                            inlink->format, rgba_color, &is_packed_rgba, NULL);

    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d r:%d/%d color:0x%02x%02x%02x%02x[%s]\n",
           color->w, color->h, color->time_base.den, color->time_base.num,
           color->color[0], color->color[1], color->color[2], color->color[3],
           is_packed_rgba ? "rgba" : "yuva");
    inlink->w = color->w;
    inlink->h = color->h;
    inlink->time_base = color->time_base;

    return 0;
}
Example #8
static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) {
    NuvContext *c = (NuvContext *)avctx->priv_data;
    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = 24 + height * width * 3 / 2 + AV_LZO_OUTPUT_PADDING;
        if (av_image_check_size(height, width, 0, avctx) < 0 ||
            buf_size > INT_MAX/8)
            return -1;
        avctx->width = c->width = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size, buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
        return 1;
    } else if (quality != c->quality)
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    return 0;
}
Example #9
bool ImageConverter::prepareData()
{
    DPTR_D(ImageConverter);
    if (d.fmt_out == QTAV_PIX_FMT_C(NONE) || d.w_out <=0 || d.h_out <= 0)
        return false;
    AV_ENSURE(av_image_check_size(d.w_out, d.h_out, 0, NULL), false);
    const int nb_planes = qMax(av_pix_fmt_count_planes(d.fmt_out), 0);
    d.bits.resize(nb_planes);
    d.pitchs.resize(nb_planes);
    // alignment is 16: sws in FFmpeg uses 16, libav10 uses 8
    const int kAlign = 16;
    AV_ENSURE(av_image_fill_linesizes((int*)d.pitchs.constData(), d.fmt_out, kAlign > 7 ? FFALIGN(d.w_out, 8) : d.w_out), false);
    for (int i = 0; i < d.pitchs.size(); ++i)
        d.pitchs[i] = FFALIGN(d.pitchs[i], kAlign);
    int s = av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, NULL, d.pitchs.constData());
    if (s < 0)
        return false;
    d.data_out.resize(s + kAlign-1);
    const int offset = (kAlign - ((uintptr_t)d.data_out.constData() & (kAlign-1))) & (kAlign-1);
    AV_ENSURE(av_image_fill_pointers((uint8_t**)d.bits.constData(), d.fmt_out, d.h_out, (uint8_t*)d.data_out.constData()+offset, d.pitchs.constData()), false);
    // TODO: special formats
    //if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
       //    avpriv_set_systematic_pal2((uint32_t*)pointers[1], pix_fmt);
    d.update_data = false;
    for (int i = 0; i < d.pitchs.size(); ++i) {
        Q_ASSERT(d.pitchs[i]%kAlign == 0);
        Q_ASSERT(qintptr(d.bits[i])%kAlign == 0);
    }
    return true;
}
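prepareData() above over-allocates its pixel buffer by kAlign - 1 bytes and then offsets the base pointer onto a 16-byte boundary. The same alignment arithmetic in isolation, as a standalone sketch with a hypothetical 256-byte buffer:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int kAlign = 16;
    /* over-allocate so an aligned start always fits inside the buffer */
    uint8_t *raw = malloc(256 + kAlign - 1);
    if (!raw)
        return 1;
    /* bytes needed to reach the next multiple of kAlign; 0 if raw is
     * already aligned -- the same formula prepareData() uses */
    size_t offset = (kAlign - ((uintptr_t)raw & (kAlign - 1))) & (kAlign - 1);
    uint8_t *aligned = raw + offset;
    printf("raw=%p aligned=%p\n", (void *)raw, (void *)aligned);
    free(raw);
    return 0;
}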
Example #10
static av_cold int rscc_init(AVCodecContext *avctx)
{
    RsccContext *ctx = avctx->priv_data;

    /* These need to be set to estimate the uncompressed buffer size */
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Allocate reference frame */
    ctx->reference = av_frame_alloc();
    if (!ctx->reference)
        return AVERROR(ENOMEM);

    avctx->pix_fmt = AV_PIX_FMT_BGRA;

    /* Store the value to check for keyframes */
    ctx->inflated_size = avctx->width * avctx->height * 4;

    /* Allocate maximum size possible, a full frame */
    ctx->inflated_buf = av_malloc(ctx->inflated_size);
    if (!ctx->inflated_buf)
        return AVERROR(ENOMEM);

    return 0;
}
Example #11
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo Implement cropping
 * @todo Implement forcing of subtitles
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y;

    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    av_dlog(avctx, "Video Dimensions %dx%d\n",
            w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 byte of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /*
     * Skip 3 bytes of unknown:
     *     state
     *     palette_update_flag (0x80),
     *     palette_id_to_use,
     */
    buf += 3;

    ctx->presentation.object_number = bytestream_get_byte(&buf);
    if (!ctx->presentation.object_number)
        return;

    /*
     * Skip 4 bytes of unknown:
     *     object_id_ref (2 bytes),
     *     window_id_ref,
     *     composition_flag (0x80 - object cropped, 0x40 - object forced)
     */
    buf += 4;

    x = bytestream_get_be16(&buf);
    y = bytestream_get_be16(&buf);

    /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

    av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

    if (x > avctx->width || y > avctx->height)
    {
        av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
               x, y, avctx->width, avctx->height);
        x = 0;
        y = 0;
    }

    /* Fill in dimensions */
    ctx->presentation.x = x;
    ctx->presentation.y = y;
}
Example #12
/**
 * Parse the presentation segment packet.
 *
 * The presentation segment contains details on the video
 * width, video height, x & y subtitle position.
 *
 * @param avctx contains the current codec context
 * @param buf pointer to the packet to process
 * @param buf_size size of packet to process
 * @todo Implement cropping
 * @todo Implement forcing of subtitles
 * @todo Implement subtitle blanking
 */
static void parse_presentation_segment(AVCodecContext *avctx,
                                       const uint8_t *buf, int buf_size)
{
    PGSSubContext *ctx = avctx->priv_data;

    int x, y;
    uint8_t block;

    int w = bytestream_get_be16(&buf);
    int h = bytestream_get_be16(&buf);

    dprintf(avctx, "Video Dimensions %dx%d\n",
            w, h);
    if (av_image_check_size(w, h, 0, avctx) >= 0)
        avcodec_set_dimensions(avctx, w, h);

    /* Skip 1 byte of unknown, frame rate? */
    buf++;

    ctx->presentation.id_number = bytestream_get_be16(&buf);

    /* Next byte is the state. */
    block = bytestream_get_byte(&buf);
    if (block == 0x80) {
        /*
         * Skip 7 bytes of unknown:
         *     palette_update_flag (0x80),
         *     palette_id_to_use,
         *     Object Number (if > 0 determines if more data to process),
         *     object_id_ref (2 bytes),
         *     window_id_ref,
         *     composition_flag (0x80 - object cropped, 0x40 - object forced)
         */
        buf += 7;

        x = bytestream_get_be16(&buf);
        y = bytestream_get_be16(&buf);

        /* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/

        dprintf(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);

        if (x > avctx->width || y > avctx->height) {
            av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
                   x, y, avctx->width, avctx->height);
            x = 0; y = 0;
        }

        /* Fill in dimensions */
        ctx->presentation.x = x;
        ctx->presentation.y = y;
    } else if (block == 0x00) {
        /* TODO: Blank context as subtitle should not be displayed.
         *       If the subtitle is blanked now the subtitle is not
         *       on screen long enough to read, due to a delay in
         *       initial display timing.
         */
    }
}
Example #13
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_picture, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    GifState *s = avctx->priv_data;
    AVFrame *picture = data;
    int ret;

    s->picture.pts          = avpkt->pts;
    s->picture.pkt_pts      = avpkt->pts;
    s->picture.pkt_dts      = avpkt->dts;
    s->picture.pkt_duration = avpkt->duration;

    s->bytestream = buf;
    s->bytestream_end = buf + buf_size;

    if (buf_size >= 6) {
        s->keyframe = memcmp(s->bytestream, gif87a_sig, 6) == 0 ||
                      memcmp(s->bytestream, gif89a_sig, 6) == 0;
    } else {
        s->keyframe = 0;
    }

    if (s->keyframe) {
        if ((ret = gif_read_header1(s)) < 0)
            return ret;

        if ((ret = av_image_check_size(s->screen_width, s->screen_height, 0, avctx)) < 0)
            return ret;
        avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);

        if (s->picture.data[0])
            avctx->release_buffer(avctx, &s->picture);

        if ((ret = avctx->get_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_I;
        s->picture.key_frame = 1;
    } else {
        if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }

        s->picture.pict_type = AV_PICTURE_TYPE_P;
        s->picture.key_frame = 0;
    }

    ret = gif_parse_next_image(s, got_picture);
    if (ret < 0)
        return ret;
    else if (*got_picture)
        *picture = s->picture;

    return s->bytestream - buf;
}
Example #14
static av_cold int rscc_init(AVCodecContext *avctx)
{
    RsccContext *ctx = avctx->priv_data;

    /* These need to be set to estimate the uncompressed buffer size */
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Allocate reference frame */
    ctx->reference = av_frame_alloc();
    if (!ctx->reference)
        return AVERROR(ENOMEM);

    /* Get pixel format and the size of the pixel */
    if (avctx->codec_tag == MKTAG('I', 'S', 'C', 'C')) {
        avctx->pix_fmt = AV_PIX_FMT_BGRA;
        ctx->component_size = 4;
    } else if (avctx->codec_tag == MKTAG('R', 'S', 'C', 'C')) {
        ctx->component_size = avctx->bits_per_coded_sample / 8;
        switch (avctx->bits_per_coded_sample) {
        case 8:
            avpriv_report_missing_feature(avctx, "8 bits per pixel");
            return AVERROR_PATCHWELCOME;
        case 16:
            avctx->pix_fmt = AV_PIX_FMT_RGB555LE;
            break;
        case 24:
            avctx->pix_fmt = AV_PIX_FMT_BGR24;
            break;
        case 32:
            avctx->pix_fmt = AV_PIX_FMT_BGR0;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Invalid bits per pixel value (%d)\n",
                   avctx->bits_per_coded_sample);
            return AVERROR_INVALIDDATA;
        }
    } else {
        avctx->pix_fmt = AV_PIX_FMT_BGR0;
        ctx->component_size = 4;
        av_log(avctx, AV_LOG_WARNING, "Invalid codec tag\n");
    }

    /* Store the value to check for keyframes */
    ctx->inflated_size = avctx->width * avctx->height * ctx->component_size;

    /* Allocate maximum size possible, a full frame */
    ctx->inflated_buf = av_malloc(ctx->inflated_size);
    if (!ctx->inflated_buf)
        return AVERROR(ENOMEM);

    return 0;
}
Example #15
/*
 * To be a valid APNG file, we mandate, in this order:
 *     PNGSIG
 *     IHDR
 *     ...
 *     acTL
 *     ...
 *     IDAT
 */
static int apng_probe(AVProbeData *p)
{
    GetByteContext gb;
    int state = 0;
    uint32_t len, tag;

    bytestream2_init(&gb, p->buf, p->buf_size);

    if (bytestream2_get_be64(&gb) != PNGSIG)
        return 0;

    for (;;) {
        len = bytestream2_get_be32(&gb);
        if (len > 0x7fffffff)
            return 0;

        tag = bytestream2_get_le32(&gb);
        /* we don't check IDAT size, as this is the last tag
         * we check, and it may be larger than the probe buffer */
        if (tag != MKTAG('I', 'D', 'A', 'T') &&
            len > bytestream2_get_bytes_left(&gb))
            return 0;

        switch (tag) {
        case MKTAG('I', 'H', 'D', 'R'):
            if (len != 13)
                return 0;
            if (av_image_check_size(bytestream2_get_be32(&gb), bytestream2_get_be32(&gb), 0, NULL))
                return 0;
            bytestream2_skip(&gb, 9);
            state++;
            break;
        case MKTAG('a', 'c', 'T', 'L'):
            if (state != 1 ||
                len != 8 ||
                bytestream2_get_be32(&gb) == 0) /* 0 is not a valid value for number of frames */
                return 0;
            bytestream2_skip(&gb, 8);
            state++;
            break;
        case MKTAG('I', 'D', 'A', 'T'):
            if (state != 2)
                return 0;
            goto end;
        default:
            /* skip other tags */
            bytestream2_skip(&gb, len + 4);
            break;
        }
    }

end:
    return AVPROBE_SCORE_MAX;
}
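apng_probe() walks standard PNG chunk framing: after the 8-byte signature, each chunk is a big-endian 32-bit length, a 4-byte tag, len bytes of data and a 4-byte CRC, which is why unrecognized tags are skipped with len + 4. A minimal standalone walker over the same layout (a sketch, with no CRC verification):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t rd_be32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static void walk_chunks(const uint8_t *buf, size_t size)
{
    size_t pos = 8;                /* skip the 8-byte PNG signature */
    while (pos + 8 <= size) {
        uint32_t len = rd_be32(buf + pos);
        printf("chunk %.4s, %u bytes\n",
               (const char *)(buf + pos + 4), (unsigned)len);
        if (len > size - pos - 8)  /* truncated chunk, stop */
            break;
        pos += 8 + len + 4;        /* length + tag, data, CRC */
    }
}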
Example #16
static av_cold int qtrle_encode_init(AVCodecContext *avctx)
{
    QtrleEncContext *s = avctx->priv_data;

    if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0) {
        return AVERROR(EINVAL);
    }
    s->avctx=avctx;
    s->logical_width=avctx->width;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        if (avctx->width % 4) {
            av_log(avctx, AV_LOG_ERROR, "Width not being a multiple of 4 is not supported\n");
            return AVERROR(EINVAL);
        }
        s->logical_width = avctx->width / 4;
        s->pixel_size = 4;
        break;
    case AV_PIX_FMT_RGB555BE:
        s->pixel_size = 2;
        break;
    case AV_PIX_FMT_RGB24:
        s->pixel_size = 3;
        break;
    case AV_PIX_FMT_ARGB:
        s->pixel_size = 4;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported colorspace.\n");
        break;
    }
    avctx->bits_per_coded_sample = avctx->pix_fmt == AV_PIX_FMT_GRAY8 ? 40 : s->pixel_size*8;

    s->rlecode_table = av_mallocz(s->logical_width);
    s->skip_table    = av_mallocz(s->logical_width);
    s->length_table  = av_mallocz_array(s->logical_width + 1, sizeof(int));
    if (!s->skip_table || !s->length_table || !s->rlecode_table) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating memory.\n");
        return AVERROR(ENOMEM);
    }
    s->previous_frame = av_frame_alloc();
    if (!s->previous_frame) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating picture\n");
        return AVERROR(ENOMEM);
    }

    s->max_buf_size = s->logical_width*s->avctx->height*s->pixel_size*2 /* image base material */
                      + 15                                            /* header + footer */
                      + s->avctx->height*2                            /* skip code+rle end */
                      + s->logical_width/MAX_RLE_BULK + 1             /* rle codes */;

    return 0;
}
Example #17
int av_hwframe_ctx_init(AVBufferRef *ref)
{
    AVHWFramesContext *ctx = (AVHWFramesContext*)ref->data;
    const enum AVPixelFormat *pix_fmt;
    int ret;

    if (ctx->internal->source_frames) {
        /* A derived frame context is already initialised. */
        return 0;
    }

    /* validate the pixel format */
    for (pix_fmt = ctx->internal->hw_type->pix_fmts; *pix_fmt != AV_PIX_FMT_NONE; pix_fmt++) {
        if (*pix_fmt == ctx->format)
            break;
    }
    if (*pix_fmt == AV_PIX_FMT_NONE) {
        av_log(ctx, AV_LOG_ERROR,
               "The hardware pixel format '%s' is not supported by the device type '%s'\n",
               av_get_pix_fmt_name(ctx->format), ctx->internal->hw_type->name);
        return AVERROR(ENOSYS);
    }

    /* validate the dimensions */
    ret = av_image_check_size(ctx->width, ctx->height, 0, ctx);
    if (ret < 0)
        return ret;

    /* format-specific init */
    if (ctx->internal->hw_type->frames_init) {
        ret = ctx->internal->hw_type->frames_init(ctx);
        if (ret < 0)
            goto fail;
    }

    if (ctx->internal->pool_internal && !ctx->pool)
        ctx->pool = ctx->internal->pool_internal;

    /* preallocate the frames in the pool, if requested */
    if (ctx->initial_pool_size > 0) {
        ret = hwframe_pool_prealloc(ref);
        if (ret < 0)
            goto fail;
    }

    return 0;
fail:
    if (ctx->internal->hw_type->frames_uninit)
        ctx->internal->hw_type->frames_uninit(ctx);
    return ret;
}
Example #18
static int get_video_buffer(AVFrame *frame, int align)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int ret, i;

    if (!desc)
        return AVERROR(EINVAL);

    if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
        return ret;

    if (!frame->linesize[0]) {
        for(i=1; i<=align; i+=i) {
            ret = av_image_fill_linesizes(frame->linesize, frame->format,
                                          FFALIGN(frame->width, i));
            if (ret < 0)
                return ret;
            if (!(frame->linesize[0] & (align-1)))
                break;
        }

        for (i = 0; i < 4 && frame->linesize[i]; i++)
            frame->linesize[i] = FFALIGN(frame->linesize[i], align);
    }

    for (i = 0; i < 4 && frame->linesize[i]; i++) {
        int h = FFALIGN(frame->height, 32);
        if (i == 1 || i == 2)
            h = FF_CEIL_RSHIFT(h, desc->log2_chroma_h);

        frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
        if (!frame->buf[i])
            goto fail;

        frame->data[i] = frame->buf[i]->data;
    }
    if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL) {
        av_buffer_unref(&frame->buf[1]);
        frame->buf[1] = av_buffer_alloc(1024);
        if (!frame->buf[1])
            goto fail;
        frame->data[1] = frame->buf[1]->data;
    }

    frame->extended_data = frame->data;

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}
Example #19
static av_cold int hap_init(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;
    const char *texture_name;
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid video size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since the codec is based on 4x4 blocks, the size is aligned to 4 */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    ff_texturedsp_init(&ctx->dxtc);

    switch (avctx->codec_tag) {
    case MKTAG('H','a','p','1'):
        texture_name = "DXT1";
        ctx->tex_rat = 8;
        ctx->tex_fun = ctx->dxtc.dxt1_block;
        avctx->pix_fmt = AV_PIX_FMT_RGB0;
        break;
    case MKTAG('H','a','p','5'):
        texture_name = "DXT5";
        ctx->tex_rat = 16;
        ctx->tex_fun = ctx->dxtc.dxt5_block;
        avctx->pix_fmt = AV_PIX_FMT_RGBA;
        break;
    case MKTAG('H','a','p','Y'):
        texture_name = "DXT5-YCoCg-scaled";
        ctx->tex_rat = 16;
        ctx->tex_fun = ctx->dxtc.dxt5ys_block;
        avctx->pix_fmt = AV_PIX_FMT_RGB0;
        break;
    default:
        return AVERROR_DECODER_NOT_FOUND;
    }

    av_log(avctx, AV_LOG_DEBUG, "%s texture\n", texture_name);

    ctx->slice_count = av_clip(avctx->thread_count, 1,
                               avctx->coded_height / TEXTURE_BLOCK_H);

    return 0;
}
Example #20
static av_cold int qtrle_encode_init(AVCodecContext *avctx)
{
	QtrleEncContext *s = avctx->priv_data;

	if (av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)
	{
		return -1;
	}
	s->avctx=avctx;

	switch (avctx->pix_fmt)
	{
	case PIX_FMT_RGB555BE:
		s->pixel_size = 2;
		break;
	case PIX_FMT_RGB24:
		s->pixel_size = 3;
		break;
	case PIX_FMT_ARGB:
		s->pixel_size = 4;
		break;
	default:
		av_log(avctx, AV_LOG_ERROR, "Unsupported colorspace.\n");
		break;
	}
	avctx->bits_per_coded_sample = s->pixel_size*8;

	s->rlecode_table = av_mallocz(s->avctx->width);
	s->skip_table    = av_mallocz(s->avctx->width);
	s->length_table  = av_mallocz((s->avctx->width + 1)*sizeof(int));
	if (!s->skip_table || !s->length_table || !s->rlecode_table)
	{
		av_log(avctx, AV_LOG_ERROR, "Error allocating memory.\n");
		return -1;
	}
	if (avpicture_alloc(&s->previous_frame, avctx->pix_fmt, avctx->width, avctx->height) < 0)
	{
		av_log(avctx, AV_LOG_ERROR, "Error allocating picture\n");
		return -1;
	}

	s->max_buf_size = s->avctx->width*s->avctx->height*s->pixel_size /* image base material */
	                  + 15                                           /* header + footer */
	                  + s->avctx->height*2                           /* skip code+rle end */
	                  + s->avctx->width/MAX_RLE_BULK + 1             /* rle codes */;
	avctx->coded_frame = &s->frame;
	return 0;
}
Example #21
/* [DIRAC_STD] 10. Sequence Header. sequence_header() */
int avpriv_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source)
{
    unsigned version_major;
    unsigned video_format, picture_coding_mode;
    int ret;

    /* [DIRAC_SPEC] 10.1 Parse Parameters. parse_parameters() */
    version_major  = svq3_get_ue_golomb(gb);
    svq3_get_ue_golomb(gb); /* version_minor */
    avctx->profile = svq3_get_ue_golomb(gb);
    avctx->level   = svq3_get_ue_golomb(gb);
    /* [DIRAC_SPEC] sequence_header() -> base_video_format as defined in
     * 10.2 Base Video Format, table 10.1 Dirac predefined video formats */
    video_format   = svq3_get_ue_golomb(gb);

    if (version_major < 2)
        av_log(avctx, AV_LOG_WARNING, "Stream is old and may not work\n");
    else if (version_major > 2)
        av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n");

    if (video_format > 20)
        return AVERROR_INVALIDDATA;

    // Fill in defaults for the source parameters.
    *source = dirac_source_parameters_defaults[video_format];

    /* [DIRAC_STD] 10.3 Source Parameters
     * Override the defaults. */
    if ((ret = parse_source_parameters(avctx, gb, source)))
        return ret;

    if ((ret = av_image_check_size(source->width, source->height, 0, avctx)))
        return ret;

    avcodec_set_dimensions(avctx, source->width, source->height);

    /* [DIRAC_STD] picture_coding_mode shall be 0 for fields and 1 for frames
     * currently only used to signal field coding */
    picture_coding_mode = svq3_get_ue_golomb(gb);
    if (picture_coding_mode != 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported picture coding mode %d",
               picture_coding_mode);
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
Example #22
static int source_config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    Frei0rContext *frei0r = ctx->priv;

    if (av_image_check_size(frei0r->w, frei0r->h, 0, ctx) < 0)
        return AVERROR(EINVAL);
    outlink->w = frei0r->w;
    outlink->h = frei0r->h;
    outlink->time_base = frei0r->time_base;

    if (!(frei0r->instance = frei0r->construct(outlink->w, outlink->h))) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load frei0r instance.\n");
        return AVERROR(EINVAL);
    }

    return set_params(ctx, frei0r->params);
}
Example #23
int ff_dirac_parse_sequence_header(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source)
{
    unsigned version_major, version_minor;
    unsigned video_format, picture_coding_mode;

    version_major  = svq3_get_ue_golomb(gb);
    version_minor  = svq3_get_ue_golomb(gb);
    avctx->profile = svq3_get_ue_golomb(gb);
    avctx->level   = svq3_get_ue_golomb(gb);
    video_format   = svq3_get_ue_golomb(gb);

    if (version_major < 2)
        av_log(avctx, AV_LOG_WARNING, "Stream is old and may not work\n");
    else if (version_major > 2)
        av_log(avctx, AV_LOG_WARNING, "Stream may have unhandled features\n");

    if (video_format > 20)
        return -1;

    // Fill in defaults for the source parameters.
    *source = dirac_source_parameters_defaults[video_format];

    // Override the defaults.
    if (parse_source_parameters(avctx, gb, source))
        return -1;

    if (av_image_check_size(source->width, source->height, 0, avctx))
        return -1;

    avcodec_set_dimensions(avctx, source->width, source->height);

    // currently only used to signal field coding
    picture_coding_mode = svq3_get_ue_golomb(gb);
    if (picture_coding_mode != 0)
    {
        av_log(avctx, AV_LOG_ERROR, "Unsupported picture coding mode %d",
               picture_coding_mode);
        return -1;
    }
    return 0;
}
Example #24
static int codec_reinit(AVCodecContext *avctx, int width, int height, int quality) {
    NuvContext *c = avctx->priv_data;
    width = (width + 1) & ~1;
    height = (height + 1) & ~1;
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        if (av_image_check_size(height, width, 0, avctx) < 0)
            return 0;
        avctx->width = c->width = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size, c->height * c->width * 3 / 2);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
            return 0;
        }
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    } else if (quality != c->quality)
        rtjpeg_decode_init(&c->rtj, &c->dsp, c->width, c->height, c->lq, c->cq);
    return 1;
}
Example #25
static av_cold int hap_init(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid video size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since the codec is based on 4x4 blocks, the size is aligned to 4 */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    /* Technically only one mode has alpha, but 32 bits are easier to handle */
    avctx->pix_fmt = AV_PIX_FMT_RGBA;

    ff_texturedsp_init(&ctx->dxtc);

    return 0;
}
Example #26
static int color_config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->src;
    ColorContext *color = ctx->priv;

    ff_draw_init(&color->draw, inlink->format, 0);
    ff_draw_color(&color->draw, &color->color, color->color_rgba);

    color->w = ff_draw_round_to_sub(&color->draw, 0, -1, color->w);
    color->h = ff_draw_round_to_sub(&color->draw, 1, -1, color->h);
    if (av_image_check_size(color->w, color->h, 0, ctx) < 0)
        return AVERROR(EINVAL);

    av_log(ctx, AV_LOG_INFO, "w:%d h:%d r:%d/%d color:0x%02x%02x%02x%02x\n",
           color->w, color->h, color->time_base.den, color->time_base.num,
           color->color_rgba[0], color->color_rgba[1], color->color_rgba[2], color->color_rgba[3]);
    inlink->w = color->w;
    inlink->h = color->h;
    inlink->time_base = color->time_base;

    return 0;
}
Example #27
static av_cold int hap_init(AVCodecContext *avctx)
{
    HapContext *ctx = avctx->priv_data;
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid video size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Since the codec is based on 4x4 blocks, the size is aligned to 4 */
    avctx->coded_width  = FFALIGN(avctx->width,  TEXTURE_BLOCK_W);
    avctx->coded_height = FFALIGN(avctx->height, TEXTURE_BLOCK_H);

    /* Technically only one mode has alpha, but 32 bits are easier to handle */
    avctx->pix_fmt = AV_PIX_FMT_RGBA;

    ff_texturedsp_init(&ctx->dxtc);

    switch (avctx->codec_tag) {
    case MKTAG('H','a','p','1'):
        ctx->tex_rat = 8;
        ctx->tex_fun = ctx->dxtc.dxt1_block;
        break;
    case MKTAG('H','a','p','5'):
        ctx->tex_rat = 16;
        ctx->tex_fun = ctx->dxtc.dxt5_block;
        break;
    case MKTAG('H','a','p','Y'):
        ctx->tex_rat = 16;
        ctx->tex_fun = ctx->dxtc.dxt5ys_block;
        break;
    default:
        return AVERROR_DECODER_NOT_FOUND;
    }
    return 0;
}
Example #28
static int dxv_init(AVCodecContext *avctx)
{
    DXVContext *ctx = avctx->priv_data;
    int ret = av_image_check_size(avctx->width, avctx->height, 0, avctx);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n",
               avctx->width, avctx->height);
        return ret;
    }

    /* Codec requires 16x16 alignment. */
    avctx->coded_width  = FFALIGN(avctx->width,  16);
    avctx->coded_height = FFALIGN(avctx->height, 16);

    ff_texturedsp_init(&ctx->texdsp);
    avctx->pix_fmt = AV_PIX_FMT_RGBA;

    ctx->slice_count = av_clip(avctx->thread_count, 1,
                               avctx->coded_height / TEXTURE_BLOCK_H);

    return 0;
}
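The texture decoders above (Hap, DXV) round the coded size up to a block multiple with FFALIGN, which libavutil/macros.h defines as (((x)+(a)-1)&~((a)-1)) for a power-of-two alignment. A few worked values, as a standalone sketch:

#include <assert.h>
#include <libavutil/macros.h>

int main(void)
{
    /* 4x4 DXT blocks: 1918x1078 is padded up to 1920x1080 */
    assert(FFALIGN(1918, 4) == 1920);
    assert(FFALIGN(1078, 4) == 1080);
    /* the 16x16 alignment used by dxv_init() above */
    assert(FFALIGN(1080, 16) == 1088);
    /* already-aligned values pass through unchanged */
    assert(FFALIGN(1920, 16) == 1920);
    return 0;
}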
Example #29
static int codec_reinit(AVCodecContext *avctx, int width, int height,
                        int quality)
{
    NuvContext *c = avctx->priv_data;
    int ret;

    width  = FFALIGN(width,  2);
    height = FFALIGN(height, 2);
    if (quality >= 0)
        get_quant_quality(c, quality);
    if (width != c->width || height != c->height) {
        // also reserve space for a possible additional header
        int buf_size = height * width * 3 / 2
                     + FFMAX(AV_LZO_OUTPUT_PADDING, FF_INPUT_BUFFER_PADDING_SIZE)
                     + RTJPEG_HEADER_SIZE;
        if (buf_size > INT_MAX/8)
            return -1;
        if ((ret = av_image_check_size(height, width, 0, avctx)) < 0)
            return ret;
        avctx->width  = c->width  = width;
        avctx->height = c->height = height;
        av_fast_malloc(&c->decomp_buf, &c->decomp_size,
                       buf_size);
        if (!c->decomp_buf) {
            av_log(avctx, AV_LOG_ERROR,
                   "Can't allocate decompression buffer.\n");
            return AVERROR(ENOMEM);
        }
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);
        av_frame_unref(c->pic);
        return 1;
    } else if (quality != c->quality)
        ff_rtjpeg_decode_init(&c->rtj, c->width, c->height, c->lq, c->cq);

    return 0;
}
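codec_reinit() reuses its decompression buffer across size changes through av_fast_malloc(), which reallocates only when the tracked size is too small and, unlike av_fast_realloc(), does not preserve the previous contents. A sketch of that call pattern, using a hypothetical HypoContext struct:

#include <stdint.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

typedef struct HypoContext {   /* hypothetical, for illustration only */
    uint8_t     *decomp_buf;
    unsigned int decomp_size;  /* tracks the current allocation */
} HypoContext;

static int ensure_decomp_buffer(HypoContext *c, size_t needed)
{
    /* reallocates only if decomp_size < needed; on failure the buffer
     * is freed and the pointer set to NULL */
    av_fast_malloc(&c->decomp_buf, &c->decomp_size, needed);
    return c->decomp_buf ? 0 : AVERROR(ENOMEM);
}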
Example #30
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt) {
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    AVSubtitle *sub = data;
    const uint8_t *buf_end = buf + buf_size;
    uint8_t *bitmap;
    int w, h, x, y, i;
    int64_t packet_time = 0;
    GetBitContext gb;
    int has_alpha = avctx->codec_tag == MKTAG('D','X','S','A');

    // check that at least header fits
    if (buf_size < 27 + 7 * 2 + 4 * 3) {
        av_log(avctx, AV_LOG_ERROR, "coded frame too small\n");
        return -1;
    }

    // read start and end time
    if (buf[0] != '[' || buf[13] != '-' || buf[26] != ']') {
        av_log(avctx, AV_LOG_ERROR, "invalid time code\n");
        return -1;
    }
    if (avpkt->pts != AV_NOPTS_VALUE)
        packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, (AVRational){1, 1000});
    sub->start_display_time = parse_timecode(buf +  1, packet_time);
    sub->end_display_time   = parse_timecode(buf + 14, packet_time);
    buf += 27;

    // read header
    w = bytestream_get_le16(&buf);
    h = bytestream_get_le16(&buf);
    if (av_image_check_size(w, h, 0, avctx) < 0)
        return -1;
    x = bytestream_get_le16(&buf);
    y = bytestream_get_le16(&buf);
    // skip bottom right position, it gives no new information
    bytestream_get_le16(&buf);
    bytestream_get_le16(&buf);
    // The following value is supposed to indicate the start offset
    // (relative to the palette) of the data for the second field,
    // however there are files where it has a bogus value and thus
    // we just ignore it
    bytestream_get_le16(&buf);

    // allocate sub and set values
    sub->rects = av_mallocz(sizeof(*sub->rects));
    sub->rects[0] = av_mallocz(sizeof(*sub->rects[0]));
    sub->num_rects = 1;
    sub->rects[0]->x = x; sub->rects[0]->y = y;
    sub->rects[0]->w = w; sub->rects[0]->h = h;
    sub->rects[0]->type = SUBTITLE_BITMAP;
    sub->rects[0]->pict.linesize[0] = w;
    sub->rects[0]->pict.data[0] = av_malloc(w * h);
    sub->rects[0]->nb_colors = 4;
    sub->rects[0]->pict.data[1] = av_mallocz(AVPALETTE_SIZE);

    // read palette
    for (i = 0; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t*)sub->rects[0]->pict.data[1])[i] = bytestream_get_be24(&buf);
    // make all except background (first entry) non-transparent
    for (i = 0; i < sub->rects[0]->nb_colors; i++)
        ((uint32_t*)sub->rects[0]->pict.data[1])[i] |= (has_alpha ? *buf++ : (i ? 0xff : 0)) << 24;

    // process RLE-compressed data
    init_get_bits(&gb, buf, (buf_end - buf) * 8);
    bitmap = sub->rects[0]->pict.data[0];
    for (y = 0; y < h; y++) {
        // interlaced: do odd lines
        if (y == (h + 1) / 2) bitmap = sub->rects[0]->pict.data[0] + w;
        for (x = 0; x < w; ) {
            int log2 = ff_log2_tab[show_bits(&gb, 8)];
            int run = get_bits(&gb, 14 - 4 * (log2 >> 1));
            int color = get_bits(&gb, 2);
            run = FFMIN(run, w - x);
            // run length 0 means till end of row
            if (!run) run = w - x;
            memset(bitmap, color, run);
            bitmap += run;
            x += run;
        }
        // interlaced, skip every second line
        bitmap += w;
        align_get_bits(&gb);
    }
    *data_size = 1;
    return buf_size;
}