Example #1
File: gif.c Project: Flameeyes/libav
/* better than nothing gif encoder */
static int gif_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    GIFContext *s = avctx->priv_data;
    AVFrame *const p = &s->picture;
    uint8_t *outbuf_ptr, *end;
    int ret;

    if ((ret = ff_alloc_packet(pkt, avctx->width*avctx->height*7/5 + FF_MIN_BUFFER_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }
    outbuf_ptr = pkt->data;
    end        = pkt->data + pkt->size;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    gif_image_write_header(avctx, &outbuf_ptr, (uint32_t *)pict->data[1]);
    gif_image_write_image(avctx, &outbuf_ptr, end, pict->data[0], pict->linesize[0]);

    pkt->size   = outbuf_ptr - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
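Note: most examples on this page share the same encode2-style skeleton: size the output conservatively, call ff_alloc_packet(), write the payload into pkt->data, then set pkt->size, the keyframe flag and *got_packet. A minimal sketch of that shared pattern (my_payload_upper_bound() and my_write_payload() are hypothetical helpers, not functions from the projects listed here):

static int my_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *frame, int *got_packet)
{
    int ret;

    /* Request a buffer large enough for the worst case. */
    if ((ret = ff_alloc_packet(pkt, my_payload_upper_bound(avctx))) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    /* Write the payload, then shrink pkt->size to the bytes actually used. */
    pkt->size   = my_write_payload(avctx, frame, pkt->data, pkt->size);
    pkt->flags |= AV_PKT_FLAG_KEY; /* intra-only codecs mark every packet */
    *got_packet = 1;
    return 0;
}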
Example #2
File: qsvenc.c Project: ikeraiza/FFmpeg
int ff_qsv_encode(AVCodecContext *avctx, QSVEncContext *q,
                  AVPacket *pkt, const AVFrame *frame, int *got_packet)
{
    mfxBitstream bs = { { { 0 } } };

    mfxFrameSurface1 *surf = NULL;
    mfxSyncPoint sync      = NULL;
    int ret;

    if (frame) {
        ret = submit_frame(q, frame, &surf);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error submitting the frame for encoding.\n");
            return ret;
        }
    }

    ret = ff_alloc_packet(pkt, q->packet_size);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error allocating the output packet\n");
        return ret;
    }
    bs.Data      = pkt->data;
    bs.MaxLength = pkt->size;

    do {
        ret = MFXVideoENCODE_EncodeFrameAsync(q->session, NULL, surf, &bs, &sync);
        if (ret == MFX_WRN_DEVICE_BUSY)
            av_usleep(1);
    } while (ret > 0);

    if (ret < 0)
        return (ret == MFX_ERR_MORE_DATA) ? 0 : ff_qsv_error(ret);

    if (ret == MFX_WRN_INCOMPATIBLE_VIDEO_PARAM && frame->interlaced_frame)
        print_interlace_msg(avctx, q);

    if (sync) {
        MFXVideoCORE_SyncOperation(q->session, sync, 60000);

        if (bs.FrameType & MFX_FRAMETYPE_I || bs.FrameType & MFX_FRAMETYPE_xI)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        else if (bs.FrameType & MFX_FRAMETYPE_P || bs.FrameType & MFX_FRAMETYPE_xP)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        else if (bs.FrameType & MFX_FRAMETYPE_B || bs.FrameType & MFX_FRAMETYPE_xB)
            avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;

        pkt->dts  = av_rescale_q(bs.DecodeTimeStamp, (AVRational){1, 90000}, avctx->time_base);
        pkt->pts  = av_rescale_q(bs.TimeStamp,       (AVRational){1, 90000}, avctx->time_base);
        pkt->size = bs.DataLength;

        if (bs.FrameType & MFX_FRAMETYPE_IDR ||
            bs.FrameType & MFX_FRAMETYPE_xIDR)
            pkt->flags |= AV_PKT_FLAG_KEY;

        *got_packet = 1;
    }

    return 0;
}
Example #3
File: xbmenc.c Project: JSinglan/libav
static int xbm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *p, int *got_packet)
{
    int i, j, ret, size, linesize;
    uint8_t *ptr, *buf;

    linesize = (avctx->width + 7) / 8;
    size     = avctx->height * (linesize * 7 + 2) + 110;
    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    buf = pkt->data;
    ptr = p->data[0];

    buf += snprintf(buf, 32, "#define image_width %u\n", avctx->width);
    buf += snprintf(buf, 33, "#define image_height %u\n", avctx->height);
    buf += snprintf(buf, 40, "static unsigned char image_bits[] = {\n");
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < linesize; j++)
            buf += snprintf(buf, 7, " 0x%02X,", av_reverse[*ptr++]);
        ptr += p->linesize[0] - linesize;
        buf += snprintf(buf, 2, "\n");
    }
    buf += snprintf(buf, 5, " };\n");

    pkt->size   = buf - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
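For reference, the XBM writer above emits C source text rather than a binary bitstream. An 8x1 image whose single (bit-reversed) byte is 0xFF would produce output along these lines:

#define image_width 8
#define image_height 1
static unsigned char image_bits[] = {
 0xFF,
 };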
Example #4
File: cngenc.c Project: AVLeo/libav
static int cng_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    CNGContext *p = avctx->priv_data;
    int ret, i;
    double energy = 0;
    int qdbov;
    int16_t *samples = (int16_t*) frame->data[0];

    if ((ret = ff_alloc_packet(avpkt, 1 + p->order))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    for (i = 0; i < frame->nb_samples; i++) {
        p->samples32[i] = samples[i];
        energy += samples[i] * samples[i];
    }
    energy /= frame->nb_samples;
    if (energy > 0) {
        double dbov = 10 * log10(energy / 1081109975);
        qdbov = av_clip(-floor(dbov), 0, 127);
    } else {
        qdbov = 127;
    }
    ret = ff_lpc_calc_ref_coefs(&p->lpc, p->samples32, p->order, p->ref_coef);
    avpkt->data[0] = qdbov;
    for (i = 0; i < p->order; i++)
        avpkt->data[1 + i] = p->ref_coef[i] * 127 + 127;

    *got_packet_ptr = 1;
    avpkt->size = 1 + p->order;

    return 0;
}
Example #5
File: adxenc.c Project: AVbin/libav
static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    ADXContext *c          = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    uint8_t *dst;
    int ch, out_size, ret;

    out_size = BLOCK_SIZE * avctx->channels + !c->header_parsed * HEADER_SIZE;
    if ((ret = ff_alloc_packet(avpkt, out_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    dst = avpkt->data;

    if (!c->header_parsed) {
        int hdrsize;
        if ((hdrsize = adx_encode_header(avctx, dst, avpkt->size)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
            return AVERROR(EINVAL);
        }
        dst      += hdrsize;
        c->header_parsed = 1;
    }

    for (ch = 0; ch < avctx->channels; ch++) {
        adx_encode(c, dst, samples + ch, &c->prev[ch], avctx->channels);
        dst += BLOCK_SIZE;
    }

    *got_packet_ptr = 1;
    return 0;
}
Example #6
File: nvenc.c Project: theambient/libav
static int nvenc_get_frame(AVCodecContext *avctx, AVPacket *pkt)
{
    NVENCContext *ctx               = avctx->priv_data;
    NV_ENCODE_API_FUNCTION_LIST *nv = &ctx->nvel.nvenc_funcs;
    NV_ENC_LOCK_BITSTREAM params    = { 0 };
    NVENCOutputSurface *out         = NULL;
    int ret;

    ret = nvenc_dequeue_surface(ctx->pending, &out);
    if (ret)
        return ret;

    params.version         = NV_ENC_LOCK_BITSTREAM_VER;
    params.outputBitstream = out->out;

    ret = nv->nvEncLockBitstream(ctx->nvenc_ctx, &params);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    ret = ff_alloc_packet(pkt, params.bitstreamSizeInBytes);
    if (ret < 0)
        return ret;

    memcpy(pkt->data, params.bitstreamBufferPtr, pkt->size);

    ret = nv->nvEncUnlockBitstream(ctx->nvenc_ctx, out->out);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    out->busy = out->in->locked = 0;

    ret = nvenc_set_timestamp(ctx, &params, pkt);
    if (ret < 0)
        return ret;

    switch (params.pictureType) {
    case NV_ENC_PIC_TYPE_IDR:
        pkt->flags |= AV_PKT_FLAG_KEY;
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    case NV_ENC_PIC_TYPE_INTRA_REFRESH:
    case NV_ENC_PIC_TYPE_I:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case NV_ENC_PIC_TYPE_P:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    case NV_ENC_PIC_TYPE_B:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_B;
        break;
    case NV_ENC_PIC_TYPE_BI:
        avctx->coded_frame->pict_type = AV_PICTURE_TYPE_BI;
        break;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    }

    return 0;
}
Example #7
static int libx265_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *pic, int *got_packet)
{
    libx265Context *ctx = avctx->priv_data;
    x265_picture x265pic;
    x265_picture x265pic_out = { { 0 } };
    x265_nal *nal;
    uint8_t *dst;
    int payload = 0;
    int nnal;
    int ret;
    int i;

    x265_picture_init(ctx->params, &x265pic);

    if (pic) {
        for (i = 0; i < 3; i++) {
           x265pic.planes[i] = pic->data[i];
           x265pic.stride[i] = pic->linesize[i];
        }

        x265pic.pts      = pic->pts;
        x265pic.bitDepth = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1;
    }

    ret = x265_encoder_encode(ctx->encoder, &nal, &nnal,
                              pic ? &x265pic : NULL, &x265pic_out);
    if (ret < 0)
        return AVERROR_UNKNOWN;

    if (!nnal)
        return 0;

    for (i = 0; i < nnal; i++)
        payload += nal[i].sizeBytes;

    ret = ff_alloc_packet(pkt, payload);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }
    dst = pkt->data;

    for (i = 0; i < nnal; i++) {
        memcpy(dst, nal[i].payload, nal[i].sizeBytes);
        dst += nal[i].sizeBytes;

        if (is_keyframe(nal[i].type))
            pkt->flags |= AV_PKT_FLAG_KEY;
    }

    pkt->pts = x265pic_out.pts;
    pkt->dts = x265pic_out.dts;

    *got_packet = 1;
    return 0;
}
Example #8
static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    RoqContext *enc = avctx->priv_data;
    int size, ret;

    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     *     256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet with size %d.\n", size);
        return ret;
    }
    enc->out_buf = pkt->data;

    /* Check for I frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
         for that) */
        if (avctx->get_buffer(avctx, enc->current_frame) ||
            avctx->get_buffer(avctx, enc->last_frame)) {
            av_log(avctx, AV_LOG_ERROR, "  RoQ: get_buffer() failed\n");
            return -1;
        }

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    roq_encode_video(enc);

    pkt->size   = enc->out_buf - pkt->data;
    if (enc->framesSinceKeyframe == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #9
File: roqaudioenc.c Project: AVLeo/libav
static int roq_dpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
{
    int i, stereo, data_size, ret;
    const int16_t *in = frame ? (const int16_t *)frame->data[0] : NULL;
    uint8_t *out;
    ROQDPCMContext *context = avctx->priv_data;

    stereo = (avctx->channels == 2);

    if (!in && context->input_frames >= 8)
        return 0;

    if (in && context->input_frames < 8) {
        memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels],
               in, avctx->frame_size * avctx->channels * sizeof(*in));
        context->buffered_samples += avctx->frame_size;
        if (context->input_frames == 0)
            context->first_pts = frame->pts;
        if (context->input_frames < 7) {
            context->input_frames++;
            return 0;
        }
    }
    if (context->input_frames < 8)
        in = context->frame_buffer;

    if (stereo) {
        context->lastSample[0] &= 0xFF00;
        context->lastSample[1] &= 0xFF00;
    }

    if (context->input_frames == 7)
        data_size = avctx->channels * context->buffered_samples;
    else
        data_size = avctx->channels * avctx->frame_size;

    if ((ret = ff_alloc_packet(avpkt, ROQ_HEADER_SIZE + data_size))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }
    out = avpkt->data;

    bytestream_put_byte(&out, stereo ? 0x21 : 0x20);
    bytestream_put_byte(&out, 0x10);
    bytestream_put_le32(&out, data_size);

    if (stereo) {
        bytestream_put_byte(&out, (context->lastSample[1])>>8);
        bytestream_put_byte(&out, (context->lastSample[0])>>8);
    } else
Example #10
File: v408enc.c Project: Bret4494/FFmpeg
static int v408_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint8_t *y, *u, *v, *a;
    int i, j, ret;

    if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * 4)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Out buffer is too small.\n");
        return ret;
    }
    dst = pkt->data;

    avctx->coded_frame->reference = 0;
    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    y = pic->data[0];
    u = pic->data[1];
    v = pic->data[2];
    a = pic->data[3];

    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width; j++) {
            if (avctx->codec_id == CODEC_ID_AYUV) {
                *dst++ = v[j];
                *dst++ = u[j];
                *dst++ = y[j];
                *dst++ = a[j];
            } else {
                *dst++ = u[j];
                *dst++ = y[j];
                *dst++ = v[j];
                *dst++ = a[j];
            }
        }
        y += pic->linesize[0];
        u += pic->linesize[1];
        v += pic->linesize[2];
        a += pic->linesize[3];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #11
File: libilbc.c Project: elnormous/libav
static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    ILBCEncContext *s = avctx->priv_data;
    int ret;

    if ((ret = ff_alloc_packet(avpkt, 50))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    WebRtcIlbcfix_EncodeImpl((uint16_t *) avpkt->data, (const int16_t *) frame->data[0], &s->encoder);

    avpkt->size     = s->encoder.no_of_bytes;
    *got_packet_ptr = 1;
    return 0;
}
Example #12
static int libwebp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                const AVFrame *frame, int *got_packet)
{
    LibWebPContext *s  = avctx->priv_data;
    WebPPicture *pic = NULL;
    AVFrame *alt_frame = NULL;
    WebPMemoryWriter mw = { 0 };

    int ret = ff_libwebp_get_frame(avctx, s, frame, &alt_frame, &pic);
    if (ret < 0)
        goto end;

    WebPMemoryWriterInit(&mw);
    pic->custom_ptr = &mw;
    pic->writer     = WebPMemoryWrite;

    ret = WebPEncode(&s->config, pic);
    if (!ret) {
        av_log(avctx, AV_LOG_ERROR, "WebPEncode() failed with error: %d\n",
               pic->error_code);
        ret = ff_libwebp_error_to_averror(pic->error_code);
        goto end;
    }

    ret = ff_alloc_packet(pkt, mw.size);
    if (ret < 0)
        goto end;
    memcpy(pkt->data, mw.mem, mw.size);

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

end:
#if (WEBP_ENCODER_ABI_VERSION > 0x0203)
    WebPMemoryWriterClear(&mw);
#else
    free(mw.mem); /* must use free() according to libwebp documentation */
#endif
    WebPPictureFree(pic);
    av_freep(&pic);
    av_frame_free(&alt_frame);

    return ret;
}
Example #13
static int avui_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    int i, j, skip, ret, size, interlaced;

    interlaced = avctx->field_order > AV_FIELD_PROGRESSIVE;

    if (avctx->height == 486) {
        skip = 10;
    } else {
        skip = 16;
    }
    size = 2 * avctx->width * (avctx->height + skip) + 8 * interlaced;
    if ((ret = ff_alloc_packet(pkt, size)) < 0)
        return ret;
    dst = pkt->data;
    if (!interlaced) {
        memset(dst, 0, avctx->width * skip);
        dst += avctx->width * skip;
    }

    for (i = 0; i <= interlaced; i++) {
        uint8_t *src;
        if (interlaced && avctx->height == 486) {
            src = pic->data[0] + (1 - i) * pic->linesize[0];
        } else {
            src = pic->data[0] + i * pic->linesize[0];
        }
        memset(dst, 0, avctx->width * skip + 4 * i);
        dst += avctx->width * skip + 4 * i;
        for (j = 0; j < avctx->height; j += interlaced + 1) {
            memcpy(dst, src, avctx->width * 2);
            src += (interlaced + 1) * pic->linesize[0];
            dst += avctx->width * 2;
        }
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #14
File: v410enc.c Project: JSinglan/libav
static int v410_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint16_t *y, *u, *v;
    uint32_t val;
    int i, j, ret;

    if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height * 4)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }
    dst = pkt->data;

    avctx->coded_frame->reference = 0;
    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    y = (uint16_t *)pic->data[0];
    u = (uint16_t *)pic->data[1];
    v = (uint16_t *)pic->data[2];

    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width; j++) {
            val  = u[j] << 2;
            val |= y[j] << 12;
            val |= (uint32_t) v[j] << 22;
            AV_WL32(dst, val);
            dst += 4;
        }
        y += pic->linesize[0] >> 1;
        u += pic->linesize[1] >> 1;
        v += pic->linesize[2] >> 1;
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
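The shifts above imply this little-endian V410 word layout (inferred from the code; bits 0-1 are padding): U in bits 2-11, Y in bits 12-21, V in bits 22-31. A single-pixel packing helper equivalent to the loop body could look like:

static uint32_t pack_v410(uint16_t y, uint16_t u, uint16_t v)
{
    /* 10-bit components; the two lowest bits stay zero */
    return ((uint32_t)u << 2) | ((uint32_t)y << 12) | ((uint32_t)v << 22);
}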
Example #15
static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    FaacAudioContext *s = avctx->priv_data;
    int bytes_written, ret;
    int num_samples  = frame ? frame->nb_samples : 0;
    void *samples    = frame ? frame->data[0]    : NULL;

    if ((ret = ff_alloc_packet(avpkt, (7 + 768) * avctx->channels))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    bytes_written = faacEncEncode(s->faac_handle, samples,
                                  num_samples * avctx->channels,
                                  avpkt->data, avpkt->size);
    if (bytes_written < 0) {
        av_log(avctx, AV_LOG_ERROR, "faacEncEncode() error\n");
        return bytes_written;
    }

    /* add current frame to the queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    if (!bytes_written)
        return 0;

    /* Get the next frame pts/duration */
    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = bytes_written;
    *got_packet_ptr = 1;
    return 0;
}
Example #16
File: cljr.c Project: Acidburn0zzz/libav
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *p, int *got_packet)
{
    PutBitContext pb;
    int x, y, ret;

    if ((ret = ff_alloc_packet(pkt, 32*avctx->height*avctx->width/4)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    init_put_bits(&pb, pkt->data, pkt->size);

    for (y = 0; y < avctx->height; y++) {
        uint8_t *luma = &p->data[0][y * p->linesize[0]];
        uint8_t *cb   = &p->data[1][y * p->linesize[1]];
        uint8_t *cr   = &p->data[2][y * p->linesize[2]];
        for (x = 0; x < avctx->width; x += 4) {
            put_bits(&pb, 5, luma[3] >> 3);
            put_bits(&pb, 5, luma[2] >> 3);
            put_bits(&pb, 5, luma[1] >> 3);
            put_bits(&pb, 5, luma[0] >> 3);
            luma += 4;
            put_bits(&pb, 6, *(cb++) >> 2);
            put_bits(&pb, 6, *(cr++) >> 2);
        }
    }

    flush_put_bits(&pb);

    pkt->size   = put_bits_count(&pb) / 8;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #17
File: libgsm.c Project: AVbin/libav
static int libgsm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet_ptr)
{
    int ret;
    gsm_signal *samples = (gsm_signal *)frame->data[0];
    struct gsm_state *state = avctx->priv_data;

    if ((ret = ff_alloc_packet(avpkt, avctx->block_align))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    switch(avctx->codec_id) {
    case AV_CODEC_ID_GSM:
        gsm_encode(state, samples, avpkt->data);
        break;
    case AV_CODEC_ID_GSM_MS:
        gsm_encode(state, samples,                  avpkt->data);
        gsm_encode(state, samples + GSM_FRAME_SIZE, avpkt->data + 32);
    }

    *got_packet_ptr = 1;
    return 0;
}
Example #18
File: qtrleenc.c Project: Flameeyes/libav
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame * const p = &s->frame;
    int ret;

    *p = *pict;

    if ((ret = ff_alloc_packet(pkt, s->max_buf_size)) < 0) {
        /* Upper bound check for compressed data */
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", s->max_buf_size);
        return ret;
    }

    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = AV_PICTURE_TYPE_I;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = AV_PICTURE_TYPE_P;
        p->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);

    if (p->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #19
File: rawenc.c Project: Bret4494/FFmpeg
static int raw_encode(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    int ret = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);

    if (ret < 0)
        return ret;

    if ((ret = ff_alloc_packet(pkt, ret)) < 0)
        return ret;
    if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
                                avctx->height, pkt->data, pkt->size)) < 0)
        return ret;

    if(avctx->codec_tag == AV_RL32("yuv2") && ret > 0 &&
       avctx->pix_fmt   == PIX_FMT_YUYV422) {
        int x;
        for(x = 1; x < avctx->height*avctx->width*2; x += 2)
            pkt->data[x] ^= 0x80;
    }
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #20
static int libschroedinger_encode_frame(AVCodecContext *avccontext, AVPacket *pkt,
                                        const AVFrame *frame, int *got_packet)
{
    int enc_size = 0;
    SchroEncoderParams *p_schro_params = avccontext->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FFSchroEncodedFrame *p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;
    int pkt_size, ret;

    if (!frame) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avccontext,
                                                               frame);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            assert(enc_buf->length > 0);
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            p_schro_params->enc_buf = av_realloc(p_schro_params->enc_buf,
                                                 p_schro_params->enc_buf_size + enc_buf->length);

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;


            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame. */
            p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame));
            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                     p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);

            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;

        case SCHRO_STATE_AGAIN:
            break;

        default:
            av_log(avccontext, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Copy 'next' frame in queue. */

    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

    pkt_size = p_frame_output->size;
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
        pkt_size += p_schro_params->enc_buf_size;
    if ((ret = ff_alloc_packet(pkt, pkt_size)) < 0) {
        av_log(avccontext, AV_LOG_ERROR, "Error getting output packet of size %d.\n", pkt_size);
        goto error;
    }

    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);
    avccontext->coded_frame->key_frame = p_frame_output->key_frame;
    /* Use the frame number of the encoded frame as the pts. It is OK to
     * do so since Dirac is a constant frame rate codec. It expects input
     * to be of constant frame rate. */
    pkt->pts =
    avccontext->coded_frame->pts = p_frame_output->frame_num;
    pkt->dts = p_schro_params->dts++;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    if (p_frame_output->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

error:
    /* free frame */
    libschroedinger_free_frame(p_frame_output);
    return ret;
}
Example #21
File: v210enc.c Project: Flameeyes/libav
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    int aligned_width = ((avctx->width + 47) / 48) * 48;
    int stride = aligned_width * 8 / 3;
    int line_padding = stride - ((avctx->width * 8 + 11) / 12) * 4;
    int h, w, ret;
    const uint16_t *y = (const uint16_t*)pic->data[0];
    const uint16_t *u = (const uint16_t*)pic->data[1];
    const uint16_t *v = (const uint16_t*)pic->data[2];
    PutByteContext p;

    if ((ret = ff_alloc_packet(pkt, avctx->height * stride)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    bytestream2_init_writer(&p, pkt->data, pkt->size);

#define CLIP(v) av_clip(v, 4, 1019)

#define WRITE_PIXELS(a, b, c)           \
    do {                                \
        val =   CLIP(*a++);             \
        val |= (CLIP(*b++) << 10) |     \
               (CLIP(*c++) << 20);      \
        bytestream2_put_le32u(&p, val); \
    } while (0)

    for (h = 0; h < avctx->height; h++) {
        uint32_t val;
        for (w = 0; w < avctx->width - 5; w += 6) {
            WRITE_PIXELS(u, y, v);
            WRITE_PIXELS(y, u, y);
            WRITE_PIXELS(v, y, u);
            WRITE_PIXELS(y, v, y);
        }
        if (w < avctx->width - 1) {
            WRITE_PIXELS(u, y, v);

            val = CLIP(*y++);
            if (w == avctx->width - 2)
                bytestream2_put_le32u(&p, val);
        }
        if (w < avctx->width - 3) {
            val |= (CLIP(*u++) << 10) | (CLIP(*y++) << 20);
            bytestream2_put_le32u(&p, val);

            val = CLIP(*v++) | (CLIP(*y++) << 10);
            bytestream2_put_le32u(&p, val);
        }

        bytestream2_set_buffer(&p, 0, line_padding);

        y += pic->linesize[0] / 2 - avctx->width;
        u += pic->linesize[1] / 2 - avctx->width / 2;
        v += pic->linesize[2] / 2 - avctx->width / 2;
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
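The v210 size math above follows from packing three 10-bit samples into each 32-bit little-endian word: six pixels contribute 12 samples (6 Y, 3 Cb, 3 Cr), filling four words, i.e. 16 bytes per 6 pixels or 8/3 bytes per pixel, with every row aligned to a 48-pixel group. A small sketch of the same stride computation:

static int v210_row_stride(int width)
{
    int aligned_width = ((width + 47) / 48) * 48; /* e.g. 1280 -> 1296 */
    return aligned_width * 8 / 3;                 /* 1296 * 8 / 3 = 3456 bytes */
}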
Example #22
File: msccoder.c Project: sogorkis/FFmpeg
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                        const AVFrame *frame, int *got_packet_ptr) {
	MscEncoderContext * mscEncoderContext;
	MscCodecContext * mscContext;
	uint32_t arithBytesEncoded;
	PutBitContext pb;
	int mb_y, mb_x, value, lastNonZero, max, arithCoderIndex = -1, keyFrame;

	// initialize arithmetic encoder registers
	initialize_arithmetic_encoder();

	mscEncoderContext = avctx->priv_data;
	mscContext = &mscEncoderContext->mscContext;

	init_put_bits(&pb, mscEncoderContext->arithBuff, mscEncoderContext->arithBuffSize);

	keyFrame = isKeyFrame(avctx->frame_number);

	if (avctx->frame_number == 0) {
		av_image_alloc(mscContext->referenceFrame->data, mscContext->referenceFrame->linesize, frame->width, frame->height, frame->format, 128);
	}

	avctx->coded_frame->reference = 0;
	avctx->coded_frame->key_frame = 1;
	avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

	int * qmatrix = keyFrame ? mscContext->q_intra_matrix : mscContext->q_non_intra_matrix;

	for (mb_x = 0; mb_x < mscContext->mb_width; mb_x++) {
		for (mb_y = 0; mb_y < mscContext->mb_height; mb_y++) {
			get_blocks(mscEncoderContext, frame, mb_x, mb_y, mscContext->block);

			if (!keyFrame) {
				get_blocks(mscEncoderContext, mscContext->referenceFrame, mb_x, mb_y, mscContext->tmpBlock);

				diff_blocks(mscContext->block, mscContext->tmpBlock);
			}

			for (int n = 0; n < 6; ++n) {
				mscContext->dsp.fdct(mscContext->block[n]);

				lastNonZero = quantize(mscContext->block[n], qmatrix, &max);

				av_assert1(lastNonZero < 64);

				encode_arith_symbol(&mscContext->lastZeroCodingModel, &pb, lastNonZero);

				if (lastNonZero > 0) {
					arithCoderIndex = get_arith_model_index(max);

					encode_arith_symbol(&mscContext->arithModelIndexCodingModel, &pb, arithCoderIndex);
				}

				for (int i = 0; i <= lastNonZero; ++i) {
					int arithCoderBits = i == 0 ? ARITH_CODER_BITS : arithCoderIndex;

					value = mscContext->block[n][scantab[i]] + mscContext->arithModelAddValue[arithCoderBits];

			        encode_arith_symbol(&mscContext->arithModels[arithCoderBits], &pb, value);
				}

				dequantize(mscContext->block[n], mscContext, keyFrame);
			}

			if (keyFrame) {
				idct_put_block(mscContext, mscContext->referenceFrame, mb_x, mb_y);
			}
			else {
				idct_add_block(mscContext, mscContext->referenceFrame, mb_x, mb_y);
			}
		}
	}

	emms_c();

	// flush arithmetic encoder
	flush_arithmetic_encoder(&pb);
	flush_put_bits(&pb);

	arithBytesEncoded = pb.buf_ptr - pb.buf;

	// allocate packet
	if ((value = ff_alloc_packet(avpkt, arithBytesEncoded)) < 0) {
		return value;
	}

	avpkt->flags |= AV_PKT_FLAG_KEY;

	// store encoded data
	memcpy(avpkt->data, mscEncoderContext->arithBuff, arithBytesEncoded);
	*got_packet_ptr = 1;

	return 0;
}
Example #23
static int encode_frame(AVCodecContext* avc_context, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    th_ycbcr_buffer t_yuv_buffer;
    TheoraContext *h = avc_context->priv_data;
    ogg_packet o_packet;
    int result, i, ret;

    // EOS, finish and get 1st pass stats if applicable
    if (!frame) {
        th_encode_packetout(h->t_state, 1, &o_packet);
        if (avc_context->flags & CODEC_FLAG_PASS1)
            if (get_stats(avc_context, 1))
                return -1;
        return 0;
    }

    /* Copy planes to the theora yuv_buffer */
    for (i = 0; i < 3; i++) {
        t_yuv_buffer[i].width  = FFALIGN(avc_context->width,  16) >> (i && h->uv_hshift);
        t_yuv_buffer[i].height = FFALIGN(avc_context->height, 16) >> (i && h->uv_vshift);
        t_yuv_buffer[i].stride = frame->linesize[i];
        t_yuv_buffer[i].data   = frame->data[i];
    }

    if (avc_context->flags & CODEC_FLAG_PASS2)
        if (submit_stats(avc_context))
            return -1;

    /* Now call into theora_encode_YUVin */
    result = th_encode_ycbcr_in(h->t_state, t_yuv_buffer);
    if (result) {
        const char* message;
        switch (result) {
        case -1:
            message = "differing frame sizes";
            break;
        case TH_EINVAL:
            message = "encoder is not ready or is finished";
            break;
        default:
            message = "unknown reason";
            break;
        }
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_YUVin failed (%s) [%d]\n", message, result);
        return -1;
    }

    if (avc_context->flags & CODEC_FLAG_PASS1)
        if (get_stats(avc_context, 0))
            return -1;

    /* Pick up returned ogg_packet */
    result = th_encode_packetout(h->t_state, 0, &o_packet);
    switch (result) {
    case 0:
        /* No packet is ready */
        return 0;
    case 1:
        /* Success, we have a packet */
        break;
    default:
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_packetout failed [%d]\n", result);
        return -1;
    }

    /* Copy ogg_packet content out to buffer */
    if ((ret = ff_alloc_packet(pkt, o_packet.bytes)) < 0) {
        av_log(avc_context, AV_LOG_ERROR, "Error getting output packet of size %ld.\n", o_packet.bytes);
        return ret;
    }
    memcpy(pkt->data, o_packet.packet, o_packet.bytes);

    // HACK: assumes no encoder delay, this is true until libtheora becomes
    // multithreaded (which will be disabled unless explicitly requested)
    pkt->pts = pkt->dts = frame->pts;
    avc_context->coded_frame->key_frame = !(o_packet.granulepos & h->keyframe_mask);
    if (avc_context->coded_frame->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #24
File: wmaenc.c Project: JSinglan/libav
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int i, total_gain, ret;

    s->block_len_bits = s->frame_len_bits; // required for non-variable block length
    s->block_len = 1 << s->block_len_bits;

    apply_window_and_mdct(avctx, frame);

    if (s->ms_stereo) {
        float a, b;
        int i;

        for(i = 0; i < s->block_len; i++) {
            a = s->coefs[0][i]*0.5;
            b = s->coefs[1][i]*0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

    if ((ret = ff_alloc_packet(avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

#if 1
    total_gain= 128;
    for(i=64; i; i>>=1){
        int error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                                 total_gain - i);
        if(error<0)
            total_gain-= i;
    }
#else
    total_gain= 90;
    best = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain);
    for(i=32; i; i>>=1){
        int scoreL = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain - i);
        int scoreR = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain + i);
        av_log(NULL, AV_LOG_ERROR, "%d %d %d (%d)\n", scoreL, best, scoreR, total_gain);
        if(scoreL < FFMIN(best, scoreR)){
            best = scoreL;
            total_gain -= i;
        }else if(scoreR < best){
            best = scoreR;
            total_gain += i;
        }
    }
#endif

    if ((i = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain)) >= 0) {
        av_log(avctx, AV_LOG_ERROR, "required frame size too large. please "
               "use a higher bit rate.\n");
        return AVERROR(EINVAL);
    }
    assert((put_bits_count(&s->pb) & 7) == 0);
    while (i++)
        put_bits(&s->pb, 8, 'N');

    flush_put_bits(&s->pb);

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->delay);

    avpkt->size = s->block_align;
    *got_packet_ptr = 1;
    return 0;
}
Example #25
File: sgienc.c Project: Arcen/libav
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    SgiContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    uint8_t *offsettab, *lengthtab, *in_buf, *encode_buf, *buf;
    int x, y, z, length, tablesize, ret;
    unsigned int width, height, depth, dimension;
    unsigned char *end_buf;

    *p = *frame;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    switch (avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        dimension = SGI_SINGLE_CHAN;
        depth     = SGI_GRAYSCALE;
        break;
    case PIX_FMT_RGB24:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGB;
        break;
    case PIX_FMT_RGBA:
        dimension = SGI_MULTI_CHAN;
        depth     = SGI_RGBA;
        break;
    default:
        return AVERROR_INVALIDDATA;
    }

    tablesize = depth * height * 4;
    length = SGI_HEADER_SIZE;
    if (avctx->coder_type == FF_CODER_TYPE_RAW)
        length += depth * height * width;
    else // assume ff_rl_encode() produces at most 2x size of input
        length += tablesize * 2 + depth * height * (2 * width + 1);

    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
        return ret;
    }
    buf     = pkt->data;
    end_buf = pkt->data + pkt->size;

    /* Encode header. */
    bytestream_put_be16(&buf, SGI_MAGIC);
    bytestream_put_byte(&buf, avctx->coder_type != FF_CODER_TYPE_RAW); /* RLE 1 - VERBATIM 0*/
    bytestream_put_byte(&buf, 1); /* bytes_per_channel */
    bytestream_put_be16(&buf, dimension);
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be16(&buf, depth);

    /* The rest are constant in this implementation. */
    bytestream_put_be32(&buf, 0L); /* pixmin */
    bytestream_put_be32(&buf, 255L); /* pixmax */
    bytestream_put_be32(&buf, 0L); /* dummy */

    /* name */
    memset(buf, 0, SGI_HEADER_SIZE);
    buf += 80;

     /* colormap */
    bytestream_put_be32(&buf, 0L);

    /* The rest of the 512 byte header is unused. */
    buf += 404;
    offsettab = buf;

    if (avctx->coder_type  != FF_CODER_TYPE_RAW) {
        /* Skip RLE offset table. */
        buf += tablesize;
        lengthtab = buf;

        /* Skip RLE length table. */
        buf += tablesize;

        /* Make an intermediate consecutive buffer. */
        if (!(encode_buf = av_malloc(width)))
            return -1;

        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                bytestream_put_be32(&offsettab, buf - pkt->data);

                for (x = 0; x < width; x++)
                    encode_buf[x] = in_buf[depth * x];

                if ((length = ff_rle_encode(buf, end_buf - buf - 1, encode_buf, 1, width, 0, 0, 0x80, 0)) < 1) {
                    av_free(encode_buf);
                    return -1;
                }

                buf += length;
                bytestream_put_byte(&buf, 0);
                bytestream_put_be32(&lengthtab, length + 1);
                in_buf -= p->linesize[0];
            }
        }

        av_free(encode_buf);
    } else {
        for (z = 0; z < depth; z++) {
            in_buf = p->data[0] + p->linesize[0] * (height - 1) + z;

            for (y = 0; y < height; y++) {
                for (x = 0; x < width * depth; x += depth)
                    bytestream_put_byte(&buf, in_buf[x]);

                in_buf -= p->linesize[0];
            }
        }
    }

    /* total length */
    pkt->size = buf - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #26
File: pnmenc.c Project: raff/libav
static int pnm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame * const p = (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize, ret;
    uint8_t *ptr, *ptr1, *ptr2;

    if ((ret = ff_alloc_packet(pkt, avpicture_get_size(avctx->pix_fmt,
                                                       avctx->width,
                                                       avctx->height) + 200)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return ret;
    }

    *p           = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = pkt->data;
    s->bytestream_end   = pkt->data + pkt->size;

    h  = avctx->height;
    h1 = h;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c  = '4';
        n  = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c  = '5';
        n  = avctx->width;
        break;
    case PIX_FMT_GRAY16BE:
        c  = '5';
        n  = avctx->width * 2;
        break;
    case PIX_FMT_RGB24:
        c  = '6';
        n  = avctx->width * 3;
        break;
    case PIX_FMT_RGB48BE:
        c  = '6';
        n  = avctx->width * 6;
        break;
    case PIX_FMT_YUV420P:
        c  = '5';
        n  = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n", c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", (avctx->pix_fmt != PIX_FMT_GRAY16BE && avctx->pix_fmt != PIX_FMT_RGB48BE) ? 255 : 65535);
        s->bytestream += strlen(s->bytestream);
    }

    ptr      = p->data[0];
    linesize = p->linesize[0];
    for (i = 0; i < h; i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr           += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for (i = 0; i < h; i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }

    pkt->size   = s->bytestream - s->bytestream_start;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #27
static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *p, int *got_packet)
{
    int bpp, picsize, datasize = -1, ret;
    uint8_t *out;

    if(avctx->width > 0xffff || avctx->height > 0xffff) {
        av_log(avctx, AV_LOG_ERROR, "image dimensions too large\n");
        return AVERROR(EINVAL);
    }
    picsize = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
    if ((ret = ff_alloc_packet(pkt, picsize + 45)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return ret;
    }

    /* zero out the header and only set applicable fields */
    memset(pkt->data, 0, 12);
    AV_WL16(pkt->data+12, avctx->width);
    AV_WL16(pkt->data+14, avctx->height);
    /* image descriptor byte: origin is always top-left, bits 0-3 specify alpha */
    pkt->data[17] = 0x20 | (avctx->pix_fmt == PIX_FMT_BGRA ? 8 : 0);

    switch(avctx->pix_fmt) {
    case PIX_FMT_GRAY8:
        pkt->data[2]  = TGA_BW;     /* uncompressed grayscale image */
        pkt->data[16] = 8;          /* bpp */
        break;
    case PIX_FMT_RGB555LE:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 16;         /* bpp */
        break;
    case PIX_FMT_BGR24:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 24;         /* bpp */
        break;
    case PIX_FMT_BGRA:
        pkt->data[2]  = TGA_RGB;    /* uncompressed true-color image */
        pkt->data[16] = 32;         /* bpp */
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Pixel format '%s' not supported.\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
    }
    bpp = pkt->data[16] >> 3;

    out = pkt->data + 18;  /* skip past the header we just output */

    /* try RLE compression */
    if (avctx->coder_type != FF_CODER_TYPE_RAW)
        datasize = targa_encode_rle(out, picsize, p, bpp, avctx->width, avctx->height);

    /* if that worked well, mark the picture as RLE compressed */
    if(datasize >= 0)
        pkt->data[2] |= 8;

    /* if RLE didn't make it smaller, go back to no compression */
    else datasize = targa_encode_normal(out, p, bpp, avctx->width, avctx->height);

    out += datasize;

    /* The standard recommends including this section, even if we don't use
     * any of the features it affords. TODO: take advantage of the pixel
     * aspect ratio and encoder ID fields available? */
    memcpy(out, "\0\0\0\0\0\0\0\0TRUEVISION-XFILE.", 26);

    pkt->size   = out + 26 - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #28
static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *frame, int *got_packet)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    opj_cinfo_t *compress = ctx->compress;
    opj_image_t *image    = ctx->image;
    opj_cio_t *stream;
    int ret, len;

    // x0, y0 is the top left corner of the image
    // x1, y1 is the width, height of the reference grid
    image->x0 = 0;
    image->y0 = 0;
    image->x1 = (avctx->width  - 1) * ctx->enc_params.subsampling_dx + 1;
    image->y1 = (avctx->height - 1) * ctx->enc_params.subsampling_dy + 1;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_Y400A:
        libopenjpeg_copy_packed8(avctx, frame, image);
        break;
    case AV_PIX_FMT_RGB48:
        libopenjpeg_copy_packed16(avctx, frame, image);
        break;
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
        libopenjpeg_copy_unpacked8(avctx, frame, image);
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV420P16:
        libopenjpeg_copy_unpacked16(avctx, frame, image);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The frame's pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
        break;
    }

    opj_setup_encoder(compress, &ctx->enc_params, image);
    stream = opj_cio_open((opj_common_ptr)compress, NULL, 0);
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
        return AVERROR(ENOMEM);
    }

    if (!opj_encode(compress, stream, image, NULL)) {
        opj_cio_close(stream);
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        return -1;
    }

    len = cio_tell(stream);
    if ((ret = ff_alloc_packet(pkt, len)) < 0) {
        opj_cio_close(stream);
        return ret;
    }

    memcpy(pkt->data, stream->buffer, len);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    opj_cio_close(stream);
    return 0;
}
Example #29
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    int width, height, bits_pixel, i, j, length, ret;
    uint8_t *in_buf, *buf;

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    width  = avctx->width;
    height = avctx->height;

    if (width > 65535 || height > 65535 ||
        width * height >= INT_MAX / 4 - ALIAS_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid image size %dx%d.\n", width, height);
        return AVERROR_INVALIDDATA;
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
        bits_pixel = 8;
        break;
    case AV_PIX_FMT_BGR24:
        bits_pixel = 24;
        break;
    default:
        return AVERROR(EINVAL);
    }

    length = ALIAS_HEADER_SIZE + 4 * width * height; // max possible
    if ((ret = ff_alloc_packet(pkt, length)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", length);
        return ret;
    }

    buf = pkt->data;

    /* Encode header. */
    bytestream_put_be16(&buf, width);
    bytestream_put_be16(&buf, height);
    bytestream_put_be32(&buf, 0); /* X, Y offset */
    bytestream_put_be16(&buf, bits_pixel);

    for (j = 0; j < height; j++) {
        in_buf = frame->data[0] + frame->linesize[0] * j;
        for (i = 0; i < width; ) {
            int count = 0;
            int pixel;

            if (avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                pixel = *in_buf;
                while (count < 255 && count + i < width && pixel == *in_buf) {
                    count++;
                    in_buf++;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_byte(&buf, pixel);
            } else { /* AV_PIX_FMT_BGR24 */
                pixel = AV_RB24(in_buf);
                while (count < 255 && count + i < width &&
                       pixel == AV_RB24(in_buf)) {
                    count++;
                    in_buf += 3;
                }
                bytestream_put_byte(&buf, count);
                bytestream_put_be24(&buf, pixel);
            }
            i += count;
        }
    }

    /* Total length */
    av_shrink_packet(pkt, buf - pkt->data);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #30
File: pamenc.c Project: Bret4494/FFmpeg
static int pam_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    PNMContext *s     = avctx->priv_data;
    AVFrame * const p = &s->picture;
    int i, h, w, n, linesize, depth, maxval, ret;
    const char *tuple_type;
    uint8_t *ptr;

    h = avctx->height;
    w = avctx->width;
    switch (avctx->pix_fmt) {
    case PIX_FMT_MONOBLACK:
        n          = w;
        depth      = 1;
        maxval     = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n          = w;
        depth      = 1;
        maxval     = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_GRAY16BE:
        n          = w * 2;
        depth      = 1;
        maxval     = 0xFFFF;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_GRAY8A:
        n          = w * 2;
        depth      = 2;
        maxval     = 255;
        tuple_type = "GRAYSCALE_ALPHA";
        break;
    case PIX_FMT_RGB24:
        n          = w * 3;
        depth      = 3;
        maxval     = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA:
        n          = w * 4;
        depth      = 4;
        maxval     = 255;
        tuple_type = "RGB_ALPHA";
        break;
    case PIX_FMT_RGB48BE:
        n          = w * 6;
        depth      = 3;
        maxval     = 0xFFFF;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA64BE:
        n          = w * 8;
        depth      = 4;
        maxval     = 0xFFFF;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }

    if ((ret = ff_alloc_packet(pkt, n*h + 200)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return ret;
    }

    *p           = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    s->bytestream_start =
    s->bytestream       = pkt->data;
    s->bytestream_end   = pkt->data + pkt->size;

    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLTYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr      = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_MONOBLACK){
        int j;
        for (i = 0; i < h; i++) {
            for (j = 0; j < w; j++)
                *s->bytestream++ = ptr[j >> 3] >> (7 - j & 7) & 1;
            ptr += linesize;
        }
    } else {