Example #1
static int hap_encode(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    HapContext *ctx = avctx->priv_data;
    int header_length = hap_header_length(ctx);
    int final_data_size, ret;
    int pktsize = FFMAX(ctx->tex_size, ctx->max_snappy * ctx->chunk_count) + header_length;

    /* Allocate maximum size packet, shrink later. */
    ret = ff_alloc_packet2(avctx, pkt, pktsize, header_length);
    if (ret < 0)
        return ret;

    /* DXTC compression. */
    compress_texture(avctx, frame);

    /* Compress (using Snappy) the frame */
    final_data_size = hap_compress_frame(avctx, pkt->data + header_length);
    if (final_data_size < 0)
        return final_data_size;

    /* Write header at the start. */
    hap_write_frame_header(ctx, pkt->data, final_data_size + header_length);

    av_shrink_packet(pkt, final_data_size + header_length);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
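A note on the shared shape before the remaining examples: each function here implements libavcodec's encode2() callback. A minimal sketch of that contract, with hypothetical names (dummy_encode, the worst-case size) and the same 4-argument ff_alloc_packet2() used above:

static int dummy_encode(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *frame, int *got_packet)
{
    int ret, size = avctx->width * avctx->height;   /* worst-case payload */

    /* Allocate up-front; on failure nothing was written. */
    if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0)
        return ret;

    /* ... write the actual bitstream into pkt->data here ... */

    av_shrink_packet(pkt, size);    /* trim to the bytes actually used */
    pkt->flags |= AV_PKT_FLAG_KEY;  /* intra-only: every packet is a sync point */
    *got_packet = 1;                /* report that pkt holds valid output */
    return 0;
}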
Example #2
static int adx_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    ADXContext *c          = avctx->priv_data;
    const int16_t *samples = (const int16_t *)frame->data[0];
    uint8_t *dst;
    int ch, out_size, ret;

    /* !c->header_parsed is 0 or 1, so HEADER_SIZE is added exactly once,
     * for the first packet only */
    out_size = BLOCK_SIZE * avctx->channels + !c->header_parsed * HEADER_SIZE;
    if ((ret = ff_alloc_packet2(avctx, avpkt, out_size)) < 0)
        return ret;
    dst = avpkt->data;

    if (!c->header_parsed) {
        int hdrsize;
        if ((hdrsize = adx_encode_header(avctx, dst, avpkt->size)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "output buffer is too small\n");
            return AVERROR(EINVAL);
        }
        dst      += hdrsize;
        c->header_parsed = 1;
    }

    for (ch = 0; ch < avctx->channels; ch++) {
        adx_encode(c, dst, samples + ch, &c->prev[ch], avctx->channels);
        dst += BLOCK_SIZE;
    }

    *got_packet_ptr = 1;
    return 0;
}
Example #3
static int xbm_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *p, int *got_packet)
{
    int i, j, ret, size, linesize;
    uint8_t *ptr, *buf;

    linesize = (avctx->width + 7) / 8;
    size     = avctx->height * (linesize * 7 + 2) + 110;
    if ((ret = ff_alloc_packet2(avctx, pkt, size)) < 0)
        return ret;

    buf = pkt->data;
    ptr = p->data[0];

    buf += snprintf(buf, 32, "#define image_width %u\n", avctx->width);
    buf += snprintf(buf, 33, "#define image_height %u\n", avctx->height);
    buf += snprintf(buf, 40, "static unsigned char image_bits[] = {\n");
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < linesize; j++)
            buf += snprintf(buf, 7, " 0x%02X,", av_reverse[*ptr++]);
        ptr += p->linesize[0] - linesize;
        buf += snprintf(buf, 2, "\n");
    }
    buf += snprintf(buf, 5, " };\n");

    pkt->size   = buf - pkt->data;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
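For a sense of the output: given a hypothetical 8x2 all-ones monochrome frame, the snprintf() calls above would produce this C source text in the packet (one " 0x%02X," cell per input byte, one line per row):

#define image_width 8
#define image_height 2
static unsigned char image_bits[] = {
 0xFF,
 0xFF,
 };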
Example #4
File: cngenc.c Project: BtbN/FFmpeg
static int cng_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    CNGContext *p = avctx->priv_data;
    int ret, i;
    double energy = 0;
    int qdbov;
    int16_t *samples = (int16_t*) frame->data[0];

    if ((ret = ff_alloc_packet2(avctx, avpkt, 1 + p->order, 1 + p->order))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    for (i = 0; i < frame->nb_samples; i++) {
        p->samples32[i] = samples[i];
        energy += samples[i] * samples[i];
    }
    energy /= frame->nb_samples;
    if (energy > 0) {
        double dbov = 10 * log10(energy / 1081109975);
        qdbov = av_clip_uintp2(-floor(dbov), 7);
    } else {
        qdbov = 127;
    }
    ff_lpc_calc_ref_coefs(&p->lpc, p->samples32, p->order, p->ref_coef);
    avpkt->data[0] = qdbov;
    for (i = 0; i < p->order; i++)
        avpkt->data[1 + i] = p->ref_coef[i] * 127 + 127;

    *got_packet_ptr = 1;
    av_assert1(avpkt->size == 1 + p->order);

    return 0;
}
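The noise-level quantization above, restated as a self-contained sketch: the mean-square energy is converted to dBov against a fixed overflow reference (the 1081109975 constant is taken verbatim from the code above), negated, floored, and clipped to 7 bits, with 127 reserved for silence:

#include <math.h>
#include <stdint.h>

static int quantize_noise_level(const int16_t *samples, int n)
{
    double energy = 0, dbov;
    int i, q;

    for (i = 0; i < n; i++)
        energy += (double)samples[i] * samples[i];
    energy /= n;
    if (energy <= 0)
        return 127;                          /* silence: quietest level */
    dbov = 10 * log10(energy / 1081109975.0);
    q    = (int)(-floor(dbov));
    return q < 0 ? 0 : q > 127 ? 127 : q;    /* av_clip_uintp2(q, 7) */
}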
Example #5
static int v308_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint8_t *y, *u, *v;
    int i, j, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width * avctx->height * 3, 0)) < 0)
        return ret;
    dst = pkt->data;

    y = pic->data[0];
    u = pic->data[1];
    v = pic->data[2];

    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width; j++) {
            *dst++ = v[j];
            *dst++ = y[j];
            *dst++ = u[j];
        }
        y += pic->linesize[0];
        u += pic->linesize[1];
        v += pic->linesize[2];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #6
static int encode_superframe(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    WMACodecContext *s = avctx->priv_data;
    int i, total_gain, ret, error;

    s->block_len_bits = s->frame_len_bits; // required by non variable block len
    s->block_len      = 1 << s->block_len_bits;

    ret = apply_window_and_mdct(avctx, frame);

    if (ret < 0)
        return ret;

    if (s->ms_stereo) {
        float a, b;
        int i;

        for (i = 0; i < s->block_len; i++) {
            a              = s->coefs[0][i] * 0.5;
            b              = s->coefs[1][i] * 0.5;
            s->coefs[0][i] = a + b;
            s->coefs[1][i] = a - b;
        }
    }

    if ((ret = ff_alloc_packet2(avctx, avpkt, 2 * MAX_CODED_SUPERFRAME_SIZE, 0)) < 0)
        return ret;

    total_gain = 128;
    for (i = 64; i; i >>= 1) {
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size,
                                 total_gain - i);
        if (error <= 0)
            total_gain -= i;
    }

    while (total_gain <= 128 && error > 0)
        error = encode_frame(s, s->coefs, avpkt->data, avpkt->size, total_gain++);
    if (error > 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid input data or requested bitrate too low, cannot encode\n");
        avpkt->size = 0;
        return AVERROR(EINVAL);
    }
    av_assert0((put_bits_count(&s->pb) & 7) == 0);
    i = avctx->block_align - (put_bits_count(&s->pb) + 7) / 8;
    av_assert0(i >= 0);
    while (i--)
        put_bits(&s->pb, 8, 'N');

    flush_put_bits(&s->pb);
    av_assert0(put_bits_ptr(&s->pb) - s->pb.buf == avctx->block_align);

    if (frame->pts != AV_NOPTS_VALUE)
        avpkt->pts = frame->pts - ff_samples_to_time_base(avctx, avctx->initial_padding);

    avpkt->size     = avctx->block_align;
    *got_packet_ptr = 1;
    return 0;
}
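The total_gain search above is a bisection from the top: starting at 128, each power-of-two step is kept only when the trial encode still fits (encode_frame() returning <= 0). The same idiom in isolation, with a hypothetical fits() predicate standing in for the trial encode:

static int smallest_fitting_gain(int (*fits)(int))
{
    int gain = 128, step;

    for (step = 64; step; step >>= 1)
        if (fits(gain - step))
            gain -= step;   /* reduction still fits: keep it, refine further */
    return gain;            /* smallest fitting value in [1, 128], assuming
                             * fits() is monotonic in gain */
}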
Example #7
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    AVFrame * const p = &s->frame;
    int ret;

    *p = *pict;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->max_buf_size)) < 0)
        return ret;

    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        p->pict_type = AV_PICTURE_TYPE_I;
        p->key_frame = 1;
    } else {
        /* P-Frame */
        p->pict_type = AV_PICTURE_TYPE_P;
        p->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame */
    av_picture_copy(&s->previous_frame, (AVPicture *)p, avctx->pix_fmt, avctx->width, avctx->height);

    if (p->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #8
static int roq_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *frame, int *got_packet)
{
    RoqContext *enc = avctx->priv_data;
    int size, ret;

    enc->avctx = avctx;

    enc->frame_to_enc = frame;

    if (frame->quality)
        enc->lambda = frame->quality - 1;
    else
        enc->lambda = 2*ROQ_LAMBDA_SCALE;

    /* 138 bits max per 8x8 block +
     *     256 codebooks*(6 bytes 2x2 + 4 bytes 4x4) + 8 bytes frame header */
    size = ((enc->width * enc->height / 64) * 138 + 7) / 8 + 256 * (6 + 4) + 8;
    if ((ret = ff_alloc_packet2(avctx, pkt, size)) < 0)
        return ret;
    enc->out_buf = pkt->data;

    /* Check for I frame */
    if (enc->framesSinceKeyframe == avctx->gop_size)
        enc->framesSinceKeyframe = 0;

    if (enc->first_frame) {
        /* Alloc memory for the reconstruction data (we must know the stride
         for that) */
        if (avctx->get_buffer(avctx, enc->current_frame) ||
                avctx->get_buffer(avctx, enc->last_frame)) {
            av_log(avctx, AV_LOG_ERROR, "  RoQ: get_buffer() failed\n");
            return -1;
        }

        /* Before the first video frame, write a "video info" chunk */
        roq_write_video_info_chunk(enc);

        enc->first_frame = 0;
    }

    /* Encode the actual frame */
    roq_encode_video(enc);

    pkt->size   = enc->out_buf - pkt->data;
    if (enc->framesSinceKeyframe == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #9
static int libshine_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
{
    SHINEContext *s = avctx->priv_data;
    MPADecodeHeader hdr;
    unsigned char *data;
    long written;
    int ret, len;

    if (frame)
        data = shine_encode_buffer(s->shine, (int16_t **)frame->data, &written);
    else
        data = shine_flush(s->shine, &written);
    if (written < 0)
        return -1;
    if (written > 0) {
        if (s->buffer_index + written > BUFFER_SIZE) {
            av_log(avctx, AV_LOG_ERROR, "internal buffer too small\n");
            return AVERROR_BUG;
        }
        memcpy(s->buffer + s->buffer_index, data, written);
        s->buffer_index += written;
    }
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    if (s->buffer_index < 4 || !s->afq.frame_count)
        return 0;
    if (avpriv_mpegaudio_decode_header(&hdr, AV_RB32(s->buffer))) {
        av_log(avctx, AV_LOG_ERROR, "free format output not supported\n");
        return -1;
    }

    len = hdr.frame_size;
    if (len <= s->buffer_index) {
        if ((ret = ff_alloc_packet2(avctx, avpkt, len)))
            return ret;
        memcpy(avpkt->data, s->buffer, len);
        s->buffer_index -= len;
        memmove(s->buffer, s->buffer + len, s->buffer_index);

        ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                           &avpkt->duration);

        avpkt->size = len;
        *got_packet_ptr = 1;
    }
    return 0;
}
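The buffering idiom above in isolation: encoder output bytes are appended to an internal FIFO, and a packet is emitted only once a whole frame's worth has accumulated. A sketch with a hypothetical fixed-size buffer and no packet allocation:

#include <string.h>

#define FIFO_SIZE 4096

static unsigned char fifo[FIFO_SIZE];
static int fifo_fill;

/* Returns frame_size once a full frame was copied to out, 0 while still
 * buffering, -1 on overflow (the AVERROR_BUG case above). */
static int fifo_push_pull(const unsigned char *in, int in_len,
                          unsigned char *out, int frame_size)
{
    if (fifo_fill + in_len > FIFO_SIZE)
        return -1;
    memcpy(fifo + fifo_fill, in, in_len);
    fifo_fill += in_len;
    if (fifo_fill < frame_size)
        return 0;
    memcpy(out, fifo, frame_size);
    fifo_fill -= frame_size;
    memmove(fifo, fifo + frame_size, fifo_fill);  /* shift remainder down */
    return frame_size;
}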
Example #10
static int roq_dpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                                 const AVFrame *frame, int *got_packet_ptr)
{
    int i, stereo, data_size, ret;
    const int16_t *in = frame ? (const int16_t *)frame->data[0] : NULL;
    uint8_t *out;
    ROQDPCMContext *context = avctx->priv_data;

    stereo = (avctx->channels == 2);

    if (!in && context->input_frames >= 8)
        return 0;

    if (in && context->input_frames < 8) {
        memcpy(&context->frame_buffer[context->buffered_samples * avctx->channels],
               in, avctx->frame_size * avctx->channels * sizeof(*in));
        context->buffered_samples += avctx->frame_size;
        if (context->input_frames == 0)
            context->first_pts = frame->pts;
        if (context->input_frames < 7) {
            context->input_frames++;
            return 0;
        }
    }
    if (context->input_frames < 8) {
        in = context->frame_buffer;
    }

    if (stereo) {
        context->lastSample[0] &= 0xFF00;
        context->lastSample[1] &= 0xFF00;
    }

    if (context->input_frames == 7)
        data_size = avctx->channels * context->buffered_samples;
    else
        data_size = avctx->channels * avctx->frame_size;

    if ((ret = ff_alloc_packet2(avctx, avpkt, ROQ_HEADER_SIZE + data_size)) < 0)
        return ret;
    out = avpkt->data;

    bytestream_put_byte(&out, stereo ? 0x21 : 0x20);
    bytestream_put_byte(&out, 0x10);
    bytestream_put_le32(&out, data_size);

    if (stereo) {
        bytestream_put_byte(&out, (context->lastSample[1])>>8);
        bytestream_put_byte(&out, (context->lastSample[0])>>8);
    } else
Example #11
/*
 *
 * Encode a frame
 *
 */
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pict, int *got_packet)
{
    LclEncContext *c = avctx->priv_data;
    AVFrame * const p = &c->pic;
    int i, ret;
    int zret; // Zlib return code
    int max_size = deflateBound(&c->zstream, avctx->width * avctx->height * 3);

    if ((ret = ff_alloc_packet2(avctx, pkt, max_size)) < 0)
        return ret;

    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if (avctx->pix_fmt != PIX_FMT_BGR24) {
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
        return -1;
    }

    zret = deflateReset(&c->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Deflate reset error: %d\n", zret);
        return -1;
    }
    c->zstream.next_out  = pkt->data;
    c->zstream.avail_out = pkt->size;

    for (i = avctx->height - 1; i >= 0; i--) {
        c->zstream.next_in  = p->data[0] + p->linesize[0] * i;
        c->zstream.avail_in = avctx->width * 3;
        zret = deflate(&c->zstream, Z_NO_FLUSH);
        if (zret != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Deflate error: %d\n", zret);
            return -1;
        }
    }
    zret = deflate(&c->zstream, Z_FINISH);
    if (zret != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR, "Deflate error: %d\n", zret);
        return -1;
    }

    pkt->size   = c->zstream.total_out;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
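The zlib pattern above (deflateBound() for a worst-case allocation, Z_NO_FLUSH per row, one Z_FINISH at the end) in a self-contained one-shot form; this sketch compresses a single flat buffer rather than bottom-up image rows, and links with -lz:

#include <string.h>
#include <zlib.h>

static int deflate_buffer(const unsigned char *in, unsigned long in_len,
                          unsigned char *out, unsigned long *out_len)
{
    z_stream zs;

    memset(&zs, 0, sizeof(zs));
    if (deflateInit(&zs, Z_DEFAULT_COMPRESSION) != Z_OK)
        return -1;
    if (*out_len < deflateBound(&zs, in_len)) {   /* caller's buffer too small */
        deflateEnd(&zs);
        return -1;
    }
    zs.next_in   = (unsigned char *)in;
    zs.avail_in  = in_len;
    zs.next_out  = out;
    zs.avail_out = *out_len;
    if (deflate(&zs, Z_FINISH) != Z_STREAM_END) { /* one shot: all input given */
        deflateEnd(&zs);
        return -1;
    }
    *out_len = zs.total_out;
    return deflateEnd(&zs) == Z_OK ? 0 : -1;
}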
Example #12
static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    ILBCEncContext *s = avctx->priv_data;
    int ret;

    if ((ret = ff_alloc_packet2(avctx, avpkt, 50)) < 0)
        return ret;

    WebRtcIlbcfix_EncodeImpl((WebRtc_UWord16*) avpkt->data, (const WebRtc_Word16*) frame->data[0], &s->encoder);

    avpkt->size     = s->encoder.no_of_bytes;
    *got_packet_ptr = 1;
    return 0;
}
Example #13
static int s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet_ptr)
{
    S302MEncContext *s = avctx->priv_data;
    const int buf_size = AES3_HEADER_LEN +
                        (frame->nb_samples *
                         avctx->channels *
                        (avctx->bits_per_raw_sample + 4)) / 8;
    int ret, c, channels;
    uint8_t *o;
    PutBitContext pb;

    if ((ret = ff_alloc_packet2(avctx, avpkt, buf_size)) < 0)
        return ret;

    o = avpkt->data;
    init_put_bits(&pb, o, buf_size * 8);
    put_bits(&pb, 16, buf_size - AES3_HEADER_LEN);
    put_bits(&pb, 2, (avctx->channels - 2) >> 1);   // number of channels
    put_bits(&pb, 8, 0);                            // channel ID
    put_bits(&pb, 2, (avctx->bits_per_raw_sample - 16) / 4); // bits per samples (0 = 16bit, 1 = 20bit, 2 = 24bit)
    put_bits(&pb, 4, 0);                            // alignments
    flush_put_bits(&pb);
    o += AES3_HEADER_LEN;

    if (avctx->bits_per_raw_sample == 24) {
        const uint32_t *samples = (uint32_t *)frame->data[0];

        for (c = 0; c < frame->nb_samples; c++) {
            uint8_t vucf = s->framing_index == 0 ? 0x10: 0;

            for (channels = 0; channels < avctx->channels; channels += 2) {
                o[0] = ff_reverse[(samples[0] & 0x0000FF00) >> 8];
                o[1] = ff_reverse[(samples[0] & 0x00FF0000) >> 16];
                o[2] = ff_reverse[(samples[0] & 0xFF000000) >> 24];
                o[3] = ff_reverse[(samples[1] & 0x00000F00) >> 4] | vucf;
                o[4] = ff_reverse[(samples[1] & 0x000FF000) >> 12];
                o[5] = ff_reverse[(samples[1] & 0x0FF00000) >> 20];
                o[6] = ff_reverse[(samples[1] & 0xF0000000) >> 28];
                o += 7;
                samples += 2;
            }

            s->framing_index++;
            if (s->framing_index >= 192)
                s->framing_index = 0;
        }
    } else if (avctx->bits_per_raw_sample == 20) {
Example #14
static int avui_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst, *src = pic->data[0];
    int i, j, skip, ret, size, interlaced = pic->interlaced_frame;

    if (avctx->height == 486) {
        skip = 10;
    } else {
        skip = 16;
    }
    size = 2 * avctx->width * (avctx->height + skip) + 8 * interlaced;
    if ((ret = ff_alloc_packet2(avctx, pkt, size)) < 0)
        return ret;
    dst = pkt->data;
    if (!(avctx->extradata = av_mallocz(24 + FF_INPUT_BUFFER_PADDING_SIZE)))
        return AVERROR(ENOMEM);
    avctx->extradata_size = 24;
    memcpy(avctx->extradata, "\0\0\0\x18""APRGAPRG0001", 16);
    if (interlaced) {
        avctx->extradata[19] = 2;
    } else {
        avctx->extradata[19] = 1;
        dst += avctx->width * skip;
    }

    avctx->coded_frame->reference = 0;
    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    for (i = 0; i <= interlaced; i++) {
        if (interlaced && avctx->height == 486) {
            src = pic->data[0] + (1 - i) * pic->linesize[0];
        } else {
            src = pic->data[0] + i * pic->linesize[0];
        }
        dst += avctx->width * skip + 4 * i;
        for (j = 0; j < avctx->height; j += interlaced + 1) {
            memcpy(dst, src, avctx->width * 2);
            src += (interlaced + 1) * pic->linesize[0];
            dst += avctx->width * 2;
        }
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #15
static int v408_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint8_t *y, *u, *v, *a;
    int i, j, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width * avctx->height * 4)) < 0)
        return ret;
    dst = pkt->data;

    avctx->coded_frame->reference = 0;
    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    y = pic->data[0];
    u = pic->data[1];
    v = pic->data[2];
    a = pic->data[3];

    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width; j++) {
            if (avctx->codec_id == CODEC_ID_AYUV) {
                *dst++ = v[j];
                *dst++ = u[j];
                *dst++ = y[j];
                *dst++ = a[j];
            } else {
                *dst++ = u[j];
                *dst++ = y[j];
                *dst++ = v[j];
                *dst++ = a[j];
            }
        }
        y += pic->linesize[0];
        u += pic->linesize[1];
        v += pic->linesize[2];
        a += pic->linesize[3];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #16
static int avui_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    int i, j, skip, ret, size, interlaced;

    interlaced = avctx->field_order > AV_FIELD_PROGRESSIVE;

    if (avctx->height == 486) {
        skip = 10;
    } else {
        skip = 16;
    }
    size = 2 * avctx->width * (avctx->height + skip) + 8 * interlaced;
    if ((ret = ff_alloc_packet2(avctx, pkt, size)) < 0)
        return ret;
    dst = pkt->data;
    if (!interlaced) {
        dst += avctx->width * skip;
    }

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;

    for (i = 0; i <= interlaced; i++) {
        uint8_t *src;
        if (interlaced && avctx->height == 486) {
            src = pic->data[0] + (1 - i) * pic->linesize[0];
        } else {
            src = pic->data[0] + i * pic->linesize[0];
        }
        dst += avctx->width * skip + 4 * i;
        for (j = 0; j < avctx->height; j += interlaced + 1) {
            memcpy(dst, src, avctx->width * 2);
            src += (interlaced + 1) * pic->linesize[0];
            dst += avctx->width * 2;
        }
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #17
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                        const AVFrame *pic, int *got_packet)
{
    int i, j, ret;
    int aligned_width = FFALIGN(avctx->width,
                                avctx->codec_id == AV_CODEC_ID_R10K ? 1 : 64);
    int pad = (aligned_width - avctx->width) * 4;
    uint8_t *src_line;
    uint8_t *dst;

    if ((ret = ff_alloc_packet2(avctx, pkt, 4 * aligned_width * avctx->height)) < 0)
        return ret;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    src_line = pic->data[0];
    dst = pkt->data;

    for (i = 0; i < avctx->height; i++) {
        uint16_t *src = (uint16_t *)src_line;
        for (j = 0; j < avctx->width; j++) {
            uint32_t pixel;
            uint16_t r = *src++ >> 6;
            uint16_t g = *src++ >> 6;
            uint16_t b = *src++ >> 6;
            if (avctx->codec_id == AV_CODEC_ID_R210)
                pixel = (r << 20) | (g << 10) | b;
            else
                pixel = (r << 22) | (g << 12) | (b << 2);
            if (avctx->codec_id == AV_CODEC_ID_AVRP)
                bytestream_put_le32(&dst, pixel);
            else
                bytestream_put_be32(&dst, pixel);
        }
        memset(dst, 0, pad);
        dst += pad;
        src_line += pic->linesize[0];
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
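For reference, the bit layout being produced: r210 packs each 10-bit component as (r << 20) | (g << 10) | b in the low 30 bits, while R10K uses (r << 22) | (g << 12) | (b << 2). A round-trip sketch for the r210 case:

#include <stdint.h>

static uint32_t pack_r210(uint16_t r, uint16_t g, uint16_t b)
{
    return ((uint32_t)(r & 0x3FF) << 20) | ((g & 0x3FF) << 10) | (b & 0x3FF);
}

static void unpack_r210(uint32_t px, uint16_t *r, uint16_t *g, uint16_t *b)
{
    *r = (px >> 20) & 0x3FF;
    *g = (px >> 10) & 0x3FF;
    *b =  px        & 0x3FF;
}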
Example #18
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                        const AVFrame *frame, int *got_packet_ptr)
{
    DCAEncContext *c = avctx->priv_data;
    const int32_t *samples;
    int ret, i;

    if ((ret = ff_alloc_packet2(avctx, avpkt, c->frame_size, 0)) < 0)
        return ret;

    samples = (const int32_t *)frame->data[0];

    subband_transform(c, samples);
    if (c->lfe_channel)
        lfe_downsample(c, samples);

    calc_masking(c, samples);
    find_peaks(c);
    assign_bits(c);
    calc_scales(c);
    quantize_all(c);
    shift_history(c, samples);

    init_put_bits(&c->pb, avpkt->data, avpkt->size);
    put_frame_header(c);
    put_primary_audio_header(c);
    for (i = 0; i < SUBFRAMES; i++)
        put_subframe(c, i);


    for (i = put_bits_count(&c->pb); i < 8*c->frame_size; i++)
        put_bits(&c->pb, 1, 0);

    flush_put_bits(&c->pb);

    avpkt->pts      = frame->pts;
    avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
    avpkt->size     = put_bits_count(&c->pb) >> 3;
    *got_packet_ptr = 1;
    return 0;
}
Example #19
static int qtrle_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    QtrleEncContext * const s = avctx->priv_data;
    int ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->max_buf_size, 0)) < 0)
        return ret;

    if (avctx->gop_size == 0 || (s->avctx->frame_number % avctx->gop_size) == 0) {
        /* I-Frame */
        s->key_frame = 1;
    } else {
        /* P-Frame */
        s->key_frame = 0;
    }

    pkt->size = encode_frame(s, pict, pkt->data);

    /* save the current frame */
    av_frame_unref(s->previous_frame);
    ret = av_frame_ref(s->previous_frame, pict);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "cannot add reference\n");
        return ret;
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->key_frame = s->key_frame;
    avctx->coded_frame->pict_type = s->key_frame ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    if (s->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
Example #20
static int y41p_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pic, int *got_packet)
{
    uint8_t *dst;
    uint8_t *y, *u, *v;
    int i, j, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width * avctx->height * 1.5)) < 0)
        return ret;

    avctx->coded_frame->key_frame = 1;
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    dst = pkt->data;

    for (i = avctx->height - 1; i >= 0; i--) {
        y = &pic->data[0][i * pic->linesize[0]];
        u = &pic->data[1][i * pic->linesize[1]];
        v = &pic->data[2][i * pic->linesize[2]];
        for (j = 0; j < avctx->width; j += 8) {
            *(dst++) = *(u++);
            *(dst++) = *(y++);
            *(dst++) = *(v++);
            *(dst++) = *(y++);

            *(dst++) = *(u++);
            *(dst++) = *(y++);
            *(dst++) = *(v++);
            *(dst++) = *(y++);

            *(dst++) = *(y++);
            *(dst++) = *(y++);
            *(dst++) = *(y++);
            *(dst++) = *(y++);
        }
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #21
static int encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                        const AVFrame *frame, int *got_packet_ptr)
{
    int i, k, channel;
    DCAContext *c = avctx->priv_data;
    const int16_t *samples;
    int ret, real_channel = 0;

    if ((ret = ff_alloc_packet2(avctx, avpkt, DCA_MAX_FRAME_SIZE + DCA_HEADER_SIZE)))
        return ret;

    samples = (const int16_t *)frame->data[0];
    for (i = 0; i < PCM_SAMPLES; i ++) { /* i is the decimated sample number */
        for (channel = 0; channel < c->prim_channels + 1; channel++) {
            real_channel = c->channel_order_tab[channel];
            if (real_channel >= 0) {
                /* Get 32 PCM samples */
                for (k = 0; k < 32; k++) { /* k is the sample number in a 32-sample block */
                    c->pcm[k] = samples[avctx->channels * (32 * i + k) + channel] << 16;
                }
                /* Put subband samples into the proper place */
                qmf_decompose(c, c->pcm, &c->subband[i][real_channel][0], real_channel);
            }
        }
    }

    if (c->lfe_channel) {
        for (i = 0; i < PCM_SAMPLES / 2; i++) {
            for (k = 0; k < LFE_INTERPOLATION; k++) /* k is the sample number in a 32-sample block */
                c->pcm[k] = samples[avctx->channels * (LFE_INTERPOLATION*i+k) + c->lfe_offset] << 16;
            c->lfe_data[i] = lfe_downsample(c, c->pcm);
        }
    }

    put_frame(c, c->subband, avpkt->data);

    avpkt->size     = c->frame_size;
    *got_packet_ptr = 1;
    return 0;
}
Example #22
static int libgsm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                               const AVFrame *frame, int *got_packet_ptr)
{
    int ret;
    gsm_signal *samples = (gsm_signal *)frame->data[0];
    struct gsm_state *state = avctx->priv_data;

    if ((ret = ff_alloc_packet2(avctx, avpkt, avctx->block_align)))
        return ret;

    switch(avctx->codec_id) {
    case CODEC_ID_GSM:
        gsm_encode(state, samples, avpkt->data);
        break;
    case CODEC_ID_GSM_MS:
        gsm_encode(state, samples,                  avpkt->data);
        gsm_encode(state, samples + GSM_FRAME_SIZE, avpkt->data + 32);
    }

    *got_packet_ptr = 1;
    return 0;
}
Example #23
static int Faac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                             const AVFrame *frame, int *got_packet_ptr)
{
    FaacAudioContext *s = avctx->priv_data;
    int bytes_written, ret;
    int num_samples  = frame ? frame->nb_samples : 0;
    void *samples    = frame ? frame->data[0]    : NULL;

    if ((ret = ff_alloc_packet2(avctx, avpkt, (7 + 768) * avctx->channels))) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet\n");
        return ret;
    }

    bytes_written = faacEncEncode(s->faac_handle, samples,
                                  num_samples * avctx->channels,
                                  avpkt->data, avpkt->size);
    if (bytes_written < 0) {
        av_log(avctx, AV_LOG_ERROR, "faacEncEncode() error\n");
        return bytes_written;
    }

    /* add current frame to the queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    if (!bytes_written)
        return 0;

    /* Get the next frame pts/duration */
    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = bytes_written;
    *got_packet_ptr = 1;
    return 0;
}
Example #24
static int raw_encode(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    int ret = avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);

    if (ret < 0)
        return ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, ret)) < 0)
        return ret;
    if ((ret = avpicture_layout((const AVPicture *)frame, avctx->pix_fmt, avctx->width,
                                avctx->height, pkt->data, pkt->size)) < 0)
        return ret;

    if (avctx->codec_tag == AV_RL32("yuv2") && ret > 0 &&
        avctx->pix_fmt   == AV_PIX_FMT_YUYV422) {
        int x;
        for (x = 1; x < avctx->height * avctx->width * 2; x += 2)
            pkt->data[x] ^= 0x80;
    }
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
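The XOR pass flips the top bit of every odd byte, i.e. the U and V samples of each YUYV macropixel, re-biasing chroma between the unsigned (0..255, centred at 128) and signed (-128..127, centred at 0) conventions; the latter is what the QuickTime 'yuv2' variant stores. The same operation on a single macropixel:

#include <stdint.h>

static inline void yuyv_pair_to_yuv2(uint8_t p[4])
{
    p[1] ^= 0x80;   /* U: flip the sign bit, equivalent to subtracting 128 */
    p[3] ^= 0x80;   /* V: likewise; Y samples at p[0] and p[2] are untouched */
}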
Example #25
static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *frame, int *got_packet)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    opj_cinfo_t *compress   = ctx->compress;
    opj_image_t *image      = ctx->image;
    opj_cio_t *stream       = ctx->stream;
    int cpyresult = 0;
    int ret, len;
    AVFrame *gbrframe;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_YA8:
        cpyresult = libopenjpeg_copy_packed8(avctx, frame, image);
        break;
    case AV_PIX_FMT_XYZ12:
        cpyresult = libopenjpeg_copy_packed12(avctx, frame, image);
        break;
    case AV_PIX_FMT_RGB48:
    case AV_PIX_FMT_RGBA64:
    case AV_PIX_FMT_YA16:
        cpyresult = libopenjpeg_copy_packed16(avctx, frame, image);
        break;
    case AV_PIX_FMT_GBR24P:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
        gbrframe = av_frame_clone(frame);
        if (!gbrframe)
            return AVERROR(ENOMEM);
        gbrframe->data[0] = frame->data[2]; // swap to be rgb
        gbrframe->data[1] = frame->data[0];
        gbrframe->data[2] = frame->data[1];
        gbrframe->linesize[0] = frame->linesize[2];
        gbrframe->linesize[1] = frame->linesize[0];
        gbrframe->linesize[2] = frame->linesize[1];
        if (avctx->pix_fmt == AV_PIX_FMT_GBR24P) {
            cpyresult = libopenjpeg_copy_unpacked8(avctx, gbrframe, image);
        } else {
            cpyresult = libopenjpeg_copy_unpacked16(avctx, gbrframe, image);
        }
        av_frame_free(&gbrframe);
        break;
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
        cpyresult = libopenjpeg_copy_unpacked8(avctx, frame, image);
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUVA444P16:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA420P16:
        cpyresult = libopenjpeg_copy_unpacked16(avctx, frame, image);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The frame's pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
        break;
    }

    if (!cpyresult) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not copy the frame data to the internal image buffer\n");
        return -1;
    }

    opj_setup_encoder(compress, &ctx->enc_params, image);

    stream = opj_cio_open((opj_common_ptr) compress, NULL, 0);
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
        return AVERROR(ENOMEM);
    }

    if (!opj_encode(compress, stream, image, NULL)) {
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        return -1;
    }

    len = cio_tell(stream);
    if ((ret = ff_alloc_packet2(avctx, pkt, len)) < 0) {
        return ret;
    }

    memcpy(pkt->data, stream->buffer, len);
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #26
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    AACEncContext *s = avctx->priv_data;
    float **samples = s->planar_samples, *samples2, *la, *overlap;
    ChannelElement *cpe;
    SingleChannelElement *sce;
    IndividualChannelStream *ics;
    int i, its, ch, w, chans, tag, start_ch, ret, frame_bits;
    int target_bits, rate_bits, too_many_bits, too_few_bits;
    int ms_mode = 0, is_mode = 0, tns_mode = 0, pred_mode = 0;
    int chan_el_counter[4];
    FFPsyWindowInfo windows[AAC_MAX_CHANNELS];

    if (s->last_frame == 2)
        return 0;

    /* add current frame to queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    copy_input_samples(s, frame);
    if (s->psypp)
        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);

    if (!avctx->frame_number)
        return 0;

    start_ch = 0;
    for (i = 0; i < s->chan_map[0]; i++) {
        FFPsyWindowInfo* wi = windows + start_ch;
        tag      = s->chan_map[i+1];
        chans    = tag == TYPE_CPE ? 2 : 1;
        cpe      = &s->cpe[i];
        for (ch = 0; ch < chans; ch++) {
            float clip_avoidance_factor;
            sce = &cpe->ch[ch];
            ics = &sce->ics;
            s->cur_channel = start_ch + ch;
            overlap  = &samples[s->cur_channel][0];
            samples2 = overlap + 1024;
            la       = samples2 + (448+64);
            if (!frame)
                la = NULL;
            if (tag == TYPE_LFE) {
                wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
                wi[ch].window_shape   = 0;
                wi[ch].num_windows    = 1;
                wi[ch].grouping[0]    = 1;

                /* Only the lowest 12 coefficients are used in a LFE channel.
                 * The expression below results in only the bottom 8 coefficients
                 * being used for 11.025kHz to 16kHz sample rates.
                 */
                ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
            } else {
                wi[ch] = s->psy.model->window(&s->psy, samples2, la, s->cur_channel,
                                              ics->window_sequence[0]);
            }
            ics->window_sequence[1] = ics->window_sequence[0];
            ics->window_sequence[0] = wi[ch].window_type[0];
            ics->use_kb_window[1]   = ics->use_kb_window[0];
            ics->use_kb_window[0]   = wi[ch].window_shape;
            ics->num_windows        = wi[ch].num_windows;
            ics->swb_sizes          = s->psy.bands    [ics->num_windows == 8];
            ics->num_swb            = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8];
            ics->max_sfb            = FFMIN(ics->max_sfb, ics->num_swb);
            ics->swb_offset         = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                        ff_swb_offset_128 [s->samplerate_index]:
                                        ff_swb_offset_1024[s->samplerate_index];
            ics->tns_max_bands      = wi[ch].window_type[0] == EIGHT_SHORT_SEQUENCE ?
                                        ff_tns_max_bands_128 [s->samplerate_index]:
                                        ff_tns_max_bands_1024[s->samplerate_index];
            clip_avoidance_factor = 0.0f;
            for (w = 0; w < ics->num_windows; w++)
                ics->group_len[w] = wi[ch].grouping[w];
            for (w = 0; w < ics->num_windows; w++) {
                if (wi[ch].clipping[w] > CLIP_AVOIDANCE_FACTOR) {
                    ics->window_clipping[w] = 1;
                    clip_avoidance_factor = FFMAX(clip_avoidance_factor, wi[ch].clipping[w]);
                } else {
                    ics->window_clipping[w] = 0;
                }
            }
            if (clip_avoidance_factor > CLIP_AVOIDANCE_FACTOR) {
                ics->clip_avoidance_factor = CLIP_AVOIDANCE_FACTOR / clip_avoidance_factor;
            } else {
                ics->clip_avoidance_factor = 1.0f;
            }

            apply_window_and_mdct(s, sce, overlap);

            if (s->options.ltp && s->coder->update_ltp) {
                s->coder->update_ltp(s, sce);
                apply_window[sce->ics.window_sequence[0]](s->fdsp, sce, &sce->ltp_state[0]);
                s->mdct1024.mdct_calc(&s->mdct1024, sce->lcoeffs, sce->ret_buf);
            }

            if (!(isfinite(cpe->ch[ch].coeffs[    0]) &&
                  isfinite(cpe->ch[ch].coeffs[  128]) &&
                  isfinite(cpe->ch[ch].coeffs[2*128]) &&
                  isfinite(cpe->ch[ch].coeffs[3*128]) &&
                  isfinite(cpe->ch[ch].coeffs[4*128]) &&
                  isfinite(cpe->ch[ch].coeffs[5*128]) &&
                  isfinite(cpe->ch[ch].coeffs[6*128]) &&
                  isfinite(cpe->ch[ch].coeffs[7*128]))
            ) {
                av_log(avctx, AV_LOG_ERROR, "Input contains NaN/+-Inf\n");
                return AVERROR(EINVAL);
            }
            avoid_clipping(s, sce);
        }
        start_ch += chans;
    }
    if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels, 0)) < 0)
        return ret;
    frame_bits = its = 0;
    do {
        init_put_bits(&s->pb, avpkt->data, avpkt->size);

        if ((avctx->frame_number & 0xFF) == 1 && !(avctx->flags & AV_CODEC_FLAG_BITEXACT))
            put_bitstream_info(s, LIBAVCODEC_IDENT);
        start_ch = 0;
        target_bits = 0;
        memset(chan_el_counter, 0, sizeof(chan_el_counter));
        for (i = 0; i < s->chan_map[0]; i++) {
            FFPsyWindowInfo* wi = windows + start_ch;
            const float *coeffs[2];
            tag      = s->chan_map[i+1];
            chans    = tag == TYPE_CPE ? 2 : 1;
            cpe      = &s->cpe[i];
            cpe->common_window = 0;
            memset(cpe->is_mask, 0, sizeof(cpe->is_mask));
            memset(cpe->ms_mask, 0, sizeof(cpe->ms_mask));
            put_bits(&s->pb, 3, tag);
            put_bits(&s->pb, 4, chan_el_counter[tag]++);
            for (ch = 0; ch < chans; ch++) {
                sce = &cpe->ch[ch];
                coeffs[ch] = sce->coeffs;
                sce->ics.predictor_present = 0;
                sce->ics.ltp.present = 0;
                memset(sce->ics.ltp.used, 0, sizeof(sce->ics.ltp.used));
                memset(sce->ics.prediction_used, 0, sizeof(sce->ics.prediction_used));
                memset(&sce->tns, 0, sizeof(TemporalNoiseShaping));
                for (w = 0; w < 128; w++)
                    if (sce->band_type[w] > RESERVED_BT)
                        sce->band_type[w] = 0;
            }
            s->psy.bitres.alloc = -1;
            s->psy.bitres.bits = s->last_frame_pb_count / s->channels;
            s->psy.model->analyze(&s->psy, start_ch, coeffs, wi);
            if (s->psy.bitres.alloc > 0) {
                /* Lambda unused here on purpose, we need to take psy's unscaled allocation */
                target_bits += s->psy.bitres.alloc
                    * (s->lambda / (avctx->global_quality ? avctx->global_quality : 120));
                s->psy.bitres.alloc /= chans;
            }
            s->cur_type = tag;
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch + ch;
                if (s->options.pns && s->coder->mark_pns)
                    s->coder->mark_pns(s, avctx, &cpe->ch[ch]);
                s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
            }
            if (chans > 1
                && wi[0].window_type[0] == wi[1].window_type[0]
                && wi[0].window_shape   == wi[1].window_shape) {

                cpe->common_window = 1;
                for (w = 0; w < wi[0].num_windows; w++) {
                    if (wi[0].grouping[w] != wi[1].grouping[w]) {
                        cpe->common_window = 0;
                        break;
                    }
                }
            }
            for (ch = 0; ch < chans; ch++) { /* TNS and PNS */
                sce = &cpe->ch[ch];
                s->cur_channel = start_ch + ch;
                if (s->options.tns && s->coder->search_for_tns)
                    s->coder->search_for_tns(s, sce);
                if (s->options.tns && s->coder->apply_tns_filt)
                    s->coder->apply_tns_filt(s, sce);
                if (sce->tns.present)
                    tns_mode = 1;
                if (s->options.pns && s->coder->search_for_pns)
                    s->coder->search_for_pns(s, avctx, sce);
            }
            s->cur_channel = start_ch;
            if (s->options.intensity_stereo) { /* Intensity Stereo */
                if (s->coder->search_for_is)
                    s->coder->search_for_is(s, avctx, cpe);
                if (cpe->is_mode) is_mode = 1;
                apply_intensity_stereo(cpe);
            }
            if (s->options.pred) { /* Prediction */
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->options.pred && s->coder->search_for_pred)
                        s->coder->search_for_pred(s, sce);
                    if (cpe->ch[ch].ics.predictor_present) pred_mode = 1;
                }
                if (s->coder->adjust_common_pred)
                    s->coder->adjust_common_pred(s, cpe);
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->options.pred && s->coder->apply_main_pred)
                        s->coder->apply_main_pred(s, sce);
                }
                s->cur_channel = start_ch;
            }
            if (s->options.mid_side) { /* Mid/Side stereo */
                if (s->options.mid_side == -1 && s->coder->search_for_ms)
                    s->coder->search_for_ms(s, cpe);
                else if (cpe->common_window)
                    memset(cpe->ms_mask, 1, sizeof(cpe->ms_mask));
                apply_mid_side_stereo(cpe);
            }
            adjust_frame_information(cpe, chans);
            if (s->options.ltp) { /* LTP */
                for (ch = 0; ch < chans; ch++) {
                    sce = &cpe->ch[ch];
                    s->cur_channel = start_ch + ch;
                    if (s->coder->search_for_ltp)
                        s->coder->search_for_ltp(s, sce, cpe->common_window);
                    if (sce->ics.ltp.present) pred_mode = 1;
                }
                s->cur_channel = start_ch;
                if (s->coder->adjust_common_ltp)
                    s->coder->adjust_common_ltp(s, cpe);
            }
            if (chans == 2) {
                put_bits(&s->pb, 1, cpe->common_window);
                if (cpe->common_window) {
                    put_ics_info(s, &cpe->ch[0].ics);
                    if (s->coder->encode_main_pred)
                        s->coder->encode_main_pred(s, &cpe->ch[0]);
                    if (s->coder->encode_ltp_info)
                        s->coder->encode_ltp_info(s, &cpe->ch[0], 1);
                    encode_ms_info(&s->pb, cpe);
                    if (cpe->ms_mode) ms_mode = 1;
                }
            }
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch + ch;
                encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window);
            }
            start_ch += chans;
        }

        if (avctx->flags & CODEC_FLAG_QSCALE) {
            /* When using a constant Q-scale, don't mess with lambda */
            break;
        }

        /* rate control stuff
         * allow between the nominal bitrate, and what psy's bit reservoir says to target
         * but drift towards the nominal bitrate always
         */
        frame_bits = put_bits_count(&s->pb);
        rate_bits = avctx->bit_rate * 1024 / avctx->sample_rate;
        rate_bits = FFMIN(rate_bits, 6144 * s->channels - 3);
        too_many_bits = FFMAX(target_bits, rate_bits);
        too_many_bits = FFMIN(too_many_bits, 6144 * s->channels - 3);
        too_few_bits = FFMIN(FFMAX(rate_bits - rate_bits/4, target_bits), too_many_bits);

        /* When using ABR, be strict (but only for increasing) */
        too_few_bits = too_few_bits - too_few_bits/8;
        too_many_bits = too_many_bits + too_many_bits/2;

        if (   its == 0 /* for steady-state Q-scale tracking */
            || (its < 5 && (frame_bits < too_few_bits || frame_bits > too_many_bits))
            || frame_bits >= 6144 * s->channels - 3  )
        {
            float ratio = ((float)rate_bits) / frame_bits;

            if (frame_bits >= too_few_bits && frame_bits <= too_many_bits) {
                /*
                 * This path is for steady-state Q-scale tracking
                 * When frame bits fall within the stable range, we still need to adjust
                 * lambda to maintain it like so in a stable fashion (large jumps in lambda
                 * create artifacts and should be avoided), but slowly
                 */
                ratio = sqrtf(sqrtf(ratio));
                ratio = av_clipf(ratio, 0.9f, 1.1f);
            } else {
                /* Not so fast though */
                ratio = sqrtf(ratio);
            }
            s->lambda = FFMIN(s->lambda * ratio, 65536.f);

            /* Keep iterating if we must reduce and lambda is in the sky */
            if (ratio > 0.9f && ratio < 1.1f) {
                break;
            } else {
                if (is_mode || ms_mode || tns_mode || pred_mode) {
                    for (i = 0; i < s->chan_map[0]; i++) {
                        // Must restore coeffs
                        chans = tag == TYPE_CPE ? 2 : 1;
                        cpe = &s->cpe[i];
                        for (ch = 0; ch < chans; ch++)
                            memcpy(cpe->ch[ch].coeffs, cpe->ch[ch].pcoeffs, sizeof(cpe->ch[ch].coeffs));
                    }
                }
                its++;
            }
        } else {
            break;
        }
    } while (1);

    if (s->options.ltp && s->coder->ltp_insert_new_frame)
        s->coder->ltp_insert_new_frame(s);

    put_bits(&s->pb, 3, TYPE_END);
    flush_put_bits(&s->pb);

    s->last_frame_pb_count = put_bits_count(&s->pb);

    s->lambda_sum += s->lambda;
    s->lambda_count++;

    if (!frame)
        s->last_frame++;

    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = put_bits_count(&s->pb) >> 3;
    *got_packet_ptr = 1;
    return 0;
}
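The rate-control core of the loop above, isolated into a sketch: lambda is nudged by the target/achieved bit ratio, gently (fourth root, clipped to [0.9, 1.1]) when the frame is already inside the acceptable window, more aggressively (square root) when it is outside:

#include <math.h>

static float update_lambda(float lambda, int rate_bits, int frame_bits,
                           int in_window)
{
    float ratio = (float)rate_bits / frame_bits;

    if (in_window) {
        /* small, clipped steps avoid audible jumps in quantizer scale */
        ratio = sqrtf(sqrtf(ratio));
        ratio = ratio < 0.9f ? 0.9f : ratio > 1.1f ? 1.1f : ratio;
    } else {
        ratio = sqrtf(ratio);
    }
    lambda *= ratio;
    return lambda < 65536.f ? lambda : 65536.f;   /* FFMIN(lambda, 65536.f) */
}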
Example #27
static int bmp_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    BMPContext *s = avctx->priv_data;
    AVFrame * const p = &s->picture;
    int n_bytes_image, n_bytes_per_row, n_bytes, i, n, hsize, ret;
    const uint32_t *pal = NULL;
    uint32_t palette256[256];
    int pad_bytes_per_row, pal_entries = 0, compression = BMP_RGB;
    int bit_count = avctx->bits_per_coded_sample;
    uint8_t *ptr, *buf;
    *p = *pict;
    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;
    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB444:
        compression = BMP_BITFIELDS;
        pal = rgb444_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB565:
        compression = BMP_BITFIELDS;
        pal = rgb565_masks; // abuse pal to hold color masks
        pal_entries = 3;
        break;
    case AV_PIX_FMT_RGB8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB4_BYTE:
    case AV_PIX_FMT_BGR4_BYTE:
    case AV_PIX_FMT_GRAY8:
        av_assert1(bit_count == 8);
        ff_set_systematic_pal2(palette256, avctx->pix_fmt);
        pal = palette256;
        break;
    case AV_PIX_FMT_PAL8:
        pal = (uint32_t *)p->data[1];
        break;
    case AV_PIX_FMT_MONOBLACK:
        pal = monoblack_pal;
        break;
    }
    if (pal && !pal_entries) pal_entries = 1 << bit_count;
    n_bytes_per_row = ((int64_t)avctx->width * (int64_t)bit_count + 7LL) >> 3LL;
    pad_bytes_per_row = (4 - n_bytes_per_row) & 3;
    n_bytes_image = avctx->height * (n_bytes_per_row + pad_bytes_per_row);

    // STRUCTURE.field refer to the MSVC documentation for BITMAPFILEHEADER
    // and related pages.
#define SIZE_BITMAPFILEHEADER 14
#define SIZE_BITMAPINFOHEADER 40
    hsize = SIZE_BITMAPFILEHEADER + SIZE_BITMAPINFOHEADER + (pal_entries << 2);
    n_bytes = n_bytes_image + hsize;
    if ((ret = ff_alloc_packet2(avctx, pkt, n_bytes)) < 0)
        return ret;
    buf = pkt->data;
    bytestream_put_byte(&buf, 'B');                   // BITMAPFILEHEADER.bfType
    bytestream_put_byte(&buf, 'M');                   // do.
    bytestream_put_le32(&buf, n_bytes);               // BITMAPFILEHEADER.bfSize
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved1
    bytestream_put_le16(&buf, 0);                     // BITMAPFILEHEADER.bfReserved2
    bytestream_put_le32(&buf, hsize);                 // BITMAPFILEHEADER.bfOffBits
    bytestream_put_le32(&buf, SIZE_BITMAPINFOHEADER); // BITMAPINFOHEADER.biSize
    bytestream_put_le32(&buf, avctx->width);          // BITMAPINFOHEADER.biWidth
    bytestream_put_le32(&buf, avctx->height);         // BITMAPINFOHEADER.biHeight
    bytestream_put_le16(&buf, 1);                     // BITMAPINFOHEADER.biPlanes
    bytestream_put_le16(&buf, bit_count);             // BITMAPINFOHEADER.biBitCount
    bytestream_put_le32(&buf, compression);           // BITMAPINFOHEADER.biCompression
    bytestream_put_le32(&buf, n_bytes_image);         // BITMAPINFOHEADER.biSizeImage
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biXPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biYPelsPerMeter
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrUsed
    bytestream_put_le32(&buf, 0);                     // BITMAPINFOHEADER.biClrImportant
    for (i = 0; i < pal_entries; i++)
        bytestream_put_le32(&buf, pal[i] & 0xFFFFFF);
    // BMP files are bottom-to-top so we start from the end...
    ptr = p->data[0] + (avctx->height - 1) * p->linesize[0];
    buf = pkt->data + hsize;
    for (i = 0; i < avctx->height; i++) {
        if (bit_count == 16) {
            const uint16_t *src = (const uint16_t *) ptr;
            uint16_t *dst = (uint16_t *) buf;
            for (n = 0; n < avctx->width; n++)
                AV_WL16(dst + n, src[n]);
        } else {
            memcpy(buf, ptr, n_bytes_per_row);
        }
        buf += n_bytes_per_row;
        memset(buf, 0, pad_bytes_per_row);
        buf += pad_bytes_per_row;
        ptr -= p->linesize[0]; // ... and go back
    }

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}
Example #28
static int libkvazaar_encode(AVCodecContext *avctx,
                             AVPacket *avpkt,
                             const AVFrame *frame,
                             int *got_packet_ptr)
{
    LibkvazaarContext *ctx = avctx->priv_data;
    kvz_picture *input_pic = NULL;
    kvz_picture *recon_pic = NULL;
    kvz_frame_info frame_info;
    kvz_data_chunk *data_out = NULL;
    uint32_t len_out = 0;
    int retval = 0;

    *got_packet_ptr = 0;

    if (frame) {
        if (frame->width != ctx->config->width ||
                frame->height != ctx->config->height) {
            av_log(avctx, AV_LOG_ERROR,
                   "Changing video dimensions during encoding is not supported. "
                   "(changed from %dx%d to %dx%d)\n",
                   ctx->config->width, ctx->config->height,
                   frame->width, frame->height);
            retval = AVERROR_INVALIDDATA;
            goto done;
        }

        if (frame->format != avctx->pix_fmt) {
            av_log(avctx, AV_LOG_ERROR,
                   "Changing pixel format during encoding is not supported. "
                   "(changed from %s to %s)\n",
                   av_get_pix_fmt_name(avctx->pix_fmt),
                   av_get_pix_fmt_name(frame->format));
            retval = AVERROR_INVALIDDATA;
            goto done;
        }

        // Allocate input picture for kvazaar.
        input_pic = ctx->api->picture_alloc(frame->width, frame->height);
        if (!input_pic) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate picture.\n");
            retval = AVERROR(ENOMEM);
            goto done;
        }

        // Copy pixels from frame to input_pic.
        {
            int dst_linesizes[4] = {
              frame->width,
              frame->width / 2,
              frame->width / 2,
              0
            };
            av_image_copy(input_pic->data, dst_linesizes,
                          (const uint8_t **)frame->data, frame->linesize,
                          frame->format, frame->width, frame->height);
        }

        input_pic->pts = frame->pts;
    }

    retval = ctx->api->encoder_encode(ctx->encoder,
                                      input_pic,
                                      &data_out, &len_out,
                                      &recon_pic, NULL,
                                      &frame_info);
    if (!retval) {
        av_log(avctx, AV_LOG_ERROR, "Failed to encode frame.\n");
        retval = AVERROR_INVALIDDATA;
        goto done;
    }
    else
        retval = 0; /* kvazaar returns 1 on success */

    if (data_out) {
        kvz_data_chunk *chunk = NULL;
        uint64_t written = 0;

        retval = ff_alloc_packet2(avctx, avpkt, len_out, len_out);
        if (retval < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate output packet.\n");
            goto done;
        }

        for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
            av_assert0(written + chunk->len <= len_out);
            memcpy(avpkt->data + written, chunk->data, chunk->len);
            written += chunk->len;
        }

        avpkt->pts = recon_pic->pts;
        avpkt->dts = recon_pic->dts;
        avpkt->flags = 0;
        // IRAP VCL NAL unit types span the range
        // [BLA_W_LP (16), RSV_IRAP_VCL23 (23)].
        if (frame_info.nal_unit_type >= KVZ_NAL_BLA_W_LP &&
                frame_info.nal_unit_type <= KVZ_NAL_RSV_IRAP_VCL23) {
            avpkt->flags |= AV_PKT_FLAG_KEY;
        }

        *got_packet_ptr = 1;
    }

done:
    ctx->api->picture_free(input_pic);
    ctx->api->picture_free(recon_pic);
    ctx->api->chunk_free(data_out);
    return retval;
}
Example #29
static int aac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet_ptr)
{
    AACEncContext *s = avctx->priv_data;
    float **samples = s->planar_samples, *samples2, *la, *overlap;
    ChannelElement *cpe;
    int i, ch, w, g, chans, tag, start_ch, ret;
    int chan_el_counter[4];
    FFPsyWindowInfo windows[AAC_MAX_CHANNELS];

    if (s->last_frame == 2)
        return 0;

    /* add current frame to queue */
    if (frame) {
        if ((ret = ff_af_queue_add(&s->afq, frame)) < 0)
            return ret;
    }

    copy_input_samples(s, frame);
    if (s->psypp)
        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);

    if (!avctx->frame_number)
        return 0;

    start_ch = 0;
    for (i = 0; i < s->chan_map[0]; i++) {
        FFPsyWindowInfo* wi = windows + start_ch;
        tag      = s->chan_map[i+1];
        chans    = tag == TYPE_CPE ? 2 : 1;
        cpe      = &s->cpe[i];
        for (ch = 0; ch < chans; ch++) {
            IndividualChannelStream *ics = &cpe->ch[ch].ics;
            int cur_channel = start_ch + ch;
            overlap  = &samples[cur_channel][0];
            samples2 = overlap + 1024;
            la       = samples2 + (448+64);
            if (!frame)
                la = NULL;
            if (tag == TYPE_LFE) {
                wi[ch].window_type[0] = ONLY_LONG_SEQUENCE;
                wi[ch].window_shape   = 0;
                wi[ch].num_windows    = 1;
                wi[ch].grouping[0]    = 1;

                /* Only the lowest 12 coefficients are used in an LFE channel.
                 * The expression below results in only the bottom 8 coefficients
                 * being used for 11.025kHz to 16kHz sample rates.
                 */
                ics->num_swb = s->samplerate_index >= 8 ? 1 : 3;
            } else {
                wi[ch] = s->psy.model->window(&s->psy, samples2, la, cur_channel,
                                              ics->window_sequence[0]);
            }
            ics->window_sequence[1] = ics->window_sequence[0];
            ics->window_sequence[0] = wi[ch].window_type[0];
            ics->use_kb_window[1]   = ics->use_kb_window[0];
            ics->use_kb_window[0]   = wi[ch].window_shape;
            ics->num_windows        = wi[ch].num_windows;
            ics->swb_sizes          = s->psy.bands    [ics->num_windows == 8];
            ics->num_swb            = tag == TYPE_LFE ? ics->num_swb : s->psy.num_bands[ics->num_windows == 8];
            for (w = 0; w < ics->num_windows; w++)
                ics->group_len[w] = wi[ch].grouping[w];

            apply_window_and_mdct(s, &cpe->ch[ch], overlap);
        }
        start_ch += chans;
    }
    if ((ret = ff_alloc_packet2(avctx, avpkt, 8192 * s->channels)) < 0)
        return ret;
    do {
        int frame_bits;

        init_put_bits(&s->pb, avpkt->data, avpkt->size);

        if ((avctx->frame_number & 0xFF) == 1 && !(avctx->flags & CODEC_FLAG_BITEXACT))
            put_bitstream_info(s, LIBAVCODEC_IDENT);
        start_ch = 0;
        memset(chan_el_counter, 0, sizeof(chan_el_counter));
        for (i = 0; i < s->chan_map[0]; i++) {
            FFPsyWindowInfo* wi = windows + start_ch;
            const float *coeffs[2];
            tag      = s->chan_map[i+1];
            chans    = tag == TYPE_CPE ? 2 : 1;
            cpe      = &s->cpe[i];
            put_bits(&s->pb, 3, tag);
            put_bits(&s->pb, 4, chan_el_counter[tag]++);
            for (ch = 0; ch < chans; ch++)
                coeffs[ch] = cpe->ch[ch].coeffs;
            s->psy.model->analyze(&s->psy, start_ch, coeffs, wi);
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch * 2 + ch;
                s->coder->search_for_quantizers(avctx, s, &cpe->ch[ch], s->lambda);
            }
            cpe->common_window = 0;
            if (chans > 1
                && wi[0].window_type[0] == wi[1].window_type[0]
                && wi[0].window_shape   == wi[1].window_shape) {

                cpe->common_window = 1;
                for (w = 0; w < wi[0].num_windows; w++) {
                    if (wi[0].grouping[w] != wi[1].grouping[w]) {
                        cpe->common_window = 0;
                        break;
                    }
                }
            }
            s->cur_channel = start_ch * 2;
            if (s->options.stereo_mode && cpe->common_window) {
                if (s->options.stereo_mode > 0) {
                    IndividualChannelStream *ics = &cpe->ch[0].ics;
                    for (w = 0; w < ics->num_windows; w += ics->group_len[w])
                        for (g = 0;  g < ics->num_swb; g++)
                            cpe->ms_mask[w*16+g] = 1;
                } else if (s->coder->search_for_ms) {
                    s->coder->search_for_ms(s, cpe, s->lambda);
                }
            }
            adjust_frame_information(cpe, chans);
            if (chans == 2) {
                put_bits(&s->pb, 1, cpe->common_window);
                if (cpe->common_window) {
                    put_ics_info(s, &cpe->ch[0].ics);
                    encode_ms_info(&s->pb, cpe);
                }
            }
            for (ch = 0; ch < chans; ch++) {
                s->cur_channel = start_ch + ch;
                encode_individual_channel(avctx, s, &cpe->ch[ch], cpe->common_window);
            }
            start_ch += chans;
        }

        frame_bits = put_bits_count(&s->pb);
        if (frame_bits <= 6144 * s->channels - 3) {
            s->psy.bitres.bits = frame_bits / s->channels;
            break;
        }

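        /* The frame overflowed the 6144-bit-per-channel AAC buffer:
         * scale lambda by target_bits / actual_bits (1024 samples per
         * frame) and encode the frame again. */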
        s->lambda *= avctx->bit_rate * 1024.0f / avctx->sample_rate / frame_bits;

    } while (1);

    put_bits(&s->pb, 3, TYPE_END);
    flush_put_bits(&s->pb);
    avctx->frame_bits = put_bits_count(&s->pb);

    // Rate control: adapt lambda toward the target bitrate.
    if (!(avctx->flags & CODEC_FLAG_QSCALE)) {
        float ratio = avctx->bit_rate * 1024.0f / avctx->sample_rate / avctx->frame_bits;
        s->lambda *= ratio;
        s->lambda = FFMIN(s->lambda, 65536.f);
    }

    if (!frame)
        s->last_frame++;

    ff_af_queue_remove(&s->afq, avctx->frame_size, &avpkt->pts,
                       &avpkt->duration);

    avpkt->size = put_bits_count(&s->pb) >> 3;
    *got_packet_ptr = 1;
    return 0;
}
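The do/while loop in this example is a simple proportional rate control: whenever a coded frame exceeds the 6144-bits-per-channel AAC buffer limit, lambda is scaled by the target-to-actual bit ratio and the frame is encoded again. A standalone sketch of just that update step (the function name and signature are illustrative, not part of the encoder):

#include <stdint.h>

/* One proportional rate-control step: scale the RD lambda so the next
 * attempt lands near the per-frame bit budget. AAC codes 1024 samples
 * per frame, so the budget is bit_rate * 1024 / sample_rate bits. */
static float aac_update_lambda(float lambda, int64_t bit_rate,
                               int sample_rate, int frame_bits)
{
    float target_bits = bit_rate * 1024.0f / sample_rate;

    lambda *= target_bits / frame_bits;
    /* Clamp, matching the FFMIN(s->lambda, 65536.f) above. */
    return lambda < 65536.0f ? lambda : 65536.0f;
}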
Example #30
static int libschroedinger_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                        const AVFrame *frame, int *got_packet)
{
    int enc_size = 0;
    SchroEncoderParams *p_schro_params = avctx->priv_data;
    SchroEncoder *encoder = p_schro_params->encoder;
    struct FFSchroEncodedFrame *p_frame_output = NULL;
    int go = 1;
    SchroBuffer *enc_buf;
    int presentation_frame;
    int parse_code;
    int last_frame_in_sequence = 0;
    int pkt_size, ret;

    if (!frame) {
        /* Push end of sequence if not already signalled. */
        if (!p_schro_params->eos_signalled) {
            schro_encoder_end_of_stream(encoder);
            p_schro_params->eos_signalled = 1;
        }
    } else {
        /* Allocate frame data to schro input buffer. */
        SchroFrame *in_frame = libschroedinger_frame_from_data(avctx, frame);
        /* Load next frame. */
        schro_encoder_push_frame(encoder, in_frame);
    }

    if (p_schro_params->eos_pulled)
        go = 0;

    /* Now check to see if we have any output from the encoder. */
    while (go) {
        SchroStateEnum state;
        state = schro_encoder_wait(encoder);
        switch (state) {
        case SCHRO_STATE_HAVE_BUFFER:
        case SCHRO_STATE_END_OF_STREAM:
            enc_buf = schro_encoder_pull(encoder, &presentation_frame);
            av_assert0(enc_buf->length > 0);
            parse_code = enc_buf->data[4];

            /* All non-frame data is prepended to actual frame data to
             * be able to set the pts correctly. So we don't write data
             * to the frame output queue until we actually have a frame
             */
            /* Grow the pending buffer with av_reallocp(), which frees the
             * old buffer on failure, so an allocation error cannot leak. */
            if ((ret = av_reallocp(&p_schro_params->enc_buf,
                                   p_schro_params->enc_buf_size +
                                   enc_buf->length)) < 0) {
                p_schro_params->enc_buf_size = 0;
                schro_buffer_unref(enc_buf);
                return ret;
            }

            memcpy(p_schro_params->enc_buf + p_schro_params->enc_buf_size,
                   enc_buf->data, enc_buf->length);
            p_schro_params->enc_buf_size += enc_buf->length;

            if (state == SCHRO_STATE_END_OF_STREAM) {
                p_schro_params->eos_pulled = 1;
                go = 0;
            }

            if (!SCHRO_PARSE_CODE_IS_PICTURE(parse_code)) {
                schro_buffer_unref(enc_buf);
                break;
            }

            /* Create output frame. */
            p_frame_output = av_mallocz(sizeof(FFSchroEncodedFrame));
            if (!p_frame_output) {
                schro_buffer_unref(enc_buf);
                return AVERROR(ENOMEM);
            }
            /* Set output data. */
            p_frame_output->size     = p_schro_params->enc_buf_size;
            p_frame_output->p_encbuf = p_schro_params->enc_buf;
            if (SCHRO_PARSE_CODE_IS_INTRA(parse_code) &&
                SCHRO_PARSE_CODE_IS_REFERENCE(parse_code))
                p_frame_output->key_frame = 1;

            /* Parse the coded frame number from the bitstream. Bytes 14
             * through 17 represent the frame number. */
            p_frame_output->frame_num = AV_RB32(enc_buf->data + 13);

            ff_schro_queue_push_back(&p_schro_params->enc_frame_queue,
                                     p_frame_output);
            p_schro_params->enc_buf_size = 0;
            p_schro_params->enc_buf      = NULL;

            schro_buffer_unref(enc_buf);

            break;

        case SCHRO_STATE_NEED_FRAME:
            go = 0;
            break;

        case SCHRO_STATE_AGAIN:
            break;

        default:
            av_log(avctx, AV_LOG_ERROR, "Unknown Schro Encoder state\n");
            return -1;
        }
    }

    /* Pop the next encoded frame off the queue and copy it into the packet. */

    if (p_schro_params->enc_frame_queue.size == 1 &&
        p_schro_params->eos_pulled)
        last_frame_in_sequence = 1;

    p_frame_output = ff_schro_queue_pop(&p_schro_params->enc_frame_queue);

    if (!p_frame_output)
        return 0;

    pkt_size = p_frame_output->size;
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0)
        pkt_size += p_schro_params->enc_buf_size;
    if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
        goto error;

    memcpy(pkt->data, p_frame_output->p_encbuf, p_frame_output->size);
    avctx->coded_frame->key_frame = p_frame_output->key_frame;
    /* Use the frame number of the encoded frame as the pts. This is safe
     * because Dirac is a constant-frame-rate codec and expects its input
     * to be constant frame rate as well. */
    pkt->pts =
    avctx->coded_frame->pts = p_frame_output->frame_num;
    pkt->dts = p_schro_params->dts++;
    enc_size = p_frame_output->size;

    /* Append the end of sequence information to the last frame in the
     * sequence. */
    if (last_frame_in_sequence && p_schro_params->enc_buf_size > 0) {
        memcpy(pkt->data + enc_size, p_schro_params->enc_buf,
               p_schro_params->enc_buf_size);
        enc_size += p_schro_params->enc_buf_size;
        av_freep(&p_schro_params->enc_buf);
        p_schro_params->enc_buf_size = 0;
    }

    if (p_frame_output->key_frame)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

error:
    /* free frame */
    libschroedinger_free_frame(p_frame_output);
    return ret;
}
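The offsets the libschroedinger wrapper pokes at (byte 4 for the parse code, AV_RB32 at offset 13 for the frame number) come straight from the Dirac stream syntax: every parse unit starts with a 13-byte parse-info header ('BBCD' prefix, parse code, next/previous parse offsets), and picture parse units carry a 32-bit picture number immediately after it. A hedged sketch of that layout (helper names are mine, not FFmpeg's or Schroedinger's):

#include <stdint.h>
#include <string.h>

/* Big-endian 32-bit read, equivalent to FFmpeg's AV_RB32(). */
static uint32_t rb32(const uint8_t *p)
{
    return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

/* Read the parse code and picture number from a Dirac picture parse
 * unit: 4-byte 'BBCD' prefix, 1-byte parse code, two 4-byte parse
 * offsets (13 bytes total), then the 32-bit picture number. */
static int dirac_parse_unit(const uint8_t *data, size_t len,
                            int *parse_code, uint32_t *picture_num)
{
    if (len < 17 || memcmp(data, "BBCD", 4))
        return -1;                      /* not a parse unit / too short */
    *parse_code  = data[4];             /* byte the encoder inspects    */
    *picture_num = rb32(data + 13);     /* AV_RB32(enc_buf->data + 13)  */
    return 0;
}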