Example #1
0
/**
 * Initialize the libdirac encoder.
 *
 * Translates the lavc settings in @p avccontext (pixel format, frame size,
 * time base, gop size, quality/bitrate flags) into a dirac encoder context,
 * creates the encoder instance, and allocates the input frame buffer and
 * the encoded-frame queue.
 *
 * @param avccontext codec context whose priv_data is a FfmpegDiracEncoderParams
 * @return 0 on success, -1 on failure
 */
static int libdirac_encode_init(AVCodecContext *avccontext)
{

    FfmpegDiracEncoderParams* p_dirac_params = avccontext->priv_data;
    int no_local = 1;  /* fixed switch: local decode/instrumentation disabled */
    int verbose = avccontext->debug;
    VideoFormat preset;

    /* get Dirac preset */
    preset = GetDiracVideoFormatPreset(avccontext);

    /* initialize the encoder context */
    dirac_encoder_context_init (&(p_dirac_params->enc_ctx), preset);

    p_dirac_params->enc_ctx.src_params.chroma =
                                GetDiracChromaFormat(avccontext->pix_fmt);

    if (p_dirac_params->enc_ctx.src_params.chroma == formatNK) {
        /* fixed: message previously ended mid-list with an unclosed '(' */
        av_log (avccontext, AV_LOG_ERROR,
                "Unsupported pixel format %d. This codec supports only "
                "Planar YUV formats (yuv420p, yuv422p, yuv444p)\n",
                avccontext->pix_fmt);
        return -1;
    }

    /* Dirac frame rate is the inverse of the lavc time base */
    p_dirac_params->enc_ctx.src_params.frame_rate.numerator   =
                                             avccontext->time_base.den;
    p_dirac_params->enc_ctx.src_params.frame_rate.denominator =
                                             avccontext->time_base.num;

    p_dirac_params->enc_ctx.src_params.width  = avccontext->width;
    p_dirac_params->enc_ctx.src_params.height = avccontext->height;

    p_dirac_params->frame_size = avpicture_get_size(avccontext->pix_fmt,
                                                    avccontext->width,
                                                    avccontext->height);

    avccontext->coded_frame = &p_dirac_params->picture;

    if (no_local) {
        p_dirac_params->enc_ctx.decode_flag = 0;
        p_dirac_params->enc_ctx.instr_flag  = 0;
    } else {
        p_dirac_params->enc_ctx.decode_flag = 1;
        p_dirac_params->enc_ctx.instr_flag  = 1;
    }

    /* Intra-only sequence */
    if (avccontext->gop_size == 0)
        p_dirac_params->enc_ctx.enc_params.num_L1 = 0;
    else
        avccontext->has_b_frames = 1;

    if (avccontext->flags & CODEC_FLAG_QSCALE) {
        if (avccontext->global_quality != 0) {
            p_dirac_params->enc_ctx.enc_params.qf =
                            avccontext->global_quality / (FF_QP2LAMBDA*10.0);
            /* if it is not default bitrate then send target rate. */
            /* NOTE(review): 200000 is presumably lavc's default bit_rate,
             * used to detect "user did not set a rate" -- TODO confirm */
            if (avccontext->bit_rate >= 1000 &&
                avccontext->bit_rate != 200000) {
               p_dirac_params->enc_ctx.enc_params.trate =
                             avccontext->bit_rate / 1000;
            }
        } else
            /* qscale with global_quality == 0 selects lossless coding */
            p_dirac_params->enc_ctx.enc_params.lossless = 1;
    } else if (avccontext->bit_rate >= 1000)
        p_dirac_params->enc_ctx.enc_params.trate = avccontext->bit_rate / 1000;

    /* for non-preset formats at the default bitrate, let the encoder pick */
    if ((preset > VIDEO_FORMAT_QCIF || preset < VIDEO_FORMAT_QSIF525) &&
         avccontext->bit_rate == 200000) {
        p_dirac_params->enc_ctx.enc_params.trate = 0;
    }

    if (avccontext->flags & CODEC_FLAG_INTERLACED_ME) {
        /* all material can be coded as interlaced or progressive
         * irrespective of the type of source material */
        p_dirac_params->enc_ctx.enc_params.picture_coding_mode = 1;
    }

    p_dirac_params->p_encoder = dirac_encoder_init (&(p_dirac_params->enc_ctx),
                                                    verbose );

    if (!p_dirac_params->p_encoder) {
        /* fixed: added missing '\n'; return -1 instead of EXIT_FAILURE
         * (a positive value), matching the error convention above */
        av_log(avccontext, AV_LOG_ERROR,
               "Unrecoverable Error: dirac_encoder_init failed.\n");
        return -1;
    }

    /* allocate enough memory for the incoming data */
    p_dirac_params->p_in_frame_buf = av_malloc(p_dirac_params->frame_size);
    if (!p_dirac_params->p_in_frame_buf) {
        /* fixed: allocation was previously unchecked (NULL deref later);
         * release the encoder so it does not leak on this path */
        dirac_encoder_close(p_dirac_params->p_encoder);
        p_dirac_params->p_encoder = NULL;
        return -1;
    }

    /* initialize the encoded frame queue */
    ff_dirac_schro_queue_init(&p_dirac_params->enc_frame_queue);

    return 0;
}
Example #2
0
/****************************************************************************
 * Encode: the whole thing
 ****************************************************************************
 * Encodes one input picture and spits out any complete encapsulation
 * units the encoder has finished (an encapsulation unit ends with a
 * picture data-unit).  Returns a chain of output blocks, or NULL when
 * nothing is ready yet or an error occurred.  The dirac encoder itself
 * is created lazily on the first picture, because interlacing is only
 * known once a picture arrives.
 ****************************************************************************/
static block_t *Encode( encoder_t *p_enc, picture_t *p_pic )
{
    encoder_sys_t *p_sys = p_enc->p_sys;
    block_t *p_block, *p_output_chain = NULL;
    int i_plane, i_line, i_width, i_src_stride;
    uint8_t *p_dst;

    /* no flushing support: a NULL picture produces no output */
    if( !p_pic ) return NULL;
    /* we only know if the sequence is interlaced when the first
     * picture arrives, so final setup is done here */
    /* XXX todo, detect change of interlace */
    p_sys->ctx.src_params.topfieldfirst = p_pic->b_top_field_first;
    p_sys->ctx.src_params.source_sampling = !p_pic->b_progressive;

    if( p_sys->b_auto_field_coding )
        p_sys->ctx.enc_params.picture_coding_mode = !p_pic->b_progressive;

    if( !p_sys->p_dirac )
    {
        date_t date;
        /* Initialise the encoder with the encoder context */
        p_sys->p_dirac = dirac_encoder_init( &p_sys->ctx, 0 );
        if( !p_sys->p_dirac )
        {
            msg_Err( p_enc, "Failed to initialize dirac encoder" );
            return NULL;
        }
        /* compute the PTS offset caused by the encoder's reordering
         * delay, expressed in units of the input frame rate */
        date_Init( &date, p_enc->fmt_in.video.i_frame_rate, p_enc->fmt_in.video.i_frame_rate_base );
#if DIRAC_RESEARCH_VERSION_ATLEAST(1,0,2)
        int i_delayinpics = dirac_encoder_pts_offset( p_sys->p_dirac );
        /* the encoder reports its delay in pictures; with field coding
         * (picture_coding_mode == 1) there are two pictures per frame */
        i_delayinpics /= p_sys->ctx.enc_params.picture_coding_mode + 1;
        date_Increment( &date, i_delayinpics );
#else
        /* older libdirac cannot report its delay; assume one frame */
        date_Increment( &date, 1 );
#endif
        p_sys->i_pts_offset = date_Get( &date );

        /* picture_coding_mode = 1 == FIELD_CODING, two pictures are produced
         * for each frame input. Calculate time between fields for offsetting
         * the second field later. */
        if( 1 == p_sys->ctx.enc_params.picture_coding_mode )
        {
            date_Set( &date, 0 );
            date_Increment( &date, 1 );
            p_sys->i_field_time = date_Get( &date ) / 2;
        }
    }

    /* Copy input picture into encoder input buffer (stride by stride) */
    /* Would be lovely to just pass the picture in, but there is noway for the
     * library to free it */
    /* NOTE(review): assumes p_sys->p_buffer_in is large enough for the sum
     * of all visible plane data -- presumably guaranteed at open; confirm */
    p_dst = p_sys->p_buffer_in;
    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    {
        uint8_t *p_src = p_pic->p[i_plane].p_pixels;
        i_width = p_pic->p[i_plane].i_visible_pitch;
        i_src_stride = p_pic->p[i_plane].i_pitch;

        /* copy only the visible part of each line, packing the plane
         * tightly (no stride padding) into the encoder buffer */
        for( i_line = 0; i_line < p_pic->p[i_plane].i_visible_lines; i_line++ )
        {
            vlc_memcpy( p_dst, p_src, i_width );
            p_dst += i_width;
            p_src += i_src_stride;
        }
    }

    /* Load one frame of data into encoder */
    if( dirac_encoder_load( p_sys->p_dirac, p_sys->p_buffer_in,
                            p_sys->i_buffer_in ) < 0 )
    {
        msg_Dbg( p_enc, "dirac_encoder_load() error" );
        return NULL;
    }

    /* store pts in a lookaside buffer, so that the same pts may
     * be used for the picture in coded order */
    StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date );
    p_sys->i_input_picnum++;

    /* store dts in a queue, so that they appear in order in
     * coded order */
    /* a 1-byte block is used purely as a carrier for the dts value */
    p_block = block_New( p_enc, 1 );
    if( !p_block )
        return NULL;
    p_block->i_dts = p_pic->date - p_sys->i_pts_offset;
    block_FifoPut( p_sys->p_dts_fifo, p_block );
    p_block = NULL;

    /* for field coding mode, insert an extra value into both the
     * pts lookaside buffer and dts queue, offset to correspond
     * to a one field delay. */
    if( 1 == p_sys->ctx.enc_params.picture_coding_mode )
    {
        StorePicturePTS( p_enc, p_sys->i_input_picnum, p_pic->date + p_sys->i_field_time );
        p_sys->i_input_picnum++;

        p_block = block_New( p_enc, 1 );
        if( !p_block )
            return NULL;
        p_block->i_dts = p_pic->date - p_sys->i_pts_offset + p_sys->i_field_time;
        block_FifoPut( p_sys->p_dts_fifo, p_block );
        p_block = NULL;
    }

    dirac_encoder_state_t state;
    /* Retrieve encoded frames from encoder */
    do
    {
        /* hand the encoder our output scratch buffer to fill */
        p_sys->p_dirac->enc_buf.buffer = p_sys->p_buffer_out;
        p_sys->p_dirac->enc_buf.size = p_sys->i_buffer_out;
        state = dirac_encoder_output( p_sys->p_dirac );
        switch( state )
        {
        case ENC_STATE_AVAIL: {
            uint32_t pic_num;

            /* extract data from encoder temporary buffer. */
            /* NOTE(review): returning NULL here leaks any blocks already
             * gathered in p_output_chain and p_sys->p_chain -- confirm
             * whether this is acceptable on OOM */
            p_block = block_New( p_enc, p_sys->p_dirac->enc_buf.size );
            if( !p_block )
                return NULL;
            memcpy( p_block->p_buffer, p_sys->p_dirac->enc_buf.buffer,
                    p_sys->p_dirac->enc_buf.size );

            /* if some flags were set for a previous block, prevent
             * them from getting lost */
            if( p_sys->p_chain )
                p_block->i_flags |= p_sys->p_chain->i_flags;

            /* store all extracted blocks in a chain and gather up when an
             * entire encapsulation unit is avaliable (ends with a picture) */
            block_ChainAppend( &p_sys->p_chain, p_block );

            /* Presence of a Sequence header indicates a seek point */
            /* byte 4 of the parse-info header is the parse code;
             * 0 == sequence header */
            if( 0 == p_block->p_buffer[4] )
            {
                p_block->i_flags |= BLOCK_FLAG_TYPE_I;

                if( !p_enc->fmt_out.p_extra ) {
                    /* hand-crafted Dirac End-of-Sequence data unit */
                    const uint8_t eos[] = { 'B','B','C','D',0x10,0,0,0,13,0,0,0,0 };
                    /* "next parse offset" field == size of the sequence header */
                    uint32_t len = GetDWBE( p_block->p_buffer + 5 );
                    /* if it hasn't been done so far, stash a copy of the
                     * sequence header for muxers such as ogg */
                    /* The OggDirac spec advises that a Dirac EOS DataUnit
                     * is appended to the sequence header to allow guard
                     * against poor streaming servers */
                    /* XXX, should this be done using the packetizer ? */
                    p_enc->fmt_out.p_extra = malloc( len + sizeof(eos) );
                    if( !p_enc->fmt_out.p_extra )
                        return NULL;
                    memcpy( p_enc->fmt_out.p_extra, p_block->p_buffer, len);
                    memcpy( (uint8_t*)p_enc->fmt_out.p_extra + len, eos, sizeof(eos) );
                    /* patch the EOS unit's "previous parse offset" so it
                     * points back at the start of the sequence header */
                    SetDWBE( (uint8_t*)p_enc->fmt_out.p_extra + len + 10, len );
                    p_enc->fmt_out.i_extra = len + sizeof(eos);
                }
            }

            if( ReadDiracPictureNumber( &pic_num, p_block ) )
            {
                /* Finding a picture terminates an ecapsulation unit, gather
                 * all data and output; use the next dts value queued up
                 * and find correct pts in the tlb */
                p_block = block_FifoGet( p_sys->p_dts_fifo );
                p_sys->p_chain->i_dts = p_block->i_dts;
                p_sys->p_chain->i_pts = GetPicturePTS( p_enc, pic_num );
                block_Release( p_block );
                block_ChainAppend( &p_output_chain, block_ChainGather( p_sys->p_chain ) );
                p_sys->p_chain = NULL;
            } else {
                p_block = NULL;
            }
            break;
            }

        case ENC_STATE_BUFFER:
            /* encoder needs more input before it can emit anything */
            break;
        case ENC_STATE_INVALID:
        default:
            /* NOTE(review): encoder errors are silently ignored; the loop
             * simply terminates -- confirm this is intended */
            break;
        }
    } while( state == ENC_STATE_AVAIL );

    return p_output_chain;
}