Example #1
bool gst_vlc_picture_plane_allocator_hold(
    GstVlcPicturePlaneAllocator *p_allocator, GstBuffer *p_buffer )
{
    picture_t* p_pic = NULL;
    decoder_t* p_dec = p_allocator->p_dec;
    GstVlcPicturePlane *p_mem;
    int i_plane;

    if( !decoder_UpdateVideoFormat( p_dec ) )
        p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        msg_Err( p_allocator->p_dec, "failed to acquire picture from vout" );
        return false;
    }

    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    {
        p_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory ( p_buffer,
                i_plane );
        p_mem->p_pic = p_pic;
        p_mem->p_plane = &p_pic->p[ i_plane ];
    }

    return true;
}
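
The example above shows the pattern most snippets on this page follow: negotiate the output format with decoder_UpdateVideoFormat(), only then ask the video output for a buffer with decoder_NewPicture(), and treat a NULL picture as a hard failure. A minimal sketch of that pattern is shown below; it is not taken from any of the projects listed here, and FillPlanes() is a hypothetical stand-in for the per-codec pixel copy:

/* Sketch only: the usual decoder_UpdateVideoFormat()/decoder_NewPicture()
 * sequence, assuming the caller owns p_block. FillPlanes() is hypothetical. */
static picture_t *GetDecodedPicture( decoder_t *p_dec, block_t *p_block )
{
    picture_t *p_pic = NULL;

    if( !decoder_UpdateVideoFormat( p_dec ) )   /* 0 means the vout accepted fmt_out */
        p_pic = decoder_NewPicture( p_dec );    /* may still return NULL */
    if( p_pic == NULL )
    {
        block_Release( p_block );               /* drop the input on failure */
        return NULL;
    }

    FillPlanes( p_dec, p_block, p_pic );        /* hypothetical: copy pixels into p_pic->p[] */
    p_pic->date = p_block->i_pts;               /* stamp the picture with the block PTS */

    block_Release( p_block );
    return p_pic;
}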
Example #2
File: invmem.c Project: Kafay/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with a complete compressed frame.
 ****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;

    if( !pp_block || !*pp_block ) return NULL;

    p_block = *pp_block;

    // create new picture
    if( p_sys->p_pic != NULL )
        picture_Release( p_sys->p_pic );
    p_sys->p_pic = decoder_NewPicture( p_dec );
    if( p_sys->p_pic == NULL )
    {
        msg_Err( p_dec, "cannot get picture" );
        block_Release( *pp_block ); *pp_block = NULL;
        return NULL;
    }
    p_sys->p_pic->b_force = true;
    p_sys->p_pic->p->i_pitch = p_dec->p_sys->i_pitch;
    p_sys->p_pic->date = p_block->i_pts > 0 ? p_block->i_pts : p_block->i_dts;

    // lock input and copy to picture
    p_sys->p_pic->p->p_pixels = p_sys->pf_lock( p_dec->p_sys->p_data );

    // unlock input
    p_sys->pf_unlock( p_dec->p_sys->p_data );

    block_Release( *pp_block ); *pp_block = NULL;
    return p_sys->p_pic;
}
Example #3
File: theora.c Project: cobr123/qtVlc
/*****************************************************************************
 * DecodePacket: decodes a Theora packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;
    yuv_buffer yuv;

    theora_decode_packetin( &p_sys->td, p_oggpacket );

    /* Check for keyframe */
    if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
        !(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
        p_sys->b_decoded_first_keyframe = true;

    /* If we haven't seen a single keyframe yet, don't let Theora decode
     * anything, otherwise we'll get display artifacts.  (This is impossible
     * in the general case, but can happen if e.g. we play a network stream
     * using a timed URL, such that the server doesn't start the video with a
     * keyframe). */
    if( p_sys->b_decoded_first_keyframe )
        theora_decode_YUVout( &p_sys->td, &yuv );
    else
        return NULL;

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) return NULL;

    theora_CopyPicture( p_pic, &yuv );

    p_pic->date = p_sys->i_pts;

    return p_pic;
}
Example #4
File: fake.c Project: Kafay/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = (decoder_sys_t*) p_dec->p_sys;
    picture_t *p_pic;

    if( pp_block == NULL || !*pp_block ) return NULL;
    p_pic = decoder_NewPicture( p_dec );
    if( p_pic == NULL )
    {
        msg_Err( p_dec, "cannot get picture" );
        goto error;
    }

    if( p_sys->b_reload && (mdate() >= p_sys->i_next) )
    {
        var_TriggerCallback( p_dec, "fake-file" );
        /* next period */
        p_sys->i_next = (mtime_t)(p_sys->i_reload + mdate());
    }
    vlc_mutex_lock( &p_dec->p_sys->lock );
    picture_Copy( p_pic, p_dec->p_sys->p_image );
    vlc_mutex_unlock( &p_dec->p_sys->lock );

    p_pic->date = (*pp_block)->i_pts;

error:
    block_Release( *pp_block );
    *pp_block = NULL;

    return p_pic;
}
Example #5
/*****************************************************************************
 * DecodeFrame: decodes a video frame.
 *****************************************************************************/
static picture_t *DecodeFrame( decoder_t *p_dec, block_t *p_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        block_Release( p_block );
        return NULL;
    }

    FillPicture( p_dec, p_block, p_pic );

    p_pic->date = date_Get( &p_sys->pts );
    if( p_block->i_flags & BLOCK_FLAG_INTERLACED_MASK )
    {
        p_pic->b_progressive = false;
        p_pic->i_nb_fields = 2;
        if( p_block->i_flags & BLOCK_FLAG_TOP_FIELD_FIRST )
            p_pic->b_top_field_first = true;
        else
            p_pic->b_top_field_first = false;
    }
    else
        p_pic->b_progressive = true;

    block_Release( p_block );
    return p_pic;
}
Example #6
File: video.c Project: FLYKingdom/vlc
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
                                            AVCodecContext *p_context )
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    p_dec->fmt_out.video.i_width = p_context->width;
    p_dec->fmt_out.video.i_height = p_context->height;

    if( !p_context->width || !p_context->height )
    {
        return NULL; /* invalid display size */
    }

    if( !p_sys->p_va && GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) )
    {
        /* we are doomed, but not really, because most codecs set their pix_fmt
         * much later
         * FIXME does it make sense here ? */
        p_dec->fmt_out.video.i_chroma = VLC_CODEC_I420;
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* If an aspect-ratio was specified in the input format then force it */
    if( p_dec->fmt_in.video.i_aspect )
    {
        p_dec->fmt_out.video.i_aspect = p_dec->fmt_in.video.i_aspect;
    }
    else
    {
        p_dec->fmt_out.video.i_aspect =
            VOUT_ASPECT_FACTOR * ( av_q2d(p_context->sample_aspect_ratio) *
                p_context->width / p_context->height );
        p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
        p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

        if( p_dec->fmt_out.video.i_aspect == 0 )
        {
            p_dec->fmt_out.video.i_aspect =
                VOUT_ASPECT_FACTOR * p_context->width / p_context->height;
        }
    }

    if( p_dec->fmt_in.video.i_frame_rate > 0 &&
        p_dec->fmt_in.video.i_frame_rate_base > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate =
            p_dec->fmt_in.video.i_frame_rate;
        p_dec->fmt_out.video.i_frame_rate_base =
            p_dec->fmt_in.video.i_frame_rate_base;
    }
    else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
        p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num;
    }

    return decoder_NewPicture( p_dec );
}
Example #7
File: video.c Project: bobwxb/vlc
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
                                            AVCodecContext *p_context )
{
    bool hwaccel = p_dec->p_sys->p_va != NULL;

    if (lavc_UpdateVideoFormat(p_dec, p_context, hwaccel))
        return NULL;
    return decoder_NewPicture( p_dec );
}
Example #8
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************/
static picture_t *DecodeBlock(decoder_t *p_dec, block_t **pp_block)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    picture_t *p_pic;
    cedarx_picture_t picture;
    mtime_t i_pts;

    if(!p_sys || !pp_block)
        return NULL;
        
    p_block = *pp_block;
    if(p_block && !(p_block->i_flags&(BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED)))
    {
        if (p_block->i_pts > VLC_TS_INVALID) {
            i_pts = p_block->i_pts;
        } else {
            i_pts = -1;
        }
    
        if (libcedarx_decoder_add_stream(p_block->p_buffer, p_block->i_buffer, i_pts, 0) < 0)
            msg_Warn(p_dec, "Failed to add stream!");
            
        /* Make sure we don't reuse the same timestamps twice */
        p_block->i_pts = p_block->i_dts = VLC_TS_INVALID;
        block_Release(p_block);
        *pp_block = NULL;

#ifndef DECODE_USE_ASYNC_THREAD
        if (libcedarx_decoder_decode_stream() < 0)
            msg_Warn(p_dec, "Failed to decode stream!");
#endif
    }
    
    if (!libcedarx_display_request_frame(&picture)) {
        if (!p_dec->fmt_out.video.i_width || !p_dec->fmt_out.video.i_height) {
            p_dec->fmt_out.video.i_width = picture.width;
            p_dec->fmt_out.video.i_height = picture.height;
        }
        
        p_pic = decoder_NewPicture(p_dec);
        if (p_pic) {
            p_pic->b_progressive     = picture.interlaced ? false : true;
            p_pic->b_top_field_first = picture.top_field_first ? true : false;
            p_pic->i_nb_fields       = picture.interlaced ? 2 : 1;
            p_pic->p[0].p_pixels     = picture.id;
            p_pic->p[1].p_pixels     = p_pic->pf_release;
            p_pic->pf_release        = Release;
            p_pic->date              = picture.pts;
            return p_pic;
        }
    }

    return NULL;
}
Example #9
File: libmpeg2.c Project: Geal/vlc
/*****************************************************************************
 * GetNewPicture: Get a new picture from the vout and set the buf struct
 *****************************************************************************/
static picture_t *GetNewPicture( decoder_t *p_dec )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;

    p_dec->fmt_out.video.i_width = p_sys->p_info->sequence->width;
    p_dec->fmt_out.video.i_visible_width =
        p_sys->p_info->sequence->picture_width;
    p_dec->fmt_out.video.i_height = p_sys->p_info->sequence->height;
    p_dec->fmt_out.video.i_visible_height =
        p_sys->p_info->sequence->picture_height;
    p_dec->fmt_out.video.i_sar_num = p_sys->i_sar_num;
    p_dec->fmt_out.video.i_sar_den = p_sys->i_sar_den;

    if( p_sys->p_info->sequence->frame_period > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate =
            (uint32_t)( (uint64_t)1001000000 * 27 /
                        p_sys->p_info->sequence->frame_period );
        p_dec->fmt_out.video.i_frame_rate_base = 1001;
    }

    p_dec->fmt_out.i_codec =
        ( p_sys->p_info->sequence->chroma_height <
          p_sys->p_info->sequence->height ) ?
        VLC_CODEC_I420 : VLC_CODEC_I422;

    /* Get a new picture */
    if( decoder_UpdateVideoFormat( p_dec ) )
        return NULL;
    p_pic = decoder_NewPicture( p_dec );

    if( p_pic == NULL )
        return NULL;

    p_pic->b_progressive = p_sys->p_info->current_picture != NULL ?
        p_sys->p_info->current_picture->flags & PIC_FLAG_PROGRESSIVE_FRAME : 1;
    p_pic->b_top_field_first = p_sys->p_info->current_picture != NULL ?
        p_sys->p_info->current_picture->flags & PIC_FLAG_TOP_FIELD_FIRST : 1;
    p_pic->i_nb_fields = p_sys->p_info->current_picture != NULL ?
        p_sys->p_info->current_picture->nb_fields : 2;

    return p_pic;
}
Example #10
File: rawvideo.c Project: chouquette/vlc
/*****************************************************************************
 * DecodeFrame: decodes a video frame.
 *****************************************************************************/
static int DecodeFrame( decoder_t *p_dec, block_t *p_block )
{
    if( p_block == NULL ) /* No Drain */
        return VLCDEC_SUCCESS;

    p_block = DecodeBlock( p_dec, p_block );
    if( p_block == NULL )
        return VLCDEC_SUCCESS;

    decoder_sys_t *p_sys = p_dec->p_sys;

    /* Get a new picture */
    picture_t *p_pic = NULL;
    if( !decoder_UpdateVideoFormat( p_dec ) )
        p_pic = decoder_NewPicture( p_dec );
    if( p_pic == NULL )
    {
        block_Release( p_block );
        return VLCDEC_SUCCESS;
    }

    FillPicture( p_dec, p_block, p_pic );

    /* Date management: 1 frame per packet */
    p_pic->date = date_Get( &p_dec->p_sys->pts );
    date_Increment( &p_sys->pts, 1 );

    if( p_block->i_flags & BLOCK_FLAG_INTERLACED_MASK )
    {
        p_pic->b_progressive = false;
        p_pic->i_nb_fields = (p_block->i_flags & BLOCK_FLAG_SINGLE_FIELD) ? 1 : 2;
        if( p_block->i_flags & BLOCK_FLAG_TOP_FIELD_FIRST )
            p_pic->b_top_field_first = true;
        else
            p_pic->b_top_field_first = false;
    }
    else
        p_pic->b_progressive = true;

    block_Release( p_block );
    decoder_QueueVideo( p_dec, p_pic );
    return VLCDEC_SUCCESS;
}
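
Example #10 also relies on the date_* helpers to produce one timestamp per packet. A short hedged sketch of that idiom follows; the 25/1 frame rate passed to date_Init() is an assumption for illustration only:

/* Sketch of the date_* timestamp idiom used above (frame rate is assumed). */
date_t pts;
date_Init( &pts, 25, 1 );                 /* 25 frames per second time base */
date_Set( &pts, p_block->i_pts );         /* seed from the first dated block */

p_pic->date = date_Get( &pts );           /* timestamp for the current picture */
date_Increment( &pts, 1 );                /* advance the clock by one frame */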
Example #11
File: theora.c Project: mstorsjo/vlc
/*****************************************************************************
 * DecodePacket: decodes a Theora packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;
    th_ycbcr_buffer ycbcr;

    /* TODO: Implement _granpos (3rd parameter here) and add the
     * call to TH_DECCTL_SET_GRANPOS after seek */
    /* TODO: If the return is TH_DUPFRAME, we don't need to display a new
     * frame, but we do need to keep displaying the previous one. */
    if (th_decode_packetin( p_sys->tcx, p_oggpacket, NULL ) < 0)
        return NULL; /* bad packet */

    /* Check for keyframe */
    if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
        !(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
        p_sys->b_decoded_first_keyframe = true;

    /* If we haven't seen a single keyframe yet, don't let Theora decode
     * anything, otherwise we'll get display artifacts.  (This is impossible
     * in the general case, but can happen if e.g. we play a network stream
     * using a timed URL, such that the server doesn't start the video with a
     * keyframe). */
    if( !p_sys->b_decoded_first_keyframe )
        return NULL; /* Wait until we've decoded the first keyframe */

    if( th_decode_ycbcr_out( p_sys->tcx, ycbcr ) ) /* returns 0 on success */
        return NULL;

    /* Get a new picture */
    if( decoder_UpdateVideoFormat( p_dec ) )
        return NULL;
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) return NULL;

    theora_CopyPicture( p_pic, ycbcr );

    p_pic->date = p_sys->i_pts;
    p_pic->b_progressive = true;

    return p_pic;
}
Example #12
File: daala.c Project: 9034725985/vlc
/*****************************************************************************
 * DecodePacket: decodes a Daala packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic;
    od_img ycbcr;

    if (daala_decode_packet_in( p_sys->dcx, &ycbcr, p_oggpacket ) < 0)
        return NULL; /* bad packet */

    /* Check for keyframe */
    if( daala_packet_iskeyframe( p_oggpacket->packet, p_oggpacket->bytes ) )
        p_sys->b_decoded_first_keyframe = true;

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) return NULL;

    daala_CopyPicture( p_pic, &ycbcr );

    p_pic->date = p_sys->i_pts;

    return p_pic;
}
Example #13
File: crystalhd.c Project: 0xheart0/vlc
static BC_STATUS ourCallback(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut)
{
    VLC_UNUSED(width); VLC_UNUSED(height); VLC_UNUSED(stride);

    decoder_t *p_dec          = (decoder_t *)shnd;
    BC_DTS_PROC_OUT *proc_out = p_dec->p_sys->proc_out;
    BC_DTS_PROC_OUT *proc_in  = (BC_DTS_PROC_OUT*)pOut;

    /* Direct Rendering */
    /* Do not allocate for the second-field in the pair, in interlaced */
    if( !(proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) ||
        !(proc_in->PicInfo.flags & VDEC_FLAG_FIELDPAIR) )
        p_dec->p_sys->p_pic = decoder_NewPicture( p_dec );

    /* */
    picture_t *p_pic = p_dec->p_sys->p_pic;
    if( !p_pic )
        return BC_STS_ERROR;

    /* Interlacing */
    p_pic->b_progressive     = !(proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC);
    p_pic->b_top_field_first = !(proc_in->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
    p_pic->i_nb_fields       = p_pic->b_progressive? 1: 2;

    /* Filling out the struct */
    proc_out->Ybuff      = !(proc_in->PicInfo.flags & VDEC_FLAG_FIELDPAIR) ?
                             &p_pic->p[0].p_pixels[0] :
                             &p_pic->p[0].p_pixels[p_pic->p[0].i_pitch];
    proc_out->YbuffSz    = 2 * p_pic->p[0].i_pitch;
    proc_out->StrideSz   = (proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC)?
                            2 * (p_pic->p[0].i_pitch/2) - p_dec->fmt_out.video.i_width:
                            p_pic->p[0].i_pitch/2 - p_dec->fmt_out.video.i_width;
    proc_out->PoutFlags |= BC_POUT_FLAGS_STRIDE;              /* Trust Stride info */

    return BC_STS_SUCCESS;
}
Example #14
File: png.c Project: Janak-Nirmal/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with a complete compressed frame.
 ****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    picture_t *p_pic = 0;

    png_uint_32 i_width, i_height;
    int i_color_type, i_interlace_type, i_compression_type, i_filter_type;
    int i_bit_depth, i;

    png_structp p_png;
    png_infop p_info, p_end_info;
    png_bytep *p_row_pointers = NULL;

    if( !pp_block || !*pp_block ) return NULL;

    p_block = *pp_block;
    p_sys->b_error = false;

    if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
    {
        block_Release( p_block ); *pp_block = NULL;
        return NULL;
    }

    p_png = png_create_read_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
    if( p_png == NULL )
    {
        block_Release( p_block ); *pp_block = NULL;
        return NULL;
    }

    p_info = png_create_info_struct( p_png );
    if( p_info == NULL )
    {
        png_destroy_read_struct( &p_png, NULL, NULL );
        block_Release( p_block ); *pp_block = NULL;
        return NULL;
    }

    p_end_info = png_create_info_struct( p_png );
    if( p_end_info == NULL )
    {
        png_destroy_read_struct( &p_png, &p_info, NULL );
        block_Release( p_block ); *pp_block = NULL;
        return NULL;
    }

    /* libpng longjmp's there in case of error */
    if( setjmp( png_jmpbuf( p_png ) ) )
        goto error;

    png_set_read_fn( p_png, (void *)p_block, user_read );
    png_set_error_fn( p_png, (void *)p_dec, user_error, user_warning );

    png_read_info( p_png, p_info );
    if( p_sys->b_error ) goto error;

    png_get_IHDR( p_png, p_info, &i_width, &i_height,
                  &i_bit_depth, &i_color_type, &i_interlace_type,
                  &i_compression_type, &i_filter_type);
    if( p_sys->b_error ) goto error;

    /* Set output properties */
    p_dec->fmt_out.i_codec = VLC_CODEC_RGBA;
    p_dec->fmt_out.video.i_visible_width = p_dec->fmt_out.video.i_width = i_width;
    p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height = i_height;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;
    p_dec->fmt_out.video.i_rmask = 0x000000ff;
    p_dec->fmt_out.video.i_gmask = 0x0000ff00;
    p_dec->fmt_out.video.i_bmask = 0x00ff0000;

    if( i_color_type == PNG_COLOR_TYPE_PALETTE )
        png_set_palette_to_rgb( p_png );

    if( i_color_type == PNG_COLOR_TYPE_GRAY ||
        i_color_type == PNG_COLOR_TYPE_GRAY_ALPHA )
          png_set_gray_to_rgb( p_png );

    /* Strip to 8 bits per channel */
    if( i_bit_depth == 16 ) png_set_strip_16( p_png );

    if( png_get_valid( p_png, p_info, PNG_INFO_tRNS ) )
    {
        png_set_tRNS_to_alpha( p_png );
    }
    else if( !(i_color_type & PNG_COLOR_MASK_ALPHA) )
    {
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
    }

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic ) goto error;

    /* Decode picture */
    p_row_pointers = malloc( sizeof(png_bytep) * i_height );
    if( !p_row_pointers )
        goto error;
    for( i = 0; i < (int)i_height; i++ )
        p_row_pointers[i] = p_pic->p->p_pixels + p_pic->p->i_pitch * i;

    png_read_image( p_png, p_row_pointers );
    if( p_sys->b_error ) goto error;
    png_read_end( p_png, p_end_info );
    if( p_sys->b_error ) goto error;

    png_destroy_read_struct( &p_png, &p_info, &p_end_info );
    free( p_row_pointers );

    p_pic->date = p_block->i_pts > VLC_TS_INVALID ? p_block->i_pts : p_block->i_dts;

    block_Release( p_block ); *pp_block = NULL;
    return p_pic;

 error:

    free( p_row_pointers );
    png_destroy_read_struct( &p_png, &p_info, &p_end_info );
    block_Release( p_block ); *pp_block = NULL;
    return NULL;
}
Example #15
File: svg.c Project: mstorsjo/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with a complete image.
 ****************************************************************************/
static int DecodeBlock( decoder_t *p_dec, block_t *p_block )
{
    decoder_sys_t *p_sys  = (decoder_sys_t *) p_dec->p_sys;
    picture_t *p_pic = NULL;
    int32_t i_width, i_height;

    RsvgHandle *rsvg = NULL;
    cairo_surface_t *surface = NULL;
    cairo_t *cr = NULL;

    if( p_block == NULL ) /* No Drain */
        return VLCDEC_SUCCESS;

    if( p_block->i_flags & BLOCK_FLAG_CORRUPTED)
    {
        block_Release( p_block );
        return VLCDEC_SUCCESS;
    }

    rsvg = rsvg_handle_new_from_data( p_block->p_buffer, p_block->i_buffer, NULL );
    if( !rsvg )
        goto done;

    RsvgDimensionData dim;
    rsvg_handle_get_dimensions( rsvg, &dim );

    if( p_sys->f_scale > 0.0 )
    {
        i_width  = (int32_t)(p_sys->f_scale * dim.width);
        i_height = (int32_t)(p_sys->f_scale * dim.height);
    }
    else
    {
        /* Keep aspect */
        if( p_sys->i_width < 0 && p_sys->i_height > 0 )
        {
            i_width  = dim.width * p_sys->i_height / dim.height;
            i_height = p_sys->i_height;
        }
        else if( p_sys->i_width > 0 && p_sys->i_height < 0 )
        {
            i_width  = p_sys->i_width;
            i_height = dim.height * p_sys->i_width / dim.width;
        }
        else if( p_sys->i_width > 0 && p_sys->i_height > 0 )
        {
            i_width  = dim.width * p_sys->i_height / dim.height;
            i_height = p_sys->i_height;
        }
        else
        {
            i_width  = dim.width;
            i_height = dim.height;
        }
    }

    p_dec->fmt_out.i_codec =
    p_dec->fmt_out.video.i_chroma = VLC_CODEC_BGRA;
    p_dec->fmt_out.video.i_width  = i_width;
    p_dec->fmt_out.video.i_height = i_height;
    p_dec->fmt_out.video.i_visible_width  = i_width;
    p_dec->fmt_out.video.i_visible_height = i_height;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;
    p_dec->fmt_out.video.i_rmask = 0x80800000; /* Since librsvg v1.0 */
    p_dec->fmt_out.video.i_gmask = 0x0000ff00;
    p_dec->fmt_out.video.i_bmask = 0x000000ff;
    video_format_FixRgb(&p_dec->fmt_out.video);

    /* Get a new picture */
    if( decoder_UpdateVideoFormat( p_dec ) )
        goto done;
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
        goto done;

    /* NOTE: Do not use the stride calculation from cairo, because it is wrong:
     * stride = cairo_format_stride_for_width(CAIRO_FORMAT_ARGB32, dim.width);
     * Use the stride from VLC its picture_t::p[0].i_pitch, which is correct.
     */
    memset(p_pic->p[0].p_pixels, 0, p_pic->p[0].i_pitch * p_pic->p[0].i_lines);
    surface = cairo_image_surface_create_for_data( p_pic->p->p_pixels,
                                                   CAIRO_FORMAT_ARGB32,
                                                   i_width, i_height,
                                                   p_pic->p[0].i_pitch );
    if( !surface )
    {
        picture_Release( p_pic );
        p_pic = NULL;
        goto done;
    }

    /* Decode picture */
    cr = cairo_create( surface );
    if( !cr )
    {
        picture_Release( p_pic );
        p_pic = NULL;
        goto done;
    }

    if ( i_width != dim.width || i_height != dim.height )
    {
        double sw, sh;
        if ( p_sys->f_scale > 0.0 && !(p_sys->i_width > 0 || p_sys->i_height > 0) )
            sw = sh = p_sys->f_scale;
        else
        {
            double aspect = (double) (dim.width * p_dec->fmt_out.video.i_sar_num) /
                    (dim.height * p_dec->fmt_out.video.i_sar_den);
            sw = aspect * i_width / dim.width;
            sh = aspect * i_height / dim.height;
        }
        cairo_scale(cr, sw, sh);
    }

    if( !rsvg_handle_render_cairo( rsvg, cr ) )
    {
        picture_Release( p_pic );
        p_pic = NULL;
        goto done;
    }

    p_pic->date = p_block->i_pts != VLC_TICK_INVALID ? p_block->i_pts : p_block->i_dts;

done:
    if( rsvg )
        g_object_unref( G_OBJECT( rsvg ) );
    if( cr )
        cairo_destroy( cr );
    if( surface )
        cairo_surface_destroy( surface );

    block_Release( p_block );
    if( p_pic != NULL )
        decoder_QueueVideo( p_dec, p_pic );
    return VLCDEC_SUCCESS;
}
Example #16
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
                                            AVCodecContext *p_context )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    int width = p_context->coded_width;
    int height = p_context->coded_height;

    if( p_sys->p_va == NULL )
    {
        int aligns[AV_NUM_DATA_POINTERS];

        avcodec_align_dimensions2(p_context, &width, &height, aligns);
    }


    if( width == 0 || height == 0 || width > 8192 || height > 8192 )
    {
        msg_Err( p_dec, "Invalid frame size %dx%d.", width, height );
        return NULL; /* invalid display size */
    }
    p_dec->fmt_out.video.i_width = width;
    p_dec->fmt_out.video.i_height = height;

    if( width != p_context->width || height != p_context->height )
    {
        p_dec->fmt_out.video.i_visible_width = p_context->width;
        p_dec->fmt_out.video.i_visible_height = p_context->height;
    }
    else
    {
        p_dec->fmt_out.video.i_visible_width = width;
        p_dec->fmt_out.video.i_visible_height = height;
    }

    if( !p_sys->p_va && GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) )
    {
        /* we are doomed, but not really, because most codecs set their pix_fmt
         * much later
         * FIXME does it make sense here ? */
        p_dec->fmt_out.video.i_chroma = VLC_CODEC_I420;
    }
    p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;

    /* If an aspect-ratio was specified in the input format then force it */
    if( p_dec->fmt_in.video.i_sar_num > 0 && p_dec->fmt_in.video.i_sar_den > 0 )
    {
        p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num;
        p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den;
    }
    else
    {
        p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
        p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;

        if( !p_dec->fmt_out.video.i_sar_num || !p_dec->fmt_out.video.i_sar_den )
        {
            p_dec->fmt_out.video.i_sar_num = 1;
            p_dec->fmt_out.video.i_sar_den = 1;
        }
    }

    if( p_dec->fmt_in.video.i_frame_rate > 0 &&
        p_dec->fmt_in.video.i_frame_rate_base > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate =
            p_dec->fmt_in.video.i_frame_rate;
        p_dec->fmt_out.video.i_frame_rate_base =
            p_dec->fmt_in.video.i_frame_rate_base;
    }
    else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
    {
        p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
        p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num * __MAX( p_context->ticks_per_frame, 1 );
    }

    return decoder_NewPicture( p_dec );
}
Example #17
/****************************************************************************
 * Decode: the whole thing
 ****************************************************************************/
static picture_t *Decode(decoder_t *dec, block_t **pp_block)
{
    struct vpx_codec_ctx *ctx = &dec->p_sys->ctx;

    block_t *block = *pp_block;
    if (!block)
        return NULL;

    if (block->i_flags & (BLOCK_FLAG_DISCONTINUITY|BLOCK_FLAG_CORRUPTED))
        return NULL;

    /* Associate packet PTS with decoded frame */
    mtime_t *pkt_pts = malloc(sizeof(*pkt_pts));
    if (!pkt_pts) {
        block_Release(block);
        *pp_block = NULL;
        return NULL;
    }

    *pkt_pts = block->i_pts;

    vpx_codec_err_t err;
    err = vpx_codec_decode(ctx, block->p_buffer, block->i_buffer, pkt_pts, 0);

    block_Release(block);
    *pp_block = NULL;

    if (err != VPX_CODEC_OK) {
        free(pkt_pts);
        const char *error  = vpx_codec_error(ctx);
        const char *detail = vpx_codec_error_detail(ctx);
        if (!detail)
            detail = "no specific information";
        msg_Err(dec, "Failed to decode frame: %s (%s)", error, detail);
        return NULL;
    }

    const void *iter = NULL;
    struct vpx_image *img = vpx_codec_get_frame(ctx, &iter);
    if (!img) {
        free(pkt_pts);
        return NULL;
    }

    /* fetches back the PTS */
    pkt_pts = img->user_priv;
    mtime_t pts = *pkt_pts;
    free(pkt_pts);

    if (img->fmt != VPX_IMG_FMT_I420) {
        msg_Err(dec, "Unsupported output colorspace %d", img->fmt);
        return NULL;
    }

    video_format_t *v = &dec->fmt_out.video;

    if (img->d_w != v->i_visible_width || img->d_h != v->i_visible_height) {
        v->i_visible_width = img->d_w;
        v->i_visible_height = img->d_h;
    }

    picture_t *pic = decoder_NewPicture(dec);
    if (!pic)
        return NULL;

    for (int plane = 0; plane < pic->i_planes; plane++ ) {
        uint8_t *src = img->planes[plane];
        uint8_t *dst = pic->p[plane].p_pixels;
        int src_stride = img->stride[plane];
        int dst_stride = pic->p[plane].i_pitch;

        int size = __MIN( src_stride, dst_stride );
        for( int line = 0; line < pic->p[plane].i_visible_lines; line++ ) {
            memcpy( dst, src, size );
            src += src_stride;
            dst += dst_stride;
        }
    }

    pic->b_progressive = true; /* codec does not support interlacing */
    pic->date = pts;

    return pic;
}
Example #18
File: jpeg.c Project: katakk/vlc
/*
 * This function must be fed with a complete compressed frame.
 */
static picture_t *DecodeBlock(decoder_t *p_dec, block_t **pp_block)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    picture_t *p_pic = 0;

    JSAMPARRAY p_row_pointers = NULL;

    if (!pp_block || !*pp_block)
    {
        return NULL;
    }

    p_block = *pp_block;

    if (p_block->i_flags & BLOCK_FLAG_DISCONTINUITY)
    {
        block_Release(p_block);
        *pp_block = NULL;
        return NULL;
    }

    /* libjpeg longjmp's there in case of error */
    if (setjmp(p_sys->setjmp_buffer))
    {
        goto error;
    }

    jpeg_create_decompress(&p_sys->p_jpeg);
    jpeg_mem_src(&p_sys->p_jpeg, p_block->p_buffer, p_block->i_buffer);
    jpeg_read_header(&p_sys->p_jpeg, TRUE);

    p_sys->p_jpeg.out_color_space = JCS_RGB;

    jpeg_start_decompress(&p_sys->p_jpeg);

    /* Set output properties */
    p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
    p_dec->fmt_out.video.i_width = p_sys->p_jpeg.output_width;
    p_dec->fmt_out.video.i_height = p_sys->p_jpeg.output_height;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;
    p_dec->fmt_out.video.i_rmask = 0x000000ff;
    p_dec->fmt_out.video.i_gmask = 0x0000ff00;
    p_dec->fmt_out.video.i_bmask = 0x00ff0000;

    /* Get a new picture */
    p_pic = decoder_NewPicture(p_dec);
    if (!p_pic)
    {
        goto error;
    }

    /* Decode picture */
    p_row_pointers = malloc(sizeof(JSAMPROW) * p_sys->p_jpeg.output_height);
    if (!p_row_pointers)
    {
        goto error;
    }
    for (unsigned i = 0; i < p_sys->p_jpeg.output_height; i++) {
        p_row_pointers[i] = p_pic->p->p_pixels + p_pic->p->i_pitch * i;
    }

    while (p_sys->p_jpeg.output_scanline < p_sys->p_jpeg.output_height)
    {
        jpeg_read_scanlines(&p_sys->p_jpeg,
                p_row_pointers + p_sys->p_jpeg.output_scanline,
                p_sys->p_jpeg.output_height - p_sys->p_jpeg.output_scanline);
    }

    jpeg_finish_decompress(&p_sys->p_jpeg);
    jpeg_destroy_decompress(&p_sys->p_jpeg);
    free(p_row_pointers);

    p_pic->date = p_block->i_pts > VLC_TS_INVALID ? p_block->i_pts : p_block->i_dts;

    block_Release(p_block);
    *pp_block = NULL;

    return p_pic;

error:

    jpeg_destroy_decompress(&p_sys->p_jpeg);
    free(p_row_pointers);

    block_Release(p_block);
    *pp_block = NULL;

    return NULL;
}
Example #19
File: bpg.c Project: 0xheart0/vlc
/*
 * This function must be fed with a complete compressed frame.
 */
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    picture_t *p_pic = 0;
    BPGImageInfo img_info;

    if( !pp_block || !*pp_block )
        return NULL;

    p_block = *pp_block;
    *pp_block = NULL;

    if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
        goto error;

    /* Decode picture */

    if( bpg_decoder_decode( p_sys->p_bpg,
                            p_block->p_buffer,
                            p_block->i_buffer ) < 0 )
    {
        msg_Err( p_dec, "Could not decode block" );
        goto error;
    }

    if( bpg_decoder_get_info( p_sys->p_bpg, &img_info ) )
    {
        msg_Err( p_dec, "Could not get info for decoder" );
        goto error;
    }

    if( bpg_decoder_start( p_sys->p_bpg, BPG_OUTPUT_FORMAT_RGB24 ) )
    {
        msg_Err( p_dec, "Could not start decoder" );
        goto error;
    }

    /* Set output properties */
    p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
    p_dec->fmt_out.video.i_visible_width  = p_dec->fmt_out.video.i_width  = img_info.width;
    p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height = img_info.height;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;
    p_dec->fmt_out.video.i_rmask = 0x000000ff;
    p_dec->fmt_out.video.i_gmask = 0x0000ff00;
    p_dec->fmt_out.video.i_bmask = 0x00ff0000;

    /* Get a new picture */
    p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        goto error;
    }

    const int img_height = img_info.height;
    for (int i = 0; i < img_height; i++)
    {
        if( bpg_decoder_get_line( p_sys->p_bpg,
                                  p_pic->p->p_pixels + p_pic->p->i_pitch * i )
                                  < 0 )
        {
            msg_Err( p_dec, "Could not decode line" );
            goto error;
        }
    }

    p_pic->date = p_block->i_pts > VLC_TS_INVALID ? p_block->i_pts : p_block->i_dts;

    block_Release( p_block );
    return p_pic;

error:
    block_Release( p_block );
    return NULL;
}
Example #20
/*****************************************************************************
 * DecodeVideo:
 *****************************************************************************/
static picture_t *DecodeVideo( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t       *p_block;
    picture_t     *p_pic;
    mtime_t       i_pts;

    ComponentResult cres;

#ifdef LOADER
    /* We must do open and close in the same thread (unless we do
     * Setup_LDT_Keeper in the main thread before all others */
    if( p_sys == NULL )
    {
        if( OpenVideo( p_dec ) )
        {
            /* Fatal */
            p_dec->b_error = true;
            return NULL;
        }
        p_sys = p_dec->p_sys;
    }
#endif

    if( pp_block == NULL || *pp_block == NULL )
    {
        return NULL;
    }
    p_block = *pp_block;
    *pp_block = NULL;
 
    i_pts = p_block->i_pts ? p_block->i_pts : p_block->i_dts;

    mtime_t i_display_date = 0;
    if( !(p_block->i_flags & BLOCK_FLAG_PREROLL) )
        i_display_date = decoder_GetDisplayDate( p_dec, i_pts );

    if( i_display_date > 0 && i_display_date < mdate() )
    {
        p_sys->i_late++;
    }
    else
    {
        p_sys->i_late = 0;
    }
#ifndef NDEBUG
    msg_Dbg( p_dec, "bufsize: %d", (int)p_block->i_buffer);
#endif

    if( p_sys->i_late > 10 )
    {
        msg_Dbg( p_dec, "late buffer dropped (%"PRId64")", i_pts );
        block_Release( p_block );
        return NULL;
    }
 
    vlc_mutex_lock( &qt_mutex );

    if( ( p_pic = decoder_NewPicture( p_dec ) ) )
    {
        p_sys->decpar.data                  = (Ptr)p_block->p_buffer;
        p_sys->decpar.bufferSize            = p_block->i_buffer;
        (**p_sys->framedescHandle).dataSize = p_block->i_buffer;

        cres = p_sys->ImageCodecBandDecompress( p_sys->ci, &p_sys->decpar );

        ++p_sys->decpar.frameNumber;

        if( cres &0xFFFF )
        {
            msg_Dbg( p_dec, "quicktime_video: ImageCodecBandDecompress"
                     " cres=0x%X (-0x%X) %d :(",
                     (int)cres,(int)-cres, (int)cres );
        }

        memcpy( p_pic->p[0].p_pixels, p_sys->plane,
                p_dec->fmt_in.video.i_width * p_dec->fmt_in.video.i_height * 2 );
        p_pic->date = i_pts;
    }
 
    vlc_mutex_unlock( &qt_mutex );

    block_Release( p_block );
    return p_pic;
}
Example #21
File: mft.c Project: IAPark/vlc
static int ProcessOutputStream(decoder_t *p_dec, DWORD stream_id)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    HRESULT hr;
    picture_t *picture = NULL;
    block_t *aout_buffer = NULL;

    DWORD output_status = 0;
    MFT_OUTPUT_DATA_BUFFER output_buffer = { stream_id, p_sys->output_sample, 0, NULL };
    hr = IMFTransform_ProcessOutput(p_sys->mft, 0, 1, &output_buffer, &output_status);
    if (output_buffer.pEvents)
        IMFCollection_Release(output_buffer.pEvents);
    /* Use the returned sample since it can be provided by the MFT. */
    IMFSample *output_sample = output_buffer.pSample;

    if (hr == S_OK)
    {
        if (!output_sample)
            return VLC_SUCCESS;

        LONGLONG sample_time;
        hr = IMFSample_GetSampleTime(output_sample, &sample_time);
        if (FAILED(hr))
            goto error;
        /* Convert from 100 nanoseconds unit to microseconds. */
        sample_time /= 10;

        DWORD total_length = 0;
        hr = IMFSample_GetTotalLength(output_sample, &total_length);
        if (FAILED(hr))
            goto error;

        if (p_dec->fmt_in.i_cat == VIDEO_ES)
        {
            if (decoder_UpdateVideoFormat(p_dec))
                return VLC_SUCCESS;
            picture = decoder_NewPicture(p_dec);
            if (!picture)
                return VLC_SUCCESS;

            UINT32 interlaced = false;
            hr = IMFSample_GetUINT32(output_sample, &MFSampleExtension_Interlaced, &interlaced);
            picture->b_progressive = !interlaced;

            picture->date = sample_time;
        }
        else
        {
            if (decoder_UpdateAudioFormat(p_dec))
                goto error;
            if (p_dec->fmt_out.audio.i_bitspersample == 0 || p_dec->fmt_out.audio.i_channels == 0)
                goto error;
            int samples = total_length / (p_dec->fmt_out.audio.i_bitspersample * p_dec->fmt_out.audio.i_channels / 8);
            aout_buffer = decoder_NewAudioBuffer(p_dec, samples);
            if (!aout_buffer)
                return VLC_SUCCESS;
            if (aout_buffer->i_buffer < total_length)
                goto error;

            aout_buffer->i_pts = sample_time;
        }

        IMFMediaBuffer *output_media_buffer = NULL;
        hr = IMFSample_GetBufferByIndex(output_sample, 0, &output_media_buffer);

        BYTE *buffer_start;
        hr = IMFMediaBuffer_Lock(output_media_buffer, &buffer_start, NULL, NULL);
        if (FAILED(hr))
            goto error;

        if (p_dec->fmt_in.i_cat == VIDEO_ES)
            CopyPackedBufferToPicture(picture, buffer_start);
        else
            memcpy(aout_buffer->p_buffer, buffer_start, total_length);

        hr = IMFMediaBuffer_Unlock(output_media_buffer);
        IMFSample_Release(output_media_buffer);
        if (FAILED(hr))
            goto error;

        if (p_sys->output_sample)
        {
            /* Sample is not provided by the MFT: clear its content. */
            hr = IMFMediaBuffer_SetCurrentLength(output_media_buffer, 0);
            if (FAILED(hr))
                goto error;
        }
        else
        {
            /* Sample is provided by the MFT: decrease refcount. */
            IMFSample_Release(output_sample);
        }
    }
    else if (hr == MF_E_TRANSFORM_STREAM_CHANGE || hr == MF_E_TRANSFORM_TYPE_NOT_SET)
    {
        if (p_sys->output_type)
            IMFMediaType_Release(p_sys->output_type);
        if (SetOutputType(p_dec, p_sys->output_stream_id, &p_sys->output_type))
            goto error;

        /* Reallocate output sample. */
        if (p_sys->output_sample)
            IMFSample_Release(p_sys->output_sample);
        p_sys->output_sample = NULL;
        if (AllocateOutputSample(p_dec, 0, &p_sys->output_sample))
            goto error;
        return VLC_SUCCESS;
    }
    else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT)
    {
        return VLC_SUCCESS;
    }
    else /* An error not listed above occurred */
    {
        msg_Err(p_dec, "Unexpected error in IMFTransform::ProcessOutput: %#lx",
                hr);
        goto error;
    }

    if (p_dec->fmt_in.i_cat == VIDEO_ES)
        decoder_QueueVideo(p_dec, picture);
    else
        decoder_QueueAudio(p_dec, aout_buffer);

    return VLC_SUCCESS;

error:
    msg_Err(p_dec, "Error in ProcessOutputStream()");
    if (picture)
        picture_Release(picture);
    if (aout_buffer)
        block_Release(aout_buffer);
    return VLC_EGENERIC;
}
Example #22
File: sdl_image.c Project: mstorsjo/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with a complete compressed frame.
 ****************************************************************************/
static int DecodeBlock( decoder_t *p_dec, block_t *p_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic = NULL;
    SDL_Surface *p_surface;
    SDL_RWops *p_rw;

    if( p_block == NULL ) /* No Drain */
        return VLCDEC_SUCCESS;

    if( p_block->i_flags & BLOCK_FLAG_CORRUPTED )
    {
        block_Release( p_block );
        return VLCDEC_SUCCESS;
    }

    p_rw = SDL_RWFromConstMem( p_block->p_buffer, p_block->i_buffer );

    /* Decode picture. */
    p_surface = IMG_LoadTyped_RW( p_rw, 1, (char*)p_sys->psz_sdl_type );
    if ( p_surface == NULL )
    {
        msg_Warn( p_dec, "SDL_image couldn't load the image (%s)",
                  IMG_GetError() );
        goto error;
    }

    switch ( p_surface->format->BitsPerPixel )
    {
    case 16:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB16;
        break;
    case 8:
    case 24:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
        break;
    case 32:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB32;
        break;
    default:
        msg_Warn( p_dec, "unknown bits/pixel format (%d)",
                  p_surface->format->BitsPerPixel );
        goto error;
    }
    p_dec->fmt_out.video.i_width = p_surface->w;
    p_dec->fmt_out.video.i_height = p_surface->h;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;

    /* Get a new picture. */
    if( decoder_UpdateVideoFormat( p_dec ) )
        goto error;
    p_pic = decoder_NewPicture( p_dec );
    if ( p_pic == NULL ) goto error;

    switch ( p_surface->format->BitsPerPixel )
    {
        case 8:
        {
            for ( int i = 0; i < p_surface->h; i++ )
            {
                uint8_t *p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                uint8_t *p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( int j = 0; j < p_surface->w; j++ )
                {
                    uint8_t r, g, b;
                    SDL_GetRGB( *(p_src++), p_surface->format,
                                &r, &g, &b );
                    *(p_dst++) = r;
                    *(p_dst++) = g;
                    *(p_dst++) = b;
                }
            }
            break;
        }
        case 16:
        {
            uint8_t *p_src = p_surface->pixels;
            uint8_t *p_dst = p_pic->p[0].p_pixels;
            int i_pitch = p_pic->p[0].i_pitch < p_surface->pitch ?
                p_pic->p[0].i_pitch : p_surface->pitch;

            for ( int i = 0; i < p_surface->h; i++ )
            {
                memcpy( p_dst, p_src, i_pitch );
                p_src += p_surface->pitch;
                p_dst += p_pic->p[0].i_pitch;
            }
            break;
        }
        case 24:
        {
            for ( int i = 0; i < p_surface->h; i++ )
            {
                uint8_t *p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                uint8_t *p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( int j = 0; j < p_surface->w; j++ )
                {
                    uint8_t r, g, b;
                    SDL_GetRGB( *(uint32_t*)p_src, p_surface->format,
                                &r, &g, &b );
                    *(p_dst++) = r;
                    *(p_dst++) = g;
                    *(p_dst++) = b;
                    p_src += 3;
                }
            }
            break;
        }
        case 32:
        {
            for ( int i = 0; i < p_surface->h; i++ )
            {
                uint8_t *p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                uint8_t *p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( int j = 0; j < p_surface->w; j++ )
                {
                    uint8_t r, g, b, a;
                    SDL_GetRGBA( *(uint32_t*)p_src, p_surface->format,
                                &r, &g, &b, &a );
                    *(p_dst++) = b;
                    *(p_dst++) = g;
                    *(p_dst++) = r;
                    *(p_dst++) = a;
                    p_src += 4;
                }
            }
            break;
        }
    }

    p_pic->date = (p_block->i_pts != VLC_TICK_INVALID) ?
        p_block->i_pts : p_block->i_dts;

    decoder_QueueVideo( p_dec, p_pic );

error:
    if ( p_surface != NULL ) SDL_FreeSurface( p_surface );
    block_Release( p_block );
    return VLCDEC_SUCCESS;
}
Example #23
File: jpeg.c Project: chouquette/vlc
/*
 * This function must be fed with a complete compressed frame.
 */
static int DecodeBlock(decoder_t *p_dec, block_t *p_block)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    picture_t *p_pic = 0;

    JSAMPARRAY p_row_pointers = NULL;

    if (!p_block) /* No Drain */
        return VLCDEC_SUCCESS;

    if (p_block->i_flags & BLOCK_FLAG_CORRUPTED )
    {
        block_Release(p_block);
        return VLCDEC_SUCCESS;
    }

    /* libjpeg longjmp's there in case of error */
    if (setjmp(p_sys->setjmp_buffer))
    {
        goto error;
    }

    jpeg_create_decompress(&p_sys->p_jpeg);
    jpeg_mem_src(&p_sys->p_jpeg, p_block->p_buffer, p_block->i_buffer);
    jpeg_save_markers( &p_sys->p_jpeg, EXIF_JPEG_MARKER, 0xffff );
    jpeg_read_header(&p_sys->p_jpeg, TRUE);

    p_sys->p_jpeg.out_color_space = JCS_RGB;

    jpeg_start_decompress(&p_sys->p_jpeg);

    /* Set output properties */
    p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
    p_dec->fmt_out.video.i_visible_width  = p_dec->fmt_out.video.i_width  = p_sys->p_jpeg.output_width;
    p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height = p_sys->p_jpeg.output_height;
    p_dec->fmt_out.video.i_sar_num = 1;
    p_dec->fmt_out.video.i_sar_den = 1;

    int i_otag; /* Orientation tag has valid range of 1-8. 1 is normal orientation, 0 = unspecified = normal */
    i_otag = jpeg_GetOrientation( &p_sys->p_jpeg );
    if ( i_otag > 1 )
    {
        msg_Dbg( p_dec, "Jpeg orientation is %d", i_otag );
        p_dec->fmt_out.video.orientation = ORIENT_FROM_EXIF( i_otag );
    }
    jpeg_GetProjection(&p_sys->p_jpeg, &p_dec->fmt_out.video);

    /* Get a new picture */
    if (decoder_UpdateVideoFormat(p_dec))
    {
        goto error;
    }
    p_pic = decoder_NewPicture(p_dec);
    if (!p_pic)
    {
        goto error;
    }

    /* Decode picture */
    p_row_pointers = malloc(sizeof(JSAMPROW) * p_sys->p_jpeg.output_height);
    if (!p_row_pointers)
    {
        goto error;
    }
    for (unsigned i = 0; i < p_sys->p_jpeg.output_height; i++) {
        p_row_pointers[i] = p_pic->p->p_pixels + p_pic->p->i_pitch * i;
    }

    while (p_sys->p_jpeg.output_scanline < p_sys->p_jpeg.output_height)
    {
        jpeg_read_scanlines(&p_sys->p_jpeg,
                p_row_pointers + p_sys->p_jpeg.output_scanline,
                p_sys->p_jpeg.output_height - p_sys->p_jpeg.output_scanline);
    }

    jpeg_finish_decompress(&p_sys->p_jpeg);
    jpeg_destroy_decompress(&p_sys->p_jpeg);
    free(p_row_pointers);

    p_pic->date = p_block->i_pts > VLC_TS_INVALID ? p_block->i_pts : p_block->i_dts;

    block_Release(p_block);
    decoder_QueueVideo( p_dec, p_pic );
    return VLCDEC_SUCCESS;

error:

    jpeg_destroy_decompress(&p_sys->p_jpeg);
    free(p_row_pointers);

    block_Release(p_block);
    return VLCDEC_SUCCESS;
}
Example #24
File: mediacodec.c Project: 9034725985/vlc
static int Video_ProcessOutput(decoder_t *p_dec, mc_api_out *p_out,
                               picture_t **pp_out_pic, block_t **pp_out_block)
{
    decoder_sys_t *p_sys = p_dec->p_sys;

    assert(pp_out_pic && !pp_out_block);

    if (p_out->type == MC_OUT_TYPE_BUF)
    {
        picture_t *p_pic = NULL;

        /* Use the aspect ratio provided by the input (ie read from packetizer).
         * Don't check the current value of the aspect ratio in fmt_out, since we
         * want to allow changes in it to propagate. */
        if (p_dec->fmt_in.video.i_sar_num != 0 && p_dec->fmt_in.video.i_sar_den != 0
         && (p_dec->fmt_out.video.i_sar_num != p_dec->fmt_in.video.i_sar_num ||
             p_dec->fmt_out.video.i_sar_den != p_dec->fmt_in.video.i_sar_den))
        {
            p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num;
            p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den;
            p_sys->b_update_format = true;
        }

        if (p_sys->b_update_format)
        {
            p_sys->b_update_format = false;
            if (decoder_UpdateVideoFormat(p_dec) != 0)
            {
                msg_Err(p_dec, "decoder_UpdateVideoFormat failed");
                p_sys->api->release_out(p_sys->api, p_out->u.buf.i_index, false);
                return -1;
            }
        }

        /* If the oldest input block had no PTS, the timestamp of
         * the frame returned by MediaCodec might be wrong so we
         * overwrite it with the corresponding dts. Call FifoGet
         * first in order to avoid a gap if buffers are released
         * due to an invalid format or a preroll */
        int64_t forced_ts = timestamp_FifoGet(p_sys->u.video.timestamp_fifo);

        if (!p_sys->b_has_format) {
            msg_Warn(p_dec, "Buffers returned before output format is set, dropping frame");
            return p_sys->api->release_out(p_sys->api, p_out->u.buf.i_index, false);
        }

        if (p_out->u.buf.i_ts <= p_sys->i_preroll_end)
            return p_sys->api->release_out(p_sys->api, p_out->u.buf.i_index, false);

        p_pic = decoder_NewPicture(p_dec);
        if (!p_pic) {
            msg_Warn(p_dec, "NewPicture failed");
            return p_sys->api->release_out(p_sys->api, p_out->u.buf.i_index, false);
        }

        if (forced_ts == VLC_TS_INVALID)
            p_pic->date = p_out->u.buf.i_ts;
        else
            p_pic->date = forced_ts;

        if (p_sys->api->b_direct_rendering)
        {
            picture_sys_t *p_picsys = p_pic->p_sys;
            p_picsys->pf_lock_pic = NULL;
            p_picsys->pf_unlock_pic = UnlockPicture;
            p_picsys->priv.hw.p_dec = p_dec;
            p_picsys->priv.hw.i_index = p_out->u.buf.i_index;
            p_picsys->priv.hw.b_valid = true;

            vlc_mutex_lock(get_android_opaque_mutex());
            InsertInflightPicture(p_dec, p_pic, p_out->u.buf.i_index);
            vlc_mutex_unlock(get_android_opaque_mutex());
        } else {
            unsigned int chroma_div;
            GetVlcChromaSizes(p_dec->fmt_out.i_codec,
                              p_dec->fmt_out.video.i_width,
                              p_dec->fmt_out.video.i_height,
                              NULL, NULL, &chroma_div);
            CopyOmxPicture(p_sys->u.video.i_pixel_format, p_pic,
                           p_sys->u.video.i_slice_height, p_sys->u.video.i_stride,
                           (uint8_t *)p_out->u.buf.p_ptr, chroma_div,
                           &p_sys->u.video.ascd);

            if (p_sys->api->release_out(p_sys->api, p_out->u.buf.i_index, false))
            {
                picture_Release(p_pic);
                return -1;
            }
        }
        assert(!(*pp_out_pic));
        *pp_out_pic = p_pic;
        return 1;
    } else {
        assert(p_out->type == MC_OUT_TYPE_CONF);
        p_sys->u.video.i_pixel_format = p_out->u.conf.video.pixel_format;
        ArchitectureSpecificCopyHooksDestroy(p_sys->u.video.i_pixel_format,
                                             &p_sys->u.video.ascd);

        const char *name = "unknown";
        if (p_sys->api->b_direct_rendering)
            p_dec->fmt_out.i_codec = VLC_CODEC_ANDROID_OPAQUE;
        else
        {
            if (!GetVlcChromaFormat(p_sys->u.video.i_pixel_format,
                                    &p_dec->fmt_out.i_codec, &name)) {
                msg_Err(p_dec, "color-format not recognized");
                return -1;
            }
        }

        msg_Err(p_dec, "output: %d %s, %dx%d stride %d %d, crop %d %d %d %d",
                p_sys->u.video.i_pixel_format, name, p_out->u.conf.video.width, p_out->u.conf.video.height,
                p_out->u.conf.video.stride, p_out->u.conf.video.slice_height,
                p_out->u.conf.video.crop_left, p_out->u.conf.video.crop_top,
                p_out->u.conf.video.crop_right, p_out->u.conf.video.crop_bottom);

        p_dec->fmt_out.video.i_width = p_out->u.conf.video.crop_right + 1 - p_out->u.conf.video.crop_left;
        p_dec->fmt_out.video.i_height = p_out->u.conf.video.crop_bottom + 1 - p_out->u.conf.video.crop_top;
        if (p_dec->fmt_out.video.i_width <= 1
            || p_dec->fmt_out.video.i_height <= 1) {
            p_dec->fmt_out.video.i_width = p_out->u.conf.video.width;
            p_dec->fmt_out.video.i_height = p_out->u.conf.video.height;
        }
        p_dec->fmt_out.video.i_visible_width = p_dec->fmt_out.video.i_width;
        p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height;

        p_sys->u.video.i_stride = p_out->u.conf.video.stride;
        p_sys->u.video.i_slice_height = p_out->u.conf.video.slice_height;
        if (p_sys->u.video.i_stride <= 0)
            p_sys->u.video.i_stride = p_out->u.conf.video.width;
        if (p_sys->u.video.i_slice_height <= 0)
            p_sys->u.video.i_slice_height = p_out->u.conf.video.height;

        ArchitectureSpecificCopyHooks(p_dec, p_out->u.conf.video.pixel_format,
                                      p_out->u.conf.video.slice_height,
                                      p_sys->u.video.i_stride, &p_sys->u.video.ascd);
        if (p_sys->u.video.i_pixel_format == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar)
            p_sys->u.video.i_slice_height -= p_out->u.conf.video.crop_top/2;
        if ((p_sys->i_quirks & OMXCODEC_VIDEO_QUIRKS_IGNORE_PADDING))
        {
            p_sys->u.video.i_slice_height = 0;
            p_sys->u.video.i_stride = p_dec->fmt_out.video.i_width;
        }
        p_sys->b_update_format = true;
        p_sys->b_has_format = true;
        return 0;
    }
}
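
Note on the crop handling above: the visible size is derived from the inclusive crop rectangle reported by the codec, with a fallback to the full coded size when the rectangle is degenerate. Below is a minimal standalone sketch of that logic; the struct and function names are illustrative only and are not part of the VLC or MediaCodec APIs.

#include <stdio.h>

/* Illustrative stand-in for the codec-reported crop rectangle. */
struct crop_rect { int left, top, right, bottom; };

static void visible_size(int coded_w, int coded_h, struct crop_rect c,
                         int *out_w, int *out_h)
{
    /* Crop coordinates are inclusive, hence the "+ 1". */
    int w = c.right + 1 - c.left;
    int h = c.bottom + 1 - c.top;

    /* Degenerate rectangle: fall back to the coded size. */
    if (w <= 1 || h <= 1)
    {
        w = coded_w;
        h = coded_h;
    }
    *out_w = w;
    *out_h = h;
}

int main(void)
{
    int w, h;
    visible_size(1920, 1088, (struct crop_rect){0, 0, 1919, 1079}, &w, &h);
    printf("%dx%d\n", w, h); /* prints 1920x1080 */
    return 0;
}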
Example #25
0
File: xwd.c Project: Geal/vlc
static picture_t *Decode (decoder_t *dec, block_t **pp)
{
    picture_t *pic = NULL;

    if (pp == NULL)
        return NULL;

    block_t *block = *pp;
    if (block == NULL)
        return NULL;
    *pp = NULL;

    if (block->i_pts <= VLC_TS_INVALID)
        goto drop; /* undated block, should never happen */
    if (block->i_buffer < sz_XWDheader)
        goto drop;

    /* Skip XWD header */
    const XWDFileHeader *hdr = (const void *)block->p_buffer;
    uint32_t hdrlen = ntohl(hdr->header_size);
    if (hdrlen < sz_XWDheader
     || ntohl(hdr->file_version) < XWD_FILE_VERSION
     || ntohl(hdr->pixmap_format) != 2 /* ZPixmap */)
        goto drop;

    hdrlen += ntohl(hdr->ncolors) * sz_XWDColor;
    if (hdrlen > block->i_buffer)
        goto drop;
    block->p_buffer += hdrlen;
    block->i_buffer -= hdrlen;

    /* Parse XWD header */
    vlc_fourcc_t chroma = 0;
    switch (ntohl(hdr->pixmap_depth))
    {
        case 8:
            if (ntohl(hdr->bits_per_pixel) == 8)
                chroma = VLC_CODEC_RGB8;
            break;
        case 15:
            if (ntohl(hdr->bits_per_pixel) == 16)
                chroma = VLC_CODEC_RGB15;
            break;
        case 16:
            if (ntohl(hdr->bits_per_pixel) == 16)
                chroma = VLC_CODEC_RGB16;
            break;
        case 24:
            switch (ntohl(hdr->bits_per_pixel))
            {
                case 32: chroma = VLC_CODEC_RGB32; break;
                case 24: chroma = VLC_CODEC_RGB24; break;
            }
            break;
        case 32:
            if (ntohl(hdr->bits_per_pixel) == 32)
                chroma = VLC_CODEC_ARGB;
            break;
    }
    /* TODO: check image endianess, set RGB mask */
    if (!chroma)
        goto drop;

    video_format_Setup(&dec->fmt_out.video, chroma,
                       ntohl(hdr->pixmap_width), ntohl(hdr->pixmap_height),
                       ntohl(hdr->pixmap_width), ntohl(hdr->pixmap_height),
                       dec->fmt_in.video.i_sar_num,
                       dec->fmt_in.video.i_sar_den);

    const size_t copy = dec->fmt_out.video.i_width
                        * (dec->fmt_out.video.i_bits_per_pixel / 8);
    const uint32_t pitch = ntohl(hdr->bytes_per_line);

    /* Build picture */
    if (pitch < copy
     || (block->i_buffer / pitch) < dec->fmt_out.video.i_height)
        goto drop;

    if (decoder_UpdateVideoFormat(dec))
        goto drop;
    pic = decoder_NewPicture(dec);
    if (pic == NULL)
        goto drop;

    const uint8_t *in = block->p_buffer;
    uint8_t *out = pic->p->p_pixels;
    for (unsigned i = 0; i < dec->fmt_out.video.i_height; i++)
    {
        memcpy(out, in, copy);
        in += pitch;
        out += pic->p->i_pitch;
    }
    pic->date = block->i_pts;
    pic->b_progressive = true;

drop:
    block_Release(block);
    return pic;
}
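
The row loop above copies one visible row at a time because the source pitch (bytes_per_line from the XWD header) and the destination pitch chosen by the video output generally differ. A small standalone sketch of the same pattern follows; the helper is illustrative and not part of VLC.

#include <stdint.h>
#include <string.h>

/* Copy `height` rows of `row_bytes` useful bytes between two buffers whose
 * line pitches may differ; both pitches must be at least `row_bytes`. */
static void copy_rows(uint8_t *dst, size_t dst_pitch,
                      const uint8_t *src, size_t src_pitch,
                      size_t row_bytes, unsigned height)
{
    for (unsigned i = 0; i < height; i++)
    {
        memcpy(dst, src, row_bytes);
        src += src_pitch;
        dst += dst_pitch;
    }
}

In the XWD example, row_bytes corresponds to `copy`, src_pitch to the header's bytes_per_line, and dst_pitch to p_pic->p->i_pitch.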
Example #26
0
static int Video_GetOutput(decoder_t *p_dec, picture_t **pp_out_pic,
                           block_t **pp_out_block, bool *p_abort,
                           mtime_t i_timeout)
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    mc_api_out out;
    picture_t *p_pic = NULL;
    int i_ret;

    assert(pp_out_pic && !pp_out_block);

    /* FIXME: A new picture shouldn't be created each time. If
     * decoder_NewPicture fails because the decoder is flushing/exiting,
     * Video_GetOutput will either fail (or crash, depending on the device),
     * or never return an output buffer: when the decoder is flushing,
     * MediaCodec can stall because its input side waits on the output side
     * or vice versa. Therefore, call decoder_NewPicture before polling the
     * codec output, as a safeguard. */

    if (p_sys->b_has_format)
    {
        if (p_sys->b_update_format)
        {
            p_sys->b_update_format = false;
            if (decoder_UpdateVideoFormat(p_dec) != 0)
            {
                msg_Err(p_dec, "decoder_UpdateVideoFormat failed");
                return -1;
            }
        }
        p_pic = decoder_NewPicture(p_dec);
        if (!p_pic) {
            msg_Warn(p_dec, "NewPicture failed");
            /* abort current Decode call */
            *p_abort = true;
            return 0;
        }
    }

    i_ret = p_sys->api->get_out(p_sys->api, &out, i_timeout);
    if (i_ret != 1)
        goto end;

    if (out.type == MC_OUT_TYPE_BUF)
    {
        /* If the oldest input block had no PTS, the timestamp of the frame
         * returned by MediaCodec might be wrong, so we overwrite it with the
         * corresponding DTS. Call FifoGet first in order to avoid a gap if
         * buffers are released due to an invalid format or a preroll. */
        int64_t forced_ts = timestamp_FifoGet(p_sys->u.video.timestamp_fifo);

        if (!p_sys->b_has_format) {
            msg_Warn(p_dec, "Buffers returned before output format is set, dropping frame");
            i_ret = p_sys->api->release_out(p_sys->api, out.u.buf.i_index, false);
            goto end;
        }

        if (out.u.buf.i_ts <= p_sys->i_preroll_end)
        {
            i_ret = p_sys->api->release_out(p_sys->api, out.u.buf.i_index, false);
            goto end;
        }

        if (forced_ts == VLC_TS_INVALID)
            p_pic->date = out.u.buf.i_ts;
        else
            p_pic->date = forced_ts;

        if (p_sys->api->b_direct_rendering)
        {
            picture_sys_t *p_picsys = p_pic->p_sys;
            p_picsys->pf_lock_pic = NULL;
            p_picsys->pf_unlock_pic = UnlockPicture;
            p_picsys->priv.hw.p_dec = p_dec;
            p_picsys->priv.hw.i_index = out.u.buf.i_index;
            p_picsys->priv.hw.b_valid = true;

            vlc_mutex_lock(get_android_opaque_mutex());
            InsertInflightPicture(p_dec, p_pic, out.u.buf.i_index);
            vlc_mutex_unlock(get_android_opaque_mutex());
        } else {
            unsigned int chroma_div;
            GetVlcChromaSizes(p_dec->fmt_out.i_codec,
                              p_dec->fmt_out.video.i_width,
                              p_dec->fmt_out.video.i_height,
                              NULL, NULL, &chroma_div);
            CopyOmxPicture(p_sys->u.video.i_pixel_format, p_pic,
                           p_sys->u.video.i_slice_height, p_sys->u.video.i_stride,
                           (uint8_t *)out.u.buf.p_ptr, chroma_div,
                           &p_sys->u.video.ascd);

            if (p_sys->api->release_out(p_sys->api, out.u.buf.i_index, false))
                i_ret = -1;
        }
        i_ret = 1;
    } else {
        assert(out.type == MC_OUT_TYPE_CONF);
        p_sys->u.video.i_pixel_format = out.u.conf.video.pixel_format;
        ArchitectureSpecificCopyHooksDestroy(p_sys->u.video.i_pixel_format,
                                             &p_sys->u.video.ascd);

        const char *name = "unknown";
        if (p_sys->api->b_direct_rendering)
            p_dec->fmt_out.i_codec = VLC_CODEC_ANDROID_OPAQUE;
        else
        {
            if (!GetVlcChromaFormat(p_sys->u.video.i_pixel_format,
                                    &p_dec->fmt_out.i_codec, &name)) {
                msg_Err(p_dec, "color-format not recognized");
                i_ret = -1;
                goto end;
            }
        }

        msg_Err(p_dec, "output: %d %s, %dx%d stride %d %d, crop %d %d %d %d",
                p_sys->u.video.i_pixel_format, name, out.u.conf.video.width, out.u.conf.video.height,
                out.u.conf.video.stride, out.u.conf.video.slice_height,
                out.u.conf.video.crop_left, out.u.conf.video.crop_top,
                out.u.conf.video.crop_right, out.u.conf.video.crop_bottom);

        p_dec->fmt_out.video.i_width = out.u.conf.video.crop_right + 1 - out.u.conf.video.crop_left;
        p_dec->fmt_out.video.i_height = out.u.conf.video.crop_bottom + 1 - out.u.conf.video.crop_top;
        if (p_dec->fmt_out.video.i_width <= 1
            || p_dec->fmt_out.video.i_height <= 1) {
            p_dec->fmt_out.video.i_width = out.u.conf.video.width;
            p_dec->fmt_out.video.i_height = out.u.conf.video.height;
        }
        p_dec->fmt_out.video.i_visible_width = p_dec->fmt_out.video.i_width;
        p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height;

        p_sys->u.video.i_stride = out.u.conf.video.stride;
        p_sys->u.video.i_slice_height = out.u.conf.video.slice_height;
        if (p_sys->u.video.i_stride <= 0)
            p_sys->u.video.i_stride = out.u.conf.video.width;
        if (p_sys->u.video.i_slice_height <= 0)
            p_sys->u.video.i_slice_height = out.u.conf.video.height;

        ArchitectureSpecificCopyHooks(p_dec, out.u.conf.video.pixel_format,
                                      out.u.conf.video.slice_height,
                                      p_sys->u.video.i_stride, &p_sys->u.video.ascd);
        if (p_sys->u.video.i_pixel_format == OMX_TI_COLOR_FormatYUV420PackedSemiPlanar)
            p_sys->u.video.i_slice_height -= out.u.conf.video.crop_top/2;
        if (IgnoreOmxDecoderPadding(p_sys->psz_name)) {
            p_sys->u.video.i_slice_height = 0;
            p_sys->u.video.i_stride = p_dec->fmt_out.video.i_width;
        }
        p_sys->b_update_format = true;
        p_sys->b_has_format = true;
        i_ret = 0;
    }
end:
    if (p_pic)
    {
        if (i_ret == 1)
            *pp_out_pic = p_pic;
        else
            picture_Release(p_pic);
    }
    return i_ret;
}
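
As the FIXME comment in Video_GetOutput explains, the output picture is requested from the video output before polling the codec, so that a flushing or stalled video output cannot leave MediaCodec holding an output buffer that is never released. The sketch below shows only that ordering; new_picture, poll_codec and the stub types are hypothetical stand-ins, not VLC or MediaCodec functions.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-ins, used only to illustrate the call ordering;
 * none of these are VLC or MediaCodec APIs. */
typedef struct { int index; } codec_out_stub;
typedef struct { int unused; } picture_stub;

static picture_stub s_pic; /* trivial backing storage for the sketch */
static picture_stub *new_picture(void)            { return &s_pic; }
static void release_picture(picture_stub *p)      { (void)p; }
static bool poll_codec(codec_out_stub *o, int ms) { (void)ms; o->index = 0; return true; }
static void fill_picture(picture_stub *p, int i)  { (void)p; (void)i; }

/* Returns 1 when a picture was produced, 0 when the caller should retry. */
static int get_output(picture_stub **pp_pic, int timeout_ms)
{
    /* 1. Allocate the destination first: if this fails (e.g. while
     *    flushing in the real code) we bail out without taking a codec
     *    output buffer. */
    picture_stub *pic = new_picture();
    if (pic == NULL)
        return 0;

    /* 2. Only then poll the codec for a decoded buffer. */
    codec_out_stub out;
    if (!poll_codec(&out, timeout_ms))
    {
        release_picture(pic); /* nothing produced, give the picture back */
        return 0;
    }

    fill_picture(pic, out.index);
    *pp_pic = pic;
    return 1;
}

int main(void)
{
    picture_stub *pic = NULL;
    return get_output(&pic, 10) == 1 ? 0 : 1;
}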
Example #27
0
File: tarkin.c Project: FLYKingdom/vlc
/*****************************************************************************
 * DecodePacket: decodes a Tarkin packet.
 *****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, block_t **pp_block,
                                ogg_packet *p_oggpacket )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    uint8_t *rgb;

    if( p_oggpacket->bytes )
    {
        tarkin_synthesis_packetin( p_sys->tarkin_stream, p_oggpacket );
        //block_Release( *pp_block ); /* FIXME duplicate packet */
        *pp_block = NULL;
    }

    if( tarkin_synthesis_frameout( p_sys->tarkin_stream,
                                   &rgb, 0, &p_sys->tarkdate ) == 0 )
    {
        int i_width, i_height, i_chroma, i_stride;
        picture_t *p_pic;

        msg_Err( p_dec, "Tarkin frame decoded" );

        i_width = p_sys->tarkin_stream->layer->desc.width;
        i_height = p_sys->tarkin_stream->layer->desc.height;

        switch( p_sys->tarkin_stream->layer->desc.format )
        {
        case TARKIN_RGB24:
            i_chroma = VLC_CODEC_RGB24;
            i_stride = i_width * 3;
            break;
        case TARKIN_RGB32:
            i_chroma = VLC_CODEC_RGB32;
            i_stride = i_width * 4;
            break;
        case TARKIN_RGBA:
            i_chroma = VLC_CODEC_RGBA;
            i_stride = i_width * 4;
            break;
        default:
            i_chroma = VLC_CODEC_I420;
            i_stride = i_width;
            break;
        }

        /* Set output properties */
        p_dec->fmt_out.video.i_width = i_width;
        p_dec->fmt_out.video.i_height = i_height;

        p_dec->fmt_out.video.i_aspect =
            VOUT_ASPECT_FACTOR * i_width / i_height;
        p_dec->fmt_out.i_codec = i_chroma;

        /* Get a new picture */
        if( (p_pic = decoder_NewPicture( p_dec )) )
        {
            tarkin_CopyPicture( p_dec, p_pic, rgb, i_stride );

            tarkin_synthesis_freeframe( p_sys->tarkin_stream, rgb );

            p_pic->date = mdate() + DEFAULT_PTS_DELAY/*i_pts*/;

            return p_pic;
        }
    }

    return NULL;
}
Example #28
0
File: codec.c Project: Adatan/vlc
static int send_output_buffer(decoder_t *dec)
{
    decoder_sys_t *sys = dec->p_sys;
    MMAL_BUFFER_HEADER_T *buffer;
    picture_sys_t *p_sys;
    picture_t *picture;
    MMAL_STATUS_T status;
    unsigned buffer_size = 0;
    int ret = 0;

    if (!sys->output->is_enabled)
        return VLC_EGENERIC;

    /* If local output pool is allocated, use it - this is only the case for
     * non-opaque modes */
    if (sys->output_pool) {
        buffer = mmal_queue_get(sys->output_pool->queue);
        if (!buffer) {
            msg_Warn(dec, "Failed to get new buffer");
            return VLC_EGENERIC;
        }
    }

    picture = decoder_NewPicture(dec);
    if (!picture) {
        msg_Warn(dec, "Failed to get new picture");
        ret = -1;
        goto err;
    }

    p_sys = picture->p_sys;
    for (int i = 0; i < picture->i_planes; i++)
        buffer_size += picture->p[i].i_lines * picture->p[i].i_pitch;

    if (sys->output_pool) {
        mmal_buffer_header_reset(buffer);
        buffer->user_data = picture;
        buffer->alloc_size = sys->output->buffer_size;
        if (buffer_size < sys->output->buffer_size) {
            msg_Err(dec, "Retrieved picture with too small data block (%d < %d)",
                    buffer_size, sys->output->buffer_size);
            ret = VLC_EGENERIC;
            goto err;
        }

        if (!sys->opaque)
            buffer->data = picture->p[0].p_pixels;
    } else {
        buffer = p_sys->buffer;
        if (!buffer) {
            msg_Warn(dec, "Picture has no buffer attached");
            picture_Release(picture);
            return VLC_EGENERIC;
        }
        buffer->data = p_sys->buffer->data;
    }
    buffer->cmd = 0;

    status = mmal_port_send_buffer(sys->output, buffer);
    if (status != MMAL_SUCCESS) {
        msg_Err(dec, "Failed to send buffer to output port (status=%"PRIx32" %s)",
                status, mmal_status_to_string(status));
        ret = -1;
        goto err;
    }
    atomic_fetch_add(&sys->output_in_transit, 1);

    return ret;

err:
    if (picture)
        picture_Release(picture);
    if (sys->output_pool && buffer) {
        buffer->data = NULL;
        mmal_buffer_header_release(buffer);
    }
    return ret;
}
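
One detail in send_output_buffer worth highlighting: before a picture-backed buffer is handed to the MMAL output port, the code sums lines x pitch over all planes and rejects pictures smaller than the port's buffer_size. A minimal standalone version of that check follows; the plane struct is illustrative and not the real picture_t layout.

#include <stdbool.h>
#include <stddef.h>

/* Illustrative plane description; not the real picture_t plane layout. */
struct plane_dim { unsigned lines; unsigned pitch; };

/* True if the planes provide at least `required` bytes of storage. */
static bool picture_covers(const struct plane_dim *planes, int n_planes,
                           size_t required)
{
    size_t total = 0;
    for (int i = 0; i < n_planes; i++)
        total += (size_t)planes[i].lines * planes[i].pitch;
    return total >= required;
}

In the example above, the equivalent check fails with "Retrieved picture with too small data block" and the buffer is returned to the pool instead of being sent.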
Example #29
0
File: sdl_image.c Project: FLYKingdom/vlc
/****************************************************************************
 * DecodeBlock: the whole thing
 ****************************************************************************
 * This function must be fed with a complete compressed frame.
 ****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
    decoder_sys_t *p_sys = p_dec->p_sys;
    block_t *p_block;
    picture_t *p_pic = NULL;
    SDL_Surface *p_surface;
    SDL_RWops *p_rw;

    if( pp_block == NULL || *pp_block == NULL ) return NULL;
    p_block = *pp_block;

    if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
    {
        block_Release( p_block ); *pp_block = NULL;
        return NULL;
    }

    p_rw = SDL_RWFromConstMem( p_block->p_buffer, p_block->i_buffer );

    /* Decode picture. */
    p_surface = IMG_LoadTyped_RW( p_rw, 1, (char*)p_sys->psz_sdl_type );
    if ( p_surface == NULL )
    {
        msg_Warn( p_dec, "SDL_image couldn't load the image (%s)",
                  IMG_GetError() );
        goto error;
    }

    switch ( p_surface->format->BitsPerPixel )
    {
    case 16:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB16;
        break;
    case 8:
    case 24:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
        break;
    case 32:
        p_dec->fmt_out.i_codec = VLC_CODEC_RGB32;
        break;
    default:
        msg_Warn( p_dec, "unknown bits/pixel format (%d)",
                  p_surface->format->BitsPerPixel );
        goto error;
    }
    p_dec->fmt_out.video.i_width = p_surface->w;
    p_dec->fmt_out.video.i_height = p_surface->h;
    p_dec->fmt_out.video.i_aspect = VOUT_ASPECT_FACTOR * p_surface->w
                                     / p_surface->h;

    /* Get a new picture. */
    p_pic = decoder_NewPicture( p_dec );
    if ( p_pic == NULL ) goto error;

    switch ( p_surface->format->BitsPerPixel )
    {
        case 8:
        {
            int i, j;
            uint8_t *p_src, *p_dst;
            uint8_t r, g, b;
            for ( i = 0; i < p_surface->h; i++ )
            {
                p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( j = 0; j < p_surface->w; j++ )
                {
                    SDL_GetRGB( *(p_src++), p_surface->format,
                                &r, &g, &b );
                    *(p_dst++) = r;
                    *(p_dst++) = g;
                    *(p_dst++) = b;
                }
            }
            break;
        }
        case 16:
        {
            int i;
            uint8_t *p_src = p_surface->pixels;
            uint8_t *p_dst = p_pic->p[0].p_pixels;
            int i_pitch = p_pic->p[0].i_pitch < p_surface->pitch ?
                p_pic->p[0].i_pitch : p_surface->pitch;

            for ( i = 0; i < p_surface->h; i++ )
            {
                vlc_memcpy( p_dst, p_src, i_pitch );
                p_src += p_surface->pitch;
                p_dst += p_pic->p[0].i_pitch;
            }
            break;
        }
        case 24:
        {
            int i, j;
            uint8_t *p_src, *p_dst;
            uint8_t r, g, b;
            for ( i = 0; i < p_surface->h; i++ )
            {
                p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( j = 0; j < p_surface->w; j++ )
                {
                    SDL_GetRGB( *(uint32_t*)p_src, p_surface->format,
                                &r, &g, &b );
                    *(p_dst++) = r;
                    *(p_dst++) = g;
                    *(p_dst++) = b;
                    p_src += 3;
                }
            }
            break;
        }
        case 32:
        {
            int i, j;
            uint8_t *p_src, *p_dst;
            uint8_t r, g, b, a;
            for ( i = 0; i < p_surface->h; i++ )
            {
                p_src = (uint8_t*)p_surface->pixels + i * p_surface->pitch;
                p_dst = p_pic->p[0].p_pixels + i * p_pic->p[0].i_pitch;
                for ( j = 0; j < p_surface->w; j++ )
                {
                    SDL_GetRGBA( *(uint32_t*)p_src, p_surface->format,
                                &r, &g, &b, &a );
                    *(p_dst++) = b;
                    *(p_dst++) = g;
                    *(p_dst++) = r;
                    *(p_dst++) = a;
                    p_src += 4;
                }
            }
            break;
        }
    }

    p_pic->date = p_block->i_pts > 0 ? p_block->i_pts : p_block->i_dts;

    SDL_FreeSurface( p_surface );
    block_Release( p_block ); *pp_block = NULL;
    return p_pic;

error:
    if ( p_surface != NULL ) SDL_FreeSurface( p_surface );
    block_Release( p_block ); *pp_block = NULL;
    return NULL;
}
Example #30
0
static bool gst_vlc_video_info_from_vout( GstVideoInfo *p_info,
        GstVideoAlignment *p_align, GstCaps *p_caps, decoder_t *p_dec,
        picture_t *p_pic_info )
{
    const GstVideoFormatInfo *p_vinfo = p_info->finfo;
    picture_t *p_pic = NULL;
    int i;

    /* Ensure the queue is empty */
    gst_vlc_dec_ensure_empty_queue( p_dec );
    gst_video_info_align( p_info, p_align );

    if( !gst_vlc_set_vout_fmt( p_info, p_align, p_caps, p_dec ))
    {
        msg_Err( p_dec, "failed to set output format to vout" );
        return false;
    }

    /* Acquire a picture and release it. This is done to obtain the picture
     * stride/offset info for a GStreamer decoder that wants to use the
     * downstream buffer pool directly (zero-copy). */
    if( !decoder_UpdateVideoFormat( p_dec ) )
        p_pic = decoder_NewPicture( p_dec );
    if( !p_pic )
    {
        msg_Err( p_dec, "failed to acquire picture from vout; for pic info" );
        return false;
    }

    /* reject if strides don't match */
    for( i = 0; i < p_pic->i_planes; i++ )
        if( p_info->stride[i] != p_pic->p[i].i_pitch )
            goto strides_mismatch;

    p_info->offset[0] = 0;
    for( i = 1; i < p_pic->i_planes; i++ )
    {
        p_info->offset[i] = p_info->offset[i-1] +
            p_pic->p[i-1].i_pitch * p_pic->p[i-1].i_lines;
    }
    GST_VIDEO_INFO_SIZE( p_info ) = p_info->offset[i-1] +
        p_pic->p[i-1].i_pitch * p_pic->p[i-1].i_lines;

    for( i = 0; i < p_pic->i_planes; i++ )
    {
        int i_v_edge, i_h_edge;

        i_h_edge =
            GST_VIDEO_FORMAT_INFO_SCALE_WIDTH( p_vinfo, i,
                    p_align->padding_left);
        i_v_edge =
            GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT( p_vinfo, i,
                    p_align->padding_top);

        p_info->offset[i] += ( i_v_edge * p_info->stride[i] ) +
            ( i_h_edge * GST_VIDEO_FORMAT_INFO_PSTRIDE( p_vinfo, i ));
    }

    memcpy( p_pic_info, p_pic, sizeof( picture_t ));
    picture_Release( p_pic );

    return true;

strides_mismatch:
    msg_Err( p_dec, "strides mismatch" );
    picture_Release( p_pic );
    return false;
}
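
The offset computation in gst_vlc_video_info_from_vout lays the planes out back to back: each plane starts where the previous one ends (previous pitch x lines), the total size is the end of the last plane, and the top/left padding is then folded into each offset. Below is a standalone sketch of the contiguous-layout part only; the plane description is illustrative.

#include <stddef.h>
#include <stdio.h>

/* Illustrative plane description used only for this sketch. */
struct plane_size { size_t pitch; size_t lines; };

/* Fill offsets[] with the start of each plane in a tightly packed buffer
 * and return the total buffer size. */
static size_t plane_offsets( const struct plane_size *p, int n,
                             size_t offsets[] )
{
    size_t off = 0;
    for( int i = 0; i < n; i++ )
    {
        offsets[i] = off;
        off += p[i].pitch * p[i].lines;
    }
    return off;
}

int main( void )
{
    /* e.g. a 4x4 I420-like layout: pitches {4,2,2}, lines {4,2,2} */
    struct plane_size planes[3] = { { 4, 4 }, { 2, 2 }, { 2, 2 } };
    size_t offsets[3];
    size_t total = plane_offsets( planes, 3, offsets );
    printf( "%zu %zu %zu -> %zu\n", offsets[0], offsets[1], offsets[2], total );
    /* prints: 0 16 20 -> 24 */
    return 0;
}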