Example #1
static int config_props_input(AVFilterLink *link)
{
    TransContext *trans = link->dst->priv;

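    /* fetch the log2 chroma subsampling shifts for this pixel format
       (e.g. hsub = vsub = 1 for YUV420P) */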
    avcodec_get_chroma_sub_sample(link->format, &trans->hsub, &trans->vsub);

    return 0;
}
Example #2
static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVIOContext *pb = s->pb;
    AVPicture *picture;
    int* first_pkt = s->priv_data;
    int width, height, h_chroma_shift, v_chroma_shift;
    int i;
    char buf2[Y4M_LINE_MAX + 1];
    char buf1[20];
    uint8_t *ptr, *ptr1, *ptr2;

    picture = (AVPicture *)pkt->data;

    /* for the first packet we have to output the header as well */
    if (*first_pkt) {
        *first_pkt = 0;
        if (yuv4_generate_header(s, buf2) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Error. YUV4MPEG stream header write failed.\n");
            return AVERROR(EIO);
        } else {
            avio_write(pb, buf2, strlen(buf2));
        }
    }

    /* construct frame header */

    snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
    avio_write(pb, buf1, strlen(buf1));

    width  = st->codec->width;
    height = st->codec->height;

    ptr = picture->data[0];
    for (i = 0; i < height; i++) {
        avio_write(pb, ptr, width);
        ptr += picture->linesize[0];
    }

    if (st->codec->pix_fmt != AV_PIX_FMT_GRAY8) {
        // Adjust for smaller Cb and Cr planes
        avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift,
                                      &v_chroma_shift);
        width  >>= h_chroma_shift;
        height >>= v_chroma_shift;

        ptr1 = picture->data[1];
        ptr2 = picture->data[2];
        for (i = 0; i < height; i++) {     /* Cb */
            avio_write(pb, ptr1, width);
            ptr1 += picture->linesize[1];
        }
        for (i = 0; i < height; i++) {     /* Cr */
            avio_write(pb, ptr2, width);
            ptr2 += picture->linesize[2];
        }
    }

    return 0;
}
Example #3
static int config_props(AVFilterLink *link)
{
    SliceContext *slice = link->dst->priv;
    int tmp;

    avcodec_get_chroma_sub_sample(link->format, &tmp, &slice->vshift);

    /* ensure that slices play nice with chroma subsampling, and enforce
     * a reasonable minimum size for the slices */
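    /* the mask (-1 << vshift) rounds h down to a multiple of 1 << vshift */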
    slice->h = FFMAX(8, slice->h & (-1 << slice->vshift));

    av_log(link->dst, AV_LOG_INFO, "h:%d\n", slice->h);

    return 0;
}
Example #4
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    levelsContext *lctx = ctx->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    avcodec_get_chroma_sub_sample(outlink->format, &lctx->hsub, &lctx->vsub);

    outlink->w = inlink->w;
    outlink->h = inlink->h;

    return 0;
}
Example #5
raw_frame_ref alloc_frame(PixelFormat pix_fmt, int width, int height)
{
    int chroma_shift_horiz, chroma_shift_vert;
    avcodec_get_chroma_sub_sample(pix_fmt,
				  &chroma_shift_horiz, &chroma_shift_vert);

    raw_frame_ref frame;
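    /* plane 0 is full-resolution luma; the two chroma planes are scaled
       down by the subsampling shifts */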
    alloc_plane(frame, 0, width, height);
    alloc_plane(frame, 1,
		width >> chroma_shift_horiz, height >> chroma_shift_vert);
    alloc_plane(frame, 2,
		width >> chroma_shift_horiz, height >> chroma_shift_vert);
    frame.planes.data[3] = 0;
    frame.planes.linesize[3] = 0;
    frame.pix_fmt = pix_fmt;
    frame.height = height;
    return frame;
}
Example #6
void fill_rect_colour(raw_frame_ref frame, rectangle rect, uint32_t colour)
{
    int chroma_shift_horiz, chroma_shift_vert;
    avcodec_get_chroma_sub_sample(frame.pix_fmt,
				  &chroma_shift_horiz, &chroma_shift_vert);

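    /* plane 0 is luma; on reaching plane 1 the rectangle is scaled once
       into chroma coordinates and then reused unchanged for plane 2 */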
    for (int i = 0; i != 3; ++i)
    {
	if (i == 1)
	{
	    rect.left >>= chroma_shift_horiz;
	    rect.right >>= chroma_shift_horiz;
	    rect.top >>= chroma_shift_vert;
	    rect.bottom >>= chroma_shift_vert;
	}

	const uint8_t value = colour >> (16 - 8 * i);
	for (int y = rect.top; y != rect.bottom; ++y)
	    std::memset(frame.planes.data[i] + frame.planes.linesize[i] * y + rect.left,
			value, rect.right - rect.left);
    }
}
Example #7
File: switcher.c Project: Kafay/vlc
/*****************************************************************************
 * VideoGetBuffer: Build an alternate video buffer
 *****************************************************************************/
static block_t *VideoGetBuffer( sout_stream_t *p_stream, sout_stream_id_t *id,
                                block_t *p_buffer )
{
    sout_stream_sys_t *p_sys = p_stream->p_sys;
    int i_out;
    block_t *p_out;

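    /* convert the configured qscale into lavc lambda units; note that
       2^(FF_LAMBDA_SHIFT + 7) / 139 is approximately FF_QP2LAMBDA (118) */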
    id->p_frame->quality = p_sys->i_qscale * powf(2.0, FF_LAMBDA_SHIFT + 7.0)
                            / 139.0;
    id->p_frame->interlaced_frame = 0;
    id->p_frame->top_field_first = 1;
    id->p_frame->pts = p_buffer->i_dts;

    if ( id->i_nb_pred >= p_sys->i_gop )
    {
        id->p_frame->pict_type = FF_I_TYPE;
#if 0
        id->p_frame->me_threshold = 0;
        id->p_frame->mb_threshold = 0;
#endif
        id->i_nb_pred = 0;
    }
    else
    {
        id->p_frame->pict_type = FF_P_TYPE;
#if 0
        if ( id->p_frame->mb_type != NULL )
        {
            id->p_frame->me_threshold = MAX_THRESHOLD;
            id->p_frame->mb_threshold = MAX_THRESHOLD;
        }
#endif
        id->i_nb_pred++;
    }

    i_out = avcodec_encode_video( id->ff_enc_c, id->p_buffer_out,
                                  id->ff_enc_c->width * id->ff_enc_c->height * 3,
                                  id->p_frame );

    if ( i_out <= 0 )
        return NULL;

#if 0
    if ( id->p_frame->mb_type == NULL
          && id->ff_enc_c->coded_frame->pict_type != FF_I_TYPE )
    {
        int mb_width = (id->ff_enc_c->width + 15) / 16;
        int mb_height = (id->ff_enc_c->height + 15) / 16;
        int h_chroma_shift, v_chroma_shift;
        int i;

        avcodec_get_chroma_sub_sample( id->ff_enc_c->pix_fmt, &h_chroma_shift,
                                       &v_chroma_shift );

        id->p_frame->motion_subsample_log2
            = id->ff_enc_c->coded_frame->motion_subsample_log2;
        id->p_frame->mb_type = malloc( ((mb_width + 1) * (mb_height + 1) + 1)
                                    * sizeof(uint32_t) );
        vlc_memcpy( id->p_frame->mb_type, id->ff_enc_c->coded_frame->mb_type,
                    (mb_width + 1) * mb_height * sizeof(id->p_frame->mb_type[0]));

        for ( i = 0; i < 2; i++ )
        {
            int stride = ((16 * mb_width )
                    >> id->ff_enc_c->coded_frame->motion_subsample_log2) + 1;
            int height = ((16 * mb_height)
                    >> id->ff_enc_c->coded_frame->motion_subsample_log2);
            int b8_stride = mb_width * 2 + 1;

            if ( id->ff_enc_c->coded_frame->motion_val[i] )
            {
                id->p_frame->motion_val[i] = malloc( 2 * stride * height
                                                * sizeof(int16_t) );
                vlc_memcpy( id->p_frame->motion_val[i],
                            id->ff_enc_c->coded_frame->motion_val[i],
                            2 * stride * height * sizeof(int16_t) );
            }
            if ( id->ff_enc_c->coded_frame->ref_index[i] )
            {
                id->p_frame->ref_index[i] = malloc( b8_stride * 2 * mb_height
                                               * sizeof(int8_t) );
                vlc_memcpy( id->p_frame->ref_index[i],
                            id->ff_enc_c->coded_frame->ref_index[i],
                            b8_stride * 2 * mb_height * sizeof(int8_t));
            }
        }
    }
#endif

    /* listing truncated in the source; a minimal, assumed completion:
     * wrap the encoded bytes into a block and hand it back */
    p_out = block_New( p_stream, i_out );
    vlc_memcpy( p_out->p_buffer, id->p_buffer_out, i_out );
    return p_out;
}
Example #8
static int config_input(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    CropContext *crop = ctx->priv;

    switch (link->format) {
    case PIX_FMT_RGB48BE:
    case PIX_FMT_RGB48LE:
        crop->bpp = 48;
        break;
    case PIX_FMT_ARGB:
    case PIX_FMT_RGBA:
    case PIX_FMT_ABGR:
    case PIX_FMT_BGRA:
        crop->bpp = 32;
        break;
    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        crop->bpp = 24;
        break;
    case PIX_FMT_RGB565BE:
    case PIX_FMT_RGB565LE:
    case PIX_FMT_RGB555BE:
    case PIX_FMT_RGB555LE:
    case PIX_FMT_BGR565BE:
    case PIX_FMT_BGR565LE:
    case PIX_FMT_BGR555BE:
    case PIX_FMT_BGR555LE:
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_YUV420P16LE:
    case PIX_FMT_YUV420P16BE:
    case PIX_FMT_YUV422P16LE:
    case PIX_FMT_YUV422P16BE:
    case PIX_FMT_YUV444P16LE:
    case PIX_FMT_YUV444P16BE:
        crop->bpp = 16;
        break;
    default:
        crop->bpp = 8;
    }

    avcodec_get_chroma_sub_sample(link->format, &crop->hsub, &crop->vsub);

    if (crop->w == 0)
        crop->w = link->w - crop->x;
    if (crop->h == 0)
        crop->h = link->h - crop->y;

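    /* round the crop origin down to the chroma grid so the chroma planes
       start at integer sample offsets */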
    crop->x &= ~((1 << crop->hsub) - 1);
    crop->y &= ~((1 << crop->vsub) - 1);

    av_log(link->dst, AV_LOG_INFO, "x:%d y:%d w:%d h:%d\n",
           crop->x, crop->y, crop->w, crop->h);

    if (crop->x <  0 || crop->y <  0                    ||
        crop->w <= 0 || crop->h <= 0                    ||
        (unsigned)crop->x + (unsigned)crop->w > link->w ||
        (unsigned)crop->y + (unsigned)crop->h > link->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Output area %d:%d:%d:%d not within the input area 0:0:%d:%d or zero-sized\n",
               crop->x, crop->y, crop->w, crop->h, link->w, link->h);
        return -1;
    }

    return 0;
}
Example #9
static av_cold int encode_init(AVCodecContext* avc_context)
{
    th_info t_info;
    th_comment t_comment;
    ogg_packet o_packet;
    unsigned int offset;
    TheoraContext *h = avc_context->priv_data;
    uint32_t gop_size = avc_context->gop_size;
    int ret;

    /* Set up the theora_info struct */
    th_info_init(&t_info);
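    /* Theora requires coded frame dimensions that are multiples of 16;
       pic_width/pic_height carry the actual visible picture size */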
    t_info.frame_width  = FFALIGN(avc_context->width,  16);
    t_info.frame_height = FFALIGN(avc_context->height, 16);
    t_info.pic_width    = avc_context->width;
    t_info.pic_height   = avc_context->height;
    t_info.pic_x        = 0;
    t_info.pic_y        = 0;
    /* Swap numerator and denominator as time_base in AVCodecContext gives the
     * time period between frames, but theora_info needs the framerate.  */
    t_info.fps_numerator   = avc_context->time_base.den;
    t_info.fps_denominator = avc_context->time_base.num;
    if (avc_context->sample_aspect_ratio.num) {
        t_info.aspect_numerator   = avc_context->sample_aspect_ratio.num;
        t_info.aspect_denominator = avc_context->sample_aspect_ratio.den;
    } else {
        t_info.aspect_numerator   = 1;
        t_info.aspect_denominator = 1;
    }

    if (avc_context->color_primaries == AVCOL_PRI_BT470M)
        t_info.colorspace = TH_CS_ITU_REC_470M;
    else if (avc_context->color_primaries == AVCOL_PRI_BT470BG)
        t_info.colorspace = TH_CS_ITU_REC_470BG;
    else
        t_info.colorspace = TH_CS_UNSPECIFIED;

    if (avc_context->pix_fmt == AV_PIX_FMT_YUV420P)
        t_info.pixel_fmt = TH_PF_420;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV422P)
        t_info.pixel_fmt = TH_PF_422;
    else if (avc_context->pix_fmt == AV_PIX_FMT_YUV444P)
        t_info.pixel_fmt = TH_PF_444;
    else {
        av_log(avc_context, AV_LOG_ERROR, "Unsupported pix_fmt\n");
        return AVERROR(EINVAL);
    }
    avcodec_get_chroma_sub_sample(avc_context->pix_fmt, &h->uv_hshift, &h->uv_vshift);

    if (avc_context->flags & AV_CODEC_FLAG_QSCALE) {
        /* Clip global_quality in QP units to the [0 - 10] range
           to be consistent with the libvorbis implementation.
           Theora accepts a quality parameter which is an int value in
           the [0 - 63] range.
        */
        t_info.quality        = av_clipf(avc_context->global_quality / (float)FF_QP2LAMBDA, 0, 10) * 6.3;
        t_info.target_bitrate = 0;
    } else {
        t_info.target_bitrate = avc_context->bit_rate;
        t_info.quality        = 0;
    }

    /* Now initialise libtheora */
    h->t_state = th_encode_alloc(&t_info);
    if (!h->t_state) {
        av_log(avc_context, AV_LOG_ERROR, "theora_encode_init failed\n");
        return AVERROR_EXTERNAL;
    }

    h->keyframe_mask = (1 << t_info.keyframe_granule_shift) - 1;
    /* Clear up theora_info struct */
    th_info_clear(&t_info);

    if (th_encode_ctl(h->t_state, TH_ENCCTL_SET_KEYFRAME_FREQUENCY_FORCE,
                      &gop_size, sizeof(gop_size))) {
        av_log(avc_context, AV_LOG_ERROR, "Error setting GOP size\n");
        return AVERROR_EXTERNAL;
    }

    // need to enable 2 pass (via TH_ENCCTL_2PASS_) before encoding headers
    if (avc_context->flags & AV_CODEC_FLAG_PASS1) {
        if ((ret = get_stats(avc_context, 0)) < 0)
            return ret;
    } else if (avc_context->flags & AV_CODEC_FLAG_PASS2) {
        if ((ret = submit_stats(avc_context)) < 0)
            return ret;
    }

    /*
        Output first header packet consisting of theora
        header, comment, and tables.

        Each one is prefixed with a 16-bit size, then they
        are concatenated together into libavcodec's extradata.
    */
    offset = 0;

    /* Headers */
    th_comment_init(&t_comment);

    while (th_encode_flushheader(h->t_state, &t_comment, &o_packet))
        if ((ret = concatenate_packet(&offset, avc_context, &o_packet)) < 0)
            return ret;

    th_comment_clear(&t_comment);

    return 0;
}
Example #10
void
glw_video_input_yuvp(glw_video_t *gv,
		     uint8_t * const data[], const int pitch[],
		     const frame_info_t *fi)
{
  int hvec[3], wvec[3];
  int i, h, w;
  uint8_t *src;
  uint8_t *dst;
  int tff;
  int hshift, vshift;
  glw_video_surface_t *s;
  const int parity = 0;
  int64_t pts = fi->pts;

  avcodec_get_chroma_sub_sample(fi->pix_fmt, &hshift, &vshift);

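  /* plane 0 is full-width luma; the chroma planes shrink by hshift.
     Heights are additionally halved for interlaced input, since each
     field is uploaded as its own surface. */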
  wvec[0] = fi->width;
  wvec[1] = fi->width >> hshift;
  wvec[2] = fi->width >> hshift;
  hvec[0] = fi->height >> fi->interlaced;
  hvec[1] = fi->height >> (vshift + fi->interlaced);
  hvec[2] = fi->height >> (vshift + fi->interlaced);

  if(glw_video_configure(gv, &glw_video_opengl, wvec, hvec, 3,
			 fi->interlaced ? (GVC_YHALF | GVC_CUTBORDER) : 0))
    return;
  
  gv_color_matrix_set(gv, fi);

  if((s = glw_video_get_surface(gv)) == NULL)
    return;

  if(!fi->interlaced) {

    for(i = 0; i < 3; i++) {
      w = wvec[i];
      h = hvec[i];
      src = data[i];
      dst = s->gvs_data[i];
 
      while(h--) {
	memcpy(dst, src, w);
	dst += w;
	src += pitch[i];
      }
    }

    glw_video_put_surface(gv, s, pts, fi->epoch, fi->duration, 0);

  } else {

    int duration = fi->duration >> 1;

    tff = fi->tff ^ parity;

    for(i = 0; i < 3; i++) {
      w = wvec[i];
      h = hvec[i];
      
      src = data[i]; 
      dst = s->gvs_data[i];
      
      /* the original "while(h -= 2 > 0)" parses as "h -= (2 > 0)" and
         drops the last row; h is already the per-field height, so copy
         one destination row per iteration */
      while(h--) {
	memcpy(dst, src, w);
	dst += w;
	src += pitch[i] * 2;
      }
    }
    
    glw_video_put_surface(gv, s, pts, fi->epoch, duration, !tff);

    if((s = glw_video_get_surface(gv)) == NULL)
      return;

    for(i = 0; i < 3; i++) {
      w = wvec[i];
      h = hvec[i];
      
      src = data[i] + pitch[i];
      dst = s->gvs_data[i];
      
      while(h--) {	/* same precedence fix as the first field loop */
	memcpy(dst, src, w);
	dst += w;
	src += pitch[i] * 2;
      }
    }
    
    if(pts != AV_NOPTS_VALUE)
      pts += duration;

    glw_video_put_surface(gv, s, pts, fi->epoch, duration, tff);
  }
}
Example #11
/* [DIRAC_STD] 10.3 Parse Source Parameters.
 * source_parameters(base_video_format) */
static int parse_source_parameters(AVCodecContext *avctx, GetBitContext *gb,
                                   dirac_source_params *source)
{
    AVRational frame_rate = { 0, 0 };
    unsigned luma_depth = 8, luma_offset = 16;
    int idx;
    int chroma_x_shift, chroma_y_shift;

    /* [DIRAC_STD] 10.3.2 Frame size. frame_size(video_params) */
    /* [DIRAC_STD] custom_dimensions_flag */
    if (get_bits1(gb)) {
        source->width  = svq3_get_ue_golomb(gb); /* [DIRAC_STD] FRAME_WIDTH  */
        source->height = svq3_get_ue_golomb(gb); /* [DIRAC_STD] FRAME_HEIGHT */
    }

    /* [DIRAC_STD] 10.3.3 Chroma Sampling Format.
     *  chroma_sampling_format(video_params) */
    /* [DIRAC_STD] custom_chroma_format_flag */
    if (get_bits1(gb))
        /* [DIRAC_STD] CHROMA_FORMAT_INDEX */
        source->chroma_format = svq3_get_ue_golomb(gb);
    if (source->chroma_format > 2U) {
        av_log(avctx, AV_LOG_ERROR, "Unknown chroma format %d\n",
               source->chroma_format);
        return AVERROR_INVALIDDATA;
    }

    /* [DIRAC_STD] 10.3.4 Scan Format. scan_format(video_params) */
    /* [DIRAC_STD] custom_scan_format_flag */
    if (get_bits1(gb))
        /* [DIRAC_STD] SOURCE_SAMPLING */
        source->interlaced = svq3_get_ue_golomb(gb);
    if (source->interlaced > 1U)
        return AVERROR_INVALIDDATA;

    /* [DIRAC_STD] 10.3.5 Frame Rate. frame_rate(video_params) */
    if (get_bits1(gb)) { /* [DIRAC_STD] custom_frame_rate_flag */
        source->frame_rate_index = svq3_get_ue_golomb(gb);

        if (source->frame_rate_index > 10U)
            return AVERROR_INVALIDDATA;

        if (!source->frame_rate_index) {
            /* [DIRAC_STD] FRAME_RATE_NUMER */
            frame_rate.num = svq3_get_ue_golomb(gb);
            /* [DIRAC_STD] FRAME_RATE_DENOM */
            frame_rate.den = svq3_get_ue_golomb(gb);
        }
    }
    /* [DIRAC_STD] preset_frame_rate(video_params, index) */
    if (source->frame_rate_index > 0) {
        if (source->frame_rate_index <= 8)
            frame_rate = ff_mpeg12_frame_rate_tab[source->frame_rate_index];
        else
            /* [DIRAC_STD] Table 10.3 values 9-10 */
            frame_rate = dirac_frame_rate[source->frame_rate_index - 9];
    }
    avctx->framerate = frame_rate;

    /* [DIRAC_STD] 10.3.6 Pixel Aspect Ratio.
     * pixel_aspect_ratio(video_params) */
    if (get_bits1(gb)) { /* [DIRAC_STD] custom_pixel_aspect_ratio_flag */
        /* [DIRAC_STD] index */
        source->aspect_ratio_index = svq3_get_ue_golomb(gb);

        if (source->aspect_ratio_index > 6U)
            return AVERROR_INVALIDDATA;

        if (!source->aspect_ratio_index) {
            avctx->sample_aspect_ratio.num = svq3_get_ue_golomb(gb);
            avctx->sample_aspect_ratio.den = svq3_get_ue_golomb(gb);
        }
    }
    /* [DIRAC_STD] Take value from Table 10.4 Available preset pixel
     *  aspect ratio values */
    if (source->aspect_ratio_index > 0)
        avctx->sample_aspect_ratio =
            dirac_preset_aspect_ratios[source->aspect_ratio_index - 1];

    /* [DIRAC_STD] 10.3.7 Clean area. clean_area(video_params) */
    if (get_bits1(gb)) { /* [DIRAC_STD] custom_clean_area_flag */
        /* [DIRAC_STD] CLEAN_WIDTH */
        source->clean_width = svq3_get_ue_golomb(gb);
        /* [DIRAC_STD] CLEAN_HEIGHT */
        source->clean_height = svq3_get_ue_golomb(gb);
        /* [DIRAC_STD] CLEAN_LEFT_OFFSET */
        source->clean_left_offset = svq3_get_ue_golomb(gb);
        /* [DIRAC_STD] CLEAN_RIGHT_OFFSET */
        source->clean_right_offset = svq3_get_ue_golomb(gb);
    }

    /* [DIRAC_STD] 10.3.8 Signal range. signal_range(video_params)
     * WARNING: Some adaptation seems to be done using the
     * AVCOL_RANGE_MPEG/JPEG values */
    if (get_bits1(gb)) { /* [DIRAC_STD] custom_signal_range_flag */
        /* [DIRAC_STD] index */
        source->pixel_range_index = svq3_get_ue_golomb(gb);

        if (source->pixel_range_index > 4U)
            return AVERROR_INVALIDDATA;

        /* This assumes either fullrange or MPEG levels only */
        if (!source->pixel_range_index) {
            luma_offset = svq3_get_ue_golomb(gb);
            luma_depth  = av_log2(svq3_get_ue_golomb(gb)) + 1;
            svq3_get_ue_golomb(gb); /* chroma offset    */
            svq3_get_ue_golomb(gb); /* chroma excursion */
            avctx->color_range = luma_offset ? AVCOL_RANGE_MPEG
                                             : AVCOL_RANGE_JPEG;
        }
    }
    /* [DIRAC_STD] Table 10.5
     * Available signal range presets <--> pixel_range_presets */
    if (source->pixel_range_index > 0) {
        idx                = source->pixel_range_index - 1;
        luma_depth         = pixel_range_presets[idx].bitdepth;
        avctx->color_range = pixel_range_presets[idx].color_range;
    }

    if (luma_depth > 8)
        av_log(avctx, AV_LOG_WARNING, "Bitdepth greater than 8\n");

    avctx->pix_fmt = dirac_pix_fmt[!luma_offset][source->chroma_format];
    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_x_shift, &chroma_y_shift);
    if ((source->width % (1<<chroma_x_shift)) || (source->height % (1<<chroma_y_shift))) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions must be an integer multiple of the chroma subsampling\n");
        return AVERROR_INVALIDDATA;
    }


    /* [DIRAC_STD] 10.3.9 Colour specification. colour_spec(video_params) */
    if (get_bits1(gb)) { /* [DIRAC_STD] custom_colour_spec_flag */
        /* [DIRAC_STD] index */
        idx = source->color_spec_index = svq3_get_ue_golomb(gb);

        if (source->color_spec_index > 4U)
            return AVERROR_INVALIDDATA;

        avctx->color_primaries = dirac_color_presets[idx].color_primaries;
        avctx->colorspace      = dirac_color_presets[idx].colorspace;
        avctx->color_trc       = dirac_color_presets[idx].color_trc;

        if (!source->color_spec_index) {
            /* [DIRAC_STD] 10.3.9.1 Colour primaries */
            if (get_bits1(gb)) {
                idx = svq3_get_ue_golomb(gb);
                if (idx < 3U)
                    avctx->color_primaries = dirac_primaries[idx];
            }
            /* [DIRAC_STD] 10.3.9.2 Colour matrix */
            if (get_bits1(gb)) {
                idx = svq3_get_ue_golomb(gb);
                if (!idx)
                    avctx->colorspace = AVCOL_SPC_BT709;
                else if (idx == 1)
                    avctx->colorspace = AVCOL_SPC_BT470BG;
            }
            /* [DIRAC_STD] 10.3.9.3 Transfer function */
            if (get_bits1(gb) && !svq3_get_ue_golomb(gb))
                avctx->color_trc = AVCOL_TRC_BT709;
        }
    } else {
        idx                    = source->color_spec_index;
        avctx->color_primaries = dirac_color_presets[idx].color_primaries;
        avctx->colorspace      = dirac_color_presets[idx].colorspace;
        avctx->color_trc       = dirac_color_presets[idx].color_trc;
    }

    return 0;
}
Example #12
static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVIOContext *pb = s->pb;
    AVPicture *picture, picture_tmp;
    int* first_pkt = s->priv_data;
    int width, height, h_chroma_shift, v_chroma_shift;
    int i;
    char buf2[Y4M_LINE_MAX + 1];
    char buf1[20];
    uint8_t *ptr, *ptr1, *ptr2;

    memcpy(&picture_tmp, pkt->data, sizeof(AVPicture));
    picture = &picture_tmp;

    /* for the first packet we have to output the header as well */
    if (*first_pkt) {
        *first_pkt = 0;
        if (yuv4_generate_header(s, buf2) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Error. YUV4MPEG stream header write failed.\n");
            return AVERROR(EIO);
        } else {
            avio_write(pb, buf2, strlen(buf2));
        }
    }

    /* construct frame header */

    snprintf(buf1, sizeof(buf1), "%s\n", Y4M_FRAME_MAGIC);
    avio_write(pb, buf1, strlen(buf1));

    width  = st->codec->width;
    height = st->codec->height;

    ptr = picture->data[0];

    switch (st->codec->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV444P:
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
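        /* every sample in these formats occupies two bytes */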
        width *= 2;
        break;
    default:
        av_log(s, AV_LOG_ERROR, "The pixel format '%s' is not supported.\n",
               av_get_pix_fmt_name(st->codec->pix_fmt));
        return AVERROR(EINVAL);
    }

    for (i = 0; i < height; i++) {
        avio_write(pb, ptr, width);
        ptr += picture->linesize[0];
    }

    if (st->codec->pix_fmt != AV_PIX_FMT_GRAY8 &&
        st->codec->pix_fmt != AV_PIX_FMT_GRAY16) {
        // Adjust for smaller Cb and Cr planes
        avcodec_get_chroma_sub_sample(st->codec->pix_fmt, &h_chroma_shift,
                                      &v_chroma_shift);
        width  >>= h_chroma_shift;
        height >>= v_chroma_shift;

        ptr1 = picture->data[1];
        ptr2 = picture->data[2];
        for (i = 0; i < height; i++) {     /* Cb */
            avio_write(pb, ptr1, width);
            ptr1 += picture->linesize[1];
        }
        for (i = 0; i < height; i++) {     /* Cr */
            avio_write(pb, ptr2, width);
            ptr2 += picture->linesize[2];
        }
    }

    return 0;
}
Example #13
void
glw_video_input_rsx_mem(glw_video_t *gv, void *frame,
			const frame_info_t *fi)
{
  rsx_video_frame_t *rvf = frame;
  int hvec[3], wvec[3];
  int i;
  int hshift, vshift;
  glw_video_surface_t *gvs;

  avcodec_get_chroma_sub_sample(fi->fi_pix_fmt, &hshift, &vshift);

  wvec[0] = fi->fi_width;
  wvec[1] = fi->fi_width >> hshift;
  wvec[2] = fi->fi_width >> hshift;
  hvec[0] = fi->fi_height >> fi->fi_interlaced;
  hvec[1] = fi->fi_height >> (vshift + fi->fi_interlaced);
  hvec[2] = fi->fi_height >> (vshift + fi->fi_interlaced);


  if(glw_video_configure(gv, &glw_video_rsxmem, wvec, hvec, 3,
			 fi->fi_interlaced ? (GVC_YHALF | GVC_CUTBORDER) : 0))
    return;
  
  gv_color_matrix_set(gv, fi);

  if((gvs = glw_video_get_surface(gv)) == NULL)
    return;

  surface_reset(gv, gvs);

  gvs->gvs_size = rvf->rvf_size;
  gvs->gvs_offset = rvf->rvf_offset;

  int offset = gvs->gvs_offset;

  if(fi->fi_interlaced) {
    // Interlaced

    for(i = 0; i < 3; i++) {
      int w = wvec[i];
      int h = hvec[i];

      init_tex(&gvs->gvs_tex[i],
	       offset + !fi->fi_tff * wvec[i],
	       w, h, w*2,
	       NV30_3D_TEX_FORMAT_FORMAT_I8, 0,
	       NV30_3D_TEX_SWIZZLE_S0_X_S1 | NV30_3D_TEX_SWIZZLE_S0_Y_S1 |
	       NV30_3D_TEX_SWIZZLE_S0_Z_S1 | NV30_3D_TEX_SWIZZLE_S0_W_S1 |
	       NV30_3D_TEX_SWIZZLE_S1_X_X | NV30_3D_TEX_SWIZZLE_S1_Y_Y |
	       NV30_3D_TEX_SWIZZLE_S1_Z_Z | NV30_3D_TEX_SWIZZLE_S1_W_W
	       );
      offset += w * (fi->fi_height >> (i ? vshift : 0));
    }
    glw_video_put_surface(gv, gvs, fi->fi_pts, fi->fi_epoch,
			  fi->fi_duration/2, 0);

    if((gvs = glw_video_get_surface(gv)) == NULL)
      return;
  
    surface_reset(gv, gvs);

    gvs->gvs_size = rvf->rvf_size;
    gvs->gvs_offset = rvf->rvf_offset;

    offset = gvs->gvs_offset;

    for(i = 0; i < 3; i++) {
      int w = wvec[i];
      int h = hvec[i];

      init_tex(&gvs->gvs_tex[i],
	       offset + !!fi->fi_tff * wvec[i],
	       w, h, w*2,
	       NV30_3D_TEX_FORMAT_FORMAT_I8, 0,
	       NV30_3D_TEX_SWIZZLE_S0_X_S1 | NV30_3D_TEX_SWIZZLE_S0_Y_S1 |
	       NV30_3D_TEX_SWIZZLE_S0_Z_S1 | NV30_3D_TEX_SWIZZLE_S0_W_S1 |
	       NV30_3D_TEX_SWIZZLE_S1_X_X | NV30_3D_TEX_SWIZZLE_S1_Y_Y |
	       NV30_3D_TEX_SWIZZLE_S1_Z_Z | NV30_3D_TEX_SWIZZLE_S1_W_W
	       );
      offset += w * (fi->fi_height >> (i ? vshift : 0));
    }

    glw_video_put_surface(gv, gvs, fi->fi_pts + fi->fi_duration, fi->fi_epoch,
			  fi->fi_duration/2, 0);

  } else {
    // Progressive

    for(i = 0; i < 3; i++) {
      /* listing truncated here in the source; presumably this mirrors the
       * interlaced path with full-height textures, no field offset, and a
       * single glw_video_put_surface() call for the full frame duration */
    }
  }
}
Example #14
static int config_props(AVFilterLink *outlink)
{

	AVFilterContext *ctx = outlink->src;
	OverlayContext *ovl = ctx->priv;

    AVFilterLink *inlink = outlink->src->inputs[0];

    AVFormatContext *pFormatCtx;
	AVInputFormat *avif = NULL;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
	AVPacket        packet;
	AVFrame *overlay, *tempMask;

	int avStream = -1;
	int frameFinished;

	struct SwsContext *sws;

	uint8_t *data;
	uint8_t *maskData;
	uint8_t *tempData;

    av_log(ctx, AV_LOG_DEBUG, ">>> config_props().\n");
	// make sure Chroma planes align.
	avcodec_get_chroma_sub_sample(outlink->format, &ovl->hsub, &ovl->vsub);

//	av_log(ctx,AV_LOG_INFO,"hsub: %d vsub: %d iformat: %d oformat %d\n",ovl->hsub,ovl->vsub,inlink->format,outlink->format);

	if ((ovl->printX % (1<<ovl->hsub) && ovl->hsub!=1)||(ovl->printY % (1<<ovl->vsub) && ovl->vsub!=1)) {
			av_log(ctx, AV_LOG_ERROR, "Cannot use this position with this chroma subsampling. Chroma plane will not align. (continuing with unaligned chroma planes, your watermark may look distorted)\n");
	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avformat_open_input(%s).\n",ovl->imageName);

    pFormatCtx = avformat_alloc_context();

	// open overlay image
	// avformat_open_input
	if(avformat_open_input(&pFormatCtx, ovl->imageName, avif, NULL)!=0) {
		av_log(ctx, AV_LOG_FATAL, "Cannot open overlay image (%s).\n",ovl->imageName);
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avformat_find_stream_info.\n");


	if(avformat_find_stream_info(pFormatCtx,NULL)<0) {
		av_log(ctx, AV_LOG_FATAL, "Cannot find stream in overlay image.\n");
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() pFormatCtx->streams.\n");


	for(int i=0; i<pFormatCtx->nb_streams; i++)
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			avStream=i;
			break;
		}

	if(avStream==-1) {
		av_log (ctx,AV_LOG_FATAL,"could not find an image stream in overlay image\n");
		return -1;
	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_find_decoder.\n");


	pCodecCtx=pFormatCtx->streams[avStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);

	if(pCodec==NULL) {
		av_log(ctx, AV_LOG_FATAL ,"could not find codec for overlay image\n");
		return -1;

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_open2.\n");


	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) {
		av_log(ctx, AV_LOG_FATAL,"could not open codec for overlay image\n");
		return -1;

	}

	// check for appropriate format.
	if (pCodecCtx->pix_fmt != PIX_FMT_ARGB &&
		pCodecCtx->pix_fmt != PIX_FMT_RGBA &&
		pCodecCtx->pix_fmt != PIX_FMT_ABGR &&
		pCodecCtx->pix_fmt != PIX_FMT_BGRA)
	{
		// warn if no alpha channel
		av_log(ctx,AV_LOG_WARNING, "overlay image has no alpha channel (assuming completely opaque)");

	}

    av_log(ctx, AV_LOG_DEBUG, "    config_props() avcodec_alloc_frame.\n");


	overlay = avcodec_alloc_frame();

	// read overlay file into overlay AVFrame

	av_read_frame(pFormatCtx, &packet);
	avcodec_decode_video2(pCodecCtx, overlay, &frameFinished, &packet);

	// will always be GRAY8

	// should be all or nothing, so no real need to test both;
	// testing both in case one was missed.
	if (ovl->printW == -1 || ovl->printH == -1)
	{
		ovl->printW = pCodecCtx->width;
		ovl->printH = pCodecCtx->height;
	}

	// Allocate AVFrames and image buffers
	ovl->pFrame=avcodec_alloc_frame();
	ovl->maskFrame=avcodec_alloc_frame();
	tempMask = avcodec_alloc_frame();

	data = (uint8_t *) av_malloc(avpicture_get_size(inlink->format, ovl->printW, ovl->printH));
	maskData = (uint8_t *) av_malloc(avpicture_get_size(PIX_FMT_GRAY8, ovl->printW, ovl->printH));
	tempData = (uint8_t *) av_malloc(avpicture_get_size(PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height));

	avpicture_fill((AVPicture *)tempMask, tempData, PIX_FMT_GRAY8, pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)ovl->maskFrame, maskData, PIX_FMT_GRAY8, ovl->printW, ovl->printH);
	avpicture_fill((AVPicture *)ovl->pFrame, data, inlink->format, ovl->printW, ovl->printH);


	av_log(ctx,AV_LOG_DEBUG,"mask linesize %d\n",ovl->maskFrame->linesize[0]);

	/* copy the alpha mask, as it appears to get lost during sws_scale:
	   copy the alpha if it exists and then scale it. */
	ovl->mask=0;
	if (pCodecCtx->pix_fmt == PIX_FMT_ARGB ||
		pCodecCtx->pix_fmt == PIX_FMT_RGBA ||
		pCodecCtx->pix_fmt == PIX_FMT_ABGR ||
		pCodecCtx->pix_fmt == PIX_FMT_BGRA)
	{

		// copy the alpha if it exists and then scale it.
		int alpha = 0;
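		// alpha byte offset within each 4-byte pixel:
		// 0 for ARGB/ABGR (alpha first), 3 for RGBA/BGRA (alpha last)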
		if (pCodecCtx->pix_fmt == PIX_FMT_RGBA || pCodecCtx->pix_fmt == PIX_FMT_BGRA) { alpha = 3; }

		for (int y=0; y < pCodecCtx->height; y++) {
			// memcpy((tempMask->data[0] + y * tempMask->linesize[0]),
			for (int x=0; x < pCodecCtx->width; x++) {
				*(tempMask->data[0] + y * tempMask->linesize[0] + x ) = *(overlay->data[0] + y * overlay->linesize[0] + x* 4 + alpha);
			}
		}
		av_log(ctx,AV_LOG_DEBUG," in: %dx%d, out %dx%d\n",pCodecCtx->width, pCodecCtx->height,ovl->printW, ovl->printH);

		// scale & copy; even if we don't scale, we still need to copy

		sws=sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_GRAY8,
						   ovl->printW, ovl->printH, PIX_FMT_GRAY8,
						   SWS_BILINEAR, NULL, NULL, NULL);

		sws_scale(sws, (const uint8_t * const *)tempMask->data, tempMask->linesize, 0, pCodecCtx->height,
				  ovl->maskFrame->data, ovl->maskFrame->linesize);

		ovl->mask = 1;

	}

	av_log(ctx,AV_LOG_DEBUG, "config_props() sws_getContext\n");


	av_log(ctx,AV_LOG_DEBUG,"inlink format %d, png format %d\n",inlink->format,pCodecCtx->pix_fmt);

	// convert to output frame format.


	sws=sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
					   ovl->printW, ovl->printH, inlink->format,
					   SWS_BILINEAR, NULL, NULL, NULL);

	// set the output filter frame size to the input frame size.

	outlink->w = inlink->w;
	outlink->h = inlink->h;

	// convert the image

	sws_scale(sws, (const uint8_t * const *)overlay->data, overlay->linesize, 0, pCodecCtx->height,
				ovl->pFrame->data, ovl->pFrame->linesize);


	av_free(tempMask);
	av_free(overlay);
	sws_freeContext(sws);
    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    av_log(ctx, AV_LOG_DEBUG, "<<< config_props().\n");

    return 0;
}